//===-- StackFrameList.cpp ------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "lldb/Target/StackFrameList.h"
#include "lldb/Breakpoint/Breakpoint.h"
#include "lldb/Breakpoint/BreakpointLocation.h"
#include "lldb/Core/Debugger.h"
#include "lldb/Core/SourceManager.h"
#include "lldb/Host/StreamFile.h"
#include "lldb/Symbol/Block.h"
#include "lldb/Symbol/Function.h"
#include "lldb/Symbol/Symbol.h"
#include "lldb/Target/Process.h"
#include "lldb/Target/RegisterContext.h"
#include "lldb/Target/StackFrame.h"
#include "lldb/Target/StackFrameRecognizer.h"
#include "lldb/Target/StopInfo.h"
#include "lldb/Target/Target.h"
#include "lldb/Target/Thread.h"
#include "lldb/Target/Unwind.h"
#include "lldb/Utility/LLDBLog.h"
#include "lldb/Utility/Log.h"
#include "llvm/ADT/SmallPtrSet.h"

#include <memory>

//#define DEBUG_STACK_FRAMES 1

using namespace lldb;
using namespace lldb_private;

// StackFrameList constructor
StackFrameList::StackFrameList(Thread &thread,
                               const lldb::StackFrameListSP &prev_frames_sp,
                               bool show_inline_frames)
    : m_thread(thread), m_prev_frames_sp(prev_frames_sp), m_frames(),
      m_selected_frame_idx(), m_concrete_frames_fetched(0),
      m_current_inlined_depth(UINT32_MAX),
      m_current_inlined_pc(LLDB_INVALID_ADDRESS),
      m_show_inlined_frames(show_inline_frames) {
  if (prev_frames_sp) {
    m_current_inlined_depth = prev_frames_sp->m_current_inlined_depth;
    m_current_inlined_pc = prev_frames_sp->m_current_inlined_pc;
  }
}

StackFrameList::~StackFrameList() {
  // Call Clear, which takes a lock while clearing the stack frame list, in
  // case another thread is currently using this stack frame list.
  Clear();
}

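// Refresh the cached inlined depth: if the cached value has been invalidated
// (see GetCurrentInlinedDepth), recompute it from the thread's StopInfo via
// ResetCurrentInlinedDepth.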
void StackFrameList::CalculateCurrentInlinedDepth() {
  uint32_t cur_inlined_depth = GetCurrentInlinedDepth();
  if (cur_inlined_depth == UINT32_MAX) {
    ResetCurrentInlinedDepth();
  }
}

uint32_t StackFrameList::GetCurrentInlinedDepth() {
  std::lock_guard<std::mutex> guard(m_inlined_depth_mutex);
  if (m_show_inlined_frames && m_current_inlined_pc != LLDB_INVALID_ADDRESS) {
    lldb::addr_t cur_pc = m_thread.GetRegisterContext()->GetPC();
    if (cur_pc != m_current_inlined_pc) {
      m_current_inlined_pc = LLDB_INVALID_ADDRESS;
      m_current_inlined_depth = UINT32_MAX;
      Log *log = GetLog(LLDBLog::Step);
      if (log && log->GetVerbose())
        LLDB_LOGF(
            log,
            "GetCurrentInlinedDepth: invalidating current inlined depth.\n");
    }
    return m_current_inlined_depth;
  } else {
    return UINT32_MAX;
  }
}

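// Ask the thread's StopInfo whether it wants a particular inlined frame to be
// presented as frame zero, and cache that depth together with the PC it was
// computed for; otherwise invalidate the cached depth.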
void StackFrameList::ResetCurrentInlinedDepth() {
  if (!m_show_inlined_frames)
    return;

  StopInfoSP stop_info_sp = m_thread.GetStopInfo();
  if (!stop_info_sp)
    return;

  bool inlined = true;
  auto inline_depth = stop_info_sp->GetSuggestedStackFrameIndex(inlined);
  // We're only adjusting the inlined stack here.
  Log *log = GetLog(LLDBLog::Step);
  if (inline_depth) {
    std::lock_guard<std::mutex> guard(m_inlined_depth_mutex);
    m_current_inlined_depth = *inline_depth;
    m_current_inlined_pc = m_thread.GetRegisterContext()->GetPC();

    if (log && log->GetVerbose())
      LLDB_LOGF(log,
                "ResetCurrentInlinedDepth: setting inlined "
                "depth: %d 0x%" PRIx64 ".\n",
                m_current_inlined_depth, m_current_inlined_pc);
  } else {
    std::lock_guard<std::mutex> guard(m_inlined_depth_mutex);
    m_current_inlined_pc = LLDB_INVALID_ADDRESS;
    m_current_inlined_depth = UINT32_MAX;
    if (log && log->GetVerbose())
      LLDB_LOGF(
          log,
          "ResetCurrentInlinedDepth: Invalidating current inlined depth.\n");
  }
}

bool StackFrameList::DecrementCurrentInlinedDepth() {
  if (m_show_inlined_frames) {
    uint32_t current_inlined_depth = GetCurrentInlinedDepth();
    if (current_inlined_depth != UINT32_MAX) {
      if (current_inlined_depth > 0) {
        std::lock_guard<std::mutex> guard(m_inlined_depth_mutex);
        m_current_inlined_depth--;
        return true;
      }
    }
  }
  return false;
}

void StackFrameList::SetCurrentInlinedDepth(uint32_t new_depth) {
  std::lock_guard<std::mutex> guard(m_inlined_depth_mutex);
  m_current_inlined_depth = new_depth;
  if (new_depth == UINT32_MAX)
    m_current_inlined_pc = LLDB_INVALID_ADDRESS;
  else
    m_current_inlined_pc = m_thread.GetRegisterContext()->GetPC();
}

bool StackFrameList::WereAllFramesFetched() const {
  std::shared_lock<std::shared_mutex> guard(m_list_mutex);
  return GetAllFramesFetched();
}

/// A sequence of calls that comprise some portion of a backtrace. Each frame
/// is represented as a pair of a callee (Function *) and an address within the
/// callee.
struct CallDescriptor {
  Function *func;
  CallEdge::AddrType address_type = CallEdge::AddrType::Call;
  addr_t address = LLDB_INVALID_ADDRESS;
};
using CallSequence = std::vector<CallDescriptor>;

/// Find the unique path through the call graph from \p begin (with return PC
/// \p return_pc) to \p end. On success this path is stored into \p path, and
/// on failure \p path is unchanged.
/// This function doesn't currently access StackFrameLists at all; it only
/// looks at the frame set in the ExecutionContext it passes around.
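/// For example, if \p begin called f, f tail-called g, and g tail-called
/// \p end, a successful search stores descriptors for f and g (in
/// caller-to-callee order) into \p path. (Illustrative reading of the search
/// below, not an exhaustive specification.)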
static void FindInterveningFrames(Function &begin, Function &end,
                                  ExecutionContext &exe_ctx, Target &target,
                                  addr_t return_pc, CallSequence &path,
                                  ModuleList &images, Log *log) {
  LLDB_LOG(log, "Finding frames between {0} and {1}, retn-pc={2:x}",
           begin.GetDisplayName(), end.GetDisplayName(), return_pc);

  // Find a non-tail calling edge with the correct return PC.
  if (log)
    for (const auto &edge : begin.GetCallEdges())
      LLDB_LOG(log, "FindInterveningFrames: found call with retn-PC = {0:x}",
               edge->GetReturnPCAddress(begin, target));
  CallEdge *first_edge = begin.GetCallEdgeForReturnAddress(return_pc, target);
  if (!first_edge) {
    LLDB_LOG(log, "No call edge outgoing from {0} with retn-PC == {1:x}",
             begin.GetDisplayName(), return_pc);
    return;
  }

  // The first callee may not be resolved, or there may be nothing to fill in.
  Function *first_callee = first_edge->GetCallee(images, exe_ctx);
  if (!first_callee) {
    LLDB_LOG(log, "Could not resolve callee");
    return;
  }
  if (first_callee == &end) {
    LLDB_LOG(log, "Not searching further, first callee is {0} (retn-PC: {1:x})",
             end.GetDisplayName(), return_pc);
    return;
  }

  // Run DFS on the tail-calling edges out of the first callee to find \p end.
  // Fully explore the set of functions reachable from the first edge via tail
  // calls in order to detect ambiguous executions.
  struct DFS {
    CallSequence active_path = {};
    CallSequence solution_path = {};
    llvm::SmallPtrSet<Function *, 2> visited_nodes = {};
    bool ambiguous = false;
    Function *end;
    ModuleList &images;
    Target &target;
    ExecutionContext &context;

    DFS(Function *end, ModuleList &images, Target &target,
        ExecutionContext &context)
        : end(end), images(images), target(target), context(context) {}

    void search(CallEdge &first_edge, Function &first_callee,
                CallSequence &path) {
      dfs(first_edge, first_callee);
      if (!ambiguous)
        path = std::move(solution_path);
    }

    void dfs(CallEdge &current_edge, Function &callee) {
      // Found a path to the target function.
      if (&callee == end) {
        if (solution_path.empty())
          solution_path = active_path;
        else
          ambiguous = true;
        return;
      }

      // Terminate the search if tail recursion is found, or more generally if
      // there's more than one way to reach a target. This errs on the side of
      // caution: it conservatively stops searching when some solutions are
      // still possible to save time in the average case.
      if (!visited_nodes.insert(&callee).second) {
        ambiguous = true;
        return;
      }

      // Search the calls made from this callee.
      active_path.push_back(CallDescriptor{&callee});
      for (const auto &edge : callee.GetTailCallingEdges()) {
        Function *next_callee = edge->GetCallee(images, context);
        if (!next_callee)
          continue;

        std::tie(active_path.back().address_type, active_path.back().address) =
            edge->GetCallerAddress(callee, target);

        dfs(*edge, *next_callee);
        if (ambiguous)
          return;
      }
      active_path.pop_back();
    }
  };

  DFS(&end, images, target, exe_ctx).search(*first_edge, *first_callee, path);
}

/// Given that \p next_frame will be appended to the frame list, synthesize
/// tail call frames between the current end of the list and \p next_frame.
/// If any frames are added, adjust the frame index of \p next_frame.
///
///   --------------
///   |    ...     | <- Completed frames.
///   --------------
///   | prev_frame |
///   --------------
///   |    ...     | <- Artificial frames inserted here.
///   --------------
///   | next_frame |
///   --------------
///   |    ...     | <- Not-yet-visited frames.
///   --------------
void StackFrameList::SynthesizeTailCallFrames(StackFrame &next_frame) {
  // Cannot synthesize tail call frames when the stack is empty (there is no
  // "previous" frame).
  if (m_frames.empty())
    return;

  TargetSP target_sp = next_frame.CalculateTarget();
  if (!target_sp)
    return;

  lldb::RegisterContextSP next_reg_ctx_sp = next_frame.GetRegisterContext();
  if (!next_reg_ctx_sp)
    return;

  Log *log = GetLog(LLDBLog::Step);

  StackFrame &prev_frame = *m_frames.back().get();

  // Find the functions prev_frame and next_frame are stopped in. The function
  // objects are needed to search the lazy call graph for intervening frames.
  Function *prev_func =
      prev_frame.GetSymbolContext(eSymbolContextFunction).function;
  if (!prev_func) {
    LLDB_LOG(log, "SynthesizeTailCallFrames: can't find previous function");
    return;
  }
  Function *next_func =
      next_frame.GetSymbolContext(eSymbolContextFunction).function;
  if (!next_func) {
    LLDB_LOG(log, "SynthesizeTailCallFrames: can't find next function");
    return;
  }

  // Try to find the unique sequence of (tail) calls which led from next_frame
  // to prev_frame.
  CallSequence path;
  addr_t return_pc = next_reg_ctx_sp->GetPC();
  Target &target = *target_sp.get();
  ModuleList &images = next_frame.CalculateTarget()->GetImages();
  ExecutionContext exe_ctx(target_sp, /*get_process=*/true);
  exe_ctx.SetFramePtr(&next_frame);
  FindInterveningFrames(*next_func, *prev_func, exe_ctx, target, return_pc,
                        path, images, log);

  // Push synthetic tail call frames.
  for (auto calleeInfo : llvm::reverse(path)) {
    Function *callee = calleeInfo.func;
    uint32_t frame_idx = m_frames.size();
    uint32_t concrete_frame_idx = next_frame.GetConcreteFrameIndex();
    addr_t cfa = LLDB_INVALID_ADDRESS;
    bool cfa_is_valid = false;
    addr_t pc = calleeInfo.address;
    // If the callee address refers to the call instruction, we do not want to
    // subtract 1 from this value.
    const bool behaves_like_zeroth_frame =
        calleeInfo.address_type == CallEdge::AddrType::Call;
    SymbolContext sc;
    callee->CalculateSymbolContext(&sc);
    auto synth_frame = std::make_shared<StackFrame>(
        m_thread.shared_from_this(), frame_idx, concrete_frame_idx, cfa,
        cfa_is_valid, pc, StackFrame::Kind::Artificial,
        behaves_like_zeroth_frame, &sc);
    m_frames.push_back(synth_frame);
    LLDB_LOG(log, "Pushed frame {0} at {1:x}", callee->GetDisplayName(), pc);
  }

  // If any frames were created, adjust next_frame's index.
  if (!path.empty())
    next_frame.SetFrameIndex(m_frames.size());
}

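// Ensure that m_frames contains entries up to and including end_idx (an index
// into the internal frame list, which may include hidden inlined frames).
// Returns true only if frame fetching was interrupted before completing.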
bool StackFrameList::GetFramesUpTo(uint32_t end_idx,
                                   InterruptionControl allow_interrupt) {
  // GetFramesUpTo is always called with the intent to add frames, so get the
  // writer lock:
  std::unique_lock<std::shared_mutex> guard(m_list_mutex);
  // Now that we have the lock, check to make sure someone didn't get there
  // ahead of us:
  if (m_frames.size() > end_idx || GetAllFramesFetched())
    return false;

  // Do not fetch frames for an invalid thread.
  bool was_interrupted = false;
  if (!m_thread.IsValid())
    return false;

  // We already hold the writer side of m_list_mutex, so we can add frames now:
  if (!m_show_inlined_frames) {
    if (end_idx < m_concrete_frames_fetched)
      return false;
    // We're adding concrete frames now:
    // FIXME: This should also be interruptible:
    FetchOnlyConcreteFramesUpTo(end_idx);
    return false;
  }

  // We're adding concrete and inlined frames now:
  was_interrupted = FetchFramesUpTo(end_idx, allow_interrupt);

#if defined(DEBUG_STACK_FRAMES)
  s.PutCString("\n\nNew frames:\n");
  Dump(&s);
  s.EOL();
#endif
  return was_interrupted;
}

void StackFrameList::FetchOnlyConcreteFramesUpTo(uint32_t end_idx) {
  assert(m_thread.IsValid() && "Expected valid thread");
  assert(m_frames.size() <= end_idx && "Expected there to be frames to fill");

  Unwind &unwinder = m_thread.GetUnwinder();

  if (end_idx < m_concrete_frames_fetched)
    return;

  uint32_t num_frames = unwinder.GetFramesUpTo(end_idx);
  if (num_frames <= end_idx + 1) {
    // Done unwinding.
    m_concrete_frames_fetched = UINT32_MAX;
  }

  // Don't create the frames eagerly. Defer this work to GetFrameAtIndex,
  // which can lazily query the unwinder to create frames.
  m_frames.resize(num_frames);
}

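// Fetch concrete frames from the unwinder up to end_idx, expanding each one
// into additional frames for any inlined functions at its PC and synthesizing
// artificial tail-call frames where the call graph indicates elided callers.
// Returns true if the unwind was interrupted before all requested frames were
// fetched.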
bool StackFrameList::FetchFramesUpTo(uint32_t end_idx,
                                     InterruptionControl allow_interrupt) {
  Unwind &unwinder = m_thread.GetUnwinder();
  bool was_interrupted = false;

#if defined(DEBUG_STACK_FRAMES)
  StreamFile s(stdout, false);
#endif
  // If we are hiding some frames from the outside world, we need to add
  // those onto the total count of frames to fetch.  However, we don't need
  // to do that if end_idx is 0, since in that case we always get the first
  // concrete frame and all the inlined frames below it.  And if end_idx is
  // UINT32_MAX, that means fetch everything, so just do that.

  uint32_t inlined_depth = 0;
  if (end_idx > 0 && end_idx != UINT32_MAX) {
    inlined_depth = GetCurrentInlinedDepth();
    if (inlined_depth != UINT32_MAX) {
      if (end_idx > 0)
        end_idx += inlined_depth;
    }
  }

  StackFrameSP unwind_frame_sp;
  Debugger &dbg = m_thread.GetProcess()->GetTarget().GetDebugger();
  do {
    uint32_t idx = m_concrete_frames_fetched++;
    lldb::addr_t pc = LLDB_INVALID_ADDRESS;
    lldb::addr_t cfa = LLDB_INVALID_ADDRESS;
    bool behaves_like_zeroth_frame = (idx == 0);
    if (idx == 0) {
      // We might have already created frame zero; only create it if we need
      // to.
      if (m_frames.empty()) {
        RegisterContextSP reg_ctx_sp(m_thread.GetRegisterContext());

        if (reg_ctx_sp) {
          const bool success = unwinder.GetFrameInfoAtIndex(
              idx, cfa, pc, behaves_like_zeroth_frame);
          // There shouldn't be any way not to get the frame info for frame
          // 0. But if the unwinder can't make one, let's make one by hand
          // with the SP as the CFA and see if that gets any further.
          if (!success) {
            cfa = reg_ctx_sp->GetSP();
            pc = reg_ctx_sp->GetPC();
          }

          unwind_frame_sp = std::make_shared<StackFrame>(
              m_thread.shared_from_this(), m_frames.size(), idx, reg_ctx_sp,
              cfa, pc, behaves_like_zeroth_frame, nullptr);
          m_frames.push_back(unwind_frame_sp);
        }
      } else {
        unwind_frame_sp = m_frames.front();
        cfa = unwind_frame_sp->m_id.GetCallFrameAddress();
      }
    } else {
      // Check for interruption when building the frames.
      // Only do the check when idx > 0 so that we'll always create a 0th
      // frame.
      if (allow_interrupt &&
          INTERRUPT_REQUESTED(dbg, "Interrupted having fetched {0} frames",
                              m_frames.size())) {
        was_interrupted = true;
        break;
      }

      const bool success =
          unwinder.GetFrameInfoAtIndex(idx, cfa, pc, behaves_like_zeroth_frame);
      if (!success) {
        // We've gotten to the end of the stack.
        SetAllFramesFetched();
        break;
      }
      const bool cfa_is_valid = true;
      unwind_frame_sp = std::make_shared<StackFrame>(
          m_thread.shared_from_this(), m_frames.size(), idx, cfa, cfa_is_valid,
          pc, StackFrame::Kind::Regular, behaves_like_zeroth_frame, nullptr);

      // Create synthetic tail call frames between the previous frame and the
      // newly-found frame. The new frame's index may change after this call,
      // although its concrete index will stay the same.
      SynthesizeTailCallFrames(*unwind_frame_sp.get());

      m_frames.push_back(unwind_frame_sp);
    }

    assert(unwind_frame_sp);
    SymbolContext unwind_sc = unwind_frame_sp->GetSymbolContext(
        eSymbolContextBlock | eSymbolContextFunction);
    Block *unwind_block = unwind_sc.block;
    TargetSP target_sp = m_thread.CalculateTarget();
    if (unwind_block) {
      Address curr_frame_address(
          unwind_frame_sp->GetFrameCodeAddressForSymbolication());

      SymbolContext next_frame_sc;
      Address next_frame_address;

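      // Expand this concrete frame into one additional frame per level of
      // inlining at its PC, walking outward from the innermost inlined scope
      // toward the concrete function that contains it.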
      while (unwind_sc.GetParentOfInlinedScope(
          curr_frame_address, next_frame_sc, next_frame_address)) {
        next_frame_sc.line_entry.ApplyFileMappings(target_sp);
        behaves_like_zeroth_frame = false;
        StackFrameSP frame_sp(new StackFrame(
            m_thread.shared_from_this(), m_frames.size(), idx,
            unwind_frame_sp->GetRegisterContextSP(), cfa, next_frame_address,
            behaves_like_zeroth_frame, &next_frame_sc));

        m_frames.push_back(frame_sp);
        unwind_sc = next_frame_sc;
        curr_frame_address = next_frame_address;
      }
    }
  } while (m_frames.size() - 1 < end_idx);

  // Don't try to merge until we've calculated all the frames in this stack.
  if (GetAllFramesFetched() && m_prev_frames_sp) {
    StackFrameList *prev_frames = m_prev_frames_sp.get();
    StackFrameList *curr_frames = this;

#if defined(DEBUG_STACK_FRAMES)
    s.PutCString("\nprev_frames:\n");
    prev_frames->Dump(&s);
    s.PutCString("\ncurr_frames:\n");
    curr_frames->Dump(&s);
    s.EOL();
#endif
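    // Walk both lists from the oldest frame toward the newest and, while the
    // StackIDs match, hand the StackFrame objects from the previous list back
    // into this one so that frame pointers clients obtained before this stop
    // stay valid.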
    size_t curr_frame_num, prev_frame_num;

    for (curr_frame_num = curr_frames->m_frames.size(),
        prev_frame_num = prev_frames->m_frames.size();
         curr_frame_num > 0 && prev_frame_num > 0;
         --curr_frame_num, --prev_frame_num) {
      const size_t curr_frame_idx = curr_frame_num - 1;
      const size_t prev_frame_idx = prev_frame_num - 1;
      StackFrameSP curr_frame_sp(curr_frames->m_frames[curr_frame_idx]);
      StackFrameSP prev_frame_sp(prev_frames->m_frames[prev_frame_idx]);

#if defined(DEBUG_STACK_FRAMES)
      s.Printf("\n\nCurr frame #%u ", curr_frame_idx);
      if (curr_frame_sp)
        curr_frame_sp->Dump(&s, true, false);
      else
        s.PutCString("NULL");
      s.Printf("\nPrev frame #%u ", prev_frame_idx);
      if (prev_frame_sp)
        prev_frame_sp->Dump(&s, true, false);
      else
        s.PutCString("NULL");
#endif

      StackFrame *curr_frame = curr_frame_sp.get();
      StackFrame *prev_frame = prev_frame_sp.get();

      if (curr_frame == nullptr || prev_frame == nullptr)
        break;

      // Check the stack IDs to make sure they are equal.
      if (curr_frame->GetStackID() != prev_frame->GetStackID())
        break;

      prev_frame->UpdatePreviousFrameFromCurrentFrame(*curr_frame);
      // Now copy the fixed-up previous frame into the current frames so the
      // pointer doesn't change.
      m_frames[curr_frame_idx] = prev_frame_sp;

#if defined(DEBUG_STACK_FRAMES)
      s.Printf("\n    Copying previous frame to current frame");
#endif
    }
    // We are done with the old stack frame list; we can release it now.
    m_prev_frames_sp.reset();
  }
  // Don't report interrupted if we happen to have gotten all the frames:
  if (!GetAllFramesFetched())
    return was_interrupted;
  return false;
}

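// Return the number of user-visible frames: the raw m_frames count passed
// through GetVisibleStackFrameIndex (which adjusts for the current inlined
// depth). If can_create is true, the whole stack is unwound first, without
// allowing interruption, so the count is exact.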
uint32_t StackFrameList::GetNumFrames(bool can_create) {
  if (!WereAllFramesFetched() && can_create) {
    // Don't allow interrupt or we might not return the correct count.
    GetFramesUpTo(UINT32_MAX, DoNotAllowInterruption);
  }
  uint32_t frame_idx;
  {
    std::shared_lock<std::shared_mutex> guard(m_list_mutex);
    frame_idx = GetVisibleStackFrameIndex(m_frames.size());
  }
  return frame_idx;
}

void StackFrameList::Dump(Stream *s) {
  if (s == nullptr)
    return;

  std::shared_lock<std::shared_mutex> guard(m_list_mutex);

  const_iterator pos, begin = m_frames.begin(), end = m_frames.end();
  for (pos = begin; pos != end; ++pos) {
    StackFrame *frame = (*pos).get();
    s->Printf("%p: ", static_cast<void *>(frame));
    if (frame) {
      frame->GetStackID().Dump(s);
      frame->DumpUsingSettingsFormat(s);
    } else
      s->Printf("frame #%u", (uint32_t)std::distance(begin, pos));
    s->EOL();
  }
  s->EOL();
}

StackFrameSP StackFrameList::GetFrameAtIndex(uint32_t idx) {
  StackFrameSP frame_sp;
  uint32_t original_idx = idx;

  // We're going to consult m_frames.size(), but if there are already enough
  // frames for our request we don't want to block other readers, so first
  // acquire the shared lock:
  { // Scope for shared lock:
    std::shared_lock<std::shared_mutex> guard(m_list_mutex);

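    // The caller passes a user-visible index; convert it to an index into
    // m_frames by adding the current inlined depth. For example, with an
    // inlined depth of 2, user-visible frame 0 is internal frame 2.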
    uint32_t inlined_depth = GetCurrentInlinedDepth();
    if (inlined_depth != UINT32_MAX)
      idx += inlined_depth;

    if (idx < m_frames.size())
      frame_sp = m_frames[idx];

    if (frame_sp)
      return frame_sp;
  } // End of reader lock scope

  // GetFramesUpTo will fill m_frames with as many frames as you asked for, if
  // there are that many. If there aren't, you asked for more frames than the
  // stack has. GetFramesUpTo returns true if it was interrupted:
  if (GetFramesUpTo(idx, AllowInterruption)) {
    Log *log = GetLog(LLDBLog::Thread);
    LLDB_LOG(log, "GetFrameAtIndex was interrupted");
    return {};
  }

  { // Now we're accessing m_frames as a reader, so acquire the reader lock.
    std::shared_lock<std::shared_mutex> guard(m_list_mutex);
    if (idx < m_frames.size()) {
      frame_sp = m_frames[idx];
    } else if (original_idx == 0) {
      // There should ALWAYS be a frame at index 0. If something went wrong
      // with the CurrentInlinedDepth such that there weren't as many frames as
      // we thought taking that into account, then reset the current inlined
      // depth and return the real zeroth frame.
      if (m_frames.empty()) {
        // Why do we have a thread with zero frames? That should never
        // happen...
        assert(!m_thread.IsValid() && "A valid thread has no frames.");
      } else {
        ResetCurrentInlinedDepth();
        frame_sp = m_frames[original_idx];
      }
    }
  } // End of reader lock scope

  return frame_sp;
}

StackFrameSP
StackFrameList::GetFrameWithConcreteFrameIndex(uint32_t unwind_idx) {
  // First try assuming the unwind index is the same as the frame index. The
  // unwind index is always greater than or equal to the frame index, so it is
  // a good place to start. If we have inlined frames we might have 5 concrete
  // frames (frame unwind indexes go from 0-4), but we might have 15 frames
  // after we make all the inlined frames. Most of the time the unwind frame
  // index (or the concrete frame index) is the same as the frame index.
  uint32_t frame_idx = unwind_idx;
  StackFrameSP frame_sp(GetFrameAtIndex(frame_idx));
  while (frame_sp) {
    if (frame_sp->GetFrameIndex() == unwind_idx)
      break;
    frame_sp = GetFrameAtIndex(++frame_idx);
  }
  return frame_sp;
}

static bool CompareStackID(const StackFrameSP &stack_sp,
                           const StackID &stack_id) {
  return stack_sp->GetStackID() < stack_id;
}

StackFrameSP StackFrameList::GetFrameWithStackID(const StackID &stack_id) {
  StackFrameSP frame_sp;

  if (stack_id.IsValid()) {
    uint32_t frame_idx = 0;
    {
      // First see if the frame is already realized.  This is the scope for
      // the shared mutex:
      std::shared_lock<std::shared_mutex> guard(m_list_mutex);
      // Do a binary search in case the stack frame is already in our cache.
      collection::const_iterator pos =
          llvm::lower_bound(m_frames, stack_id, CompareStackID);
      if (pos != m_frames.end() && (*pos)->GetStackID() == stack_id)
        return *pos;
    }
    // The frame wasn't realized yet, so fetch frames until we find the one
    // with a matching StackID or run out of frames.
    do {
      frame_sp = GetFrameAtIndex(frame_idx);
      if (frame_sp && frame_sp->GetStackID() == stack_id)
        break;
      frame_idx++;
    } while (frame_sp);
  }
  return frame_sp;
}

bool StackFrameList::SetFrameAtIndex(uint32_t idx, StackFrameSP &frame_sp) {
  std::unique_lock<std::shared_mutex> guard(m_list_mutex);
  if (idx >= m_frames.size())
    m_frames.resize(idx + 1);
  // Make sure allocation succeeded by checking bounds again
  if (idx < m_frames.size()) {
    m_frames[idx] = frame_sp;
    return true;
  }
  return false; // resize failed, out of memory?
}

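// Pick a selected frame for this stop: give the frame recognizers a chance to
// nominate a "most relevant" frame for frame 0, then fall back to any frame
// index suggested by the thread's StopInfo, and finally to frame 0 itself.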
void StackFrameList::SelectMostRelevantFrame() {
  // Don't call into the frame recognizers on the private state thread as
  // they can cause code to run in the target, and that can cause deadlocks
  // when fetching stop events for the expression.
  if (m_thread.GetProcess()->CurrentThreadIsPrivateStateThread())
    return;

  Log *log = GetLog(LLDBLog::Thread);

  // Only the top frame should be recognized.
  StackFrameSP frame_sp = GetFrameAtIndex(0);
  if (!frame_sp) {
    LLDB_LOG(log, "Failed to construct Frame #0");
    return;
  }

  RecognizedStackFrameSP recognized_frame_sp = frame_sp->GetRecognizedFrame();

  if (recognized_frame_sp) {
    if (StackFrameSP most_relevant_frame_sp =
            recognized_frame_sp->GetMostRelevantFrame()) {
      LLDB_LOG(log, "Found most relevant frame at index {0}",
               most_relevant_frame_sp->GetFrameIndex());
      SetSelectedFrame(most_relevant_frame_sp.get());
      return;
    }
  }
  LLDB_LOG(log, "Frame #0 not recognized");

  // If this thread has a non-trivial StopInfo, then let it suggest
  // a most relevant frame:
  StopInfoSP stop_info_sp = m_thread.GetStopInfo();
  uint32_t stack_idx = 0;
  bool found_relevant = false;
  if (stop_info_sp) {
    // Here we're only asking the stop info if it wants to adjust the real stack
    // index.  We have to ask about the m_inlined_stack_depth in
    // Thread::ShouldStop since the plans need to reason with that info.
    bool inlined = false;
    std::optional<uint32_t> stack_opt =
        stop_info_sp->GetSuggestedStackFrameIndex(inlined);
    if (stack_opt) {
      stack_idx = *stack_opt;
      found_relevant = true;
    }
  }

  frame_sp = GetFrameAtIndex(stack_idx);
  if (!frame_sp)
    LLDB_LOG(log, "Stop info suggested relevant frame {0} but it didn't exist",
             stack_idx);
  else if (found_relevant)
    LLDB_LOG(log, "Setting selected frame from stop info to {0}", stack_idx);
  // Note, we don't have to worry about "inlined" frames here, because we've
  // already calculated the inlined frame in Thread::ShouldStop, and
  // SetSelectedFrame will take care of that adjustment for us.
  SetSelectedFrame(frame_sp.get());

  if (!found_relevant)
    LLDB_LOG(log, "No relevant frame!");
}

uint32_t
StackFrameList::GetSelectedFrameIndex(SelectMostRelevant select_most_relevant) {
  if (!m_selected_frame_idx && select_most_relevant)
    SelectMostRelevantFrame();
  if (!m_selected_frame_idx) {
    // If we aren't selecting the most relevant frame, and the selected frame
    // isn't set, then don't force a selection here, just return 0.
    if (!select_most_relevant)
      return 0;
    // Otherwise, default the selection to frame 0:
    m_selected_frame_idx = 0;
  }
  return *m_selected_frame_idx;
}

uint32_t StackFrameList::SetSelectedFrame(lldb_private::StackFrame *frame) {
  std::shared_lock<std::shared_mutex> guard(m_list_mutex);

  const_iterator pos;
  const_iterator begin = m_frames.begin();
  const_iterator end = m_frames.end();
  m_selected_frame_idx = 0;

  for (pos = begin; pos != end; ++pos) {
    if (pos->get() == frame) {
      m_selected_frame_idx = std::distance(begin, pos);
      uint32_t inlined_depth = GetCurrentInlinedDepth();
      if (inlined_depth != UINT32_MAX)
        m_selected_frame_idx = *m_selected_frame_idx - inlined_depth;
      break;
    }
  }
  SetDefaultFileAndLineToSelectedFrame();
  return *m_selected_frame_idx;
}

bool StackFrameList::SetSelectedFrameByIndex(uint32_t idx) {
  StackFrameSP frame_sp(GetFrameAtIndex(idx));
  if (frame_sp) {
    SetSelectedFrame(frame_sp.get());
    return true;
  } else
    return false;
}

void StackFrameList::SetDefaultFileAndLineToSelectedFrame() {
  if (m_thread.GetID() ==
      m_thread.GetProcess()->GetThreadList().GetSelectedThread()->GetID()) {
    StackFrameSP frame_sp(
        GetFrameAtIndex(GetSelectedFrameIndex(DoNoSelectMostRelevantFrame)));
    if (frame_sp) {
      SymbolContext sc = frame_sp->GetSymbolContext(eSymbolContextLineEntry);
      if (sc.line_entry.GetFile())
        m_thread.CalculateTarget()->GetSourceManager().SetDefaultFileAndLine(
            sc.line_entry.file_sp, sc.line_entry.line);
    }
  }
}

// The thread has been run, reset the number of stack frames to zero so we can
// determine how many frames we have lazily.
// Note, we don't actually re-use StackFrameLists, we always make a new
// StackFrameList every time we stop, and then copy frame information frame
// by frame from the old to the new StackFrameList.  So the comment above
// does not describe how StackFrameLists are currently used.
// Clear is currently only used to clear the list in the destructor.
void StackFrameList::Clear() {
  std::unique_lock<std::shared_mutex> guard(m_list_mutex);
  m_frames.clear();
  m_concrete_frames_fetched = 0;
  m_selected_frame_idx.reset();
}

lldb::StackFrameSP
StackFrameList::GetStackFrameSPForStackFramePtr(StackFrame *stack_frame_ptr) {
  std::shared_lock<std::shared_mutex> guard(m_list_mutex);
  const_iterator pos;
  const_iterator begin = m_frames.begin();
  const_iterator end = m_frames.end();
  lldb::StackFrameSP ret_sp;

  for (pos = begin; pos != end; ++pos) {
    if (pos->get() == stack_frame_ptr) {
      ret_sp = (*pos);
      break;
    }
  }
  return ret_sp;
}

size_t StackFrameList::GetStatus(Stream &strm, uint32_t first_frame,
                                 uint32_t num_frames, bool show_frame_info,
                                 uint32_t num_frames_with_source,
                                 bool show_unique, bool show_hidden,
                                 const char *selected_frame_marker) {
  size_t num_frames_displayed = 0;

  if (num_frames == 0)
    return 0;

  StackFrameSP frame_sp;
  uint32_t frame_idx = 0;
  uint32_t last_frame;

  // Don't let the last frame wrap around...
  if (num_frames == UINT32_MAX)
    last_frame = UINT32_MAX;
  else
    last_frame = first_frame + num_frames;

  StackFrameSP selected_frame_sp =
      m_thread.GetSelectedFrame(DoNoSelectMostRelevantFrame);
  const char *unselected_marker = nullptr;
  std::string buffer;
  if (selected_frame_marker) {
    size_t len = strlen(selected_frame_marker);
    buffer.insert(buffer.begin(), len, ' ');
    unselected_marker = buffer.c_str();
  }
  const char *marker = nullptr;
  for (frame_idx = first_frame; frame_idx < last_frame; ++frame_idx) {
    frame_sp = GetFrameAtIndex(frame_idx);
    if (!frame_sp)
      break;

    if (selected_frame_marker != nullptr) {
      if (frame_sp == selected_frame_sp)
        marker = selected_frame_marker;
      else
        marker = unselected_marker;
    }

    // Hide uninteresting frames unless the frame is the selected one.
    if (!show_hidden && frame_sp != selected_frame_sp && frame_sp->IsHidden())
      continue;

    // Check for interruption here.  If we're fetching arguments, this loop
    // can go slowly:
    Debugger &dbg = m_thread.GetProcess()->GetTarget().GetDebugger();
    if (INTERRUPT_REQUESTED(
            dbg, "Interrupted dumping stack for thread {0:x} with {1} shown.",
            m_thread.GetID(), num_frames_displayed))
      break;

    if (!frame_sp->GetStatus(strm, show_frame_info,
                             num_frames_with_source > (first_frame - frame_idx),
                             show_unique, marker))
      break;
    ++num_frames_displayed;
  }

  strm.IndentLess();
  return num_frames_displayed;
}
939