From f0f0abdf65b13f4631e3efde7fca9011a46ed85c Mon Sep 17 00:00:00 2001 From: jimingham Date: Mon, 18 Nov 2024 13:23:17 -0800 Subject: [PATCH 1/4] Convert ThreadPlanStack's mutex to a shared mutex. (#116438) I have some reports of A/B inversion deadlocks between the ThreadPlanStack and the StackFrameList accesses. There's a fair bit of reasonable code in lldb that does "While accessing the ThreadPlanStack, look at that thread's StackFrameList", and also plenty of "While accessing the StackFrameList, look at the ThreadPlanStack." In all the cases I've seen so far, there was at most one of the locks taken that was trying to mutate the list, the other three were just reading. So we could solve the deadlock by converting the two mutexes over to shared mutexes. This patch is the easy part, the ThreadPlanStack mutex. The tricky part was because these were originally recursive mutexes, and recursive access to shared mutexes is undefined behavior according to the C++ standard, I had to add a couple NoLock variants to make sure it didn't get used recursively. Then since the only remaining calls are out to ThreadPlans and ThreadPlans don't have access to their containing ThreadPlanStack, converting this to a non-recursive lock should be safe. 
(cherry picked from commit b42a81631491571c4b78d095917ebdddee69b04f) --- lldb/include/lldb/Target/ThreadPlanStack.h | 15 ++- lldb/source/Target/ThreadPlanStack.cpp | 108 +++++++++++---------- 2 files changed, 69 insertions(+), 54 deletions(-) diff --git a/lldb/include/lldb/Target/ThreadPlanStack.h b/lldb/include/lldb/Target/ThreadPlanStack.h index f0d258855b9efc..7e0885eb4eaea8 100644 --- a/lldb/include/lldb/Target/ThreadPlanStack.h +++ b/lldb/include/lldb/Target/ThreadPlanStack.h @@ -14,6 +14,8 @@ #include #include +#include "llvm/Support/RWMutex.h" + #include "lldb/Target/Target.h" #include "lldb/Target/Thread.h" #include "lldb/lldb-private-forward.h" @@ -102,9 +104,12 @@ class ThreadPlanStack { void SetTID(lldb::tid_t tid); private: - void PrintOneStack(Stream &s, llvm::StringRef stack_name, - const PlanStack &stack, lldb::DescriptionLevel desc_level, - bool include_internal) const; + lldb::ThreadPlanSP DiscardPlanNoLock(); + lldb::ThreadPlanSP GetCurrentPlanNoLock() const; + void PrintOneStackNoLock(Stream &s, llvm::StringRef stack_name, + const PlanStack &stack, + lldb::DescriptionLevel desc_level, + bool include_internal) const; PlanStack m_plans; ///< The stack of plans this thread is executing. PlanStack m_completed_plans; ///< Plans that have been completed by this @@ -116,8 +121,8 @@ class ThreadPlanStack { size_t m_completed_plan_checkpoint = 0; // Monotonically increasing token for // completed plan checkpoints. std::unordered_map m_completed_plan_store; - mutable std::recursive_mutex m_stack_mutex; - + mutable llvm::sys::RWMutex m_stack_mutex; + // ThreadPlanStacks shouldn't be copied. 
ThreadPlanStack(ThreadPlanStack &rhs) = delete; }; diff --git a/lldb/source/Target/ThreadPlanStack.cpp b/lldb/source/Target/ThreadPlanStack.cpp index 1ab149b477f21d..f38ed438f32dbc 100644 --- a/lldb/source/Target/ThreadPlanStack.cpp +++ b/lldb/source/Target/ThreadPlanStack.cpp @@ -39,21 +39,21 @@ ThreadPlanStack::ThreadPlanStack(const Thread &thread, bool make_null) { void ThreadPlanStack::DumpThreadPlans(Stream &s, lldb::DescriptionLevel desc_level, bool include_internal) const { - std::lock_guard guard(m_stack_mutex); + llvm::sys::ScopedReader guard(m_stack_mutex); s.IndentMore(); - PrintOneStack(s, "Active plan stack", m_plans, desc_level, include_internal); - PrintOneStack(s, "Completed plan stack", m_completed_plans, desc_level, - include_internal); - PrintOneStack(s, "Discarded plan stack", m_discarded_plans, desc_level, - include_internal); + PrintOneStackNoLock(s, "Active plan stack", m_plans, desc_level, + include_internal); + PrintOneStackNoLock(s, "Completed plan stack", m_completed_plans, desc_level, + include_internal); + PrintOneStackNoLock(s, "Discarded plan stack", m_discarded_plans, desc_level, + include_internal); s.IndentLess(); } -void ThreadPlanStack::PrintOneStack(Stream &s, llvm::StringRef stack_name, - const PlanStack &stack, - lldb::DescriptionLevel desc_level, - bool include_internal) const { - std::lock_guard guard(m_stack_mutex); +void ThreadPlanStack::PrintOneStackNoLock(Stream &s, llvm::StringRef stack_name, + const PlanStack &stack, + lldb::DescriptionLevel desc_level, + bool include_internal) const { // If the stack is empty, just exit: if (stack.empty()) return; @@ -82,7 +82,7 @@ void ThreadPlanStack::PrintOneStack(Stream &s, llvm::StringRef stack_name, } size_t ThreadPlanStack::CheckpointCompletedPlans() { - std::lock_guard guard(m_stack_mutex); + llvm::sys::ScopedWriter guard(m_stack_mutex); m_completed_plan_checkpoint++; m_completed_plan_store.insert( std::make_pair(m_completed_plan_checkpoint, m_completed_plans)); @@ -90,7 +90,7 
@@ size_t ThreadPlanStack::CheckpointCompletedPlans() { } void ThreadPlanStack::RestoreCompletedPlanCheckpoint(size_t checkpoint) { - std::lock_guard guard(m_stack_mutex); + llvm::sys::ScopedWriter guard(m_stack_mutex); auto result = m_completed_plan_store.find(checkpoint); assert(result != m_completed_plan_store.end() && "Asked for a checkpoint that didn't exist"); @@ -99,13 +99,13 @@ void ThreadPlanStack::RestoreCompletedPlanCheckpoint(size_t checkpoint) { } void ThreadPlanStack::DiscardCompletedPlanCheckpoint(size_t checkpoint) { - std::lock_guard guard(m_stack_mutex); + llvm::sys::ScopedWriter guard(m_stack_mutex); m_completed_plan_store.erase(checkpoint); } void ThreadPlanStack::ThreadDestroyed(Thread *thread) { // Tell the plan stacks that this thread is going away: - std::lock_guard guard(m_stack_mutex); + llvm::sys::ScopedWriter guard(m_stack_mutex); for (ThreadPlanSP plan : m_plans) plan->ThreadDestroyed(); @@ -134,20 +134,22 @@ void ThreadPlanStack::PushPlan(lldb::ThreadPlanSP new_plan_sp) { // If the thread plan doesn't already have a tracer, give it its parent's // tracer: // The first plan has to be a base plan: - std::lock_guard guard(m_stack_mutex); - assert((m_plans.size() > 0 || new_plan_sp->IsBasePlan()) && - "Zeroth plan must be a base plan"); - - if (!new_plan_sp->GetThreadPlanTracer()) { - assert(!m_plans.empty()); - new_plan_sp->SetThreadPlanTracer(m_plans.back()->GetThreadPlanTracer()); + { // Scope for Lock - DidPush often adds plans to the stack: + llvm::sys::ScopedWriter guard(m_stack_mutex); + assert((m_plans.size() > 0 || new_plan_sp->IsBasePlan()) && + "Zeroth plan must be a base plan"); + + if (!new_plan_sp->GetThreadPlanTracer()) { + assert(!m_plans.empty()); + new_plan_sp->SetThreadPlanTracer(m_plans.back()->GetThreadPlanTracer()); + } + m_plans.push_back(new_plan_sp); } - m_plans.push_back(new_plan_sp); new_plan_sp->DidPush(); } lldb::ThreadPlanSP ThreadPlanStack::PopPlan() { - std::lock_guard guard(m_stack_mutex); + 
llvm::sys::ScopedWriter guard(m_stack_mutex); assert(m_plans.size() > 1 && "Can't pop the base thread plan"); // Note that moving the top element of the vector would leave it in an @@ -161,7 +163,11 @@ lldb::ThreadPlanSP ThreadPlanStack::PopPlan() { } lldb::ThreadPlanSP ThreadPlanStack::DiscardPlan() { - std::lock_guard guard(m_stack_mutex); + llvm::sys::ScopedWriter guard(m_stack_mutex); + return DiscardPlanNoLock(); +} + +lldb::ThreadPlanSP ThreadPlanStack::DiscardPlanNoLock() { assert(m_plans.size() > 1 && "Can't discard the base thread plan"); // Note that moving the top element of the vector would leave it in an @@ -177,12 +183,12 @@ lldb::ThreadPlanSP ThreadPlanStack::DiscardPlan() { // If the input plan is nullptr, discard all plans. Otherwise make sure this // plan is in the stack, and if so discard up to and including it. void ThreadPlanStack::DiscardPlansUpToPlan(ThreadPlan *up_to_plan_ptr) { - std::lock_guard guard(m_stack_mutex); + llvm::sys::ScopedWriter guard(m_stack_mutex); int stack_size = m_plans.size(); if (up_to_plan_ptr == nullptr) { for (int i = stack_size - 1; i > 0; i--) - DiscardPlan(); + DiscardPlanNoLock(); return; } @@ -197,23 +203,23 @@ void ThreadPlanStack::DiscardPlansUpToPlan(ThreadPlan *up_to_plan_ptr) { if (found_it) { bool last_one = false; for (int i = stack_size - 1; i > 0 && !last_one; i--) { - if (GetCurrentPlan().get() == up_to_plan_ptr) + if (GetCurrentPlanNoLock().get() == up_to_plan_ptr) last_one = true; - DiscardPlan(); + DiscardPlanNoLock(); } } } void ThreadPlanStack::DiscardAllPlans() { - std::lock_guard guard(m_stack_mutex); + llvm::sys::ScopedWriter guard(m_stack_mutex); int stack_size = m_plans.size(); for (int i = stack_size - 1; i > 0; i--) { - DiscardPlan(); + DiscardPlanNoLock(); } } void ThreadPlanStack::DiscardConsultingControllingPlans() { - std::lock_guard guard(m_stack_mutex); + llvm::sys::ScopedWriter guard(m_stack_mutex); while (true) { int controlling_plan_idx; bool discard = true; @@ -234,26 +240,30 @@ 
void ThreadPlanStack::DiscardConsultingControllingPlans() { // First pop all the dependent plans: for (int i = m_plans.size() - 1; i > controlling_plan_idx; i--) { - DiscardPlan(); + DiscardPlanNoLock(); } // Now discard the controlling plan itself. // The bottom-most plan never gets discarded. "OkayToDiscard" for it // means discard it's dependent plans, but not it... if (controlling_plan_idx > 0) { - DiscardPlan(); + DiscardPlanNoLock(); } } } lldb::ThreadPlanSP ThreadPlanStack::GetCurrentPlan() const { - std::lock_guard guard(m_stack_mutex); + llvm::sys::ScopedReader guard(m_stack_mutex); + return GetCurrentPlanNoLock(); +} + +lldb::ThreadPlanSP ThreadPlanStack::GetCurrentPlanNoLock() const { assert(m_plans.size() != 0 && "There will always be a base plan."); return m_plans.back(); } lldb::ThreadPlanSP ThreadPlanStack::GetCompletedPlan(bool skip_private) const { - std::lock_guard guard(m_stack_mutex); + llvm::sys::ScopedReader guard(m_stack_mutex); if (m_completed_plans.empty()) return {}; @@ -271,7 +281,7 @@ lldb::ThreadPlanSP ThreadPlanStack::GetCompletedPlan(bool skip_private) const { lldb::ThreadPlanSP ThreadPlanStack::GetPlanByIndex(uint32_t plan_idx, bool skip_private) const { - std::lock_guard guard(m_stack_mutex); + llvm::sys::ScopedReader guard(m_stack_mutex); uint32_t idx = 0; for (lldb::ThreadPlanSP plan_sp : m_plans) { @@ -286,7 +296,7 @@ lldb::ThreadPlanSP ThreadPlanStack::GetPlanByIndex(uint32_t plan_idx, lldb::ValueObjectSP ThreadPlanStack::GetReturnValueObject(bool &is_error) const { - std::lock_guard guard(m_stack_mutex); + llvm::sys::ScopedReader guard(m_stack_mutex); if (m_completed_plans.empty()) return {}; @@ -302,7 +312,7 @@ ThreadPlanStack::GetReturnValueObject(bool &is_error) const { } lldb::ExpressionVariableSP ThreadPlanStack::GetExpressionVariable() const { - std::lock_guard guard(m_stack_mutex); + llvm::sys::ScopedReader guard(m_stack_mutex); if (m_completed_plans.empty()) return {}; @@ -315,23 +325,23 @@ lldb::ExpressionVariableSP 
ThreadPlanStack::GetExpressionVariable() const { return {}; } bool ThreadPlanStack::AnyPlans() const { - std::lock_guard guard(m_stack_mutex); + llvm::sys::ScopedReader guard(m_stack_mutex); // There is always a base plan... return m_plans.size() > 1; } bool ThreadPlanStack::AnyCompletedPlans() const { - std::lock_guard guard(m_stack_mutex); + llvm::sys::ScopedReader guard(m_stack_mutex); return !m_completed_plans.empty(); } bool ThreadPlanStack::AnyDiscardedPlans() const { - std::lock_guard guard(m_stack_mutex); + llvm::sys::ScopedReader guard(m_stack_mutex); return !m_discarded_plans.empty(); } bool ThreadPlanStack::IsPlanDone(ThreadPlan *in_plan) const { - std::lock_guard guard(m_stack_mutex); + llvm::sys::ScopedReader guard(m_stack_mutex); for (auto plan : m_completed_plans) { if (plan.get() == in_plan) return true; @@ -340,7 +350,7 @@ bool ThreadPlanStack::IsPlanDone(ThreadPlan *in_plan) const { } bool ThreadPlanStack::WasPlanDiscarded(ThreadPlan *in_plan) const { - std::lock_guard guard(m_stack_mutex); + llvm::sys::ScopedReader guard(m_stack_mutex); for (auto plan : m_discarded_plans) { if (plan.get() == in_plan) return true; @@ -349,7 +359,7 @@ bool ThreadPlanStack::WasPlanDiscarded(ThreadPlan *in_plan) const { } ThreadPlan *ThreadPlanStack::GetPreviousPlan(ThreadPlan *current_plan) const { - std::lock_guard guard(m_stack_mutex); + llvm::sys::ScopedReader guard(m_stack_mutex); if (current_plan == nullptr) return nullptr; @@ -364,7 +374,7 @@ ThreadPlan *ThreadPlanStack::GetPreviousPlan(ThreadPlan *current_plan) const { // If this is the first completed plan, the previous one is the // bottom of the regular plan stack. if (stack_size > 0 && m_completed_plans[0].get() == current_plan) { - return GetCurrentPlan().get(); + return GetCurrentPlanNoLock().get(); } // Otherwise look for it in the regular plans. 
@@ -377,7 +387,7 @@ ThreadPlan *ThreadPlanStack::GetPreviousPlan(ThreadPlan *current_plan) const { } ThreadPlan *ThreadPlanStack::GetInnermostExpression() const { - std::lock_guard guard(m_stack_mutex); + llvm::sys::ScopedReader guard(m_stack_mutex); int stack_size = m_plans.size(); for (int i = stack_size - 1; i > 0; i--) { @@ -388,13 +398,13 @@ ThreadPlan *ThreadPlanStack::GetInnermostExpression() const { } void ThreadPlanStack::ClearThreadCache() { - std::lock_guard guard(m_stack_mutex); + llvm::sys::ScopedReader guard(m_stack_mutex); for (lldb::ThreadPlanSP thread_plan_sp : m_plans) thread_plan_sp->ClearThreadCache(); } void ThreadPlanStack::WillResume() { - std::lock_guard guard(m_stack_mutex); + llvm::sys::ScopedWriter guard(m_stack_mutex); m_completed_plans.clear(); m_discarded_plans.clear(); } From 30dbbe2bb085cb0e23a8923a420099a2b04bab06 Mon Sep 17 00:00:00 2001 From: Jim Ingham Date: Fri, 13 Dec 2024 12:39:41 -0800 Subject: [PATCH 2/4] Revert "Revert "Merge pull request #9493 from jimingham/swift-call-site-breakpoints"" This reverts commit c93fb6d699b398048459e2b203ccec25ed9e6ee9. 
--- .../lldb/Breakpoint/BreakpointLocation.h | 36 ++++ lldb/include/lldb/Breakpoint/BreakpointSite.h | 5 + lldb/include/lldb/Core/Declaration.h | 6 +- lldb/include/lldb/Target/StopInfo.h | 12 ++ .../lldb/Target/ThreadPlanStepInRange.h | 4 +- lldb/source/Breakpoint/BreakpointLocation.cpp | 63 ++++++- lldb/source/Breakpoint/BreakpointResolver.cpp | 15 ++ lldb/source/Breakpoint/BreakpointSite.cpp | 17 ++ lldb/source/Core/Declaration.cpp | 5 +- lldb/source/Symbol/Block.cpp | 2 +- lldb/source/Symbol/CompileUnit.cpp | 116 +++++++++++- lldb/source/Target/StackFrameList.cpp | 171 ++++++------------ lldb/source/Target/StopInfo.cpp | 55 ++++++ lldb/source/Target/Thread.cpp | 8 + lldb/source/Target/ThreadPlanStepInRange.cpp | 25 ++- .../source/Target/ThreadPlanStepOverRange.cpp | 2 +- lldb/source/Target/ThreadPlanStepRange.cpp | 50 ++++- .../breakpoint/same_cu_name/Makefile | 19 ++ .../TestFileBreakpoinsSameCUName.py | 32 ++++ .../breakpoint/same_cu_name/common.cpp | 8 + .../breakpoint/same_cu_name/main.cpp | 24 +++ .../gdb_remote_client/TestGDBRemoteClient.py | 35 +++- .../inline-stepping/TestInlineStepping.py | 100 +++++++++- .../inline-stepping/calling.cpp | 25 +++ lldb/test/API/lang/swift/macro/Macro.swift | 2 + .../test/API/lang/swift/macro/MacroImpl.swift | 13 ++ .../API/lang/swift/macro/TestSwiftMacro.py | 49 ++++- lldb/test/API/lang/swift/macro/main.swift | 7 +- 28 files changed, 759 insertions(+), 147 deletions(-) create mode 100644 lldb/test/API/functionalities/breakpoint/same_cu_name/Makefile create mode 100644 lldb/test/API/functionalities/breakpoint/same_cu_name/TestFileBreakpoinsSameCUName.py create mode 100644 lldb/test/API/functionalities/breakpoint/same_cu_name/common.cpp create mode 100644 lldb/test/API/functionalities/breakpoint/same_cu_name/main.cpp diff --git a/lldb/include/lldb/Breakpoint/BreakpointLocation.h b/lldb/include/lldb/Breakpoint/BreakpointLocation.h index cca00335bc3c67..3592291bb2d06e 100644 --- 
a/lldb/include/lldb/Breakpoint/BreakpointLocation.h +++ b/lldb/include/lldb/Breakpoint/BreakpointLocation.h @@ -11,10 +11,12 @@ #include #include +#include #include "lldb/Breakpoint/BreakpointOptions.h" #include "lldb/Breakpoint/StoppointHitCounter.h" #include "lldb/Core/Address.h" +#include "lldb/Symbol/LineEntry.h" #include "lldb/Utility/UserID.h" #include "lldb/lldb-private.h" @@ -282,6 +284,25 @@ class BreakpointLocation /// Returns the breakpoint location ID. lldb::break_id_t GetID() const { return m_loc_id; } + /// Set the line entry that should be shown to users for this location. + /// It is up to the caller to verify that this is a valid entry to show. + /// The current use of this is to distinguish among line entries from a + /// virtual inlined call stack that all share the same address. + /// The line entry must have the same start address as the address for this + /// location. + bool SetPreferredLineEntry(const LineEntry &line_entry) { + if (m_address == line_entry.range.GetBaseAddress()) { + m_preferred_line_entry = line_entry; + return true; + } + assert(0 && "Tried to set a preferred line entry with a different address"); + return false; + } + + const std::optional GetPreferredLineEntry() { + return m_preferred_line_entry; + } + protected: friend class BreakpointSite; friend class BreakpointLocationList; @@ -306,6 +327,16 @@ class BreakpointLocation /// If it returns false we should continue, otherwise stop. bool IgnoreCountShouldStop(); + /// If this location knows that the virtual stack frame it represents is + /// not frame 0, return the suggested stack frame instead. This will happen + /// when the location's address contains a "virtual inlined call stack" and + /// the breakpoint was set on a file & line that are not at the bottom of that + /// stack. For now we key off the "preferred line entry" - looking for that + /// in the blocks that start with the stop PC. 
+ /// This version of the API doesn't take an "inlined" parameter because it + /// only changes frames in the inline stack. + std::optional GetSuggestedStackFrameIndex(); + private: void SwapLocation(lldb::BreakpointLocationSP swap_from); @@ -369,6 +400,11 @@ class BreakpointLocation lldb::break_id_t m_loc_id; ///< Breakpoint location ID. StoppointHitCounter m_hit_counter; ///< Number of times this breakpoint /// location has been hit. + /// If this exists, use it to print the stop description rather than the + /// LineEntry m_address resolves to directly. Use this for instance when the + /// location was given somewhere in the virtual inlined call stack since the + /// Address always resolves to the lowest entry in the stack. + std::optional m_preferred_line_entry; void SetShouldResolveIndirectFunctions(bool do_resolve) { m_should_resolve_indirect_functions = do_resolve; diff --git a/lldb/include/lldb/Breakpoint/BreakpointSite.h b/lldb/include/lldb/Breakpoint/BreakpointSite.h index 17b76d51c1ae53..7b3f7be23639f2 100644 --- a/lldb/include/lldb/Breakpoint/BreakpointSite.h +++ b/lldb/include/lldb/Breakpoint/BreakpointSite.h @@ -170,6 +170,11 @@ class BreakpointSite : public std::enable_shared_from_this, /// \see lldb::DescriptionLevel void GetDescription(Stream *s, lldb::DescriptionLevel level); + // This runs through all the breakpoint locations owning this site and returns + // the greatest of their suggested stack frame indexes. This only handles + // inlined stack changes. + std::optional GetSuggestedStackFrameIndex(); + /// Tell whether a breakpoint has a location at this site. /// /// \param[in] bp_id diff --git a/lldb/include/lldb/Core/Declaration.h b/lldb/include/lldb/Core/Declaration.h index 4a0e9047b54695..c864b88c6b32a3 100644 --- a/lldb/include/lldb/Core/Declaration.h +++ b/lldb/include/lldb/Core/Declaration.h @@ -84,10 +84,14 @@ class Declaration { /// \param[in] declaration /// The const Declaration object to compare with. 
/// + /// \param[in] full + /// Same meaning as Full in FileSpec::Equal. True means an empty + /// directory is not equal to a specified one, false means it is equal. + /// /// \return /// Returns \b true if \b declaration is at the same file and /// line, \b false otherwise. - bool FileAndLineEqual(const Declaration &declaration) const; + bool FileAndLineEqual(const Declaration &declaration, bool full) const; /// Dump a description of this object to a Stream. /// diff --git a/lldb/include/lldb/Target/StopInfo.h b/lldb/include/lldb/Target/StopInfo.h index f520ea88803e55..3f7e03dc316ee6 100644 --- a/lldb/include/lldb/Target/StopInfo.h +++ b/lldb/include/lldb/Target/StopInfo.h @@ -77,6 +77,18 @@ class StopInfo : public std::enable_shared_from_this { m_description.clear(); } + /// This gives the StopInfo a chance to suggest a stack frame to select. + /// Passing true for inlined_stack will request changes to the inlined + /// call stack. Passing false will request changes to the real stack + /// frame. The inlined stack gets adjusted before we call into the thread + /// plans so they can reason based on the correct values. The real stack + /// adjustment is handled after the frame recognizers get a chance to adjust + /// the frame. + virtual std::optional + GetSuggestedStackFrameIndex(bool inlined_stack) { + return {}; + } + virtual bool IsValidForOperatingSystemThread(Thread &thread) { return true; } /// A Continue operation can result in a false stop event diff --git a/lldb/include/lldb/Target/ThreadPlanStepInRange.h b/lldb/include/lldb/Target/ThreadPlanStepInRange.h index e5a63c9f8f7d53..5b8b4d066076a5 100644 --- a/lldb/include/lldb/Target/ThreadPlanStepInRange.h +++ b/lldb/include/lldb/Target/ThreadPlanStepInRange.h @@ -100,8 +100,8 @@ class ThreadPlanStepInRange : public ThreadPlanStepRange, bool m_step_past_prologue; // FIXME: For now hard-coded to true, we could put // a switch in for this if there's // demand for that. 
- bool m_virtual_step; // true if we've just done a "virtual step", i.e. just - // moved the inline stack depth. + LazyBool m_virtual_step; // true if we've just done a "virtual step", i.e. + // just moved the inline stack depth. ConstString m_step_into_target; std::vector m_step_in_deep_bps; // Places where we might // want to stop when we do a diff --git a/lldb/source/Breakpoint/BreakpointLocation.cpp b/lldb/source/Breakpoint/BreakpointLocation.cpp index ad9057c8141e99..c7ea50407ae1c7 100644 --- a/lldb/source/Breakpoint/BreakpointLocation.cpp +++ b/lldb/source/Breakpoint/BreakpointLocation.cpp @@ -508,8 +508,20 @@ void BreakpointLocation::GetDescription(Stream *s, s->PutCString("re-exported target = "); else s->PutCString("where = "); + + // If there's a preferred line entry for printing, use that. + bool show_function_info = true; + if (auto preferred = GetPreferredLineEntry()) { + sc.line_entry = *preferred; + // FIXME: We're going to get the function name wrong when the preferred + // line entry is not the lowest one. For now, just leave the function + // out in this case, but we really should also figure out how to easily + // fake the function name here. 
+ show_function_info = false; + } sc.DumpStopContext(s, m_owner.GetTarget().GetProcessSP().get(), m_address, - false, true, false, true, true, true); + false, true, false, show_function_info, + show_function_info, show_function_info); } else { if (sc.module_sp) { s->EOL(); @@ -537,7 +549,10 @@ void BreakpointLocation::GetDescription(Stream *s, if (sc.line_entry.line > 0) { s->EOL(); s->Indent("location = "); - sc.line_entry.DumpStopContext(s, true); + if (auto preferred = GetPreferredLineEntry()) + preferred->DumpStopContext(s, true); + else + sc.line_entry.DumpStopContext(s, true); } } else { @@ -656,6 +671,50 @@ void BreakpointLocation::SendBreakpointLocationChangedEvent( } } +std::optional BreakpointLocation::GetSuggestedStackFrameIndex() { + auto preferred_opt = GetPreferredLineEntry(); + if (!preferred_opt) + return {}; + LineEntry preferred = *preferred_opt; + SymbolContext sc; + if (!m_address.CalculateSymbolContext(&sc)) + return {}; + // Don't return anything special if frame 0 is the preferred line entry. + // We not really telling the stack frame list to do anything special in that + // case. + if (!LineEntry::Compare(sc.line_entry, preferred)) + return {}; + + if (!sc.block) + return {}; + + // Blocks have their line info in Declaration form, so make one here: + Declaration preferred_decl(preferred.GetFile(), preferred.line, + preferred.column); + + uint32_t depth = 0; + Block *inlined_block = sc.block->GetContainingInlinedBlock(); + while (inlined_block) { + // If we've moved to a block that this isn't the start of, that's not + // our inlining info or call site, so we can stop here. 
+ Address start_address; + if (!inlined_block->GetStartAddress(start_address) || + start_address != m_address) + return {}; + + const InlineFunctionInfo *info = inlined_block->GetInlinedFunctionInfo(); + if (info) { + if (preferred_decl == info->GetDeclaration()) + return depth; + if (preferred_decl == info->GetCallSite()) + return depth + 1; + } + inlined_block = inlined_block->GetInlinedParent(); + depth++; + } + return {}; +} + void BreakpointLocation::SwapLocation(BreakpointLocationSP swap_from) { m_address = swap_from->m_address; m_should_resolve_indirect_functions = diff --git a/lldb/source/Breakpoint/BreakpointResolver.cpp b/lldb/source/Breakpoint/BreakpointResolver.cpp index 8307689c7640cf..9643602d78c751 100644 --- a/lldb/source/Breakpoint/BreakpointResolver.cpp +++ b/lldb/source/Breakpoint/BreakpointResolver.cpp @@ -340,6 +340,21 @@ void BreakpointResolver::AddLocation(SearchFilter &filter, } BreakpointLocationSP bp_loc_sp(AddLocation(line_start)); + // If the address that we resolved the location to returns a different + // LineEntry from the one in the incoming SC, we're probably dealing with an + // inlined call site, so set that as the preferred LineEntry: + LineEntry resolved_entry; + if (!skipped_prologue && bp_loc_sp && + line_start.CalculateSymbolContextLineEntry(resolved_entry) && + LineEntry::Compare(resolved_entry, sc.line_entry)) { + // FIXME: The function name will also be wrong here. Do we need to record + // that as well, or can we figure that out again when we report this + // breakpoint location. 
+ if (!bp_loc_sp->SetPreferredLineEntry(sc.line_entry)) { + LLDB_LOG(log, "Tried to add a preferred line entry that didn't have the " + "same address as this location's address."); + } + } if (log && bp_loc_sp && !GetBreakpoint()->IsInternal()) { StreamString s; bp_loc_sp->GetDescription(&s, lldb::eDescriptionLevelVerbose); diff --git a/lldb/source/Breakpoint/BreakpointSite.cpp b/lldb/source/Breakpoint/BreakpointSite.cpp index 3ca93f908e30b8..9700a57d3346e0 100644 --- a/lldb/source/Breakpoint/BreakpointSite.cpp +++ b/lldb/source/Breakpoint/BreakpointSite.cpp @@ -87,6 +87,23 @@ void BreakpointSite::GetDescription(Stream *s, lldb::DescriptionLevel level) { m_constituents.GetDescription(s, level); } +std::optional BreakpointSite::GetSuggestedStackFrameIndex() { + + std::optional result; + std::lock_guard guard(m_constituents_mutex); + for (BreakpointLocationSP loc_sp : m_constituents.BreakpointLocations()) { + std::optional loc_frame_index = + loc_sp->GetSuggestedStackFrameIndex(); + if (loc_frame_index) { + if (result) + result = std::max(*loc_frame_index, *result); + else + result = loc_frame_index; + } + } + return result; +} + bool BreakpointSite::IsInternal() const { return m_constituents.IsInternal(); } uint8_t *BreakpointSite::GetTrapOpcodeBytes() { return &m_trap_opcode[0]; } diff --git a/lldb/source/Core/Declaration.cpp b/lldb/source/Core/Declaration.cpp index 579a3999d14ea0..a485c4b9ba48a7 100644 --- a/lldb/source/Core/Declaration.cpp +++ b/lldb/source/Core/Declaration.cpp @@ -70,8 +70,9 @@ int Declaration::Compare(const Declaration &a, const Declaration &b) { return 0; } -bool Declaration::FileAndLineEqual(const Declaration &declaration) const { - int file_compare = FileSpec::Compare(this->m_file, declaration.m_file, true); +bool Declaration::FileAndLineEqual(const Declaration &declaration, + bool full) const { + int file_compare = FileSpec::Compare(this->m_file, declaration.m_file, full); return file_compare == 0 && this->m_line == declaration.m_line; } 
diff --git a/lldb/source/Symbol/Block.cpp b/lldb/source/Symbol/Block.cpp index f7d9c0d2d33065..5c7772a6db780d 100644 --- a/lldb/source/Symbol/Block.cpp +++ b/lldb/source/Symbol/Block.cpp @@ -230,7 +230,7 @@ Block *Block::GetContainingInlinedBlockWithCallSite( const auto *function_info = inlined_block->GetInlinedFunctionInfo(); if (function_info && - function_info->GetCallSite().FileAndLineEqual(find_call_site)) + function_info->GetCallSite().FileAndLineEqual(find_call_site, true)) return inlined_block; inlined_block = inlined_block->GetInlinedParent(); } diff --git a/lldb/source/Symbol/CompileUnit.cpp b/lldb/source/Symbol/CompileUnit.cpp index ddeacf18e855ee..e2592feec74cfa 100644 --- a/lldb/source/Symbol/CompileUnit.cpp +++ b/lldb/source/Symbol/CompileUnit.cpp @@ -249,7 +249,10 @@ void CompileUnit::ResolveSymbolContext( const SourceLocationSpec &src_location_spec, SymbolContextItem resolve_scope, SymbolContextList &sc_list) { const FileSpec file_spec = src_location_spec.GetFileSpec(); - const uint32_t line = src_location_spec.GetLine().value_or(0); + const uint32_t line = + src_location_spec.GetLine().value_or(LLDB_INVALID_LINE_NUMBER); + const uint32_t column_num = + src_location_spec.GetColumn().value_or(LLDB_INVALID_COLUMN_NUMBER); const bool check_inlines = src_location_spec.GetCheckInlines(); // First find all of the file indexes that match our "file_spec". 
If @@ -266,7 +269,7 @@ void CompileUnit::ResolveSymbolContext( SymbolContext sc(GetModule()); sc.comp_unit = this; - if (line == 0) { + if (line == LLDB_INVALID_LINE_NUMBER) { if (file_spec_matches_cu_file_spec && !check_inlines) { // only append the context if we aren't looking for inline call sites by // file and line and if the file spec matches that of the compile unit @@ -310,6 +313,115 @@ void CompileUnit::ResolveSymbolContext( 0, file_indexes, src_location_spec, &line_entry); } + // If we didn't manage to find a breakpoint that matched the line number + // requested, that might be because it is only an inline call site, and + // doesn't have a line entry in the line table. Scan for that here. + // + // We are making the assumption that if there was an inlined function it will + // contribute at least 1 non-call-site entry to the line table. That's handy + // because we don't move line breakpoints over function boundaries, so if we + // found a hit, and there were also a call site entry, it would have to be in + // the function containing the PC of the line table match. That way we can + // limit the call site search to that function. + // We will miss functions that ONLY exist as a call site entry. + + if (line_entry.IsValid() && + (line_entry.line != line || + (column_num != 0 && line_entry.column != column_num)) && + (resolve_scope & eSymbolContextLineEntry) && check_inlines) { + // We don't move lines over function boundaries, so the address in the + // line entry will be the in function that contained the line that might + // be a CallSite, and we can just iterate over that function to find any + // inline records, and dig up their call sites. 
+ Address start_addr = line_entry.range.GetBaseAddress(); + Function *function = start_addr.CalculateSymbolContextFunction(); + // Record the size of the list to see if we added to it: + size_t old_sc_list_size = sc_list.GetSize(); + + Declaration sought_decl(file_spec, line, column_num); + // We use this recursive function to descend the block structure looking + // for a block that has this Declaration as in it's CallSite info. + // This function recursively scans the sibling blocks of the incoming + // block parameter. + std::function examine_block = + [&sought_decl, &sc_list, &src_location_spec, resolve_scope, + &examine_block](Block &block) -> void { + // Iterate over the sibling child blocks of the incoming block. + Block *sibling_block = block.GetFirstChild(); + while (sibling_block) { + // We only have to descend through the regular blocks, looking for + // immediate inlines, since those are the only ones that will have this + // callsite. + const InlineFunctionInfo *inline_info = + sibling_block->GetInlinedFunctionInfo(); + if (inline_info) { + // If this is the call-site we are looking for, record that: + // We need to be careful because the call site from the debug info + // will generally have a column, but the user might not have specified + // it. + Declaration found_decl = inline_info->GetCallSite(); + uint32_t sought_column = sought_decl.GetColumn(); + if (found_decl.FileAndLineEqual(sought_decl, false) && + (sought_column == LLDB_INVALID_COLUMN_NUMBER || + sought_column == found_decl.GetColumn())) { + // If we found a call site, it belongs not in this inlined block, + // but in the parent block that inlined it. + Address parent_start_addr; + if (sibling_block->GetParent()->GetStartAddress( + parent_start_addr)) { + SymbolContext sc; + parent_start_addr.CalculateSymbolContext(&sc, resolve_scope); + // Now swap out the line entry for the one we found. 
+ LineEntry call_site_line = sc.line_entry; + call_site_line.line = found_decl.GetLine(); + call_site_line.column = found_decl.GetColumn(); + bool matches_spec = true; + // If the user asked for an exact match, we need to make sure the + // call site we found actually matches the location. + if (src_location_spec.GetExactMatch()) { + matches_spec = false; + if ((src_location_spec.GetFileSpec() == + sc.line_entry.GetFile()) && + (src_location_spec.GetLine() && + *src_location_spec.GetLine() == call_site_line.line) && + (src_location_spec.GetColumn() && + *src_location_spec.GetColumn() == call_site_line.column)) + matches_spec = true; + } + if (matches_spec && + sibling_block->GetRangeAtIndex(0, call_site_line.range)) { + SymbolContext call_site_sc(sc.target_sp, sc.module_sp, + sc.comp_unit, sc.function, sc.block, + &call_site_line, sc.symbol); + sc_list.Append(call_site_sc); + } + } + } + } + + // Descend into the child blocks: + examine_block(*sibling_block); + // Now go to the next sibling: + sibling_block = sibling_block->GetSibling(); + } + }; + + if (function) { + // We don't need to examine the function block, it can't be inlined. + Block &func_block = function->GetBlock(true); + examine_block(func_block); + } + // If we found entries here, we are done. We only get here because we + // didn't find an exact line entry for this line & column, but if we found + // an exact match from the call site info that's strictly better than + // continuing to look for matches further on in the file. + // FIXME: Should I also do this for "call site line exists between the + // given line number and the later line we found in the line table"? That's + // a closer approximation to our general sliding algorithm. + if (sc_list.GetSize() > old_sc_list_size) + return; + } + // If "exact == true", then "found_line" will be the same as "line". 
If // "exact == false", the "found_line" will be the closest line entry // with a line number greater than "line" and we will use this for our diff --git a/lldb/source/Target/StackFrameList.cpp b/lldb/source/Target/StackFrameList.cpp index 397582c0e0286a..8ab179a45db324 100644 --- a/lldb/source/Target/StackFrameList.cpp +++ b/lldb/source/Target/StackFrameList.cpp @@ -85,121 +85,32 @@ void StackFrameList::ResetCurrentInlinedDepth() { return; std::lock_guard guard(m_mutex); - - GetFramesUpTo(0, DoNotAllowInterruption); - if (m_frames.empty()) - return; - if (!m_frames[0]->IsInlined()) { - m_current_inlined_depth = UINT32_MAX; - m_current_inlined_pc = LLDB_INVALID_ADDRESS; - Log *log = GetLog(LLDBLog::Step); - if (log && log->GetVerbose()) - LLDB_LOGF( - log, - "ResetCurrentInlinedDepth: Invalidating current inlined depth.\n"); - return; - } - // We only need to do something special about inlined blocks when we are - // at the beginning of an inlined function: - // FIXME: We probably also have to do something special if the PC is at - // the END of an inlined function, which coincides with the end of either - // its containing function or another inlined function. - - Block *block_ptr = m_frames[0]->GetFrameBlock(); - if (!block_ptr) - return; + m_current_inlined_pc = LLDB_INVALID_ADDRESS; + m_current_inlined_depth = UINT32_MAX; - Address pc_as_address; - lldb::addr_t curr_pc = m_thread.GetRegisterContext()->GetPC(); - pc_as_address.SetLoadAddress(curr_pc, &(m_thread.GetProcess()->GetTarget())); - AddressRange containing_range; - if (!block_ptr->GetRangeContainingAddress(pc_as_address, containing_range) || - pc_as_address != containing_range.GetBaseAddress()) - return; - - // If we got here because of a breakpoint hit, then set the inlined depth - // depending on where the breakpoint was set. If we got here because of a - // crash, then set the inlined depth to the deepest most block. 
Otherwise, - // we stopped here naturally as the result of a step, so set ourselves in the - // containing frame of the whole set of nested inlines, so the user can then - // "virtually" step into the frames one by one, or next over the whole mess. - // Note: We don't have to handle being somewhere in the middle of the stack - // here, since ResetCurrentInlinedDepth doesn't get called if there is a - // valid inlined depth set. StopInfoSP stop_info_sp = m_thread.GetStopInfo(); if (!stop_info_sp) return; - switch (stop_info_sp->GetStopReason()) { - case eStopReasonWatchpoint: - case eStopReasonException: - case eStopReasonExec: - case eStopReasonFork: - case eStopReasonVFork: - case eStopReasonVForkDone: - case eStopReasonSignal: - // In all these cases we want to stop in the deepest frame. - m_current_inlined_pc = curr_pc; - m_current_inlined_depth = 0; - break; - case eStopReasonBreakpoint: { - // FIXME: Figure out what this break point is doing, and set the inline - // depth appropriately. Be careful to take into account breakpoints that - // implement step over prologue, since that should do the default - // calculation. For now, if the breakpoints corresponding to this hit are - // all internal, I set the stop location to the top of the inlined stack, - // since that will make things like stepping over prologues work right. - // But if there are any non-internal breakpoints I do to the bottom of the - // stack, since that was the old behavior. 
- uint32_t bp_site_id = stop_info_sp->GetValue(); - BreakpointSiteSP bp_site_sp( - m_thread.GetProcess()->GetBreakpointSiteList().FindByID(bp_site_id)); - bool all_internal = true; - if (bp_site_sp) { - uint32_t num_owners = bp_site_sp->GetNumberOfConstituents(); - for (uint32_t i = 0; i < num_owners; i++) { - Breakpoint &bp_ref = - bp_site_sp->GetConstituentAtIndex(i)->GetBreakpoint(); - if (!bp_ref.IsInternal()) { - all_internal = false; - } - } - } - if (!all_internal) { - m_current_inlined_pc = curr_pc; - m_current_inlined_depth = 0; - break; - } - } - [[fallthrough]]; - default: { - // Otherwise, we should set ourselves at the container of the inlining, so - // that the user can descend into them. So first we check whether we have - // more than one inlined block sharing this PC: - int num_inlined_functions = 0; - - for (Block *container_ptr = block_ptr->GetInlinedParent(); - container_ptr != nullptr; - container_ptr = container_ptr->GetInlinedParent()) { - if (!container_ptr->GetRangeContainingAddress(pc_as_address, - containing_range)) - break; - if (pc_as_address != containing_range.GetBaseAddress()) - break; - num_inlined_functions++; - } - m_current_inlined_pc = curr_pc; - m_current_inlined_depth = num_inlined_functions + 1; - Log *log = GetLog(LLDBLog::Step); + bool inlined = true; + auto inline_depth = stop_info_sp->GetSuggestedStackFrameIndex(inlined); + // We're only adjusting the inlined stack here. 
+ Log *log = GetLog(LLDBLog::Step); + if (inline_depth) { + m_current_inlined_depth = *inline_depth; + m_current_inlined_pc = m_thread.GetRegisterContext()->GetPC(); + if (log && log->GetVerbose()) LLDB_LOGF(log, "ResetCurrentInlinedDepth: setting inlined " "depth: %d 0x%" PRIx64 ".\n", - m_current_inlined_depth, curr_pc); - - break; - } + m_current_inlined_depth, m_current_inlined_pc); + } else { + if (log && log->GetVerbose()) + LLDB_LOGF( + log, + "ResetCurrentInlinedDepth: Invalidating current inlined depth.\n"); } } @@ -811,19 +722,48 @@ void StackFrameList::SelectMostRelevantFrame() { RecognizedStackFrameSP recognized_frame_sp = frame_sp->GetRecognizedFrame(); - if (!recognized_frame_sp) { - LLDB_LOG(log, "Frame #0 not recognized"); - return; + if (recognized_frame_sp) { + if (StackFrameSP most_relevant_frame_sp = + recognized_frame_sp->GetMostRelevantFrame()) { + LLDB_LOG(log, "Found most relevant frame at index {0}", + most_relevant_frame_sp->GetFrameIndex()); + SetSelectedFrame(most_relevant_frame_sp.get()); + return; + } } + LLDB_LOG(log, "Frame #0 not recognized"); - if (StackFrameSP most_relevant_frame_sp = - recognized_frame_sp->GetMostRelevantFrame()) { - LLDB_LOG(log, "Found most relevant frame at index {0}", - most_relevant_frame_sp->GetFrameIndex()); - SetSelectedFrame(most_relevant_frame_sp.get()); - } else { - LLDB_LOG(log, "No relevant frame!"); + // If this thread has a non-trivial StopInfo, then let it suggest + // a most relevant frame: + StopInfoSP stop_info_sp = m_thread.GetStopInfo(); + uint32_t stack_idx = 0; + bool found_relevant = false; + if (stop_info_sp) { + // Here we're only asking the stop info if it wants to adjust the real stack + // index. We have to ask about the m_inlined_stack_depth in + // Thread::ShouldStop since the plans need to reason with that info.
+ bool inlined = false; + std::optional stack_opt = + stop_info_sp->GetSuggestedStackFrameIndex(inlined); + if (stack_opt) { + stack_idx = *stack_opt; + found_relevant = true; + } } + + frame_sp = GetFrameAtIndex(stack_idx); + if (!frame_sp) + LLDB_LOG(log, "Stop info suggested relevant frame {0} but it didn't exist", + stack_idx); + else if (found_relevant) + LLDB_LOG(log, "Setting selected frame from stop info to {0}", stack_idx); + // Note, we don't have to worry about "inlined" frames here, because we've + // already calculated the inlined frame in Thread::ShouldStop, and + // SetSelectedFrame will take care of that adjustment for us. + SetSelectedFrame(frame_sp.get()); + + if (!found_relevant) + LLDB_LOG(log, "No relevant frame!"); } uint32_t StackFrameList::GetSelectedFrameIndex( @@ -836,6 +776,7 @@ uint32_t StackFrameList::GetSelectedFrameIndex( // isn't set, then don't force a selection here, just return 0. if (!select_most_relevant) return 0; + // If the inlined stack frame is set, then use that: m_selected_frame_idx = 0; } return *m_selected_frame_idx; diff --git a/lldb/source/Target/StopInfo.cpp b/lldb/source/Target/StopInfo.cpp index c379fd9d2ec78a..d12d1c8bced3ef 100644 --- a/lldb/source/Target/StopInfo.cpp +++ b/lldb/source/Target/StopInfo.cpp @@ -15,6 +15,7 @@ #include "lldb/Breakpoint/WatchpointResource.h" #include "lldb/Core/Debugger.h" #include "lldb/Expression/UserExpression.h" +#include "lldb/Symbol/Block.h" #include "lldb/Target/Process.h" #include "lldb/Target/StopInfo.h" #include "lldb/Target/Target.h" @@ -246,6 +247,22 @@ class StopInfoBreakpoint : public StopInfo { return m_description.c_str(); } + std::optional + GetSuggestedStackFrameIndex(bool inlined_stack) override { + if (!inlined_stack) + return {}; + + ThreadSP thread_sp(m_thread_wp.lock()); + if (!thread_sp) + return {}; + BreakpointSiteSP bp_site_sp( + thread_sp->GetProcess()->GetBreakpointSiteList().FindByID(m_value)); + if (!bp_site_sp) + return {}; + + return 
bp_site_sp->GetSuggestedStackFrameIndex(); + } + protected: bool ShouldStop(Event *event_ptr) override { // This just reports the work done by PerformAction or the synchronous @@ -1143,6 +1160,44 @@ class StopInfoTrace : public StopInfo { else return m_description.c_str(); } + + std::optional + GetSuggestedStackFrameIndex(bool inlined_stack) override { + // Trace only knows how to adjust inlined stacks: + if (!inlined_stack) + return {}; + + ThreadSP thread_sp = GetThread(); + StackFrameSP frame_0_sp = thread_sp->GetStackFrameAtIndex(0); + if (!frame_0_sp) + return {}; + if (!frame_0_sp->IsInlined()) + return {}; + Block *block_ptr = frame_0_sp->GetFrameBlock(); + if (!block_ptr) + return {}; + Address pc_address = frame_0_sp->GetFrameCodeAddress(); + AddressRange containing_range; + if (!block_ptr->GetRangeContainingAddress(pc_address, containing_range) || + pc_address != containing_range.GetBaseAddress()) + return {}; + + int num_inlined_functions = 0; + + for (Block *container_ptr = block_ptr->GetInlinedParent(); + container_ptr != nullptr; + container_ptr = container_ptr->GetInlinedParent()) { + if (!container_ptr->GetRangeContainingAddress(pc_address, + containing_range)) + break; + if (pc_address != containing_range.GetBaseAddress()) + break; + + num_inlined_functions++; + } + inlined_stack = true; + return num_inlined_functions + 1; + } }; // StopInfoException diff --git a/lldb/source/Target/Thread.cpp b/lldb/source/Target/Thread.cpp index d16fc1f898424f..fffbf1c4f13663 100644 --- a/lldb/source/Target/Thread.cpp +++ b/lldb/source/Target/Thread.cpp @@ -643,6 +643,14 @@ void Thread::WillStop() { void Thread::SetupForResume() { if (GetResumeState() != eStateSuspended) { + // First check whether this thread is going to "actually" resume at all. + // For instance, if we're stepping from one level to the next of an + // virtual inlined call stack, we just change the inlined call stack index + // without actually running this thread. 
In that case, for this thread we + // shouldn't push a step over breakpoint plan or do that work. + if (GetCurrentPlan()->IsVirtualStep()) + return; + // If we're at a breakpoint push the step-over breakpoint plan. Do this // before telling the current plan it will resume, since we might change // what the current plan is. diff --git a/lldb/source/Target/ThreadPlanStepInRange.cpp b/lldb/source/Target/ThreadPlanStepInRange.cpp index 90bbdcf1df69ba..9f6d77b269cf8e 100644 --- a/lldb/source/Target/ThreadPlanStepInRange.cpp +++ b/lldb/source/Target/ThreadPlanStepInRange.cpp @@ -43,7 +43,7 @@ ThreadPlanStepInRange::ThreadPlanStepInRange( "Step Range stepping in", thread, range, addr_context, stop_others), ThreadPlanShouldStopHere(this), m_step_past_prologue(true), - m_virtual_step(false), m_step_into_target(step_into_target) { + m_virtual_step(eLazyBoolCalculate), m_step_into_target(step_into_target) { SetCallbacks(); SetFlagsToDefault(); SetupAvoidNoDebug(step_in_avoids_code_without_debug_info, @@ -152,7 +152,7 @@ bool ThreadPlanStepInRange::ShouldStop(Event *event_ptr) { m_sub_plan_sp.reset(); } - if (m_virtual_step) { + if (m_virtual_step == eLazyBoolYes) { // If we've just completed a virtual step, all we need to do is check for a // ShouldStopHere plan, and otherwise we're done. // FIXME - This can be both a step in and a step out. 
Probably should @@ -564,7 +564,7 @@ bool ThreadPlanStepInRange::DoPlanExplainsStop(Event *event_ptr) { bool return_value = false; - if (m_virtual_step) { + if (m_virtual_step == eLazyBoolYes) { return_value = true; } else { StopInfoSP stop_info_sp = GetPrivateStopInfo(); @@ -599,10 +599,13 @@ bool ThreadPlanStepInRange::DoPlanExplainsStop(Event *event_ptr) { bool ThreadPlanStepInRange::DoWillResume(lldb::StateType resume_state, bool current_plan) { - m_virtual_step = false; + m_virtual_step = eLazyBoolCalculate; if (resume_state == eStateStepping && current_plan) { Thread &thread = GetThread(); // See if we are about to step over a virtual inlined call. + // But if we already know we're virtual stepping, don't decrement the + // inlined depth again... + bool step_without_resume = thread.DecrementCurrentInlinedDepth(); if (step_without_resume) { Log *log = GetLog(LLDBLog::Step); @@ -615,11 +618,21 @@ bool ThreadPlanStepInRange::DoWillResume(lldb::StateType resume_state, // FIXME: Maybe it would be better to create a InlineStep stop reason, but // then // the whole rest of the world would have to handle that stop reason. 
- m_virtual_step = true; + m_virtual_step = eLazyBoolYes; } return !step_without_resume; } return true; } -bool ThreadPlanStepInRange::IsVirtualStep() { return m_virtual_step; } +bool ThreadPlanStepInRange::IsVirtualStep() { + if (m_virtual_step == eLazyBoolCalculate) { + Thread &thread = GetThread(); + uint32_t cur_inline_depth = thread.GetCurrentInlinedDepth(); + if (cur_inline_depth == UINT32_MAX || cur_inline_depth == 0) + m_virtual_step = eLazyBoolNo; + else + m_virtual_step = eLazyBoolYes; + } + return m_virtual_step == eLazyBoolYes; +} diff --git a/lldb/source/Target/ThreadPlanStepOverRange.cpp b/lldb/source/Target/ThreadPlanStepOverRange.cpp index 4ea8d69035306d..cdc0fddd78d83b 100644 --- a/lldb/source/Target/ThreadPlanStepOverRange.cpp +++ b/lldb/source/Target/ThreadPlanStepOverRange.cpp @@ -355,7 +355,7 @@ bool ThreadPlanStepOverRange::DoWillResume(lldb::StateType resume_state, if (in_inlined_stack) { Log *log = GetLog(LLDBLog::Step); LLDB_LOGF(log, - "ThreadPlanStepInRange::DoWillResume: adjusting range to " + "ThreadPlanStepOverRange::DoWillResume: adjusting range to " "the frame at inlined depth %d.", thread.GetCurrentInlinedDepth()); StackFrameSP stack_sp = thread.GetStackFrameAtIndex(0); diff --git a/lldb/source/Target/ThreadPlanStepRange.cpp b/lldb/source/Target/ThreadPlanStepRange.cpp index 5f5c11dacd00aa..eaf518c94d081f 100644 --- a/lldb/source/Target/ThreadPlanStepRange.cpp +++ b/lldb/source/Target/ThreadPlanStepRange.cpp @@ -360,10 +360,10 @@ bool ThreadPlanStepRange::SetNextBranchBreakpoint() { !m_next_branch_bp_sp->HasResolvedLocations()) m_could_not_resolve_hw_bp = true; + BreakpointLocationSP bp_loc = + m_next_branch_bp_sp->GetLocationAtIndex(0); if (log) { lldb::break_id_t bp_site_id = LLDB_INVALID_BREAK_ID; - BreakpointLocationSP bp_loc = - m_next_branch_bp_sp->GetLocationAtIndex(0); if (bp_loc) { BreakpointSiteSP bp_site = bp_loc->GetBreakpointSite(); if (bp_site) { @@ -376,7 +376,51 @@ bool ThreadPlanStepRange::SetNextBranchBreakpoint() 
{ m_next_branch_bp_sp->GetID(), bp_site_id, run_to_address.GetLoadAddress(&m_process.GetTarget())); } - + // The "next branch breakpoint might land on a virtual inlined call + // stack. If that's true, we should always stop at the top of the + // inlined call stack. Only virtual steps should walk deeper into the + // inlined call stack. + Block *block = run_to_address.CalculateSymbolContextBlock(); + if (bp_loc && block) { + LineEntry top_most_line_entry; + lldb::addr_t run_to_addr = run_to_address.GetFileAddress(); + for (Block *inlined_parent = block->GetContainingInlinedBlock(); + inlined_parent; + inlined_parent = inlined_parent->GetInlinedParent()) { + AddressRange range; + if (!inlined_parent->GetRangeContainingAddress(run_to_address, + range)) + break; + Address range_start_address = range.GetBaseAddress(); + // Only compare addresses here, we may have different symbol + // contexts (for virtual inlined stacks), but we just want to know + // that they are all at the same address. 
+ if (range_start_address.GetFileAddress() != run_to_addr) + break; + const InlineFunctionInfo *inline_info = + inlined_parent->GetInlinedFunctionInfo(); + if (!inline_info) + break; + const Declaration &call_site = inline_info->GetCallSite(); + top_most_line_entry.line = call_site.GetLine(); + top_most_line_entry.column = call_site.GetColumn(); + FileSpec call_site_file_spec = call_site.GetFile(); + top_most_line_entry.original_file_sp.reset( + new SupportFile(call_site_file_spec)); + top_most_line_entry.range = range; + top_most_line_entry.file_sp.reset(); + top_most_line_entry.ApplyFileMappings( + GetThread().CalculateTarget()); + if (!top_most_line_entry.file_sp) + top_most_line_entry.file_sp = + top_most_line_entry.original_file_sp; + } + if (top_most_line_entry.IsValid()) { + LLDB_LOG(log, "Setting preferred line entry: {0}:{1}", + top_most_line_entry.GetFile(), top_most_line_entry.line); + bp_loc->SetPreferredLineEntry(top_most_line_entry); + } + } m_next_branch_bp_sp->SetThreadID(m_tid); m_next_branch_bp_sp->SetBreakpointKind("next-branch-location"); diff --git a/lldb/test/API/functionalities/breakpoint/same_cu_name/Makefile b/lldb/test/API/functionalities/breakpoint/same_cu_name/Makefile new file mode 100644 index 00000000000000..4bfdb15e777d99 --- /dev/null +++ b/lldb/test/API/functionalities/breakpoint/same_cu_name/Makefile @@ -0,0 +1,19 @@ +CXX_SOURCES := main.cpp +LD_EXTRAS := ns1.o ns2.o ns3.o ns4.o + +a.out: main.o ns1.o ns2.o ns3.o ns4.o + +ns1.o: common.cpp + $(CC) -g -c -DNAMESPACE=ns1 -o $@ $< + +ns2.o: common.cpp + $(CC) -g -c -DNAMESPACE=ns2 -o $@ $< + +ns3.o: common.cpp + $(CC) -g -c -DNAMESPACE=ns3 -o $@ $< + +ns4.o: common.cpp + $(CC) -g -c -DNAMESPACE=ns4 -o $@ $< + + +include Makefile.rules diff --git a/lldb/test/API/functionalities/breakpoint/same_cu_name/TestFileBreakpoinsSameCUName.py b/lldb/test/API/functionalities/breakpoint/same_cu_name/TestFileBreakpoinsSameCUName.py new file mode 100644 index 00000000000000..dc10d407d72302 --- 
/dev/null +++ b/lldb/test/API/functionalities/breakpoint/same_cu_name/TestFileBreakpoinsSameCUName.py @@ -0,0 +1,32 @@ +""" +Test setting a breakpoint by file and line when many instances of the +same file name exist in the CU list. +""" + + +import lldb +from lldbsuite.test.decorators import * +from lldbsuite.test.lldbtest import * +from lldbsuite.test import lldbutil + + +class TestBreakpointSameCU(TestBase): + def test_breakpoint_same_cu(self): + self.build() + target = self.createTestTarget() + + # Break both on the line before the code: + comment_line = line_number("common.cpp", "// A comment here") + self.assertNotEqual(comment_line, 0, "line_number worked") + bkpt = target.BreakpointCreateByLocation("common.cpp", comment_line) + self.assertEqual( + bkpt.GetNumLocations(), 4, "Got the right number of breakpoints" + ) + + # And break on the code, both should work: + code_line = line_number("common.cpp", "// The line with code") + self.assertNotEqual(code_line, 0, "line_number worked again") + bkpt = target.BreakpointCreateByLocation("common.cpp", code_line) + self.assertEqual( + bkpt.GetNumLocations(), 4, "Got the right number of breakpoints" + ) diff --git a/lldb/test/API/functionalities/breakpoint/same_cu_name/common.cpp b/lldb/test/API/functionalities/breakpoint/same_cu_name/common.cpp new file mode 100644 index 00000000000000..ed9a43f27b173a --- /dev/null +++ b/lldb/test/API/functionalities/breakpoint/same_cu_name/common.cpp @@ -0,0 +1,8 @@ +namespace NAMESPACE { +static int g_value = 0; +void DoSomeStuff() { + // A comment here + g_value++; // The line with code +} + +} // namespace NAMESPACE diff --git a/lldb/test/API/functionalities/breakpoint/same_cu_name/main.cpp b/lldb/test/API/functionalities/breakpoint/same_cu_name/main.cpp new file mode 100644 index 00000000000000..43d9e3271ece2a --- /dev/null +++ b/lldb/test/API/functionalities/breakpoint/same_cu_name/main.cpp @@ -0,0 +1,24 @@ +namespace ns1 { +extern void DoSomeStuff(); +} + +namespace ns2 {
+extern void DoSomeStuff(); +} + +namespace ns3 { +extern void DoSomeStuff(); +} + +namespace ns4 { +extern void DoSomeStuff(); +} + +int main(int argc, char *argv[]) { + ns1::DoSomeStuff(); + ns2::DoSomeStuff(); + ns3::DoSomeStuff(); + ns4::DoSomeStuff(); + + return 0; +} diff --git a/lldb/test/API/functionalities/gdb_remote_client/TestGDBRemoteClient.py b/lldb/test/API/functionalities/gdb_remote_client/TestGDBRemoteClient.py index 5eb3fc3cada921..08ac9290ee85ac 100644 --- a/lldb/test/API/functionalities/gdb_remote_client/TestGDBRemoteClient.py +++ b/lldb/test/API/functionalities/gdb_remote_client/TestGDBRemoteClient.py @@ -132,12 +132,39 @@ def test_read_registers_using_g_packets(self): target = self.createTarget("a.yaml") process = self.connect(target) - self.assertEqual(1, self.server.responder.packetLog.count("g")) - self.server.responder.packetLog = [] + # We want to make sure that the process is using the g packet, but it's + # not required the "connect" should read all registers. However, it might + # have... So we need to wait till we explicitly 'read_registers' to do + # test. + # Also, even with the use-g-packet-for-reading lldb will sometimes send p0 + # early on to see if the packet is supported. So we can't say that there + # will be NO p packets. + # But there certainly should be no p packets after the g packet. + self.read_registers(process) - # Reading registers should not cause any 'p' packets to be exchanged. 
+ print(f"\nPACKET LOG:\n{self.server.responder.packetLog}\n") + g_pos = 0 + try: + g_pos = self.server.responder.packetLog.index("g") + except ValueError: + self.fail("'g' packet not found after fetching registers") + + try: + second_g = self.server.responder.packetLog.index("g", g_pos + 1) + self.fail("Found more than one 'g' packet") + except ValueError: + pass + + # Make sure there aren't any `p` packets after the `g` packet: self.assertEqual( - 0, len([p for p in self.server.responder.packetLog if p.startswith("p")]) + 0, + len( + [ + p + for p in self.server.responder.packetLog[g_pos:] + if p.startswith("p") + ] + ), ) def test_read_registers_using_p_packets(self): diff --git a/lldb/test/API/functionalities/inline-stepping/TestInlineStepping.py b/lldb/test/API/functionalities/inline-stepping/TestInlineStepping.py index 752c3a9cbd286a..1501ad36519e2d 100644 --- a/lldb/test/API/functionalities/inline-stepping/TestInlineStepping.py +++ b/lldb/test/API/functionalities/inline-stepping/TestInlineStepping.py @@ -14,6 +14,7 @@ class TestInlineStepping(TestBase): compiler="icc", bugnumber="# Not really a bug. ICC combines two inlined functions.", ) + @skipIf(oslist=["linux"], archs=["arm"]) # Fails for 32 bit arm def test_with_python_api(self): """Test stepping over and into inlined functions.""" self.build() @@ -32,6 +33,12 @@ def test_step_in_template_with_python_api(self): self.build() self.step_in_template() + @add_test_categories(["pyapi"]) + def test_virtual_inline_stepping(self): + """Test stepping through a virtual inlined call stack""" + self.build() + self.virtual_inline_stepping() + def setUp(self): # Call super's setUp().
TestBase.setUp(self) @@ -285,7 +292,7 @@ def inline_stepping_step_over(self): break_1_in_main = target.BreakpointCreateBySourceRegex( "// At second call of caller_ref_1 in main.", self.main_source_spec ) - self.assertTrue(break_1_in_main, VALID_BREAKPOINT) + self.assertGreater(break_1_in_main.GetNumLocations(), 0, VALID_BREAKPOINT) # Now launch the process, and do not stop at entry point. self.process = target.LaunchSimple( @@ -311,6 +318,24 @@ def inline_stepping_step_over(self): ] self.run_step_sequence(step_sequence) + # Now make sure that next to a virtual inlined call stack + # gets the call stack depth correct. + break_2_in_main = target.BreakpointCreateBySourceRegex( + "// Call max_value specialized", self.main_source_spec + ) + self.assertGreater(break_2_in_main.GetNumLocations(), 0, VALID_BREAKPOINT) + threads = lldbutil.continue_to_breakpoint(self.process, break_2_in_main) + self.assertEqual(len(threads), 1, "Hit our second breakpoint") + self.assertEqual(threads[0].id, self.thread.id, "Stopped at right thread") + self.thread.StepOver() + frame_0 = self.thread.frames[0] + line_entry = frame_0.line_entry + self.assertEqual( + line_entry.file.basename, self.main_source_spec.basename, "File matches" + ) + target_line = line_number("calling.cpp", "// At caller_trivial_inline_1") + self.assertEqual(line_entry.line, target_line, "Lines match as well.") + def step_in_template(self): """Use Python APIs to test stepping in to templated functions.""" exe = self.getBuildArtifact("a.out") @@ -357,3 +382,76 @@ def step_in_template(self): step_sequence = [["// In max_value specialized", "into"]] self.run_step_sequence(step_sequence) + + def run_to_call_site_and_step( + self, source_regex, func_name, start_pos, one_more_step_loc=None + ): + main_spec = lldb.SBFileSpec("calling.cpp") + # Set the breakpoint by file and line, not sourced regex because + # we want to make sure we can set breakpoints on call sites: + call_site_line_num = line_number(self.main_source, 
source_regex) + target, process, thread, bkpt = lldbutil.run_to_line_breakpoint( + self, main_spec, call_site_line_num + ) + + # Make sure that the location is at the call site (run_to_line_breakpoint already asserted + # that there's one location.): + bkpt_loc = bkpt.location[0] + strm = lldb.SBStream() + result = bkpt_loc.GetDescription(strm, lldb.eDescriptionLevelFull) + + self.assertTrue(result, "Got a location description") + desc = strm.GetData() + self.assertIn(f"calling.cpp:{call_site_line_num}", desc, "Right line listed") + # We don't get the function name right yet - so we omit it in printing. + # Turn on this test when that is working. + # self.assertIn(func_name, desc, "Right function listed") + + pc = thread.frame[0].pc + for i in range(start_pos, 3): + thread.StepInto() + frame_0 = thread.frame[0] + + trivial_line_num = line_number( + self.main_source, f"In caller_trivial_inline_{i}." + ) + self.assertEqual( + frame_0.line_entry.line, + trivial_line_num, + f"Stepped into the caller_trivial_inline_{i}", + ) + if pc != frame_0.pc: + # If we get here, we stepped to the expected line number, but + # the compiler on this system has decided to insert an instruction + # between the call site of an inlined function with no arguments, + # returning void, and its immediate call to another void inlined function + # with no arguments. We aren't going to be testing virtual inline + # stepping for this function... 
+ break + + if one_more_step_loc: + thread.StepInto() + frame_0 = thread.frame[0] + self.assertEqual( + frame_0.line_entry.line, + line_number(self.main_source, one_more_step_loc), + "Was able to step one more time", + ) + process.Kill() + target.Clear() + + def virtual_inline_stepping(self): + """Use the Python API's to step through a virtual inlined stack""" + self.run_to_call_site_and_step("At caller_trivial_inline_1", "main", 1) + self.run_to_call_site_and_step( + "In caller_trivial_inline_1", "caller_trivial_inline_1", 2 + ) + self.run_to_call_site_and_step( + "In caller_trivial_inline_2", "caller_trivial_inline_2", 3 + ) + self.run_to_call_site_and_step( + "In caller_trivial_inline_3", + "caller_trivial_inline_3", + 4, + "After caller_trivial_inline_3", + ) diff --git a/lldb/test/API/functionalities/inline-stepping/calling.cpp b/lldb/test/API/functionalities/inline-stepping/calling.cpp index 49179ce7c97883..ba71c25a3c648f 100644 --- a/lldb/test/API/functionalities/inline-stepping/calling.cpp +++ b/lldb/test/API/functionalities/inline-stepping/calling.cpp @@ -13,6 +13,12 @@ int called_by_inline_ref (int &value); inline void inline_trivial_1 () __attribute__((always_inline)); inline void inline_trivial_2 () __attribute__((always_inline)); +// These three should share the same initial pc so we can test +// virtual inline stepping. +inline void caller_trivial_inline_1() __attribute__((always_inline)); +inline void caller_trivial_inline_2() __attribute__((always_inline)); +inline void caller_trivial_inline_3() __attribute__((always_inline)); + void caller_trivial_1 (); void caller_trivial_2 (); @@ -79,6 +85,23 @@ caller_trivial_2 () inline_value += 1; // At increment in caller_trivial_2. } +// When you call caller_trivial_inline_1, the inlined call-site +// should share a PC with all three of the following inlined +// functions, so we can exercise "virtual inline stepping". 
+void caller_trivial_inline_1() { + caller_trivial_inline_2(); // In caller_trivial_inline_1. + inline_value += 1; +} + +void caller_trivial_inline_2() { + caller_trivial_inline_3(); // In caller_trivial_inline_2. + inline_value += 1; // After caller_trivial_inline_3 +} + +void caller_trivial_inline_3() { + inline_value += 1; // In caller_trivial_inline_3. +} + void called_by_inline_trivial () { @@ -132,5 +155,7 @@ main (int argc, char **argv) max_value(123, 456); // Call max_value template max_value(std::string("abc"), std::string("0022")); // Call max_value specialized + caller_trivial_inline_1(); // At caller_trivial_inline_1. + return 0; // About to return from main. } diff --git a/lldb/test/API/lang/swift/macro/Macro.swift b/lldb/test/API/lang/swift/macro/Macro.swift index 92190f7e38281b..6b0de79ec9426e 100644 --- a/lldb/test/API/lang/swift/macro/Macro.swift +++ b/lldb/test/API/lang/swift/macro/Macro.swift @@ -1 +1,3 @@ @freestanding(expression) public macro stringify(_ value: T) -> (T, String) = #externalMacro(module: "MacroImpl", type: "StringifyMacro") + +@freestanding(expression) public macro no_return(_ value: T) = #externalMacro(module: "MacroImpl", type: "NoReturnMacro") diff --git a/lldb/test/API/lang/swift/macro/MacroImpl.swift b/lldb/test/API/lang/swift/macro/MacroImpl.swift index 20cd14c6aad145..dc864407f87c1e 100644 --- a/lldb/test/API/lang/swift/macro/MacroImpl.swift +++ b/lldb/test/API/lang/swift/macro/MacroImpl.swift @@ -14,3 +14,16 @@ public struct StringifyMacro: ExpressionMacro { return "(\(argument), \(StringLiteralExprSyntax(content: argument.description)))" } } + +public struct NoReturnMacro: ExpressionMacro { + public static func expansion( + of macro: some FreestandingMacroExpansionSyntax, + in context: some MacroExpansionContext + ) -> ExprSyntax { + guard let argument = macro.argumentList.first?.expression else { + fatalError("boom") + } + + return "print(\(argument), \(StringLiteralExprSyntax(content: argument.description)))" + } +} 
diff --git a/lldb/test/API/lang/swift/macro/TestSwiftMacro.py b/lldb/test/API/lang/swift/macro/TestSwiftMacro.py index df8089b7efad68..d0f87141aa380d 100644 --- a/lldb/test/API/lang/swift/macro/TestSwiftMacro.py +++ b/lldb/test/API/lang/swift/macro/TestSwiftMacro.py @@ -34,19 +34,58 @@ def testDebugging(self): """Test Swift macros""" self.build(dictionary={'SWIFT_SOURCES': 'main.swift'}) self.setupPluginServerForTesting() + main_spec = lldb.SBFileSpec('main.swift') target, process, thread, bkpt = lldbutil.run_to_source_breakpoint( - self, 'break here', lldb.SBFileSpec('main.swift')) + self, 'break here', main_spec + ) + + # We're testing line breakpoint setting here: + call_site_line = lldbtest.line_number("main.swift", "#no_return(a / b)") + call_site_bp = target.BreakpointCreateByLocation(main_spec, call_site_line) thread.StepOver() thread.StepInto() + # This is the expanded macro source, we should be able to step into it. - self.expect('reg read pc', substrs=[ - '[inlined] freestanding macro expansion #1 of stringify in module a file main.swift line 5 column 11', - 'stringify' - ]) + # Don't check the actual line number so we are line independent + self.assertIn( + 'freestanding macro expansion #1 of stringify in module a file main.swift line 5 column 11', + thread.frames[0].name, + "Stopped in stringify macro" + ) self.expect('expression -- #stringify(1)', substrs=['0 = 1', '1 = "1"']) + # Step out should get us out of stringify, then in to the next macro: + thread.StepOut() + self.assertIn("a.testStringify", thread.frames[0].name, "Step out back to origin") + thread.StepInto() + self.assertIn( + "freestanding macro expansion #1 of no_return in module a file main.swift line 6 column 3", + thread.frames[0].name, + "Step out and in gets to no_return" + ) + + # We've set a breakpoint on the call site for another instance - run to that: + threads = lldbutil.continue_to_breakpoint(process, call_site_bp) + self.assertEqual(len(threads), 1, "Stopped at one thread") + 
thread = threads[0] + frame_0 = thread.frames[0] + line_entry_0 = frame_0.line_entry + self.assertEqual(line_entry_0.line, call_site_line, "Got the right line attribution") + self.assertEqual(line_entry_0.file, main_spec, "Got the right file attribution") + + # Now test stepping in and back out again: + thread.StepInto() + self.assertIn( + "freestanding macro expansion #3 of no_return in module a file main.swift line 8 column 3", + thread.frames[0].name, + "Step out and in gets to no_return" + ) + + thread.StepOut() + self.assertIn("a.testStringify", thread.frames[0].name, "Step out from no_return") + # Make sure we can set a symbolic breakpoint on a macro. b = target.BreakpointCreateByName("stringify") self.assertGreaterEqual(b.GetNumLocations(), 1) diff --git a/lldb/test/API/lang/swift/macro/main.swift b/lldb/test/API/lang/swift/macro/main.swift index f94cd87c4401b2..56643acfd9944a 100644 --- a/lldb/test/API/lang/swift/macro/main.swift +++ b/lldb/test/API/lang/swift/macro/main.swift @@ -1,9 +1,12 @@ import Macro - +// This test depends on the layout of the lines in this file. func testStringify(a: Int, b: Int) { print("break here") let s = #stringify(a / b) + #no_return(a) + #no_return(b) + #no_return(a / b) print(s.1) } -testStringify(a: 23, b: 0) +testStringify(a: 23, b: 1) From c5d7cb65a5a9ee21e347ff8b7e4fc2ee2f560ffb Mon Sep 17 00:00:00 2001 From: jimingham Date: Mon, 18 Nov 2024 13:23:17 -0800 Subject: [PATCH 3/4] Convert ThreadPlanStack's mutex to a shared mutex. (#116438) I have some reports of A/B inversion deadlocks between the ThreadPlanStack and the StackFrameList accesses. There's a fair bit of reasonable code in lldb that does "While accessing the ThreadPlanStack, look at that threads's StackFrameList", and also plenty of "While accessing the ThreadPlanStack, look at the StackFrameList." In all the cases I've seen so far, there was at most one of the locks taken that were trying to mutate the list, the other three were just reading. 
So we could solve the deadlock by converting the two mutexes over to shared
mutexes. This patch is the easy part, the ThreadPlanStack mutex.

The tricky part was that these were originally recursive mutexes, and since
recursive access to shared mutexes is undefined behavior according to the
C++ standard, I had to add a couple NoLock variants to make sure it didn't
get used recursively. Then since the only remaining calls are out to
ThreadPlans and ThreadPlans don't have access to their containing
ThreadPlanStack, converting this to a non-recursive lock should be safe.

(cherry picked from commit b42a81631491571c4b78d095917ebdddee69b04f)

From aeb27aee7391c2a932b98a44e66489f769516020 Mon Sep 17 00:00:00 2001
From: jimingham
Date: Thu, 12 Dec 2024 12:48:41 -0800
Subject: [PATCH 4/4] Convert the StackFrameList mutex to a shared mutex.
 (#117252)

In fact, there's only one public API in StackFrameList that changes the
list explicitly. The rest only change the list if you happen to ask for
more frames than lldb has currently fetched and that always adds frames
"behind the user's back". So we were much more prone to deadlocking than
we needed to be. This patch uses a shared_mutex instead, and when we have
to add more frames (in GetFramesUpTo) we switch to exclusive long enough
to add the frames, then go back to shared.

Most of the work here was actually getting the stack frame list locking
to not require a recursive mutex (shared mutexes aren't recursive). I
also added a test that has 5 threads progressively asking for more
frames simultaneously to make sure we get back valid frames and don't
deadlock.
(cherry picked from commit 186fac33d08b34be494caa58fe63972f69c6d6ab) --- lldb/include/lldb/Target/StackFrameList.h | 66 +++-- lldb/source/Target/StackFrameList.cpp | 248 ++++++++++-------- lldb/source/Target/Thread.cpp | 2 +- .../api/multithreaded/TestMultithreaded.py | 17 +- .../test/API/api/multithreaded/deep_stack.cpp | 17 ++ .../test_concurrent_unwind.cpp.template | 91 +++++++ 6 files changed, 308 insertions(+), 133 deletions(-) create mode 100644 lldb/test/API/api/multithreaded/deep_stack.cpp create mode 100644 lldb/test/API/api/multithreaded/test_concurrent_unwind.cpp.template diff --git a/lldb/include/lldb/Target/StackFrameList.h b/lldb/include/lldb/Target/StackFrameList.h index 7d0e7a5b9a71b2..8a66296346f2d8 100644 --- a/lldb/include/lldb/Target/StackFrameList.h +++ b/lldb/include/lldb/Target/StackFrameList.h @@ -11,6 +11,7 @@ #include #include +#include #include #include "lldb/Target/StackFrame.h" @@ -94,24 +95,36 @@ class StackFrameList { bool show_unique = false, bool show_hidden = false, const char *frame_marker = nullptr); + /// Returns whether we have currently fetched all the frames of a stack. + bool WereAllFramesFetched() const; + protected: friend class Thread; friend class ScriptedThread; + /// Use this API to build a stack frame list (used for scripted threads, for + /// instance.) This API is not meant for StackFrameLists that have unwinders + /// and partake in lazy stack filling (using GetFramesUpTo). Rather if you + /// are building StackFrameLists with this API, you should build the entire + /// list before making it available for use. bool SetFrameAtIndex(uint32_t idx, lldb::StackFrameSP &frame_sp); - /// Realizes frames up to (and including) end_idx (which can be greater than - /// the actual number of frames.) + /// Ensures that frames up to (and including) `end_idx` are realized in the + /// StackFrameList. `end_idx` can be larger than the actual number of frames, + /// in which case all the frames will be fetched. 
Acquires the writer end of + /// the list mutex. /// Returns true if the function was interrupted, false otherwise. - bool GetFramesUpTo(uint32_t end_idx, - InterruptionControl allow_interrupt = AllowInterruption); - - void GetOnlyConcreteFramesUpTo(uint32_t end_idx, Unwind &unwinder); - - void SynthesizeTailCallFrames(StackFrame &next_frame); - - bool GetAllFramesFetched() { return m_concrete_frames_fetched == UINT32_MAX; } + /// Callers should first check (under the shared mutex) whether we need to + /// fetch frames or not. + bool GetFramesUpTo(uint32_t end_idx, InterruptionControl allow_interrupt); + + // This should be called with either the reader or writer end of the list + // mutex held: + bool GetAllFramesFetched() const { + return m_concrete_frames_fetched == UINT32_MAX; + } + // This should be called with the writer end of the list mutex held. void SetAllFramesFetched() { m_concrete_frames_fetched = UINT32_MAX; } bool DecrementCurrentInlinedDepth(); @@ -122,6 +135,9 @@ class StackFrameList { void SetCurrentInlinedDepth(uint32_t new_depth); + /// Calls into the stack frame recognizers and stop info to set the most + /// relevant frame. This can call out to arbitrary user code so it can't + /// hold the StackFrameList mutex. void SelectMostRelevantFrame(); typedef std::vector collection; @@ -138,11 +154,16 @@ class StackFrameList { // source of information. lldb::StackFrameListSP m_prev_frames_sp; - /// A mutex for this frame list. - // TODO: This mutex may not always be held when required. In particular, uses - // of the StackFrameList APIs in lldb_private::Thread look suspect. Consider - // passing around a lock_guard reference to enforce proper locking. - mutable std::recursive_mutex m_mutex; + /// A mutex for this frame list. The only public API that requires the + /// unique lock is Clear. All other clients take the shared lock, though + /// if we need more frames we may swap shared for unique to fulfill that + /// requirement. 
+ mutable std::shared_mutex m_list_mutex; + + // Setting the inlined depth should be protected against other attempts to + // change it, but since it doesn't mutate the list itself, we can limit the + // critical regions it produces by having a separate mutex. + mutable std::mutex m_inlined_depth_mutex; /// A cache of frames. This may need to be updated when the program counter /// changes. @@ -171,6 +192,21 @@ class StackFrameList { const bool m_show_inlined_frames; private: + uint32_t SetSelectedFrameNoLock(lldb_private::StackFrame *frame); + lldb::StackFrameSP + GetFrameAtIndexNoLock(uint32_t idx, + std::shared_lock &guard); + + /// These two Fetch frames APIs and SynthesizeTailCallFrames are called in + /// GetFramesUpTo, they are the ones that actually add frames. They must be + /// called with the writer end of the list mutex held. + + /// Returns true if fetching frames was interrupted, false otherwise. + bool FetchFramesUpTo(uint32_t end_idx, InterruptionControl allow_interrupt); + /// Not currently interruptible so returns void. 
+ void FetchOnlyConcreteFramesUpTo(uint32_t end_idx); + void SynthesizeTailCallFrames(StackFrame &next_frame); + StackFrameList(const StackFrameList &) = delete; const StackFrameList &operator=(const StackFrameList &) = delete; }; diff --git a/lldb/source/Target/StackFrameList.cpp b/lldb/source/Target/StackFrameList.cpp index 8ab179a45db324..4af34a61355da9 100644 --- a/lldb/source/Target/StackFrameList.cpp +++ b/lldb/source/Target/StackFrameList.cpp @@ -38,7 +38,7 @@ using namespace lldb_private; StackFrameList::StackFrameList(Thread &thread, const lldb::StackFrameListSP &prev_frames_sp, bool show_inline_frames) - : m_thread(thread), m_prev_frames_sp(prev_frames_sp), m_mutex(), m_frames(), + : m_thread(thread), m_prev_frames_sp(prev_frames_sp), m_frames(), m_selected_frame_idx(), m_concrete_frames_fetched(0), m_current_inlined_depth(UINT32_MAX), m_current_inlined_pc(LLDB_INVALID_ADDRESS), @@ -63,6 +63,7 @@ void StackFrameList::CalculateCurrentInlinedDepth() { } uint32_t StackFrameList::GetCurrentInlinedDepth() { + std::lock_guard guard(m_inlined_depth_mutex); if (m_show_inlined_frames && m_current_inlined_pc != LLDB_INVALID_ADDRESS) { lldb::addr_t cur_pc = m_thread.GetRegisterContext()->GetPC(); if (cur_pc != m_current_inlined_pc) { @@ -84,11 +85,6 @@ void StackFrameList::ResetCurrentInlinedDepth() { if (!m_show_inlined_frames) return; - std::lock_guard guard(m_mutex); - - m_current_inlined_pc = LLDB_INVALID_ADDRESS; - m_current_inlined_depth = UINT32_MAX; - StopInfoSP stop_info_sp = m_thread.GetStopInfo(); if (!stop_info_sp) return; @@ -98,6 +94,7 @@ void StackFrameList::ResetCurrentInlinedDepth() { // We're only adjusting the inlined stack here. 
Log *log = GetLog(LLDBLog::Step); if (inline_depth) { + std::lock_guard guard(m_inlined_depth_mutex); m_current_inlined_depth = *inline_depth; m_current_inlined_pc = m_thread.GetRegisterContext()->GetPC(); @@ -107,6 +104,9 @@ void StackFrameList::ResetCurrentInlinedDepth() { "depth: %d 0x%" PRIx64 ".\n", m_current_inlined_depth, m_current_inlined_pc); } else { + std::lock_guard guard(m_inlined_depth_mutex); + m_current_inlined_pc = LLDB_INVALID_ADDRESS; + m_current_inlined_depth = UINT32_MAX; if (log && log->GetVerbose()) LLDB_LOGF( log, @@ -119,6 +119,7 @@ bool StackFrameList::DecrementCurrentInlinedDepth() { uint32_t current_inlined_depth = GetCurrentInlinedDepth(); if (current_inlined_depth != UINT32_MAX) { if (current_inlined_depth > 0) { + std::lock_guard guard(m_inlined_depth_mutex); m_current_inlined_depth--; return true; } @@ -128,6 +129,7 @@ bool StackFrameList::DecrementCurrentInlinedDepth() { } void StackFrameList::SetCurrentInlinedDepth(uint32_t new_depth) { + std::lock_guard guard(m_inlined_depth_mutex); m_current_inlined_depth = new_depth; if (new_depth == UINT32_MAX) m_current_inlined_pc = LLDB_INVALID_ADDRESS; @@ -135,23 +137,9 @@ void StackFrameList::SetCurrentInlinedDepth(uint32_t new_depth) { m_current_inlined_pc = m_thread.GetRegisterContext()->GetPC(); } -void StackFrameList::GetOnlyConcreteFramesUpTo(uint32_t end_idx, - Unwind &unwinder) { - assert(m_thread.IsValid() && "Expected valid thread"); - assert(m_frames.size() <= end_idx && "Expected there to be frames to fill"); - - if (end_idx < m_concrete_frames_fetched) - return; - - uint32_t num_frames = unwinder.GetFramesUpTo(end_idx); - if (num_frames <= end_idx + 1) { - // Done unwinding. - m_concrete_frames_fetched = UINT32_MAX; - } - - // Don't create the frames eagerly. Defer this work to GetFrameAtIndex, - // which can lazily query the unwinder to create frames. 
- m_frames.resize(num_frames); +bool StackFrameList::WereAllFramesFetched() const { + std::shared_lock guard(m_list_mutex); + return GetAllFramesFetched(); } /// A sequence of calls that comprise some portion of a backtrace. Each frame @@ -167,6 +155,8 @@ using CallSequence = std::vector; /// Find the unique path through the call graph from \p begin (with return PC /// \p return_pc) to \p end. On success this path is stored into \p path, and /// on failure \p path is unchanged. +/// This function doesn't currently access StackFrameLists at all, it only looks +/// at the frame set in the ExecutionContext it passes around. static void FindInterveningFrames(Function &begin, Function &end, ExecutionContext &exe_ctx, Target &target, addr_t return_pc, CallSequence &path, @@ -350,23 +340,65 @@ void StackFrameList::SynthesizeTailCallFrames(StackFrame &next_frame) { bool StackFrameList::GetFramesUpTo(uint32_t end_idx, InterruptionControl allow_interrupt) { + // GetFramesUpTo is always called with the intent to add frames, so get the + // writer lock: + std::unique_lock guard(m_list_mutex); + // Now that we have the lock, check to make sure someone didn't get there + // ahead of us: + if (m_frames.size() > end_idx || GetAllFramesFetched()) + return false; + // Do not fetch frames for an invalid thread. bool was_interrupted = false; if (!m_thread.IsValid()) return false; - // We've already gotten more frames than asked for, or we've already finished - // unwinding, return. 
- if (m_frames.size() > end_idx || GetAllFramesFetched()) + // lock the writer side of m_list_mutex as we're going to add frames here: + if (!m_show_inlined_frames) { + if (end_idx < m_concrete_frames_fetched) + return false; + // We're adding concrete frames now: + // FIXME: This should also be interruptible: + FetchOnlyConcreteFramesUpTo(end_idx); return false; + } + + // We're adding concrete and inlined frames now: + was_interrupted = FetchFramesUpTo(end_idx, allow_interrupt); + +#if defined(DEBUG_STACK_FRAMES) + s.PutCString("\n\nNew frames:\n"); + Dump(&s); + s.EOL(); +#endif + return was_interrupted; +} + +void StackFrameList::FetchOnlyConcreteFramesUpTo(uint32_t end_idx) { + assert(m_thread.IsValid() && "Expected valid thread"); + assert(m_frames.size() <= end_idx && "Expected there to be frames to fill"); Unwind &unwinder = m_thread.GetUnwinder(); - if (!m_show_inlined_frames) { - GetOnlyConcreteFramesUpTo(end_idx, unwinder); - return false; + if (end_idx < m_concrete_frames_fetched) + return; + + uint32_t num_frames = unwinder.GetFramesUpTo(end_idx); + if (num_frames <= end_idx + 1) { + // Done unwinding. + m_concrete_frames_fetched = UINT32_MAX; } + // Don't create the frames eagerly. Defer this work to GetFrameAtIndex, + // which can lazily query the unwinder to create frames. + m_frames.resize(num_frames); +} + +bool StackFrameList::FetchFramesUpTo(uint32_t end_idx, + InterruptionControl allow_interrupt) { + Unwind &unwinder = m_thread.GetUnwinder(); + bool was_interrupted = false; + #if defined(DEBUG_STACK_FRAMES) StreamFile s(stdout, false); #endif @@ -421,11 +453,11 @@ bool StackFrameList::GetFramesUpTo(uint32_t end_idx, } else { // Check for interruption when building the frames. // Do the check in idx > 0 so that we'll always create a 0th frame. 
- if (allow_interrupt - && INTERRUPT_REQUESTED(dbg, "Interrupted having fetched {0} frames", - m_frames.size())) { - was_interrupted = true; - break; + if (allow_interrupt && + INTERRUPT_REQUESTED(dbg, "Interrupted having fetched {0} frames", + m_frames.size())) { + was_interrupted = true; + break; } const bool success = @@ -534,12 +566,6 @@ bool StackFrameList::GetFramesUpTo(uint32_t end_idx, // We are done with the old stack frame list, we can release it now. m_prev_frames_sp.reset(); } - -#if defined(DEBUG_STACK_FRAMES) - s.PutCString("\n\nNew frames:\n"); - Dump(&s); - s.EOL(); -#endif // Don't report interrupted if we happen to have gotten all the frames: if (!GetAllFramesFetched()) return was_interrupted; @@ -547,20 +573,23 @@ bool StackFrameList::GetFramesUpTo(uint32_t end_idx, } uint32_t StackFrameList::GetNumFrames(bool can_create) { - std::lock_guard guard(m_mutex); - - if (can_create) { + if (!WereAllFramesFetched() && can_create) { // Don't allow interrupt or we might not return the correct count - GetFramesUpTo(UINT32_MAX, DoNotAllowInterruption); + GetFramesUpTo(UINT32_MAX, DoNotAllowInterruption); + } + uint32_t frame_idx; + { + std::shared_lock guard(m_list_mutex); + frame_idx = GetVisibleStackFrameIndex(m_frames.size()); } - return GetVisibleStackFrameIndex(m_frames.size()); + return frame_idx; } void StackFrameList::Dump(Stream *s) { if (s == nullptr) return; - std::lock_guard guard(m_mutex); + std::shared_lock guard(m_list_mutex); const_iterator pos, begin = m_frames.begin(), end = m_frames.end(); for (pos = begin; pos != end; ++pos) { @@ -578,72 +607,53 @@ void StackFrameList::Dump(Stream *s) { StackFrameSP StackFrameList::GetFrameAtIndex(uint32_t idx) { StackFrameSP frame_sp; - std::lock_guard guard(m_mutex); uint32_t original_idx = idx; - uint32_t inlined_depth = GetCurrentInlinedDepth(); - if (inlined_depth != UINT32_MAX) - idx += inlined_depth; + // We're going to consult the m_frames.size, but if there are already + // enough frames for our 
request we don't want to block other readers, so + // first acquire the shared lock: + { // Scope for shared lock: + std::shared_lock guard(m_list_mutex); - if (idx < m_frames.size()) - frame_sp = m_frames[idx]; + uint32_t inlined_depth = GetCurrentInlinedDepth(); + if (inlined_depth != UINT32_MAX) + idx += inlined_depth; - if (frame_sp) - return frame_sp; + if (idx < m_frames.size()) + frame_sp = m_frames[idx]; + + if (frame_sp) + return frame_sp; + } // End of reader lock scope // GetFramesUpTo will fill m_frames with as many frames as you asked for, if // there are that many. If there weren't then you asked for too many frames. // GetFramesUpTo returns true if interrupted: - if (GetFramesUpTo(idx)) { + if (GetFramesUpTo(idx, AllowInterruption)) { Log *log = GetLog(LLDBLog::Thread); LLDB_LOG(log, "GetFrameAtIndex was interrupted"); return {}; } - if (idx < m_frames.size()) { - if (m_show_inlined_frames) { - // When inline frames are enabled we actually create all the frames in - // GetFramesUpTo. + { // Now we're accessing m_frames as a reader, so acquire the reader lock. + std::shared_lock guard(m_list_mutex); + if (idx < m_frames.size()) { frame_sp = m_frames[idx]; - } else { - addr_t pc, cfa; - bool behaves_like_zeroth_frame = (idx == 0); - if (m_thread.GetUnwinder().GetFrameInfoAtIndex( - idx, cfa, pc, behaves_like_zeroth_frame)) { - const bool cfa_is_valid = true; - frame_sp = std::make_shared( - m_thread.shared_from_this(), idx, idx, cfa, cfa_is_valid, pc, - StackFrame::Kind::Regular, behaves_like_zeroth_frame, nullptr); - - Function *function = - frame_sp->GetSymbolContext(eSymbolContextFunction).function; - if (function) { - // When we aren't showing inline functions we always use the top - // most function block as the scope. - frame_sp->SetSymbolContextScope(&function->GetBlock(false)); - } else { - // Set the symbol scope from the symbol regardless if it is nullptr - // or valid. 
- frame_sp->SetSymbolContextScope( - frame_sp->GetSymbolContext(eSymbolContextSymbol).symbol); - } - SetFrameAtIndex(idx, frame_sp); + } else if (original_idx == 0) { + // There should ALWAYS be a frame at index 0. If something went wrong + // with the CurrentInlinedDepth such that there weren't as many frames as + // we thought taking that into account, then reset the current inlined + // depth and return the real zeroth frame. + if (m_frames.empty()) { + // Why do we have a thread with zero frames, that should not ever + // happen... + assert(!m_thread.IsValid() && "A valid thread has no frames."); + } else { + ResetCurrentInlinedDepth(); + frame_sp = m_frames[original_idx]; } } - } else if (original_idx == 0) { - // There should ALWAYS be a frame at index 0. If something went wrong with - // the CurrentInlinedDepth such that there weren't as many frames as we - // thought taking that into account, then reset the current inlined depth - // and return the real zeroth frame. - if (m_frames.empty()) { - // Why do we have a thread with zero frames, that should not ever - // happen... - assert(!m_thread.IsValid() && "A valid thread has no frames."); - } else { - ResetCurrentInlinedDepth(); - frame_sp = m_frames[original_idx]; - } - } + } // End of reader lock scope return frame_sp; } @@ -670,19 +680,27 @@ StackFrameSP StackFrameList::GetFrameWithStackID(const StackID &stack_id) { StackFrameSP frame_sp; if (stack_id.IsValid()) { - std::lock_guard guard(m_mutex); uint32_t frame_idx = 0; - // Do a search in case the stack frame is already in our cache. - collection::const_iterator begin = m_frames.begin(); - collection::const_iterator end = m_frames.end(); - if (begin != end) { - collection::const_iterator pos = - std::find_if(begin, end, [&](StackFrameSP frame_sp) { - return frame_sp->GetStackID() == stack_id; - }); - if (pos != end) - return *pos; + { + // First see if the frame is already realized. 
This is the scope for + // the shared mutex: + std::shared_lock guard(m_list_mutex); + // Do a search in case the stack frame is already in our cache. + collection::const_iterator begin = m_frames.begin(); + collection::const_iterator end = m_frames.end(); + if (begin != end) { + collection::const_iterator pos = + std::find_if(begin, end, [&](StackFrameSP frame_sp) { + return frame_sp->GetStackID() == stack_id; + }); + if (pos != end) + return *pos; + } + // We're going to need to add frames starting with the + // last one in the m_frames: + frame_idx = m_frames.size(); } + // If we needed to add more frames, we would get to here. do { frame_sp = GetFrameAtIndex(frame_idx); if (frame_sp && frame_sp->GetStackID() == stack_id) @@ -694,6 +712,7 @@ StackFrameSP StackFrameList::GetFrameWithStackID(const StackID &stack_id) { } bool StackFrameList::SetFrameAtIndex(uint32_t idx, StackFrameSP &frame_sp) { + std::unique_lock guard(m_list_mutex); if (idx >= m_frames.size()) m_frames.resize(idx + 1); // Make sure allocation succeeded by checking bounds again @@ -733,7 +752,7 @@ void StackFrameList::SelectMostRelevantFrame() { } LLDB_LOG(log, "Frame #0 not recognized"); - // If this thread has a non-trivial StopInof, then let it suggest + // If this thread has a non-trivial StopInfo, then let it suggest // a most relevant frame: StopInfoSP stop_info_sp = m_thread.GetStopInfo(); uint32_t stack_idx = 0; @@ -766,9 +785,8 @@ void StackFrameList::SelectMostRelevantFrame() { LLDB_LOG(log, "No relevant frame!"); } -uint32_t StackFrameList::GetSelectedFrameIndex( - SelectMostRelevant select_most_relevant) { - std::lock_guard guard(m_mutex); +uint32_t +StackFrameList::GetSelectedFrameIndex(SelectMostRelevant select_most_relevant) { if (!m_selected_frame_idx && select_most_relevant) SelectMostRelevantFrame(); if (!m_selected_frame_idx) { @@ -783,7 +801,8 @@ uint32_t StackFrameList::GetSelectedFrameIndex( } uint32_t StackFrameList::SetSelectedFrame(lldb_private::StackFrame *frame) { - 
std::lock_guard guard(m_mutex); + std::shared_lock guard(m_list_mutex); + const_iterator pos; const_iterator begin = m_frames.begin(); const_iterator end = m_frames.end(); @@ -798,13 +817,11 @@ uint32_t StackFrameList::SetSelectedFrame(lldb_private::StackFrame *frame) { break; } } - SetDefaultFileAndLineToSelectedFrame(); return *m_selected_frame_idx; } bool StackFrameList::SetSelectedFrameByIndex(uint32_t idx) { - std::lock_guard guard(m_mutex); StackFrameSP frame_sp(GetFrameAtIndex(idx)); if (frame_sp) { SetSelectedFrame(frame_sp.get()); @@ -835,7 +852,7 @@ void StackFrameList::SetDefaultFileAndLineToSelectedFrame() { // does not describe how StackFrameLists are currently used. // Clear is currently only used to clear the list in the destructor. void StackFrameList::Clear() { - std::lock_guard guard(m_mutex); + std::unique_lock guard(m_list_mutex); m_frames.clear(); m_concrete_frames_fetched = 0; m_selected_frame_idx.reset(); @@ -843,6 +860,7 @@ void StackFrameList::Clear() { lldb::StackFrameSP StackFrameList::GetStackFrameSPForStackFramePtr(StackFrame *stack_frame_ptr) { + std::shared_lock guard(m_list_mutex); const_iterator pos; const_iterator begin = m_frames.begin(); const_iterator end = m_frames.end(); diff --git a/lldb/source/Target/Thread.cpp b/lldb/source/Target/Thread.cpp index fffbf1c4f13663..4224d27637657a 100644 --- a/lldb/source/Target/Thread.cpp +++ b/lldb/source/Target/Thread.cpp @@ -1513,7 +1513,7 @@ void Thread::ClearStackFrames() { // frames: // FIXME: At some point we can try to splice in the frames we have fetched // into the new frame as we make it, but let's not try that now. 
- if (m_curr_frames_sp && m_curr_frames_sp->GetAllFramesFetched()) + if (m_curr_frames_sp && m_curr_frames_sp->WereAllFramesFetched()) m_prev_frames_sp.swap(m_curr_frames_sp); m_curr_frames_sp.reset(); diff --git a/lldb/test/API/api/multithreaded/TestMultithreaded.py b/lldb/test/API/api/multithreaded/TestMultithreaded.py index 8067ef38079a10..f8a89c64dcf15b 100644 --- a/lldb/test/API/api/multithreaded/TestMultithreaded.py +++ b/lldb/test/API/api/multithreaded/TestMultithreaded.py @@ -23,6 +23,7 @@ def setUp(self): self.generateSource("test_listener_event_process_state.cpp") self.generateSource("test_listener_resume.cpp") self.generateSource("test_stop-hook.cpp") + self.generateSource("test_concurrent_unwind.cpp") @skipIfRemote # clang-cl does not support throw or catch (llvm.org/pr24538) @@ -92,7 +93,19 @@ def test_sb_api_listener_resume(self): "test_listener_resume", ) - def build_and_test(self, sources, test_name, args=None): + @skipIfRemote + # clang-cl does not support throw or catch (llvm.org/pr24538) + @skipIfWindows + @skipIfHostIncompatibleWithTarget + def test_concurrent_unwind(self): + """Test that you can run a python command in a stop-hook when stdin is File based.""" + self.build_and_test( + "driver.cpp test_concurrent_unwind.cpp", + "test_concurrent_unwind", + inferior_source="deep_stack.cpp", + ) + + def build_and_test(self, sources, test_name, inferior_source="inferior.cpp"): """Build LLDB test from sources, and run expecting 0 exit code""" # These tests link against host lldb API. 
@@ -105,7 +118,7 @@ def build_and_test(self, sources, test_name, args=None): ) self.inferior = "inferior_program" - self.buildProgram("inferior.cpp", self.inferior) + self.buildProgram(inferior_source, self.inferior) self.addTearDownHook(lambda: os.remove(self.getBuildArtifact(self.inferior))) self.buildDriver(sources, test_name) diff --git a/lldb/test/API/api/multithreaded/deep_stack.cpp b/lldb/test/API/api/multithreaded/deep_stack.cpp new file mode 100644 index 00000000000000..da89228766e425 --- /dev/null +++ b/lldb/test/API/api/multithreaded/deep_stack.cpp @@ -0,0 +1,17 @@ +// This is a test program that makes a deep stack +// so we can test unwinding from multiple threads. + +void call_me(int input) { + if (input > 1000) { + input += 1; // Set a breakpoint here + if (input > 1001) + input += 1; + return; + } else + call_me(++input); +} + +int main() { + call_me(0); + return 0; +} diff --git a/lldb/test/API/api/multithreaded/test_concurrent_unwind.cpp.template b/lldb/test/API/api/multithreaded/test_concurrent_unwind.cpp.template new file mode 100644 index 00000000000000..e5101dde79619a --- /dev/null +++ b/lldb/test/API/api/multithreaded/test_concurrent_unwind.cpp.template @@ -0,0 +1,91 @@ +#include "pseudo_barrier.h" + +#include +#include + +%include_SB_APIs% + +#include "common.h" + +using namespace lldb; + +void test (SBDebugger &dbg, std::vector args) { + +SBError error; + dbg.SetAsync(false); + SBTarget target = dbg.CreateTarget(args.at(0).c_str()); + if (!target.IsValid()) + throw Exception("Invalid target"); + + // Now set our breakpoint and launch: + SBFileSpec main_sourcefile("deep_stack.cpp"); + SBBreakpoint bkpt = target.BreakpointCreateBySourceRegex("Set a breakpoint here", + main_sourcefile); + if (bkpt.GetNumLocations() == 0) + throw Exception("Main breakpoint got no locations"); + + SBLaunchInfo launch_info = target.GetLaunchInfo(); + SBProcess process = target.Launch(launch_info, error); + if (error.Fail()) + throw Exception("Failed to launch 
process"); + if (!process.IsValid()) + throw Exception("Process is not valid"); + if (process.GetState() != lldb::eStateStopped) + throw Exception("Process was not stopped"); + + size_t num_threads = process.GetNumThreads(); + if (num_threads != 1) + throw Exception("Unexpected number of threads."); + SBThread cur_thread = process.GetThreadAtIndex(0); + if (!cur_thread.IsValid()) + throw Exception("Didn't get a valid thread"); + + // Record the number of frames at the point where we stopped: + const size_t num_frames = cur_thread.GetNumFrames(); + // Now step once to clear the frame cache: + cur_thread.StepOver(); + + // Create three threads and set them to getting frames simultaneously, + // and make sure we don't deadlock. + pseudo_barrier_t rendevous; + pseudo_barrier_init(rendevous, 5); + std::atomic_size_t success(true); + std::atomic_size_t largest(0); + + auto lambda = [&](size_t stride){ + pseudo_barrier_wait(rendevous); + bool younger = true; + while (1) { + size_t cursor = largest; + if (cursor > stride && !younger) { + cursor -= stride; + younger = true; + } else { + cursor += stride; + largest += stride; + younger = false; + } + SBFrame frame = cur_thread.GetFrameAtIndex(cursor); + if (!frame.IsValid()) { + if (cursor < num_frames) + success = false; + break; + } + } + + }; + + std::thread thread1(lambda, 1); + std::thread thread2(lambda, 3); + std::thread thread3(lambda, 5); + std::thread thread4(lambda, 7); + std::thread thread5(lambda, 11); + thread1.join(); + thread2.join(); + thread3.join(); + thread4.join(); + thread5.join(); + + if (!success) + throw Exception("One thread stopped before 1000"); +}