//===-- MachThreadList.cpp --------------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
//  Created by Greg Clayton on 6/19/07.
//
//===----------------------------------------------------------------------===//

#include "MachThreadList.h"

#include <inttypes.h>
#include <sys/sysctl.h>

#include "DNBLog.h"
#include "DNBThreadResumeActions.h"
#include "MachProcess.h"

MachThreadList::MachThreadList() :
    m_threads(),
    m_threads_mutex(PTHREAD_MUTEX_RECURSIVE),
    m_is_64_bit(false)
{
}

MachThreadList::~MachThreadList()
{
}

nub_state_t
MachThreadList::GetState(nub_thread_t tid)
{
    MachThreadSP thread_sp (GetThreadByID (tid));
    if (thread_sp)
        return thread_sp->GetState();
    return eStateInvalid;
}

const char *
MachThreadList::GetName (nub_thread_t tid)
{
    MachThreadSP thread_sp (GetThreadByID (tid));
    if (thread_sp)
        return thread_sp->GetName();
    return NULL;
}

ThreadInfo::QoS
MachThreadList::GetRequestedQoS (nub_thread_t tid, nub_addr_t tsd, uint64_t dti_qos_class_index)
{
    MachThreadSP thread_sp (GetThreadByID (tid));
    if (thread_sp)
        return thread_sp->GetRequestedQoS(tsd, dti_qos_class_index);
    return ThreadInfo::QoS();
}

nub_addr_t
MachThreadList::GetPThreadT (nub_thread_t tid)
{
    MachThreadSP thread_sp (GetThreadByID (tid));
    if (thread_sp)
        return thread_sp->GetPThreadT();
    return INVALID_NUB_ADDRESS;
}

nub_addr_t
MachThreadList::GetDispatchQueueT (nub_thread_t tid)
{
    MachThreadSP thread_sp (GetThreadByID (tid));
    if (thread_sp)
        return thread_sp->GetDispatchQueueT();
    return INVALID_NUB_ADDRESS;
}

nub_addr_t
MachThreadList::GetTSDAddressForThread (nub_thread_t tid, uint64_t plo_pthread_tsd_base_address_offset, uint64_t plo_pthread_tsd_base_offset, uint64_t plo_pthread_tsd_entry_size)
{
    MachThreadSP thread_sp (GetThreadByID (tid));
    if (thread_sp)
        return thread_sp->GetTSDAddressForThread(plo_pthread_tsd_base_address_offset, plo_pthread_tsd_base_offset, plo_pthread_tsd_entry_size);
    return INVALID_NUB_ADDRESS;
}

nub_thread_t
MachThreadList::SetCurrentThread(nub_thread_t tid)
{
    MachThreadSP thread_sp (GetThreadByID (tid));
    if (thread_sp)
    {
        m_current_thread = thread_sp;
        return tid;
    }
    return INVALID_NUB_THREAD;
}


bool
MachThreadList::GetThreadStoppedReason(nub_thread_t tid, struct DNBThreadStopInfo *stop_info) const
{
    MachThreadSP thread_sp (GetThreadByID (tid));
    if (thread_sp)
        return thread_sp->GetStopException().GetStopInfo(stop_info);
    return false;
}

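// Translate the globally unique thread ID "tid" to its mach port number and
// fill in "ident_info" by asking the kernel for THREAD_IDENTIFIER_INFO.
// Returns true if the ::thread_info() call succeeded.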
bool
MachThreadList::GetIdentifierInfo (nub_thread_t tid, thread_identifier_info_data_t *ident_info)
{
    thread_t mach_port_number = GetMachPortNumberByThreadID (tid);

    mach_msg_type_number_t count = THREAD_IDENTIFIER_INFO_COUNT;
    return ::thread_info (mach_port_number, THREAD_IDENTIFIER_INFO, (thread_info_t)ident_info, &count) == KERN_SUCCESS;
}

void
MachThreadList::DumpThreadStoppedReason (nub_thread_t tid) const
{
    MachThreadSP thread_sp (GetThreadByID (tid));
    if (thread_sp)
        thread_sp->GetStopException().DumpStopReason();
}

const char *
MachThreadList::GetThreadInfo (nub_thread_t tid) const
{
    MachThreadSP thread_sp (GetThreadByID (tid));
    if (thread_sp)
        return thread_sp->GetBasicInfoAsString();
    return NULL;
}

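// Thread lookup helpers. A nub_thread_t is the globally unique thread ID
// (see MachThread::GetGloballyUniqueThreadIDForMachPortID()), which is not
// the same value as the thread's mach port number. The methods below search
// m_threads under the thread list mutex and translate between the unique ID,
// the mach port number, and the owning MachThreadSP.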
MachThreadSP
MachThreadList::GetThreadByID (nub_thread_t tid) const
{
    PTHREAD_MUTEX_LOCKER (locker, m_threads_mutex);
    MachThreadSP thread_sp;
    const size_t num_threads = m_threads.size();
    for (size_t idx = 0; idx < num_threads; ++idx)
    {
        if (m_threads[idx]->ThreadID() == tid)
        {
            thread_sp = m_threads[idx];
            break;
        }
    }
    return thread_sp;
}

MachThreadSP
MachThreadList::GetThreadByMachPortNumber (thread_t mach_port_number) const
{
    PTHREAD_MUTEX_LOCKER (locker, m_threads_mutex);
    MachThreadSP thread_sp;
    const size_t num_threads = m_threads.size();
    for (size_t idx = 0; idx < num_threads; ++idx)
    {
        if (m_threads[idx]->MachPortNumber() == mach_port_number)
        {
            thread_sp = m_threads[idx];
            break;
        }
    }
    return thread_sp;
}

nub_thread_t
MachThreadList::GetThreadIDByMachPortNumber (thread_t mach_port_number) const
{
    PTHREAD_MUTEX_LOCKER (locker, m_threads_mutex);
    const size_t num_threads = m_threads.size();
    for (size_t idx = 0; idx < num_threads; ++idx)
    {
        if (m_threads[idx]->MachPortNumber() == mach_port_number)
        {
            return m_threads[idx]->ThreadID();
        }
    }
    return INVALID_NUB_THREAD;
}

thread_t
MachThreadList::GetMachPortNumberByThreadID (nub_thread_t globally_unique_id) const
{
    PTHREAD_MUTEX_LOCKER (locker, m_threads_mutex);
    const size_t num_threads = m_threads.size();
    for (size_t idx = 0; idx < num_threads; ++idx)
    {
        if (m_threads[idx]->ThreadID() == globally_unique_id)
        {
            return m_threads[idx]->MachPortNumber();
        }
    }
    return 0;
}

bool
MachThreadList::GetRegisterValue (nub_thread_t tid, uint32_t set, uint32_t reg, DNBRegisterValue *reg_value ) const
{
    MachThreadSP thread_sp (GetThreadByID (tid));
    if (thread_sp)
        return thread_sp->GetRegisterValue(set, reg, reg_value);

    return false;
}

bool
MachThreadList::SetRegisterValue (nub_thread_t tid, uint32_t set, uint32_t reg, const DNBRegisterValue *reg_value ) const
{
    MachThreadSP thread_sp (GetThreadByID (tid));
    if (thread_sp)
        return thread_sp->SetRegisterValue(set, reg, reg_value);

    return false;
}

nub_size_t
MachThreadList::GetRegisterContext (nub_thread_t tid, void *buf, size_t buf_len)
{
    MachThreadSP thread_sp (GetThreadByID (tid));
    if (thread_sp)
        return thread_sp->GetRegisterContext (buf, buf_len);
    return 0;
}

nub_size_t
MachThreadList::SetRegisterContext (nub_thread_t tid, const void *buf, size_t buf_len)
{
    MachThreadSP thread_sp (GetThreadByID (tid));
    if (thread_sp)
        return thread_sp->SetRegisterContext (buf, buf_len);
    return 0;
}

uint32_t
MachThreadList::SaveRegisterState (nub_thread_t tid)
{
    MachThreadSP thread_sp (GetThreadByID (tid));
    if (thread_sp)
        return thread_sp->SaveRegisterState ();
    return 0;
}

bool
MachThreadList::RestoreRegisterState (nub_thread_t tid, uint32_t save_id)
{
    MachThreadSP thread_sp (GetThreadByID (tid));
    if (thread_sp)
        return thread_sp->RestoreRegisterState (save_id);
    return false;
}


nub_size_t
MachThreadList::NumThreads () const
{
    PTHREAD_MUTEX_LOCKER (locker, m_threads_mutex);
    return m_threads.size();
}

nub_thread_t
MachThreadList::ThreadIDAtIndex (nub_size_t idx) const
{
    PTHREAD_MUTEX_LOCKER (locker, m_threads_mutex);
    if (idx < m_threads.size())
        return m_threads[idx]->ThreadID();
    return INVALID_NUB_THREAD;
}

nub_thread_t
MachThreadList::CurrentThreadID ( )
{
    MachThreadSP thread_sp;
    CurrentThread(thread_sp);
    if (thread_sp.get())
        return thread_sp->ThreadID();
    return INVALID_NUB_THREAD;
}

bool
MachThreadList::NotifyException(MachException::Data& exc)
{
    MachThreadSP thread_sp (GetThreadByMachPortNumber (exc.thread_port));
    if (thread_sp)
    {
        thread_sp->NotifyException(exc);
        return true;
    }
    return false;
}

void
MachThreadList::Clear()
{
    PTHREAD_MUTEX_LOCKER (locker, m_threads_mutex);
    m_threads.clear();
}

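// Update the cached thread list from the kernel. On the first stop this also
// checks (via sysctl/KERN_PROC) whether the inferior is a 64-bit process and
// sets the DNBArchProtocol architecture accordingly. When the list is empty
// or "update" is true, ::task_threads() is queried: existing MachThread
// objects whose globally unique thread IDs are still present are kept, new
// user-ready threads are added (and reported through "new_threads" if given),
// and threads that have gone away are dropped. Returns the thread count.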
uint32_t
MachThreadList::UpdateThreadList(MachProcess *process, bool update, MachThreadList::collection *new_threads)
{
    // locker will keep a mutex locked until it goes out of scope
    DNBLogThreadedIf (LOG_THREAD, "MachThreadList::UpdateThreadList (pid = %4.4x, update = %u) process stop count = %u", process->ProcessID(), update, process->StopCount());
    PTHREAD_MUTEX_LOCKER (locker, m_threads_mutex);

    if (process->StopCount() == 0)
    {
        int mib[4] = { CTL_KERN, KERN_PROC, KERN_PROC_PID, process->ProcessID() };
        struct kinfo_proc processInfo;
        size_t bufsize = sizeof(processInfo);
        if (sysctl(mib, (unsigned)(sizeof(mib)/sizeof(int)), &processInfo, &bufsize, NULL, 0) == 0 && bufsize > 0)
        {
            if (processInfo.kp_proc.p_flag & P_LP64)
                m_is_64_bit = true;
        }
#if defined (__i386__) || defined (__x86_64__)
        if (m_is_64_bit)
            DNBArchProtocol::SetArchitecture(CPU_TYPE_X86_64);
        else
            DNBArchProtocol::SetArchitecture(CPU_TYPE_I386);
#elif defined (__arm__) || defined (__arm64__) || defined (__aarch64__)
        if (m_is_64_bit)
            DNBArchProtocol::SetArchitecture(CPU_TYPE_ARM64);
        else
            DNBArchProtocol::SetArchitecture(CPU_TYPE_ARM);
#endif
    }

    if (m_threads.empty() || update)
    {
        thread_array_t thread_list = NULL;
        mach_msg_type_number_t thread_list_count = 0;
        task_t task = process->Task().TaskPort();
        DNBError err(::task_threads (task, &thread_list, &thread_list_count), DNBError::MachKernel);

        if (DNBLogCheckLogBit(LOG_THREAD) || err.Fail())
            err.LogThreaded("::task_threads ( task = 0x%4.4x, thread_list => %p, thread_list_count => %u )", task, thread_list, thread_list_count);

        if (err.Error() == KERN_SUCCESS && thread_list_count > 0)
        {
            MachThreadList::collection currThreads;
            size_t idx;
            // Iterate through the current thread list and see which threads
            // we already have in our list (keep them), which ones we don't
            // (add them), and which ones are not around anymore (remove them).
            for (idx = 0; idx < thread_list_count; ++idx)
            {
                const thread_t mach_port_num = thread_list[idx];

                uint64_t unique_thread_id = MachThread::GetGloballyUniqueThreadIDForMachPortID (mach_port_num);
                MachThreadSP thread_sp (GetThreadByID (unique_thread_id));
                if (thread_sp)
                {
                    // Keep the existing thread class
                    currThreads.push_back(thread_sp);
                }
                else
                {
                    // We don't have this thread, let's add it.
                    thread_sp.reset(new MachThread(process, m_is_64_bit, unique_thread_id, mach_port_num));

                    // Make sure the thread is ready to be displayed and shown
                    // to users before we add it to our list...
                    if (thread_sp->IsUserReady())
                    {
                        if (new_threads)
                            new_threads->push_back(thread_sp);

                        currThreads.push_back(thread_sp);
                    }
                }
            }

            m_threads.swap(currThreads);
            m_current_thread.reset();

            // Free the vm memory given to us by ::task_threads()
            vm_size_t thread_list_size = (vm_size_t) (thread_list_count * sizeof (thread_t));
            ::vm_deallocate (::mach_task_self(),
                             (vm_address_t)thread_list,
                             thread_list_size);
        }
    }
    return static_cast<uint32_t>(m_threads.size());
}


void
MachThreadList::CurrentThread (MachThreadSP& thread_sp)
{
    // locker will keep a mutex locked until it goes out of scope
    PTHREAD_MUTEX_LOCKER (locker, m_threads_mutex);
    if (m_current_thread.get() == NULL)
    {
        // Figure out which thread is going to be our current thread.
        // This is currently done by finding the first thread in the list
        // that has a valid exception.
        const size_t num_threads = m_threads.size();
        for (uint32_t idx = 0; idx < num_threads; ++idx)
        {
            if (m_threads[idx]->GetStopException().IsValid())
            {
                m_current_thread = m_threads[idx];
                break;
            }
        }
    }
    thread_sp = m_current_thread;
}

void
MachThreadList::Dump() const
{
    PTHREAD_MUTEX_LOCKER (locker, m_threads_mutex);
    const size_t num_threads = m_threads.size();
    for (uint32_t idx = 0; idx < num_threads; ++idx)
    {
        m_threads[idx]->Dump(idx);
    }
}


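// Called before the process is resumed. The thread list is refreshed first,
// since libdispatch or the kernel can spawn threads while the task is
// suspended. Newly discovered threads get a default resume action: they run,
// unless exactly one thread was asked to run or step, in which case the new
// threads are kept suspended. Every pre-existing thread is then handed its
// DNBThreadResumeAction so it can prepare to resume.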
void
MachThreadList::ProcessWillResume(MachProcess *process, const DNBThreadResumeActions &thread_actions)
{
    PTHREAD_MUTEX_LOCKER (locker, m_threads_mutex);

    // Update our thread list, because sometimes libdispatch or the kernel
    // will spawn threads while a task is suspended.
    MachThreadList::collection new_threads;

    // First figure out if we were planning on running only one thread, and if so force that thread to resume.
    bool run_one_thread;
    nub_thread_t solo_thread = INVALID_NUB_THREAD;
    if (thread_actions.GetSize() > 0
        && thread_actions.NumActionsWithState(eStateStepping) + thread_actions.NumActionsWithState (eStateRunning) == 1)
    {
        run_one_thread = true;
        const DNBThreadResumeAction *action_ptr = thread_actions.GetFirst();
        size_t num_actions = thread_actions.GetSize();
        for (size_t i = 0; i < num_actions; i++, action_ptr++)
        {
            if (action_ptr->state == eStateStepping || action_ptr->state == eStateRunning)
            {
                solo_thread = action_ptr->tid;
                break;
            }
        }
    }
    else
        run_one_thread = false;

    UpdateThreadList(process, true, &new_threads);

    DNBThreadResumeAction resume_new_threads = { -1U, eStateRunning, 0, INVALID_NUB_ADDRESS };
    // If we are planning to run only one thread, any new threads should be suspended.
    if (run_one_thread)
        resume_new_threads.state = eStateSuspended;

    const size_t num_new_threads = new_threads.size();
    const size_t num_threads = m_threads.size();
    for (uint32_t idx = 0; idx < num_threads; ++idx)
    {
        MachThread *thread = m_threads[idx].get();
        bool handled = false;
        for (uint32_t new_idx = 0; new_idx < num_new_threads; ++new_idx)
        {
            if (thread == new_threads[new_idx].get())
            {
                thread->ThreadWillResume(&resume_new_threads);
                handled = true;
                break;
            }
        }

        if (!handled)
        {
            const DNBThreadResumeAction *thread_action = thread_actions.GetActionForThread (thread->ThreadID(), true);
            // There must always be a thread action for every thread.
            assert (thread_action);
            bool others_stopped = false;
            if (solo_thread == thread->ThreadID())
                others_stopped = true;
            thread->ThreadWillResume (thread_action, others_stopped);
        }
    }

    if (new_threads.size())
    {
        for (uint32_t idx = 0; idx < num_new_threads; ++idx)
        {
            DNBLogThreadedIf (LOG_THREAD, "MachThreadList::ProcessWillResume (pid = %4.4x) stop-id=%u, resuming newly discovered thread: 0x%8.8" PRIx64 ", thread-is-user-ready=%i",
                              process->ProcessID(),
                              process->StopCount(),
                              new_threads[idx]->ThreadID(),
                              new_threads[idx]->IsUserReady());
        }
    }
}

uint32_t
MachThreadList::ProcessDidStop(MachProcess *process)
{
    PTHREAD_MUTEX_LOCKER (locker, m_threads_mutex);
    // Update our thread list
    const uint32_t num_threads = UpdateThreadList(process, true);
    for (uint32_t idx = 0; idx < num_threads; ++idx)
    {
        m_threads[idx]->ThreadDidStop();
    }
    return num_threads;
}

//----------------------------------------------------------------------
// Check each thread in our thread list to see if we should notify our
// client of the current halt in execution.
//
// Breakpoints can have callback functions associated with them that
// can return true to stop, or false to continue executing the inferior.
//
// RETURNS
//    true if we should stop and notify our clients
//    false if we should resume our child process and skip notification
//----------------------------------------------------------------------
bool
MachThreadList::ShouldStop(bool &step_more)
{
    PTHREAD_MUTEX_LOCKER (locker, m_threads_mutex);
    bool should_stop = false;
    const size_t num_threads = m_threads.size();
    for (uint32_t idx = 0; !should_stop && idx < num_threads; ++idx)
    {
        should_stop = m_threads[idx]->ShouldStop(step_more);
    }
    return should_stop;
}


void
MachThreadList::NotifyBreakpointChanged (const DNBBreakpoint *bp)
{
    PTHREAD_MUTEX_LOCKER (locker, m_threads_mutex);
    const size_t num_threads = m_threads.size();
    for (uint32_t idx = 0; idx < num_threads; ++idx)
    {
        m_threads[idx]->NotifyBreakpointChanged(bp);
    }
}


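// Enable or disable a hardware breakpoint on every thread in the list. Unlike
// the hardware watchpoint methods below, these do not use the per-thread
// rollback/commit transaction, and the enable call does not report back the
// hardware index that was used.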
uint32_t
MachThreadList::EnableHardwareBreakpoint (const DNBBreakpoint* bp) const
{
    if (bp != NULL)
    {
        const size_t num_threads = m_threads.size();
        for (uint32_t idx = 0; idx < num_threads; ++idx)
            m_threads[idx]->EnableHardwareBreakpoint(bp);
    }
    return INVALID_NUB_HW_INDEX;
}

bool
MachThreadList::DisableHardwareBreakpoint (const DNBBreakpoint* bp) const
{
    if (bp != NULL)
    {
        const size_t num_threads = m_threads.size();
        for (uint32_t idx = 0; idx < num_threads; ++idx)
            m_threads[idx]->DisableHardwareBreakpoint(bp);
    }
    return false;
}

// DNBWatchpointSet() -> MachProcess::CreateWatchpoint() -> MachProcess::EnableWatchpoint()
// -> MachThreadList::EnableHardwareWatchpoint().
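// Setting a hardware watchpoint is an all-or-nothing transaction across the
// thread list: the watchpoint is enabled on each thread in turn, and the
// first thread also sets it task-wide so that threads created later inherit
// it. If any thread fails, the threads already modified roll back their
// pending transaction and the call fails; otherwise every thread commits it.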
uint32_t
MachThreadList::EnableHardwareWatchpoint (const DNBBreakpoint* wp) const
{
    uint32_t hw_index = INVALID_NUB_HW_INDEX;
    if (wp != NULL)
    {
        PTHREAD_MUTEX_LOCKER (locker, m_threads_mutex);
        const size_t num_threads = m_threads.size();
        // On Mac OS X we have to prime the control registers for new threads.  We do this
        // using the control register data for the first thread, for lack of a better way of choosing.
        bool also_set_on_task = true;
        for (uint32_t idx = 0; idx < num_threads; ++idx)
        {
            if ((hw_index = m_threads[idx]->EnableHardwareWatchpoint(wp, also_set_on_task)) == INVALID_NUB_HW_INDEX)
            {
                // We know that idx failed for some reason.  Let's rollback the transaction for [0, idx).
                for (uint32_t i = 0; i < idx; ++i)
                    m_threads[i]->RollbackTransForHWP();
                return INVALID_NUB_HW_INDEX;
            }
            also_set_on_task = false;
        }
        // Notify each thread to commit the pending transaction.
        for (uint32_t idx = 0; idx < num_threads; ++idx)
            m_threads[idx]->FinishTransForHWP();

    }
    return hw_index;
}

bool
MachThreadList::DisableHardwareWatchpoint (const DNBBreakpoint* wp) const
{
    if (wp != NULL)
    {
        PTHREAD_MUTEX_LOCKER (locker, m_threads_mutex);
        const size_t num_threads = m_threads.size();

        // On Mac OS X we have to prime the control registers for new threads.  We do this
        // using the control register data for the first thread, for lack of a better way of choosing.
        bool also_set_on_task = true;
        for (uint32_t idx = 0; idx < num_threads; ++idx)
        {
            if (!m_threads[idx]->DisableHardwareWatchpoint(wp, also_set_on_task))
            {
                // We know that idx failed for some reason.  Let's rollback the transaction for [0, idx).
                for (uint32_t i = 0; i < idx; ++i)
                    m_threads[i]->RollbackTransForHWP();
                return false;
            }
            also_set_on_task = false;
        }
        // Notify each thread to commit the pending transaction.
        for (uint32_t idx = 0; idx < num_threads; ++idx)
            m_threads[idx]->FinishTransForHWP();

        return true;
    }
    return false;
}

uint32_t
MachThreadList::NumSupportedHardwareWatchpoints () const
{
    PTHREAD_MUTEX_LOCKER (locker, m_threads_mutex);
    const size_t num_threads = m_threads.size();
    // Use an arbitrary thread to retrieve the number of supported hardware watchpoints.
    if (num_threads)
        return m_threads[0]->NumSupportedHardwareWatchpoints();
    return 0;
}

uint32_t
MachThreadList::GetThreadIndexForThreadStoppedWithSignal (const int signo) const
{
    PTHREAD_MUTEX_LOCKER (locker, m_threads_mutex);
    const size_t num_threads = m_threads.size();
    for (uint32_t idx = 0; idx < num_threads; ++idx)
    {
        if (m_threads[idx]->GetStopException().SoftSignal () == signo)
            return idx;
    }
    return UINT32_MAX;
}