//===-- MachThreadList.cpp --------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
//  Created by Greg Clayton on 6/19/07.
//
//===----------------------------------------------------------------------===//

#include "MachThreadList.h"

#include "DNBLog.h"
#include "DNBThreadResumeActions.h"
#include "MachProcess.h"

#include <inttypes.h>
#include <sys/sysctl.h>

#include <memory>

MachThreadList::MachThreadList()
    : m_threads(), m_threads_mutex(PTHREAD_MUTEX_RECURSIVE),
      m_is_64_bit(false) {}

MachThreadList::~MachThreadList() {}

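// Each of the per-thread accessors below looks the thread up by its globally
// unique thread ID and forwards the call to that thread; when no thread with
// the given ID is in the list, a benign default value is returned instead.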
nub_state_t MachThreadList::GetState(nub_thread_t tid) {
  MachThreadSP thread_sp(GetThreadByID(tid));
  if (thread_sp)
    return thread_sp->GetState();
  return eStateInvalid;
}

const char *MachThreadList::GetName(nub_thread_t tid) {
  MachThreadSP thread_sp(GetThreadByID(tid));
  if (thread_sp)
    return thread_sp->GetName();
  return NULL;
}

ThreadInfo::QoS MachThreadList::GetRequestedQoS(nub_thread_t tid,
                                                nub_addr_t tsd,
                                                uint64_t dti_qos_class_index) {
  MachThreadSP thread_sp(GetThreadByID(tid));
  if (thread_sp)
    return thread_sp->GetRequestedQoS(tsd, dti_qos_class_index);
  return ThreadInfo::QoS();
}

nub_addr_t MachThreadList::GetPThreadT(nub_thread_t tid) {
  MachThreadSP thread_sp(GetThreadByID(tid));
  if (thread_sp)
    return thread_sp->GetPThreadT();
  return INVALID_NUB_ADDRESS;
}

nub_addr_t MachThreadList::GetDispatchQueueT(nub_thread_t tid) {
  MachThreadSP thread_sp(GetThreadByID(tid));
  if (thread_sp)
    return thread_sp->GetDispatchQueueT();
  return INVALID_NUB_ADDRESS;
}

nub_addr_t MachThreadList::GetTSDAddressForThread(
    nub_thread_t tid, uint64_t plo_pthread_tsd_base_address_offset,
    uint64_t plo_pthread_tsd_base_offset, uint64_t plo_pthread_tsd_entry_size) {
  MachThreadSP thread_sp(GetThreadByID(tid));
  if (thread_sp)
    return thread_sp->GetTSDAddressForThread(
        plo_pthread_tsd_base_address_offset, plo_pthread_tsd_base_offset,
        plo_pthread_tsd_entry_size);
  return INVALID_NUB_ADDRESS;
}

nub_thread_t MachThreadList::SetCurrentThread(nub_thread_t tid) {
  MachThreadSP thread_sp(GetThreadByID(tid));
  if (thread_sp) {
    m_current_thread = thread_sp;
    return tid;
  }
  return INVALID_NUB_THREAD;
}

bool MachThreadList::GetThreadStoppedReason(
    nub_thread_t tid, struct DNBThreadStopInfo *stop_info) const {
  MachThreadSP thread_sp(GetThreadByID(tid));
  if (thread_sp)
    return thread_sp->GetStopException().GetStopInfo(stop_info);
  return false;
}

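// Fill in the thread_identifier_info for the given thread by querying the
// kernel with ::thread_info(THREAD_IDENTIFIER_INFO), addressing the thread by
// its Mach port number.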
bool MachThreadList::GetIdentifierInfo(
    nub_thread_t tid, thread_identifier_info_data_t *ident_info) {
  thread_t mach_port_number = GetMachPortNumberByThreadID(tid);

  mach_msg_type_number_t count = THREAD_IDENTIFIER_INFO_COUNT;
  return ::thread_info(mach_port_number, THREAD_IDENTIFIER_INFO,
                       (thread_info_t)ident_info, &count) == KERN_SUCCESS;
}

void MachThreadList::DumpThreadStoppedReason(nub_thread_t tid) const {
  MachThreadSP thread_sp(GetThreadByID(tid));
  if (thread_sp)
    thread_sp->GetStopException().DumpStopReason();
}

const char *MachThreadList::GetThreadInfo(nub_thread_t tid) const {
  MachThreadSP thread_sp(GetThreadByID(tid));
  if (thread_sp)
    return thread_sp->GetBasicInfoAsString();
  return NULL;
}

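// Find a thread by its globally unique thread ID with a linear scan of the
// thread list; returns an empty shared pointer when no thread with that ID
// is in the list.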
MachThreadSP MachThreadList::GetThreadByID(nub_thread_t tid) const {
  PTHREAD_MUTEX_LOCKER(locker, m_threads_mutex);
  MachThreadSP thread_sp;
  const size_t num_threads = m_threads.size();
  for (size_t idx = 0; idx < num_threads; ++idx) {
    if (m_threads[idx]->ThreadID() == tid) {
      thread_sp = m_threads[idx];
      break;
    }
  }
  return thread_sp;
}

MachThreadSP
MachThreadList::GetThreadByMachPortNumber(thread_t mach_port_number) const {
  PTHREAD_MUTEX_LOCKER(locker, m_threads_mutex);
  MachThreadSP thread_sp;
  const size_t num_threads = m_threads.size();
  for (size_t idx = 0; idx < num_threads; ++idx) {
    if (m_threads[idx]->MachPortNumber() == mach_port_number) {
      thread_sp = m_threads[idx];
      break;
    }
  }
  return thread_sp;
}

nub_thread_t
MachThreadList::GetThreadIDByMachPortNumber(thread_t mach_port_number) const {
  PTHREAD_MUTEX_LOCKER(locker, m_threads_mutex);
  const size_t num_threads = m_threads.size();
  for (size_t idx = 0; idx < num_threads; ++idx) {
    if (m_threads[idx]->MachPortNumber() == mach_port_number) {
      return m_threads[idx]->ThreadID();
    }
  }
  return INVALID_NUB_THREAD;
}

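// Map a globally unique thread ID back to the thread's Mach port number.
// Returns 0 (MACH_PORT_NULL) when the thread is not in the list.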
thread_t MachThreadList::GetMachPortNumberByThreadID(
    nub_thread_t globally_unique_id) const {
  PTHREAD_MUTEX_LOCKER(locker, m_threads_mutex);
  const size_t num_threads = m_threads.size();
  for (size_t idx = 0; idx < num_threads; ++idx) {
    if (m_threads[idx]->ThreadID() == globally_unique_id) {
      return m_threads[idx]->MachPortNumber();
    }
  }
  return 0;
}

bool MachThreadList::GetRegisterValue(nub_thread_t tid, uint32_t set,
                                      uint32_t reg,
                                      DNBRegisterValue *reg_value) const {
  MachThreadSP thread_sp(GetThreadByID(tid));
  if (thread_sp)
    return thread_sp->GetRegisterValue(set, reg, reg_value);

  return false;
}

bool MachThreadList::SetRegisterValue(nub_thread_t tid, uint32_t set,
                                      uint32_t reg,
                                      const DNBRegisterValue *reg_value) const {
  MachThreadSP thread_sp(GetThreadByID(tid));
  if (thread_sp)
    return thread_sp->SetRegisterValue(set, reg, reg_value);

  return false;
}

nub_size_t MachThreadList::GetRegisterContext(nub_thread_t tid, void *buf,
                                              size_t buf_len) {
  MachThreadSP thread_sp(GetThreadByID(tid));
  if (thread_sp)
    return thread_sp->GetRegisterContext(buf, buf_len);
  return 0;
}

nub_size_t MachThreadList::SetRegisterContext(nub_thread_t tid, const void *buf,
                                              size_t buf_len) {
  MachThreadSP thread_sp(GetThreadByID(tid));
  if (thread_sp)
    return thread_sp->SetRegisterContext(buf, buf_len);
  return 0;
}

uint32_t MachThreadList::SaveRegisterState(nub_thread_t tid) {
  MachThreadSP thread_sp(GetThreadByID(tid));
  if (thread_sp)
    return thread_sp->SaveRegisterState();
  return 0;
}

bool MachThreadList::RestoreRegisterState(nub_thread_t tid, uint32_t save_id) {
  MachThreadSP thread_sp(GetThreadByID(tid));
  if (thread_sp)
    return thread_sp->RestoreRegisterState(save_id);
  return false;
}

nub_size_t MachThreadList::NumThreads() const {
  PTHREAD_MUTEX_LOCKER(locker, m_threads_mutex);
  return m_threads.size();
}

nub_thread_t MachThreadList::ThreadIDAtIndex(nub_size_t idx) const {
  PTHREAD_MUTEX_LOCKER(locker, m_threads_mutex);
  if (idx < m_threads.size())
    return m_threads[idx]->ThreadID();
  return INVALID_NUB_THREAD;
}

nub_thread_t MachThreadList::CurrentThreadID() {
  MachThreadSP thread_sp;
  CurrentThread(thread_sp);
  if (thread_sp.get())
    return thread_sp->ThreadID();
  return INVALID_NUB_THREAD;
}

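// Route an exception received from the kernel to the thread that owns the
// exception's Mach thread port. Returns false if no matching thread is found.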
bool MachThreadList::NotifyException(MachException::Data &exc) {
  MachThreadSP thread_sp(GetThreadByMachPortNumber(exc.thread_port));
  if (thread_sp) {
    thread_sp->NotifyException(exc);
    return true;
  }
  return false;
}

void MachThreadList::Clear() {
  PTHREAD_MUTEX_LOCKER(locker, m_threads_mutex);
  m_threads.clear();
}

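// Rebuild our list of MachThread objects from the task's current set of Mach
// threads: existing MachThread objects are kept, newly seen Mach ports get new
// MachThread objects (optionally reported through "new_threads"), and threads
// that have disappeared are dropped. Before the first stop we also determine
// whether the inferior is a 64-bit process so the correct architecture can be
// selected.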
uint32_t
MachThreadList::UpdateThreadList(MachProcess *process, bool update,
                                 MachThreadList::collection *new_threads) {
  // locker will keep a mutex locked until it goes out of scope
  DNBLogThreadedIf(LOG_THREAD, "MachThreadList::UpdateThreadList (pid = %4.4x, "
                               "update = %u) process stop count = %u",
                   process->ProcessID(), update, process->StopCount());
  PTHREAD_MUTEX_LOCKER(locker, m_threads_mutex);

  if (process->StopCount() == 0) {
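    // Before the first stop (stop count is zero), ask the kernel via sysctl
    // whether the inferior is a 64-bit process so the matching architecture
    // can be selected below.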
    int mib[4] = {CTL_KERN, KERN_PROC, KERN_PROC_PID, process->ProcessID()};
    struct kinfo_proc processInfo;
    size_t bufsize = sizeof(processInfo);
    if (sysctl(mib, (unsigned)(sizeof(mib) / sizeof(int)), &processInfo,
               &bufsize, NULL, 0) == 0 &&
        bufsize > 0) {
      if (processInfo.kp_proc.p_flag & P_LP64)
        m_is_64_bit = true;
    }
#if defined(__i386__) || defined(__x86_64__)
    if (m_is_64_bit)
      DNBArchProtocol::SetArchitecture(CPU_TYPE_X86_64);
    else
      DNBArchProtocol::SetArchitecture(CPU_TYPE_I386);
#elif defined(__arm__) || defined(__arm64__) || defined(__aarch64__)
    if (m_is_64_bit)
      DNBArchProtocol::SetArchitecture(CPU_TYPE_ARM64);
    else
      DNBArchProtocol::SetArchitecture(CPU_TYPE_ARM);
#endif
  }

  if (m_threads.empty() || update) {
    thread_array_t thread_list = NULL;
    mach_msg_type_number_t thread_list_count = 0;
    task_t task = process->Task().TaskPort();
    DNBError err(::task_threads(task, &thread_list, &thread_list_count),
                 DNBError::MachKernel);

    if (DNBLogCheckLogBit(LOG_THREAD) || err.Fail())
      err.LogThreaded("::task_threads ( task = 0x%4.4x, thread_list => %p, "
                      "thread_list_count => %u )",
                      task, thread_list, thread_list_count);

    if (err.Status() == KERN_SUCCESS && thread_list_count > 0) {
      MachThreadList::collection currThreads;
      size_t idx;
      // Iterate through the current thread list and see which threads
      // we already have in our list (keep them), which ones we don't
      // (add them), and which ones are not around anymore (remove them).
      for (idx = 0; idx < thread_list_count; ++idx) {
        const thread_t mach_port_num = thread_list[idx];

        uint64_t unique_thread_id =
            MachThread::GetGloballyUniqueThreadIDForMachPortID(mach_port_num);
        MachThreadSP thread_sp(GetThreadByID(unique_thread_id));
        if (thread_sp) {
          // Keep the existing thread class
          currThreads.push_back(thread_sp);
        } else {
          // We don't have this thread, let's add it.
          thread_sp = std::make_shared<MachThread>(
              process, m_is_64_bit, unique_thread_id, mach_port_num);

          // Only add the new thread to our list (and report it to the caller)
          // once it is ready to be displayed and shown to users.
          if (thread_sp->IsUserReady()) {
            if (new_threads)
              new_threads->push_back(thread_sp);

            currThreads.push_back(thread_sp);
          }
        }
      }

      m_threads.swap(currThreads);
      m_current_thread.reset();

      // Free the vm memory given to us by ::task_threads()
      vm_size_t thread_list_size =
          (vm_size_t)(thread_list_count * sizeof(thread_t));
      ::vm_deallocate(::mach_task_self(), (vm_address_t)thread_list,
                      thread_list_size);
    }
  }
  return static_cast<uint32_t>(m_threads.size());
}

void MachThreadList::CurrentThread(MachThreadSP &thread_sp) {
  // locker will keep a mutex locked until it goes out of scope
  PTHREAD_MUTEX_LOCKER(locker, m_threads_mutex);
  if (m_current_thread.get() == NULL) {
    // Figure out which thread is going to be our current thread.
    // This is currently done by finding the first thread in the list
    // that has a valid exception.
    const size_t num_threads = m_threads.size();
    for (uint32_t idx = 0; idx < num_threads; ++idx) {
      if (m_threads[idx]->GetStopException().IsValid()) {
        m_current_thread = m_threads[idx];
        break;
      }
    }
  }
  thread_sp = m_current_thread;
}

void MachThreadList::Dump() const {
  PTHREAD_MUTEX_LOCKER(locker, m_threads_mutex);
  const size_t num_threads = m_threads.size();
  for (uint32_t idx = 0; idx < num_threads; ++idx) {
    m_threads[idx]->Dump(idx);
  }
}

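// Prepare every thread for a resume: refresh the thread list (threads may
// have been created while the task was suspended), keep newly discovered
// threads suspended when the resume plan runs only a single thread, and
// forward the appropriate resume action to each previously known thread.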
void MachThreadList::ProcessWillResume(
    MachProcess *process, const DNBThreadResumeActions &thread_actions) {
  PTHREAD_MUTEX_LOCKER(locker, m_threads_mutex);

  // Update our thread list, because sometimes libdispatch or the kernel
  // will spawn threads while a task is suspended.
  MachThreadList::collection new_threads;

  // First figure out if we were planning on running only one thread, and if so
  // force that thread to resume.
  bool run_one_thread;
  nub_thread_t solo_thread = INVALID_NUB_THREAD;
  if (thread_actions.GetSize() > 0 &&
      thread_actions.NumActionsWithState(eStateStepping) +
              thread_actions.NumActionsWithState(eStateRunning) ==
          1) {
    run_one_thread = true;
    const DNBThreadResumeAction *action_ptr = thread_actions.GetFirst();
    size_t num_actions = thread_actions.GetSize();
    for (size_t i = 0; i < num_actions; i++, action_ptr++) {
      if (action_ptr->state == eStateStepping ||
          action_ptr->state == eStateRunning) {
        solo_thread = action_ptr->tid;
        break;
      }
    }
  } else
    run_one_thread = false;

  UpdateThreadList(process, true, &new_threads);

  DNBThreadResumeAction resume_new_threads = {-1U, eStateRunning, 0,
                                              INVALID_NUB_ADDRESS};
  // If we are planning to run only one thread, any new threads should be
  // suspended.
  if (run_one_thread)
    resume_new_threads.state = eStateSuspended;

  const size_t num_new_threads = new_threads.size();
  const size_t num_threads = m_threads.size();
  for (uint32_t idx = 0; idx < num_threads; ++idx) {
    MachThread *thread = m_threads[idx].get();
    bool handled = false;
    for (uint32_t new_idx = 0; new_idx < num_new_threads; ++new_idx) {
      if (thread == new_threads[new_idx].get()) {
        thread->ThreadWillResume(&resume_new_threads);
        handled = true;
        break;
      }
    }

    if (!handled) {
      const DNBThreadResumeAction *thread_action =
          thread_actions.GetActionForThread(thread->ThreadID(), true);
      // There must always be a thread action for every thread.
      assert(thread_action);
      bool others_stopped = false;
      if (solo_thread == thread->ThreadID())
        others_stopped = true;
      thread->ThreadWillResume(thread_action, others_stopped);
    }
  }

  if (new_threads.size()) {
    for (uint32_t idx = 0; idx < num_new_threads; ++idx) {
      DNBLogThreadedIf(
          LOG_THREAD, "MachThreadList::ProcessWillResume (pid = %4.4x) "
                      "stop-id=%u, resuming newly discovered thread: "
                      "0x%8.8" PRIx64 ", thread-is-user-ready=%i)",
          process->ProcessID(), process->StopCount(),
          new_threads[idx]->ThreadID(), new_threads[idx]->IsUserReady());
    }
  }
}

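// Called after the process stops: refresh the thread list and let each thread
// update its cached state for the new stop.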
uint32_t MachThreadList::ProcessDidStop(MachProcess *process) {
  PTHREAD_MUTEX_LOCKER(locker, m_threads_mutex);
  // Update our thread list
  const uint32_t num_threads = UpdateThreadList(process, true);
  for (uint32_t idx = 0; idx < num_threads; ++idx) {
    m_threads[idx]->ThreadDidStop();
  }
  return num_threads;
}

// Check each thread in our thread list to see if we should notify our
// client of the current halt in execution.
//
// Breakpoints can have callback functions associated with them that
// can return true to stop, or false to continue executing the inferior.
//
// RETURNS
//    true if we should stop and notify our clients
//    false if we should resume our child process and skip notification
bool MachThreadList::ShouldStop(bool &step_more) {
  PTHREAD_MUTEX_LOCKER(locker, m_threads_mutex);
  bool should_stop = false;
  const size_t num_threads = m_threads.size();
  for (uint32_t idx = 0; !should_stop && idx < num_threads; ++idx) {
    should_stop = m_threads[idx]->ShouldStop(step_more);
  }
  return should_stop;
}

void MachThreadList::NotifyBreakpointChanged(const DNBBreakpoint *bp) {
  PTHREAD_MUTEX_LOCKER(locker, m_threads_mutex);
  const size_t num_threads = m_threads.size();
  for (uint32_t idx = 0; idx < num_threads; ++idx) {
    m_threads[idx]->NotifyBreakpointChanged(bp);
  }
}

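// Enable the hardware breakpoint on every thread in the list. No hardware
// register index is reported back to the caller; this always returns
// INVALID_NUB_HW_INDEX.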
uint32_t
MachThreadList::EnableHardwareBreakpoint(const DNBBreakpoint *bp) const {
  if (bp != NULL) {
    const size_t num_threads = m_threads.size();
    for (uint32_t idx = 0; idx < num_threads; ++idx)
      m_threads[idx]->EnableHardwareBreakpoint(bp);
  }
  return INVALID_NUB_HW_INDEX;
}

bool MachThreadList::DisableHardwareBreakpoint(const DNBBreakpoint *bp) const {
  if (bp != NULL) {
    const size_t num_threads = m_threads.size();
    for (uint32_t idx = 0; idx < num_threads; ++idx)
      m_threads[idx]->DisableHardwareBreakpoint(bp);
  }
  return false;
}

// DNBWatchpointSet() -> MachProcess::CreateWatchpoint() ->
// MachProcess::EnableWatchpoint() ->
// MachThreadList::EnableHardwareWatchpoint().
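//
// The watchpoint is applied to every thread as a transaction: if any thread
// fails to accept it, the change is rolled back on the threads that were
// already updated, and the pending change is committed only once all threads
// have accepted it.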
uint32_t
MachThreadList::EnableHardwareWatchpoint(const DNBBreakpoint *wp) const {
  uint32_t hw_index = INVALID_NUB_HW_INDEX;
  if (wp != NULL) {
    PTHREAD_MUTEX_LOCKER(locker, m_threads_mutex);
    const size_t num_threads = m_threads.size();
    // On Mac OS X we have to prime the control registers for new threads.
    // We do this using the control register data for the first thread, for
    // lack of a better way of choosing.
    bool also_set_on_task = true;
    for (uint32_t idx = 0; idx < num_threads; ++idx) {
      if ((hw_index = m_threads[idx]->EnableHardwareWatchpoint(
               wp, also_set_on_task)) == INVALID_NUB_HW_INDEX) {
        // We know that idx failed for some reason.  Let's rollback the
        // transaction for [0, idx).
        for (uint32_t i = 0; i < idx; ++i)
          m_threads[i]->RollbackTransForHWP();
        return INVALID_NUB_HW_INDEX;
      }
      also_set_on_task = false;
    }
    // Notify each thread to commit the pending transaction.
    for (uint32_t idx = 0; idx < num_threads; ++idx)
      m_threads[idx]->FinishTransForHWP();
  }
  return hw_index;
}

bool MachThreadList::DisableHardwareWatchpoint(const DNBBreakpoint *wp) const {
  if (wp != NULL) {
    PTHREAD_MUTEX_LOCKER(locker, m_threads_mutex);
    const size_t num_threads = m_threads.size();

    // On Mac OS X we have to prime the control registers for new threads.
    // We do this using the control register data for the first thread, for
    // lack of a better way of choosing.
    bool also_set_on_task = true;
    for (uint32_t idx = 0; idx < num_threads; ++idx) {
      if (!m_threads[idx]->DisableHardwareWatchpoint(wp, also_set_on_task)) {
        // We know that idx failed for some reason.  Let's rollback the
        // transaction for [0, idx).
        for (uint32_t i = 0; i < idx; ++i)
          m_threads[i]->RollbackTransForHWP();
        return false;
      }
      also_set_on_task = false;
    }
    // Notify each thread to commit the pending transaction.
    for (uint32_t idx = 0; idx < num_threads; ++idx)
      m_threads[idx]->FinishTransForHWP();

    return true;
  }
  return false;
}

uint32_t MachThreadList::NumSupportedHardwareWatchpoints() const {
  PTHREAD_MUTEX_LOCKER(locker, m_threads_mutex);
  const size_t num_threads = m_threads.size();
  // Use an arbitrary thread to retrieve the number of supported hardware
  // watchpoints.
  if (num_threads)
    return m_threads[0]->NumSupportedHardwareWatchpoints();
  return 0;
}

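// Return the index of the first thread whose stop reason is the given soft
// signal, or UINT32_MAX if no thread stopped because of that signal.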
uint32_t MachThreadList::GetThreadIndexForThreadStoppedWithSignal(
    const int signo) const {
  PTHREAD_MUTEX_LOCKER(locker, m_threads_mutex);
  const size_t num_threads = m_threads.size();
  for (uint32_t idx = 0; idx < num_threads; ++idx) {
    if (m_threads[idx]->GetStopException().SoftSignal() == signo)
      return idx;
  }
  return UINT32_MAX;
}