//===-- MachThreadList.cpp --------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Created by Greg Clayton on 6/19/07.
//
//===----------------------------------------------------------------------===//

#include "MachThreadList.h"

#include <inttypes.h>
#include <sys/sysctl.h>

#include "DNBLog.h"
#include "DNBThreadResumeActions.h"
#include "MachProcess.h"

MachThreadList::MachThreadList()
    : m_threads(), m_threads_mutex(PTHREAD_MUTEX_RECURSIVE),
      m_is_64_bit(false) {}

MachThreadList::~MachThreadList() {}

// Return the state of the thread identified by the globally unique thread
// ID \a tid, or eStateInvalid if no such thread is in the list.
nub_state_t MachThreadList::GetState(nub_thread_t tid) {
  MachThreadSP thread_sp(GetThreadByID(tid));
  if (thread_sp)
    return thread_sp->GetState();
  return eStateInvalid;
}

// Return the name of thread \a tid, or NULL if the thread is not found.
const char *MachThreadList::GetName(nub_thread_t tid) {
  MachThreadSP thread_sp(GetThreadByID(tid));
  if (thread_sp)
    return thread_sp->GetName();
  return NULL;
}

// Return the requested QoS information for thread \a tid, or a
// default-constructed (empty) QoS if the thread is not found.
ThreadInfo::QoS MachThreadList::GetRequestedQoS(nub_thread_t tid,
                                                nub_addr_t tsd,
                                                uint64_t dti_qos_class_index) {
  MachThreadSP thread_sp(GetThreadByID(tid));
  if (thread_sp)
    return thread_sp->GetRequestedQoS(tsd, dti_qos_class_index);
  return ThreadInfo::QoS();
}

// Return the pthread_t value for thread \a tid, or INVALID_NUB_ADDRESS if
// the thread is not found.
nub_addr_t MachThreadList::GetPThreadT(nub_thread_t tid) {
  MachThreadSP thread_sp(GetThreadByID(tid));
  if (thread_sp)
    return thread_sp->GetPThreadT();
  return INVALID_NUB_ADDRESS;
}

// Return the dispatch_queue_t value for thread \a tid, or
// INVALID_NUB_ADDRESS if the thread is not found.
nub_addr_t MachThreadList::GetDispatchQueueT(nub_thread_t tid) {
  MachThreadSP thread_sp(GetThreadByID(tid));
  if (thread_sp)
    return thread_sp->GetDispatchQueueT();
  return INVALID_NUB_ADDRESS;
}

// Compute the thread-specific-data base address for thread \a tid using the
// libpthread layout offsets supplied by the caller, or INVALID_NUB_ADDRESS
// if the thread is not found.
nub_addr_t MachThreadList::GetTSDAddressForThread(
    nub_thread_t tid, uint64_t plo_pthread_tsd_base_address_offset,
    uint64_t plo_pthread_tsd_base_offset, uint64_t plo_pthread_tsd_entry_size) {
  MachThreadSP thread_sp(GetThreadByID(tid));
  if (thread_sp)
    return thread_sp->GetTSDAddressForThread(
        plo_pthread_tsd_base_address_offset, plo_pthread_tsd_base_offset,
        plo_pthread_tsd_entry_size);
  return INVALID_NUB_ADDRESS;
}

// Make thread \a tid the current thread. Returns \a tid on success, or
// INVALID_NUB_THREAD if no thread with that ID exists in the list.
nub_thread_t MachThreadList::SetCurrentThread(nub_thread_t tid) {
  MachThreadSP thread_sp(GetThreadByID(tid));
  if (thread_sp) {
    m_current_thread = thread_sp;
    return tid;
  }
  return INVALID_NUB_THREAD;
}

// Fill in \a stop_info with the stop reason for thread \a tid. Returns true
// if the thread exists and a stop reason was retrieved.
bool MachThreadList::GetThreadStoppedReason(
    nub_thread_t tid, struct DNBThreadStopInfo *stop_info) const {
  MachThreadSP thread_sp(GetThreadByID(tid));
  if (thread_sp)
    return thread_sp->GetStopException().GetStopInfo(stop_info);
  return false;
}

// Fill in \a ident_info for thread \a tid by querying the kernel via
// ::thread_info(). Returns true only when the kernel call succeeds.
bool MachThreadList::GetIdentifierInfo(
    nub_thread_t tid, thread_identifier_info_data_t *ident_info) {
  thread_t mach_port_number = GetMachPortNumberByThreadID(tid);
  // GetMachPortNumberByThreadID() returns 0 when the thread isn't in our
  // list; don't bother asking the kernel about a null port.
  if (mach_port_number == 0)
    return false;

  mach_msg_type_number_t count = THREAD_IDENTIFIER_INFO_COUNT;
  return ::thread_info(mach_port_number, THREAD_IDENTIFIER_INFO,
                       (thread_info_t)ident_info, &count) == KERN_SUCCESS;
}

// Log the stop reason for thread \a tid (no-op if the thread isn't found).
void MachThreadList::DumpThreadStoppedReason(nub_thread_t tid) const {
  MachThreadSP thread_sp(GetThreadByID(tid));
  if (thread_sp)
    thread_sp->GetStopException().DumpStopReason();
}

// Return a printable description of thread \a tid, or NULL if the thread is
// not found.
const char *MachThreadList::GetThreadInfo(nub_thread_t tid) const {
  MachThreadSP thread_sp(GetThreadByID(tid));
  if (thread_sp)
    return thread_sp->GetBasicInfoAsString();
  return NULL;
}

// Find a thread by its globally unique thread ID. Returns an empty shared
// pointer if no match is found.
MachThreadSP MachThreadList::GetThreadByID(nub_thread_t tid) const {
  PTHREAD_MUTEX_LOCKER(locker, m_threads_mutex);
  MachThreadSP thread_sp;
  const size_t num_threads = m_threads.size();
  for (size_t idx = 0; idx < num_threads; ++idx) {
    if (m_threads[idx]->ThreadID() == tid) {
      thread_sp = m_threads[idx];
      break;
    }
  }
  return thread_sp;
}

// Find a thread by its mach port number. Returns an empty shared pointer if
// no match is found.
MachThreadSP
MachThreadList::GetThreadByMachPortNumber(thread_t mach_port_number) const {
  PTHREAD_MUTEX_LOCKER(locker, m_threads_mutex);
  MachThreadSP thread_sp;
  const size_t num_threads = m_threads.size();
  for (size_t idx = 0; idx < num_threads; ++idx) {
    if (m_threads[idx]->MachPortNumber() == mach_port_number) {
      thread_sp = m_threads[idx];
      break;
    }
  }
  return thread_sp;
}

// Map a mach port number to a globally unique thread ID, or
// INVALID_NUB_THREAD if no thread with that port is in the list.
nub_thread_t
MachThreadList::GetThreadIDByMachPortNumber(thread_t mach_port_number) const {
  PTHREAD_MUTEX_LOCKER(locker, m_threads_mutex);
  const size_t num_threads = m_threads.size();
  for (size_t idx = 0; idx < num_threads; ++idx) {
    if (m_threads[idx]->MachPortNumber() == mach_port_number)
      return m_threads[idx]->ThreadID();
  }
  return INVALID_NUB_THREAD;
}

// Map a globally unique thread ID to its mach port number, or 0 if no
// thread with that ID is in the list.
thread_t MachThreadList::GetMachPortNumberByThreadID(
    nub_thread_t globally_unique_id) const {
  PTHREAD_MUTEX_LOCKER(locker, m_threads_mutex);
  const size_t num_threads = m_threads.size();
  for (size_t idx = 0; idx < num_threads; ++idx) {
    if (m_threads[idx]->ThreadID() == globally_unique_id)
      return m_threads[idx]->MachPortNumber();
  }
  return 0;
}

// Read register (\a set, \a reg) of thread \a tid into \a reg_value.
// Returns false if the thread is not found or the read fails.
bool MachThreadList::GetRegisterValue(nub_thread_t tid, uint32_t set,
                                      uint32_t reg,
                                      DNBRegisterValue *reg_value) const {
  MachThreadSP thread_sp(GetThreadByID(tid));
  if (thread_sp)
    return thread_sp->GetRegisterValue(set, reg, reg_value);

  return false;
}

// Write \a reg_value to register (\a set, \a reg) of thread \a tid.
// Returns false if the thread is not found or the write fails.
bool MachThreadList::SetRegisterValue(nub_thread_t tid, uint32_t set,
                                      uint32_t reg,
                                      const DNBRegisterValue *reg_value) const {
  MachThreadSP thread_sp(GetThreadByID(tid));
  if (thread_sp)
    return thread_sp->SetRegisterValue(set, reg, reg_value);

  return false;
}

// Copy the full register context of thread \a tid into \a buf. Returns the
// number of bytes written, or 0 if the thread is not found.
nub_size_t MachThreadList::GetRegisterContext(nub_thread_t tid, void *buf,
                                              size_t buf_len) {
  MachThreadSP thread_sp(GetThreadByID(tid));
  if (thread_sp)
    return thread_sp->GetRegisterContext(buf, buf_len);
  return 0;
}

// Restore the full register context of thread \a tid from \a buf. Returns
// the number of bytes consumed, or 0 if the thread is not found.
nub_size_t MachThreadList::SetRegisterContext(nub_thread_t tid, const void *buf,
                                              size_t buf_len) {
  MachThreadSP thread_sp(GetThreadByID(tid));
  if (thread_sp)
    return thread_sp->SetRegisterContext(buf, buf_len);
  return 0;
}

// Checkpoint the register state of thread \a tid. Returns a save-id token
// for later use with RestoreRegisterState(), or 0 if the thread is not
// found.
uint32_t MachThreadList::SaveRegisterState(nub_thread_t tid) {
  MachThreadSP thread_sp(GetThreadByID(tid));
  if (thread_sp)
    return thread_sp->SaveRegisterState();
  return 0;
}

// Restore a register state previously checkpointed with
// SaveRegisterState(). Returns false if the thread is not found or the
// restore fails.
bool MachThreadList::RestoreRegisterState(nub_thread_t tid, uint32_t save_id) {
  MachThreadSP thread_sp(GetThreadByID(tid));
  if (thread_sp)
    return thread_sp->RestoreRegisterState(save_id);
  return false;
}

// Number of threads currently in the list.
nub_size_t MachThreadList::NumThreads() const {
  PTHREAD_MUTEX_LOCKER(locker, m_threads_mutex);
  return m_threads.size();
}

// Thread ID of the thread at index \a idx, or INVALID_NUB_THREAD if \a idx
// is out of range.
nub_thread_t MachThreadList::ThreadIDAtIndex(nub_size_t idx) const {
  PTHREAD_MUTEX_LOCKER(locker, m_threads_mutex);
  if (idx < m_threads.size())
    return m_threads[idx]->ThreadID();
  return INVALID_NUB_THREAD;
}

// Thread ID of the current thread (selecting one if necessary — see
// CurrentThread()), or INVALID_NUB_THREAD if there is none.
nub_thread_t MachThreadList::CurrentThreadID() {
  MachThreadSP thread_sp;
  CurrentThread(thread_sp);
  if (thread_sp.get())
    return thread_sp->ThreadID();
  return INVALID_NUB_THREAD;
}

// Deliver a mach exception to the thread it was raised on. Returns true if
// the owning thread was found and notified.
bool MachThreadList::NotifyException(MachException::Data &exc) {
  MachThreadSP thread_sp(GetThreadByMachPortNumber(exc.thread_port));
  if (thread_sp) {
    thread_sp->NotifyException(exc);
    return true;
  }
  return false;
}

// Remove all threads from the list.
void MachThreadList::Clear() {
  PTHREAD_MUTEX_LOCKER(locker, m_threads_mutex);
  m_threads.clear();
}

// Synchronize our thread list with the kernel's view of the task's threads.
//
// On the first stop (StopCount() == 0) this also determines whether the
// inferior is 64-bit (via sysctl/P_LP64) and primes DNBArchProtocol with
// the matching CPU architecture. When \a update is true (or our list is
// empty), ::task_threads() is queried; existing MachThread objects are
// kept, newly seen threads are created (and appended to \a new_threads if
// non-NULL), and threads that no longer exist are dropped.
//
// Returns the resulting number of threads in the list.
uint32_t
MachThreadList::UpdateThreadList(MachProcess *process, bool update,
                                 MachThreadList::collection *new_threads) {
  DNBLogThreadedIf(LOG_THREAD, "MachThreadList::UpdateThreadList (pid = %4.4x, "
                               "update = %u) process stop count = %u",
                   process->ProcessID(), update, process->StopCount());
  // locker will keep a mutex locked until it goes out of scope
  PTHREAD_MUTEX_LOCKER(locker, m_threads_mutex);

  if (process->StopCount() == 0) {
    // First stop: determine the inferior's address-space width and tell the
    // arch plugin which architecture to use.
    int mib[4] = {CTL_KERN, KERN_PROC, KERN_PROC_PID, process->ProcessID()};
    struct kinfo_proc processInfo;
    size_t bufsize = sizeof(processInfo);
    if (sysctl(mib, (unsigned)(sizeof(mib) / sizeof(int)), &processInfo,
               &bufsize, NULL, 0) == 0 &&
        bufsize > 0) {
      if (processInfo.kp_proc.p_flag & P_LP64)
        m_is_64_bit = true;
    }
#if defined(__i386__) || defined(__x86_64__)
    if (m_is_64_bit)
      DNBArchProtocol::SetArchitecture(CPU_TYPE_X86_64);
    else
      DNBArchProtocol::SetArchitecture(CPU_TYPE_I386);
#elif defined(__arm__) || defined(__arm64__) || defined(__aarch64__)
    if (m_is_64_bit)
      DNBArchProtocol::SetArchitecture(CPU_TYPE_ARM64);
    else
      DNBArchProtocol::SetArchitecture(CPU_TYPE_ARM);
#endif
  }

  if (m_threads.empty() || update) {
    thread_array_t thread_list = NULL;
    mach_msg_type_number_t thread_list_count = 0;
    task_t task = process->Task().TaskPort();
    DNBError err(::task_threads(task, &thread_list, &thread_list_count),
                 DNBError::MachKernel);

    if (DNBLogCheckLogBit(LOG_THREAD) || err.Fail())
      err.LogThreaded("::task_threads ( task = 0x%4.4x, thread_list => %p, "
                      "thread_list_count => %u )",
                      task, thread_list, thread_list_count);

    if (err.Error() == KERN_SUCCESS && thread_list_count > 0) {
      MachThreadList::collection currThreads;
      size_t idx;
      // Iterate through the current thread list and see which threads
      // we already have in our list (keep them), which ones we don't
      // (add them), and which ones are not around anymore (remove them).
      for (idx = 0; idx < thread_list_count; ++idx) {
        const thread_t mach_port_num = thread_list[idx];

        uint64_t unique_thread_id =
            MachThread::GetGloballyUniqueThreadIDForMachPortID(mach_port_num);
        MachThreadSP thread_sp(GetThreadByID(unique_thread_id));
        if (thread_sp) {
          // Keep the existing thread class
          currThreads.push_back(thread_sp);
        } else {
          // We don't have this thread, lets add it.
          thread_sp.reset(new MachThread(process, m_is_64_bit, unique_thread_id,
                                         mach_port_num));

          // Make sure the thread is ready to be displayed and shown to users
          // before we add this thread to our list...
          if (thread_sp->IsUserReady()) {
            if (new_threads)
              new_threads->push_back(thread_sp);

            currThreads.push_back(thread_sp);
          }
        }
      }

      m_threads.swap(currThreads);
      // The current thread may have gone away; force re-selection on the
      // next CurrentThread() call.
      m_current_thread.reset();

      // Free the vm memory given to us by ::task_threads()
      vm_size_t thread_list_size =
          (vm_size_t)(thread_list_count * sizeof(thread_t));
      ::vm_deallocate(::mach_task_self(), (vm_address_t)thread_list,
                      thread_list_size);
    }
  }
  return static_cast<uint32_t>(m_threads.size());
}

// Return the current thread in \a thread_sp. If no current thread has been
// chosen yet, pick the first thread in the list with a valid stop
// exception (may leave \a thread_sp empty if there is none).
void MachThreadList::CurrentThread(MachThreadSP &thread_sp) {
  // locker will keep a mutex locked until it goes out of scope
  PTHREAD_MUTEX_LOCKER(locker, m_threads_mutex);
  if (m_current_thread.get() == NULL) {
    // Figure out which thread is going to be our current thread.
    // This is currently done by finding the first thread in the list
    // that has a valid exception.
    const size_t num_threads = m_threads.size();
    for (size_t idx = 0; idx < num_threads; ++idx) {
      if (m_threads[idx]->GetStopException().IsValid()) {
        m_current_thread = m_threads[idx];
        break;
      }
    }
  }
  thread_sp = m_current_thread;
}

// Dump a description of every thread in the list to the log.
void MachThreadList::Dump() const {
  PTHREAD_MUTEX_LOCKER(locker, m_threads_mutex);
  const size_t num_threads = m_threads.size();
  for (size_t idx = 0; idx < num_threads; ++idx) {
    m_threads[idx]->Dump(idx);
  }
}

// Prepare every thread for the process resuming, applying the resume
// actions in \a thread_actions. Threads discovered during this call (new
// since the last update) are resumed by default, or suspended when the
// caller asked to run exactly one thread.
void MachThreadList::ProcessWillResume(
    MachProcess *process, const DNBThreadResumeActions &thread_actions) {
  PTHREAD_MUTEX_LOCKER(locker, m_threads_mutex);

  // Update our thread list, because sometimes libdispatch or the kernel
  // will spawn threads while a task is suspended.
  MachThreadList::collection new_threads;

  // First figure out if we were planning on running only one thread, and if so
  // force that thread to resume.
  bool run_one_thread;
  nub_thread_t solo_thread = INVALID_NUB_THREAD;
  if (thread_actions.GetSize() > 0 &&
      thread_actions.NumActionsWithState(eStateStepping) +
              thread_actions.NumActionsWithState(eStateRunning) ==
          1) {
    run_one_thread = true;
    const DNBThreadResumeAction *action_ptr = thread_actions.GetFirst();
    size_t num_actions = thread_actions.GetSize();
    for (size_t i = 0; i < num_actions; i++, action_ptr++) {
      if (action_ptr->state == eStateStepping ||
          action_ptr->state == eStateRunning) {
        solo_thread = action_ptr->tid;
        break;
      }
    }
  } else
    run_one_thread = false;

  UpdateThreadList(process, true, &new_threads);

  DNBThreadResumeAction resume_new_threads = {-1U, eStateRunning, 0,
                                              INVALID_NUB_ADDRESS};
  // If we are planning to run only one thread, any new threads should be
  // suspended.
  if (run_one_thread)
    resume_new_threads.state = eStateSuspended;

  const size_t num_new_threads = new_threads.size();
  const size_t num_threads = m_threads.size();
  for (size_t idx = 0; idx < num_threads; ++idx) {
    MachThread *thread = m_threads[idx].get();
    bool handled = false;
    for (size_t new_idx = 0; new_idx < num_new_threads; ++new_idx) {
      if (thread == new_threads[new_idx].get()) {
        thread->ThreadWillResume(&resume_new_threads);
        handled = true;
        break;
      }
    }

    if (!handled) {
      const DNBThreadResumeAction *thread_action =
          thread_actions.GetActionForThread(thread->ThreadID(), true);
      // There must always be a thread action for every thread.
      assert(thread_action);
      bool others_stopped = false;
      if (solo_thread == thread->ThreadID())
        others_stopped = true;
      thread->ThreadWillResume(thread_action, others_stopped);
    }
  }

  if (new_threads.size()) {
    for (size_t idx = 0; idx < num_new_threads; ++idx) {
      DNBLogThreadedIf(
          LOG_THREAD, "MachThreadList::ProcessWillResume (pid = %4.4x) "
                      "stop-id=%u, resuming newly discovered thread: "
                      "0x%8.8" PRIx64 ", thread-is-user-ready=%i)",
          process->ProcessID(), process->StopCount(),
          new_threads[idx]->ThreadID(), new_threads[idx]->IsUserReady());
    }
  }
}

// Refresh the thread list after the process stops and let each thread do
// its post-stop bookkeeping. Returns the number of threads.
uint32_t MachThreadList::ProcessDidStop(MachProcess *process) {
  PTHREAD_MUTEX_LOCKER(locker, m_threads_mutex);
  // Update our thread list
  const uint32_t num_threads = UpdateThreadList(process, true);
  for (uint32_t idx = 0; idx < num_threads; ++idx) {
    m_threads[idx]->ThreadDidStop();
  }
  return num_threads;
}

//----------------------------------------------------------------------
// Check each thread in our thread list to see if we should notify our
// client of the current halt in execution.
//
// Breakpoints can have callback functions associated with them than
// can return true to stop, or false to continue executing the inferior.
//
// RETURNS
//    true if we should stop and notify our clients
//    false if we should resume our child process and skip notification
//----------------------------------------------------------------------
bool MachThreadList::ShouldStop(bool &step_more) {
  PTHREAD_MUTEX_LOCKER(locker, m_threads_mutex);
  bool should_stop = false;
  const size_t num_threads = m_threads.size();
  for (size_t idx = 0; !should_stop && idx < num_threads; ++idx) {
    should_stop = m_threads[idx]->ShouldStop(step_more);
  }
  return should_stop;
}

// Notify every thread that breakpoint \a bp changed.
void MachThreadList::NotifyBreakpointChanged(const DNBBreakpoint *bp) {
  PTHREAD_MUTEX_LOCKER(locker, m_threads_mutex);
  const size_t num_threads = m_threads.size();
  for (size_t idx = 0; idx < num_threads; ++idx) {
    m_threads[idx]->NotifyBreakpointChanged(bp);
  }
}

// Enable hardware breakpoint \a bp on every thread.
//
// NOTE(review): this always returns INVALID_NUB_HW_INDEX, even when the
// per-thread enables succeed — presumably callers only rely on the
// per-thread side effect; confirm before relying on the return value.
uint32_t
MachThreadList::EnableHardwareBreakpoint(const DNBBreakpoint *bp) const {
  if (bp != NULL) {
    // Lock the list while iterating, consistent with every other accessor
    // of m_threads (the watchpoint counterparts already do this).
    PTHREAD_MUTEX_LOCKER(locker, m_threads_mutex);
    const size_t num_threads = m_threads.size();
    for (size_t idx = 0; idx < num_threads; ++idx)
      m_threads[idx]->EnableHardwareBreakpoint(bp);
  }
  return INVALID_NUB_HW_INDEX;
}

// Disable hardware breakpoint \a bp on every thread. Always returns false
// (per-thread results are not aggregated).
bool MachThreadList::DisableHardwareBreakpoint(const DNBBreakpoint *bp) const {
  if (bp != NULL) {
    // Lock the list while iterating, consistent with every other accessor
    // of m_threads.
    PTHREAD_MUTEX_LOCKER(locker, m_threads_mutex);
    const size_t num_threads = m_threads.size();
    for (size_t idx = 0; idx < num_threads; ++idx)
      m_threads[idx]->DisableHardwareBreakpoint(bp);
  }
  return false;
}

// DNBWatchpointSet() -> MachProcess::CreateWatchpoint() ->
// MachProcess::EnableWatchpoint()
//   -> MachThreadList::EnableHardwareWatchpoint().
//
// Enable hardware watchpoint \a wp on every thread as a transaction: if
// enabling fails on any thread, the partial changes on the preceding
// threads are rolled back and INVALID_NUB_HW_INDEX is returned; otherwise
// every thread commits and the hardware index is returned.
uint32_t
MachThreadList::EnableHardwareWatchpoint(const DNBBreakpoint *wp) const {
  uint32_t hw_index = INVALID_NUB_HW_INDEX;
  if (wp != NULL) {
    PTHREAD_MUTEX_LOCKER(locker, m_threads_mutex);
    const size_t num_threads = m_threads.size();
    // On Mac OS X we have to prime the control registers for new threads.
    // We do this using the control register data for the first thread, for
    // lack of a better way of choosing.
    bool also_set_on_task = true;
    for (size_t idx = 0; idx < num_threads; ++idx) {
      if ((hw_index = m_threads[idx]->EnableHardwareWatchpoint(
               wp, also_set_on_task)) == INVALID_NUB_HW_INDEX) {
        // We know that idx failed for some reason. Let's rollback the
        // transaction for [0, idx).
        for (size_t i = 0; i < idx; ++i)
          m_threads[i]->RollbackTransForHWP();
        return INVALID_NUB_HW_INDEX;
      }
      also_set_on_task = false;
    }
    // Notify each thread to commit the pending transaction.
    for (size_t idx = 0; idx < num_threads; ++idx)
      m_threads[idx]->FinishTransForHWP();
  }
  return hw_index;
}

// Disable hardware watchpoint \a wp on every thread as a transaction: if
// disabling fails on any thread, preceding threads are rolled back and
// false is returned; otherwise every thread commits and true is returned.
bool MachThreadList::DisableHardwareWatchpoint(const DNBBreakpoint *wp) const {
  if (wp != NULL) {
    PTHREAD_MUTEX_LOCKER(locker, m_threads_mutex);
    const size_t num_threads = m_threads.size();

    // On Mac OS X we have to prime the control registers for new threads.
    // We do this using the control register data for the first thread, for
    // lack of a better way of choosing.
    bool also_set_on_task = true;
    for (size_t idx = 0; idx < num_threads; ++idx) {
      if (!m_threads[idx]->DisableHardwareWatchpoint(wp, also_set_on_task)) {
        // We know that idx failed for some reason. Let's rollback the
        // transaction for [0, idx).
        for (size_t i = 0; i < idx; ++i)
          m_threads[i]->RollbackTransForHWP();
        return false;
      }
      also_set_on_task = false;
    }
    // Notify each thread to commit the pending transaction.
    for (size_t idx = 0; idx < num_threads; ++idx)
      m_threads[idx]->FinishTransForHWP();

    return true;
  }
  return false;
}

// Number of hardware watchpoints the CPU supports, queried from an
// arbitrary (the first) thread; 0 if there are no threads.
uint32_t MachThreadList::NumSupportedHardwareWatchpoints() const {
  PTHREAD_MUTEX_LOCKER(locker, m_threads_mutex);
  const size_t num_threads = m_threads.size();
  // Use an arbitrary thread to retrieve the number of supported hardware
  // watchpoints.
  if (num_threads)
    return m_threads[0]->NumSupportedHardwareWatchpoints();
  return 0;
}

// Index of the first thread whose stop exception is the soft signal
// \a signo, or UINT32_MAX if no thread stopped with that signal.
uint32_t MachThreadList::GetThreadIndexForThreadStoppedWithSignal(
    const int signo) const {
  PTHREAD_MUTEX_LOCKER(locker, m_threads_mutex);
  const size_t num_threads = m_threads.size();
  for (uint32_t idx = 0; idx < num_threads; ++idx) {
    if (m_threads[idx]->GetStopException().SoftSignal() == signo)
      return idx;
  }
  return UINT32_MAX;
}