//===-- MachThreadList.cpp --------------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
//  Created by Greg Clayton on 6/19/07.
//
//===----------------------------------------------------------------------===//

#include "MachThreadList.h"

#include <inttypes.h>
#include <sys/sysctl.h>

#include "DNBLog.h"
#include "DNBThreadResumeActions.h"
#include "MachProcess.h"

MachThreadList::MachThreadList() :
    m_threads(),
    m_threads_mutex(PTHREAD_MUTEX_RECURSIVE)
{
}

MachThreadList::~MachThreadList()
{
}

nub_state_t
MachThreadList::GetState(nub_thread_t tid)
{
    MachThreadSP thread_sp (GetThreadByID (tid));
    if (thread_sp)
        return thread_sp->GetState();
    return eStateInvalid;
}

const char *
MachThreadList::GetName (nub_thread_t tid)
{
    MachThreadSP thread_sp (GetThreadByID (tid));
    if (thread_sp)
        return thread_sp->GetName();
    return NULL;
}

nub_thread_t
MachThreadList::SetCurrentThread(nub_thread_t tid)
{
    MachThreadSP thread_sp (GetThreadByID (tid));
    if (thread_sp)
    {
        m_current_thread = thread_sp;
        return tid;
    }
    return INVALID_NUB_THREAD;
}


bool
MachThreadList::GetThreadStoppedReason(nub_thread_t tid, struct DNBThreadStopInfo *stop_info) const
{
    MachThreadSP thread_sp (GetThreadByID (tid));
    if (thread_sp)
        return thread_sp->GetStopException().GetStopInfo(stop_info);
    return false;
}

bool
MachThreadList::GetIdentifierInfo (nub_thread_t tid, thread_identifier_info_data_t *ident_info)
{
    thread_t mach_port_number = GetMachPortNumberByThreadID (tid);

    mach_msg_type_number_t count = THREAD_IDENTIFIER_INFO_COUNT;
    return ::thread_info (mach_port_number, THREAD_IDENTIFIER_INFO, (thread_info_t)ident_info, &count) == KERN_SUCCESS;
}

void
MachThreadList::DumpThreadStoppedReason (nub_thread_t tid) const
{
    MachThreadSP thread_sp (GetThreadByID (tid));
    if (thread_sp)
        thread_sp->GetStopException().DumpStopReason();
}

const char *
MachThreadList::GetThreadInfo (nub_thread_t tid) const
{
    MachThreadSP thread_sp (GetThreadByID (tid));
    if (thread_sp)
        return thread_sp->GetBasicInfoAsString();
    return NULL;
}

MachThreadSP
MachThreadList::GetThreadByID (nub_thread_t tid) const
{
    PTHREAD_MUTEX_LOCKER (locker, m_threads_mutex);
    MachThreadSP thread_sp;
    const size_t num_threads = m_threads.size();
    for (size_t idx = 0; idx < num_threads; ++idx)
    {
        if (m_threads[idx]->ThreadID() == tid)
        {
            thread_sp = m_threads[idx];
            break;
        }
    }
    return thread_sp;
}

MachThreadSP
MachThreadList::GetThreadByMachPortNumber (thread_t mach_port_number) const
{
    PTHREAD_MUTEX_LOCKER (locker, m_threads_mutex);
    MachThreadSP thread_sp;
    const size_t num_threads = m_threads.size();
    for (size_t idx = 0; idx < num_threads; ++idx)
    {
        if (m_threads[idx]->MachPortNumber() == mach_port_number)
        {
            thread_sp = m_threads[idx];
            break;
        }
    }
    return thread_sp;
}

nub_thread_t
MachThreadList::GetThreadIDByMachPortNumber (thread_t mach_port_number) const
{
    PTHREAD_MUTEX_LOCKER (locker, m_threads_mutex);
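    // Map a Mach port number back to the globally unique thread ID that the
    // nub_thread_t based APIs in this class use to identify a thread.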
    const size_t num_threads = m_threads.size();
    for (size_t idx = 0; idx < num_threads; ++idx)
    {
        if (m_threads[idx]->MachPortNumber() == mach_port_number)
        {
            return m_threads[idx]->ThreadID();
        }
    }
    return INVALID_NUB_THREAD;
}

thread_t
MachThreadList::GetMachPortNumberByThreadID (nub_thread_t globally_unique_id) const
{
    PTHREAD_MUTEX_LOCKER (locker, m_threads_mutex);
    const size_t num_threads = m_threads.size();
    for (size_t idx = 0; idx < num_threads; ++idx)
    {
        if (m_threads[idx]->ThreadID() == globally_unique_id)
        {
            return m_threads[idx]->MachPortNumber();
        }
    }
    return 0;
}

bool
MachThreadList::GetRegisterValue (nub_thread_t tid, uint32_t reg_set_idx, uint32_t reg_idx, DNBRegisterValue *reg_value) const
{
    MachThreadSP thread_sp (GetThreadByID (tid));
    if (thread_sp)
        return thread_sp->GetRegisterValue(reg_set_idx, reg_idx, reg_value);

    return false;
}

bool
MachThreadList::SetRegisterValue (nub_thread_t tid, uint32_t reg_set_idx, uint32_t reg_idx, const DNBRegisterValue *reg_value) const
{
    MachThreadSP thread_sp (GetThreadByID (tid));
    if (thread_sp)
        return thread_sp->SetRegisterValue(reg_set_idx, reg_idx, reg_value);

    return false;
}

nub_size_t
MachThreadList::GetRegisterContext (nub_thread_t tid, void *buf, size_t buf_len)
{
    MachThreadSP thread_sp (GetThreadByID (tid));
    if (thread_sp)
        return thread_sp->GetRegisterContext (buf, buf_len);
    return 0;
}

nub_size_t
MachThreadList::SetRegisterContext (nub_thread_t tid, const void *buf, size_t buf_len)
{
    MachThreadSP thread_sp (GetThreadByID (tid));
    if (thread_sp)
        return thread_sp->SetRegisterContext (buf, buf_len);
    return 0;
}

uint32_t
MachThreadList::SaveRegisterState (nub_thread_t tid)
{
    MachThreadSP thread_sp (GetThreadByID (tid));
    if (thread_sp)
        return thread_sp->SaveRegisterState ();
    return 0;
}

bool
MachThreadList::RestoreRegisterState (nub_thread_t tid, uint32_t save_id)
{
    MachThreadSP thread_sp (GetThreadByID (tid));
    if (thread_sp)
        return thread_sp->RestoreRegisterState (save_id);
    return false;
}


nub_size_t
MachThreadList::NumThreads () const
{
    PTHREAD_MUTEX_LOCKER (locker, m_threads_mutex);
    return m_threads.size();
}

nub_thread_t
MachThreadList::ThreadIDAtIndex (nub_size_t idx) const
{
    PTHREAD_MUTEX_LOCKER (locker, m_threads_mutex);
    if (idx < m_threads.size())
        return m_threads[idx]->ThreadID();
    return INVALID_NUB_THREAD;
}

nub_thread_t
MachThreadList::CurrentThreadID ()
{
    MachThreadSP thread_sp;
    CurrentThread(thread_sp);
    if (thread_sp.get())
        return thread_sp->ThreadID();
    return INVALID_NUB_THREAD;
}

bool
MachThreadList::NotifyException(MachException::Data& exc)
{
    MachThreadSP thread_sp (GetThreadByMachPortNumber (exc.thread_port));
    if (thread_sp)
    {
        thread_sp->NotifyException(exc);
        return true;
    }
    return false;
}

void
MachThreadList::Clear()
{
    PTHREAD_MUTEX_LOCKER (locker, m_threads_mutex);
    m_threads.clear();
}

uint32_t
MachThreadList::UpdateThreadList(MachProcess *process, bool update, MachThreadList::collection *new_threads)
{
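    // When the list is empty or update is true, rebuild m_threads from the
    // kernel's view of the task (::task_threads() below), reusing the existing
    // MachThread objects for threads we already know about.  If new_threads is
    // non-NULL, threads we have not seen before are also appended to it.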
    DNBLogThreadedIf (LOG_THREAD, "MachThreadList::UpdateThreadList (pid = %4.4x, update = %u) process stop count = %u", process->ProcessID(), update, process->StopCount());
    // locker will keep a mutex locked until it goes out of scope
    PTHREAD_MUTEX_LOCKER (locker, m_threads_mutex);

#if defined (__i386__) || defined (__x86_64__)
    if (process->StopCount() == 0)
    {
        int mib[4] = { CTL_KERN, KERN_PROC, KERN_PROC_PID, process->ProcessID() };
        struct kinfo_proc processInfo;
        size_t bufsize = sizeof(processInfo);
        bool is_64_bit = false;
        if (sysctl(mib, (unsigned)(sizeof(mib)/sizeof(int)), &processInfo, &bufsize, NULL, 0) == 0 && bufsize > 0)
        {
            if (processInfo.kp_proc.p_flag & P_LP64)
                is_64_bit = true;
        }
        if (is_64_bit)
            DNBArchProtocol::SetArchitecture(CPU_TYPE_X86_64);
        else
            DNBArchProtocol::SetArchitecture(CPU_TYPE_I386);
    }
#endif

    if (m_threads.empty() || update)
    {
        thread_array_t thread_list = NULL;
        mach_msg_type_number_t thread_list_count = 0;
        task_t task = process->Task().TaskPort();
        DNBError err(::task_threads (task, &thread_list, &thread_list_count), DNBError::MachKernel);

        if (DNBLogCheckLogBit(LOG_THREAD) || err.Fail())
            err.LogThreaded("::task_threads ( task = 0x%4.4x, thread_list => %p, thread_list_count => %u )", task, thread_list, thread_list_count);

        if (err.Error() == KERN_SUCCESS && thread_list_count > 0)
        {
            MachThreadList::collection currThreads;
            size_t idx;
            // Iterate through the current thread list and see which threads
            // we already have in our list (keep them), which ones we don't
            // (add them), and which ones are not around anymore (remove them).
            for (idx = 0; idx < thread_list_count; ++idx)
            {
                const thread_t mach_port_num = thread_list[idx];

                uint64_t unique_thread_id = MachThread::GetGloballyUniqueThreadIDForMachPortID (mach_port_num);
                MachThreadSP thread_sp (GetThreadByID (unique_thread_id));
                if (thread_sp)
                {
                    // Keep the existing thread class
                    currThreads.push_back(thread_sp);
                }
                else
                {
                    // We don't have this thread, let's add it.
                    thread_sp.reset(new MachThread(process, unique_thread_id, mach_port_num));

                    // Only add the new thread to our list once it is ready to
                    // be displayed and shown to users...
                    if (thread_sp->IsUserReady())
                    {
                        if (new_threads)
                            new_threads->push_back(thread_sp);

                        currThreads.push_back(thread_sp);
                    }
                }
            }

            m_threads.swap(currThreads);
            m_current_thread.reset();

            // Free the vm memory given to us by ::task_threads()
            vm_size_t thread_list_size = (vm_size_t) (thread_list_count * sizeof (thread_t));
            ::vm_deallocate (::mach_task_self(),
                             (vm_address_t)thread_list,
                             thread_list_size);
        }
    }
    return m_threads.size();
}


void
MachThreadList::CurrentThread (MachThreadSP& thread_sp)
{
    // locker will keep a mutex locked until it goes out of scope
    PTHREAD_MUTEX_LOCKER (locker, m_threads_mutex);
    if (m_current_thread.get() == NULL)
    {
        // Figure out which thread is going to be our current thread.
        // This is currently done by finding the first thread in the list
        // that has a valid exception.
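        // If no thread has a valid stop exception we leave m_current_thread
        // empty, and callers such as CurrentThreadID() will report
        // INVALID_NUB_THREAD.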
        const uint32_t num_threads = m_threads.size();
        for (uint32_t idx = 0; idx < num_threads; ++idx)
        {
            if (m_threads[idx]->GetStopException().IsValid())
            {
                m_current_thread = m_threads[idx];
                break;
            }
        }
    }
    thread_sp = m_current_thread;
}

void
MachThreadList::Dump() const
{
    PTHREAD_MUTEX_LOCKER (locker, m_threads_mutex);
    const uint32_t num_threads = m_threads.size();
    for (uint32_t idx = 0; idx < num_threads; ++idx)
    {
        m_threads[idx]->Dump(idx);
    }
}


void
MachThreadList::ProcessWillResume(MachProcess *process, const DNBThreadResumeActions &thread_actions)
{
    PTHREAD_MUTEX_LOCKER (locker, m_threads_mutex);

    // Update our thread list, because sometimes libdispatch or the kernel
    // will spawn threads while a task is suspended.
    MachThreadList::collection new_threads;

    // First figure out if we were planning on running only one thread, and if so force that thread to resume.
    bool run_one_thread;
    nub_thread_t solo_thread = INVALID_NUB_THREAD;
    if (thread_actions.GetSize() > 0
        && thread_actions.NumActionsWithState(eStateStepping) + thread_actions.NumActionsWithState (eStateRunning) == 1)
    {
        run_one_thread = true;
        const DNBThreadResumeAction *action_ptr = thread_actions.GetFirst();
        size_t num_actions = thread_actions.GetSize();
        for (size_t i = 0; i < num_actions; i++, action_ptr++)
        {
            if (action_ptr->state == eStateStepping || action_ptr->state == eStateRunning)
            {
                solo_thread = action_ptr->tid;
                break;
            }
        }
    }
    else
        run_one_thread = false;

    UpdateThreadList(process, true, &new_threads);

    DNBThreadResumeAction resume_new_threads = { -1U, eStateRunning, 0, INVALID_NUB_ADDRESS };
    // If we are planning to run only one thread, any new threads should be suspended.
    if (run_one_thread)
        resume_new_threads.state = eStateSuspended;

    const uint32_t num_new_threads = new_threads.size();
    const uint32_t num_threads = m_threads.size();
    for (uint32_t idx = 0; idx < num_threads; ++idx)
    {
        MachThread *thread = m_threads[idx].get();
        bool handled = false;
        for (uint32_t new_idx = 0; new_idx < num_new_threads; ++new_idx)
        {
            if (thread == new_threads[new_idx].get())
            {
                thread->ThreadWillResume(&resume_new_threads);
                handled = true;
                break;
            }
        }

        if (!handled)
        {
            const DNBThreadResumeAction *thread_action = thread_actions.GetActionForThread (thread->ThreadID(), true);
            // There must always be a thread action for every thread.
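            // The 'true' argument above lets GetActionForThread() fall back to
            // the default resume action for threads that were not given an
            // explicit action, so the assert below is not expected to fire in
            // practice.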
            assert (thread_action);
            bool others_stopped = false;
            if (solo_thread == thread->ThreadID())
                others_stopped = true;
            thread->ThreadWillResume (thread_action, others_stopped);
        }
    }

    if (new_threads.size())
    {
        for (uint32_t idx = 0; idx < num_new_threads; ++idx)
        {
            DNBLogThreadedIf (LOG_THREAD, "MachThreadList::ProcessWillResume (pid = %4.4x) stop-id=%u, resuming newly discovered thread: 0x%8.8" PRIx64 ", thread-is-user-ready=%i",
                              process->ProcessID(),
                              process->StopCount(),
                              new_threads[idx]->ThreadID(),
                              new_threads[idx]->IsUserReady());
        }
    }
}

uint32_t
MachThreadList::ProcessDidStop(MachProcess *process)
{
    PTHREAD_MUTEX_LOCKER (locker, m_threads_mutex);
    // Update our thread list
    const uint32_t num_threads = UpdateThreadList(process, true);
    for (uint32_t idx = 0; idx < num_threads; ++idx)
    {
        m_threads[idx]->ThreadDidStop();
    }
    return num_threads;
}

//----------------------------------------------------------------------
// Check each thread in our thread list to see if we should notify our
// client of the current halt in execution.
//
// Breakpoints can have callback functions associated with them that
// can return true to stop, or false to continue executing the inferior.
//
// RETURNS
//    true if we should stop and notify our clients
//    false if we should resume our child process and skip notification
//----------------------------------------------------------------------
bool
MachThreadList::ShouldStop(bool &step_more)
{
    PTHREAD_MUTEX_LOCKER (locker, m_threads_mutex);
    bool should_stop = false;
    const uint32_t num_threads = m_threads.size();
    for (uint32_t idx = 0; !should_stop && idx < num_threads; ++idx)
    {
        should_stop = m_threads[idx]->ShouldStop(step_more);
    }
    return should_stop;
}


void
MachThreadList::NotifyBreakpointChanged (const DNBBreakpoint *bp)
{
    PTHREAD_MUTEX_LOCKER (locker, m_threads_mutex);
    const uint32_t num_threads = m_threads.size();
    for (uint32_t idx = 0; idx < num_threads; ++idx)
    {
        m_threads[idx]->NotifyBreakpointChanged(bp);
    }
}


uint32_t
MachThreadList::EnableHardwareBreakpoint (const DNBBreakpoint* bp) const
{
    if (bp != NULL)
    {
        const uint32_t num_threads = m_threads.size();
        for (uint32_t idx = 0; idx < num_threads; ++idx)
            m_threads[idx]->EnableHardwareBreakpoint(bp);
    }
    return INVALID_NUB_HW_INDEX;
}

bool
MachThreadList::DisableHardwareBreakpoint (const DNBBreakpoint* bp) const
{
    if (bp != NULL)
    {
        const uint32_t num_threads = m_threads.size();
        for (uint32_t idx = 0; idx < num_threads; ++idx)
            m_threads[idx]->DisableHardwareBreakpoint(bp);
    }
    return false;
}

// DNBWatchpointSet() -> MachProcess::CreateWatchpoint() -> MachProcess::EnableWatchpoint()
// -> MachThreadList::EnableHardwareWatchpoint().
uint32_t
MachThreadList::EnableHardwareWatchpoint (const DNBBreakpoint* wp) const
{
    uint32_t hw_index = INVALID_NUB_HW_INDEX;
    if (wp != NULL)
    {
        PTHREAD_MUTEX_LOCKER (locker, m_threads_mutex);
        const uint32_t num_threads = m_threads.size();
        // On Mac OS X we have to prime the control registers for new threads.  We do this
        // using the control register data for the first thread, for lack of a better way of choosing.
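        // Enabling is done as a two-phase transaction: attempt to set the
        // watchpoint on every thread first, and only commit (FinishTransForHWP)
        // once all of them succeed.  If any thread fails, roll back the threads
        // that were already modified and report failure.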
        bool also_set_on_task = true;
        for (uint32_t idx = 0; idx < num_threads; ++idx)
        {
            if ((hw_index = m_threads[idx]->EnableHardwareWatchpoint(wp, also_set_on_task)) == INVALID_NUB_HW_INDEX)
            {
                // We know that idx failed for some reason.  Let's roll back the transaction for [0, idx).
                for (uint32_t i = 0; i < idx; ++i)
                    m_threads[i]->RollbackTransForHWP();
                return INVALID_NUB_HW_INDEX;
            }
            also_set_on_task = false;
        }
        // Notify each thread to commit the pending transaction.
        for (uint32_t idx = 0; idx < num_threads; ++idx)
            m_threads[idx]->FinishTransForHWP();
    }
    return hw_index;
}

bool
MachThreadList::DisableHardwareWatchpoint (const DNBBreakpoint* wp) const
{
    if (wp != NULL)
    {
        PTHREAD_MUTEX_LOCKER (locker, m_threads_mutex);
        const uint32_t num_threads = m_threads.size();

        // On Mac OS X we have to prime the control registers for new threads.  We do this
        // using the control register data for the first thread, for lack of a better way of choosing.
        bool also_set_on_task = true;
        for (uint32_t idx = 0; idx < num_threads; ++idx)
        {
            if (!m_threads[idx]->DisableHardwareWatchpoint(wp, also_set_on_task))
            {
                // We know that idx failed for some reason.  Let's roll back the transaction for [0, idx).
                for (uint32_t i = 0; i < idx; ++i)
                    m_threads[i]->RollbackTransForHWP();
                return false;
            }
            also_set_on_task = false;
        }
        // Notify each thread to commit the pending transaction.
        for (uint32_t idx = 0; idx < num_threads; ++idx)
            m_threads[idx]->FinishTransForHWP();

        return true;
    }
    return false;
}

uint32_t
MachThreadList::NumSupportedHardwareWatchpoints () const
{
    PTHREAD_MUTEX_LOCKER (locker, m_threads_mutex);
    const uint32_t num_threads = m_threads.size();
    // Use an arbitrary thread to retrieve the number of supported hardware watchpoints.
    if (num_threads)
        return m_threads[0]->NumSupportedHardwareWatchpoints();
    return 0;
}

uint32_t
MachThreadList::GetThreadIndexForThreadStoppedWithSignal (const int signo) const
{
    PTHREAD_MUTEX_LOCKER (locker, m_threads_mutex);
    const uint32_t num_threads = m_threads.size();
    for (uint32_t idx = 0; idx < num_threads; ++idx)
    {
        if (m_threads[idx]->GetStopException().SoftSignal () == signo)
            return idx;
    }
    return UINT32_MAX;
}