/* Low level interface to ptrace, for the remote server for GDB.
   Copyright (C) 1995-2020 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "server.h"
#include "linux-low.h"
#include "nat/linux-osdata.h"
#include "gdbsupport/agent.h"
#include "tdesc.h"
#include "gdbsupport/rsp-low.h"
#include "gdbsupport/signals-state-save-restore.h"
#include "nat/linux-nat.h"
#include "nat/linux-waitpid.h"
#include "gdbsupport/gdb_wait.h"
#include "nat/gdb_ptrace.h"
#include "nat/linux-ptrace.h"
#include "nat/linux-procfs.h"
#include "nat/linux-personality.h"
#include <signal.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <sched.h>
#include <ctype.h>
#include <pwd.h>
#include <sys/types.h>
#include <dirent.h>
#include <sys/stat.h>
#include <sys/vfs.h>
#include <sys/uio.h>
#include "gdbsupport/filestuff.h"
#include "tracepoint.h"
#include <inttypes.h>
#include "gdbsupport/common-inferior.h"
#include "nat/fork-inferior.h"
#include "gdbsupport/environ.h"
#include "gdbsupport/gdb-sigmask.h"
#include "gdbsupport/scoped_restore.h"
#ifndef ELFMAG0
/* Don't include <linux/elf.h> here.  If it got included by gdb_proc_service.h
   then ELFMAG0 will have been defined.  If it didn't get included by
   gdb_proc_service.h then including it will likely introduce a duplicate
   definition of elf_fpregset_t.  */
#include <elf.h>
#endif
#include "nat/linux-namespaces.h"

#ifdef HAVE_PERSONALITY
# include <sys/personality.h>
# if !HAVE_DECL_ADDR_NO_RANDOMIZE
#  define ADDR_NO_RANDOMIZE 0x0040000
# endif
#endif

#ifndef O_LARGEFILE
#define O_LARGEFILE 0
#endif

#ifndef AT_HWCAP2
#define AT_HWCAP2 26
#endif

/* Some targets did not define these ptrace constants from the start,
   so gdbserver defines them locally here.  In the future, these may
   be removed after they are added to asm/ptrace.h.  */
#if !(defined(PT_TEXT_ADDR) \
      || defined(PT_DATA_ADDR) \
      || defined(PT_TEXT_END_ADDR))
#if defined(__mcoldfire__)
/* These are still undefined in 3.10 kernels.  */
#define PT_TEXT_ADDR 49*4
#define PT_DATA_ADDR 50*4
#define PT_TEXT_END_ADDR 51*4
/* These are still undefined in 3.10 kernels.  */
#elif defined(__TMS320C6X__)
#define PT_TEXT_ADDR     (0x10000*4)
#define PT_DATA_ADDR     (0x10004*4)
#define PT_TEXT_END_ADDR (0x10008*4)
#endif
#endif

#if (defined(__UCLIBC__) \
     && defined(HAS_NOMMU) \
     && defined(PT_TEXT_ADDR) \
     && defined(PT_DATA_ADDR) \
     && defined(PT_TEXT_END_ADDR))
#define SUPPORTS_READ_OFFSETS
#endif

#ifdef HAVE_LINUX_BTRACE
# include "nat/linux-btrace.h"
# include "gdbsupport/btrace-common.h"
#endif
#ifndef HAVE_ELF32_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint32_t a_type;   /* Entry type */
  union
    {
      uint32_t a_val;   /* Integer value */
      /* We used to have pointer elements added here.  We cannot do that,
         though, since it does not work when using 32-bit definitions
         on 64-bit platforms and vice versa.  */
    } a_un;
} Elf32_auxv_t;
#endif

#ifndef HAVE_ELF64_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint64_t a_type;   /* Entry type */
  union
    {
      uint64_t a_val;   /* Integer value */
      /* We used to have pointer elements added here.  We cannot do that,
         though, since it does not work when using 32-bit definitions
         on 64-bit platforms and vice versa.  */
    } a_un;
} Elf64_auxv_t;
#endif

/* Does the current host support PTRACE_GETREGSET?  */
int have_ptrace_getregset = -1;

/* LWP accessors.  */

/* See nat/linux-nat.h.  */

ptid_t
ptid_of_lwp (struct lwp_info *lwp)
{
  return ptid_of (get_lwp_thread (lwp));
}

/* See nat/linux-nat.h.  */

void
lwp_set_arch_private_info (struct lwp_info *lwp,
			   struct arch_lwp_info *info)
{
  lwp->arch_private = info;
}

/* See nat/linux-nat.h.  */

struct arch_lwp_info *
lwp_arch_private_info (struct lwp_info *lwp)
{
  return lwp->arch_private;
}

/* See nat/linux-nat.h.  */

int
lwp_is_stopped (struct lwp_info *lwp)
{
  return lwp->stopped;
}

/* See nat/linux-nat.h.  */

enum target_stop_reason
lwp_stop_reason (struct lwp_info *lwp)
{
  return lwp->stop_reason;
}

/* See nat/linux-nat.h.  */

int
lwp_is_stepping (struct lwp_info *lwp)
{
  return lwp->stepping;
}

/* A list of all unknown processes which receive stop signals.  Some
   other process will presumably claim each of these as forked
   children momentarily.  */

struct simple_pid_list
{
  /* The process ID.  */
  int pid;

  /* The status as reported by waitpid.  */
  int status;

  /* Next in chain.  */
  struct simple_pid_list *next;
};
static struct simple_pid_list *stopped_pids;

/* Trivial list manipulation functions to keep track of a list of new
   stopped processes.  */

static void
add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
{
  struct simple_pid_list *new_pid = XNEW (struct simple_pid_list);

  new_pid->pid = pid;
  new_pid->status = status;
  new_pid->next = *listp;
  *listp = new_pid;
}

static int
pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
{
  struct simple_pid_list **p;

  for (p = listp; *p != NULL; p = &(*p)->next)
    if ((*p)->pid == pid)
      {
	struct simple_pid_list *next = (*p)->next;

	*statusp = (*p)->status;
	xfree (*p);
	*p = next;
	return 1;
      }
  return 0;
}
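/* Usage sketch for the list above (values hypothetical): if a new
   child's initial SIGSTOP is collected by waitpid before the parent's
   PTRACE_EVENT_FORK arrives, the wait machinery stashes it with

     add_to_pid_list (&stopped_pids, new_pid, status);

   and handle_extended_wait later claims it with

     if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
       ... wait for the initial stop explicitly ...  */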
enum stopping_threads_kind
  {
    /* Not stopping threads presently.  */
    NOT_STOPPING_THREADS,

    /* Stopping threads.  */
    STOPPING_THREADS,

    /* Stopping and suspending threads.  */
    STOPPING_AND_SUSPENDING_THREADS
  };

/* This is set while stop_all_lwps is in effect.  */
enum stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;

/* FIXME make into a target method?  */
int using_threads = 1;

/* True if we're presently stabilizing threads (moving them out of
   jump pads).  */
static int stabilizing_threads;

static void unsuspend_all_lwps (struct lwp_info *except);
static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
static int lwp_is_marked_dead (struct lwp_info *lwp);
static int kill_lwp (unsigned long lwpid, int signo);
static void enqueue_pending_signal (struct lwp_info *lwp, int signal,
				    siginfo_t *info);
static int linux_low_ptrace_options (int attached);
static int check_ptrace_stopped_lwp_gone (struct lwp_info *lp);

/* When the event-loop is doing a step-over, this points at the thread
   being stepped.  */
ptid_t step_over_bkpt;

bool
linux_process_target::low_supports_breakpoints ()
{
  return false;
}

CORE_ADDR
linux_process_target::low_get_pc (regcache *regcache)
{
  return 0;
}

void
linux_process_target::low_set_pc (regcache *regcache, CORE_ADDR newpc)
{
  gdb_assert_not_reached ("linux target op low_set_pc is not implemented");
}

std::vector<CORE_ADDR>
linux_process_target::low_get_next_pcs (regcache *regcache)
{
  gdb_assert_not_reached ("linux target op low_get_next_pcs is not "
			  "implemented");
}

int
linux_process_target::low_decr_pc_after_break ()
{
  return 0;
}

/* True if LWP is stopped in its stepping range.  */

static int
lwp_in_step_range (struct lwp_info *lwp)
{
  CORE_ADDR pc = lwp->stop_pc;

  return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
}

/* The read/write ends of the pipe registered as waitable file in the
   event loop.  */
static int linux_event_pipe[2] = { -1, -1 };

/* True if we're currently in async mode.  */
#define target_is_async_p() (linux_event_pipe[0] != -1)

static void send_sigstop (struct lwp_info *lwp);

/* Return 1 if HEADER is a 64-bit ELF file, 0 if it is a 32-bit ELF
   file, and -1 if it is not an ELF file at all.  On success, store
   the machine type in *MACHINE.  */

static int
elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
{
  if (header->e_ident[EI_MAG0] == ELFMAG0
      && header->e_ident[EI_MAG1] == ELFMAG1
      && header->e_ident[EI_MAG2] == ELFMAG2
      && header->e_ident[EI_MAG3] == ELFMAG3)
    {
      *machine = header->e_machine;
      return header->e_ident[EI_CLASS] == ELFCLASS64;
    }
  *machine = EM_NONE;
  return -1;
}

/* Return non-zero if FILE is a 64-bit ELF file,
   zero if the file is not a 64-bit ELF file,
   and -1 if the file is not accessible or doesn't exist.  */

static int
elf_64_file_p (const char *file, unsigned int *machine)
{
  Elf64_Ehdr header;
  int fd;

  fd = open (file, O_RDONLY);
  if (fd < 0)
    return -1;

  if (read (fd, &header, sizeof (header)) != sizeof (header))
    {
      close (fd);
      return 0;
    }
  close (fd);

  return elf_64_header_p (&header, machine);
}

/* Accepts an integer PID; returns true if the executable that PID is
   running is a 64-bit ELF file.  */

int
linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
{
  char file[PATH_MAX];

  sprintf (file, "/proc/%d/exe", pid);
  return elf_64_file_p (file, machine);
}
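/* Example of the tri-state contract above (PID 1234 is hypothetical):

     unsigned int machine;
     switch (linux_pid_exe_is_elf_64_file (1234, &machine))
       {
       case 1:  ... 64-bit inferior ...
       case 0:  ... 32-bit inferior (or short/unreadable header) ...
       case -1: ... /proc/1234/exe not accessible ...
       }  */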
void
linux_process_target::delete_lwp (lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);

  if (debug_threads)
    debug_printf ("deleting %ld\n", lwpid_of (thr));

  remove_thread (thr);

  low_delete_thread (lwp->arch_private);

  delete lwp;
}

void
linux_process_target::low_delete_thread (arch_lwp_info *info)
{
  /* Default implementation should be overridden if architecture-specific
     info is being used.  */
  gdb_assert (info == nullptr);
}

process_info *
linux_process_target::add_linux_process (int pid, int attached)
{
  struct process_info *proc;

  proc = add_process (pid, attached);
  proc->priv = XCNEW (struct process_info_private);

  proc->priv->arch_private = low_new_process ();

  return proc;
}

arch_process_info *
linux_process_target::low_new_process ()
{
  return nullptr;
}

void
linux_process_target::low_delete_process (arch_process_info *info)
{
  /* Default implementation must be overridden if architecture-specific
     info exists.  */
  gdb_assert (info == nullptr);
}

void
linux_process_target::low_new_fork (process_info *parent, process_info *child)
{
  /* Nop.  */
}

void
linux_process_target::arch_setup_thread (thread_info *thread)
{
  struct thread_info *saved_thread;

  saved_thread = current_thread;
  current_thread = thread;

  low_arch_setup ();

  current_thread = saved_thread;
}

int
linux_process_target::handle_extended_wait (lwp_info **orig_event_lwp,
					    int wstat)
{
  client_state &cs = get_client_state ();
  struct lwp_info *event_lwp = *orig_event_lwp;
  int event = linux_ptrace_get_extended_event (wstat);
  struct thread_info *event_thr = get_lwp_thread (event_lwp);
  struct lwp_info *new_lwp;

  gdb_assert (event_lwp->waitstatus.kind == TARGET_WAITKIND_IGNORE);

  /* All extended events we currently use are mid-syscall.  Only
     PTRACE_EVENT_STOP is delivered more like a signal-stop, but
     you have to be using PTRACE_SEIZE to get that.  */
  event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;

  if ((event == PTRACE_EVENT_FORK) || (event == PTRACE_EVENT_VFORK)
      || (event == PTRACE_EVENT_CLONE))
    {
      ptid_t ptid;
      unsigned long new_pid;
      int ret, status;

      /* Get the pid of the new lwp.  */
      ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_thr), (PTRACE_TYPE_ARG3) 0,
	      &new_pid);

      /* If we haven't already seen the new PID stop, wait for it now.  */
      if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
	{
	  /* The new child has a pending SIGSTOP.  We can't affect it until it
	     hits the SIGSTOP, but we're already attached.  */
	  ret = my_waitpid (new_pid, &status, __WALL);

	  if (ret == -1)
	    perror_with_name ("waiting for new child");
	  else if (ret != new_pid)
	    warning ("wait returned unexpected PID %d", ret);
	  else if (!WIFSTOPPED (status))
	    warning ("wait returned unexpected status 0x%x", status);
	}

      if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK)
	{
	  struct process_info *parent_proc;
	  struct process_info *child_proc;
	  struct lwp_info *child_lwp;
	  struct thread_info *child_thr;
	  struct target_desc *tdesc;

	  ptid = ptid_t (new_pid, new_pid, 0);

	  if (debug_threads)
	    {
	      debug_printf ("HEW: Got fork event from LWP %ld, "
			    "new child is %d\n",
			    ptid_of (event_thr).lwp (),
			    ptid.pid ());
	    }

	  /* Add the new process to the tables and clone the breakpoint
	     lists of the parent.  We need to do this even if the new process
	     will be detached, since we will need the process object and the
	     breakpoints to remove any breakpoints from memory when we
	     detach, and the client side will access registers.  */
	  child_proc = add_linux_process (new_pid, 0);
	  gdb_assert (child_proc != NULL);
	  child_lwp = add_lwp (ptid);
	  gdb_assert (child_lwp != NULL);
	  child_lwp->stopped = 1;
	  child_lwp->must_set_ptrace_flags = 1;
	  child_lwp->status_pending_p = 0;
	  child_thr = get_lwp_thread (child_lwp);
	  child_thr->last_resume_kind = resume_stop;
	  child_thr->last_status.kind = TARGET_WAITKIND_STOPPED;

	  /* If we're suspending all threads, leave this one suspended
	     too.  If the fork/clone parent is stepping over a breakpoint,
	     all other threads have been suspended already.  Leave the
	     child suspended too.  */
	  if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
	      || event_lwp->bp_reinsert != 0)
	    {
	      if (debug_threads)
		debug_printf ("HEW: leaving child suspended\n");
	      child_lwp->suspended = 1;
	    }

	  parent_proc = get_thread_process (event_thr);
	  child_proc->attached = parent_proc->attached;

	  if (event_lwp->bp_reinsert != 0
	      && supports_software_single_step ()
	      && event == PTRACE_EVENT_VFORK)
	    {
	      /* If we leave single-step breakpoints there, the child will
		 hit them, so uninsert single-step breakpoints from the
		 parent (and child).  Once the vfork child is done,
		 reinsert them in the parent.  */
	      uninsert_single_step_breakpoints (event_thr);
	    }

	  clone_all_breakpoints (child_thr, event_thr);

	  tdesc = allocate_target_description ();
	  copy_target_description (tdesc, parent_proc->tdesc);
	  child_proc->tdesc = tdesc;

	  /* Clone arch-specific process data.  */
	  low_new_fork (parent_proc, child_proc);

	  /* Save fork info in the parent thread.  */
	  if (event == PTRACE_EVENT_FORK)
	    event_lwp->waitstatus.kind = TARGET_WAITKIND_FORKED;
	  else if (event == PTRACE_EVENT_VFORK)
	    event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORKED;

	  event_lwp->waitstatus.value.related_pid = ptid;

	  /* The status_pending field contains bits denoting the
	     extended event, so when the pending event is handled,
	     the handler will look at lwp->waitstatus.  */
	  event_lwp->status_pending_p = 1;
	  event_lwp->status_pending = wstat;

	  /* Link the threads until the parent event is passed on to
	     higher layers.  */
	  event_lwp->fork_relative = child_lwp;
	  child_lwp->fork_relative = event_lwp;

	  /* If the parent thread is doing step-over with single-step
	     breakpoints, the list of single-step breakpoints is cloned
	     from the parent's.  Remove them from the child process.
	     In case of vfork, we'll reinsert them back once the vforked
	     child is done.  */
	  if (event_lwp->bp_reinsert != 0
	      && supports_software_single_step ())
	    {
	      /* The child process is forked and stopped, so it is safe
		 to access its memory without stopping all other threads
		 from other processes.  */
	      delete_single_step_breakpoints (child_thr);

	      gdb_assert (has_single_step_breakpoints (event_thr));
	      gdb_assert (!has_single_step_breakpoints (child_thr));
	    }

	  /* Report the event.  */
	  return 0;
	}

      if (debug_threads)
	debug_printf ("HEW: Got clone event "
		      "from LWP %ld, new child is LWP %ld\n",
		      lwpid_of (event_thr), new_pid);

      ptid = ptid_t (pid_of (event_thr), new_pid, 0);
      new_lwp = add_lwp (ptid);

      /* Either we're going to immediately resume the new thread
	 or leave it stopped.  resume_one_lwp is a nop if it
	 thinks the thread is currently running, so set this first
	 before calling resume_one_lwp.  */
      new_lwp->stopped = 1;

      /* If we're suspending all threads, leave this one suspended
	 too.  If the fork/clone parent is stepping over a breakpoint,
	 all other threads have been suspended already.  Leave the
	 child suspended too.  */
      if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
	  || event_lwp->bp_reinsert != 0)
	new_lwp->suspended = 1;

      /* Normally we will get the pending SIGSTOP.  But in some cases
	 we might get another signal delivered to the group first.
	 If we do get another signal, be sure not to lose it.  */
      if (WSTOPSIG (status) != SIGSTOP)
	{
	  new_lwp->stop_expected = 1;
	  new_lwp->status_pending_p = 1;
	  new_lwp->status_pending = status;
	}
      else if (cs.report_thread_events)
	{
	  new_lwp->waitstatus.kind = TARGET_WAITKIND_THREAD_CREATED;
	  new_lwp->status_pending_p = 1;
	  new_lwp->status_pending = status;
	}

#ifdef USE_THREAD_DB
      thread_db_notice_clone (event_thr, ptid);
#endif

      /* Don't report the event.  */
      return 1;
    }
  else if (event == PTRACE_EVENT_VFORK_DONE)
    {
      event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORK_DONE;

      if (event_lwp->bp_reinsert != 0 && supports_software_single_step ())
	{
	  reinsert_single_step_breakpoints (event_thr);

	  gdb_assert (has_single_step_breakpoints (event_thr));
	}

      /* Report the event.  */
      return 0;
    }
  else if (event == PTRACE_EVENT_EXEC && cs.report_exec_events)
    {
      struct process_info *proc;
      std::vector<int> syscalls_to_catch;
      ptid_t event_ptid;
      pid_t event_pid;

      if (debug_threads)
	{
	  debug_printf ("HEW: Got exec event from LWP %ld\n",
			lwpid_of (event_thr));
	}

      /* Get the event ptid.  */
      event_ptid = ptid_of (event_thr);
      event_pid = event_ptid.pid ();

      /* Save the syscall list from the execing process.  */
      proc = get_thread_process (event_thr);
      syscalls_to_catch = std::move (proc->syscalls_to_catch);

      /* Delete the execing process and all its threads.  */
      mourn (proc);
      current_thread = NULL;

      /* Create a new process/lwp/thread.  */
      proc = add_linux_process (event_pid, 0);
      event_lwp = add_lwp (event_ptid);
      event_thr = get_lwp_thread (event_lwp);
      gdb_assert (current_thread == event_thr);
      arch_setup_thread (event_thr);

      /* Set the event status.  */
      event_lwp->waitstatus.kind = TARGET_WAITKIND_EXECD;
      event_lwp->waitstatus.value.execd_pathname
	= xstrdup (linux_proc_pid_to_exec_file (lwpid_of (event_thr)));

      /* Mark the exec status as pending.  */
      event_lwp->stopped = 1;
      event_lwp->status_pending_p = 1;
      event_lwp->status_pending = wstat;
      event_thr->last_resume_kind = resume_continue;
      event_thr->last_status.kind = TARGET_WAITKIND_IGNORE;

      /* Update syscall state in the new lwp, effectively mid-syscall too.  */
      event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;

      /* Restore the list to catch.  Don't rely on the client, which is free
	 to avoid sending a new list when the architecture doesn't change.
	 Also, for ANY_SYSCALL, the architecture doesn't really matter.  */
      proc->syscalls_to_catch = std::move (syscalls_to_catch);

      /* Report the event.  */
      *orig_event_lwp = event_lwp;
      return 0;
    }

  internal_error (__FILE__, __LINE__, _("unknown ptrace event %d"), event);
}
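/* To summarize the convention used by handle_extended_wait above: it
   returns 0 when the extended event should be reported to GDB
   (fork/vfork/vfork-done/exec), and 1 when the event was consumed
   internally (a clone that just adds a new LWP).  */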
CORE_ADDR
linux_process_target::get_pc (lwp_info *lwp)
{
  struct thread_info *saved_thread;
  struct regcache *regcache;
  CORE_ADDR pc;

  if (!low_supports_breakpoints ())
    return 0;

  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

  regcache = get_thread_regcache (current_thread, 1);
  pc = low_get_pc (regcache);

  if (debug_threads)
    debug_printf ("pc is 0x%lx\n", (long) pc);

  current_thread = saved_thread;
  return pc;
}

void
linux_process_target::get_syscall_trapinfo (lwp_info *lwp, int *sysno)
{
  struct thread_info *saved_thread;
  struct regcache *regcache;

  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

  regcache = get_thread_regcache (current_thread, 1);
  low_get_syscall_trapinfo (regcache, sysno);

  if (debug_threads)
    debug_printf ("get_syscall_trapinfo sysno %d\n", *sysno);

  current_thread = saved_thread;
}

void
linux_process_target::low_get_syscall_trapinfo (regcache *regcache, int *sysno)
{
  /* By default, report an unknown system call number.  */
  *sysno = UNKNOWN_SYSCALL;
}

bool
linux_process_target::save_stop_reason (lwp_info *lwp)
{
  CORE_ADDR pc;
  CORE_ADDR sw_breakpoint_pc;
  struct thread_info *saved_thread;
#if USE_SIGTRAP_SIGINFO
  siginfo_t siginfo;
#endif

  if (!low_supports_breakpoints ())
    return false;

  pc = get_pc (lwp);
  sw_breakpoint_pc = pc - low_decr_pc_after_break ();

  /* breakpoint_at reads from the current thread.  */
  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

#if USE_SIGTRAP_SIGINFO
  if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
	      (PTRACE_TYPE_ARG3) 0, &siginfo) == 0)
    {
      if (siginfo.si_signo == SIGTRAP)
	{
	  if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code)
	      && GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
	    {
	      /* The si_code is ambiguous on this arch -- check debug
		 registers.  */
	      if (!check_stopped_by_watchpoint (lwp))
		lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
	    }
	  else if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code))
	    {
	      /* If we determine the LWP stopped for a SW breakpoint,
		 trust it.  Particularly don't check watchpoint
		 registers, because at least on s390, we'd find
		 stopped-by-watchpoint as long as there's a watchpoint
		 set.  */
	      lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
	    }
	  else if (GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
	    {
	      /* This can indicate either a hardware breakpoint or
		 hardware watchpoint.  Check debug registers.  */
	      if (!check_stopped_by_watchpoint (lwp))
		lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
	    }
	  else if (siginfo.si_code == TRAP_TRACE)
	    {
	      /* We may have single stepped an instruction that
		 triggered a watchpoint.  In that case, on some
		 architectures (such as x86), instead of TRAP_HWBKPT,
		 si_code indicates TRAP_TRACE, and we need to check
		 the debug registers separately.  */
	      if (!check_stopped_by_watchpoint (lwp))
		lwp->stop_reason = TARGET_STOPPED_BY_SINGLE_STEP;
	    }
	}
    }
#else
  /* We may have just stepped a breakpoint instruction.  E.g., in
     non-stop mode, GDB first tells the thread A to step a range, and
     then the user inserts a breakpoint inside the range.  In that
     case we need to report the breakpoint PC.  */
  if ((!lwp->stepping || lwp->stop_pc == sw_breakpoint_pc)
      && low_breakpoint_at (sw_breakpoint_pc))
    lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;

  if (hardware_breakpoint_inserted_here (pc))
    lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;

  if (lwp->stop_reason == TARGET_STOPPED_BY_NO_REASON)
    check_stopped_by_watchpoint (lwp);
#endif

  if (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT)
    {
      if (debug_threads)
	{
	  struct thread_info *thr = get_lwp_thread (lwp);

	  debug_printf ("CSBB: %s stopped by software breakpoint\n",
			target_pid_to_str (ptid_of (thr)));
	}

      /* Back up the PC if necessary.  */
      if (pc != sw_breakpoint_pc)
	{
	  struct regcache *regcache
	    = get_thread_regcache (current_thread, 1);
	  low_set_pc (regcache, sw_breakpoint_pc);
	}

      /* Update this so we record the correct stop PC below.  */
      pc = sw_breakpoint_pc;
    }
  else if (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT)
    {
      if (debug_threads)
	{
	  struct thread_info *thr = get_lwp_thread (lwp);

	  debug_printf ("CSBB: %s stopped by hardware breakpoint\n",
			target_pid_to_str (ptid_of (thr)));
	}
    }
  else if (lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
    {
      if (debug_threads)
	{
	  struct thread_info *thr = get_lwp_thread (lwp);

	  debug_printf ("CSBB: %s stopped by hardware watchpoint\n",
			target_pid_to_str (ptid_of (thr)));
	}
    }
  else if (lwp->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP)
    {
      if (debug_threads)
	{
	  struct thread_info *thr = get_lwp_thread (lwp);

	  debug_printf ("CSBB: %s stopped by trace\n",
			target_pid_to_str (ptid_of (thr)));
	}
    }

  lwp->stop_pc = pc;
  current_thread = saved_thread;
  return true;
}
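/* Concrete example of the PC adjustment above: on x86, hitting an
   int3 breakpoint placed at 0x1000 leaves the reported PC at 0x1001,
   and low_decr_pc_after_break () returns 1, so stop_pc is backed up
   to the breakpoint address 0x1000.  (Addresses hypothetical.)  */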
lwp_info *
linux_process_target::add_lwp (ptid_t ptid)
{
  struct lwp_info *lwp;

  lwp = new lwp_info {};

  lwp->waitstatus.kind = TARGET_WAITKIND_IGNORE;

  lwp->thread = add_thread (ptid, lwp);

  low_new_thread (lwp);

  return lwp;
}

void
linux_process_target::low_new_thread (lwp_info *info)
{
  /* Nop.  */
}

/* Callback to be used when calling fork_inferior, responsible for
   actually initiating the tracing of the inferior.  */

static void
linux_ptrace_fun ()
{
  if (ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0,
	      (PTRACE_TYPE_ARG4) 0) < 0)
    trace_start_error_with_name ("ptrace");

  if (setpgid (0, 0) < 0)
    trace_start_error_with_name ("setpgid");

  /* If GDBserver is connected to gdb via stdio, redirect the inferior's
     stdout to stderr so that inferior i/o doesn't corrupt the connection.
     Also, redirect stdin to /dev/null.  */
  if (remote_connection_is_stdio ())
    {
      if (close (0) < 0)
	trace_start_error_with_name ("close");
      if (open ("/dev/null", O_RDONLY) < 0)
	trace_start_error_with_name ("open");
      if (dup2 (2, 1) < 0)
	trace_start_error_with_name ("dup2");
      if (write (2, "stdin/stdout redirected\n",
		 sizeof ("stdin/stdout redirected\n") - 1) < 0)
	{
	  /* Errors ignored.  */;
	}
    }
}

/* Start an inferior process and return its pid.
   PROGRAM is the name of the program to be started, and PROGRAM_ARGS
   are its arguments.  */

int
linux_process_target::create_inferior (const char *program,
				       const std::vector<char *> &program_args)
{
  client_state &cs = get_client_state ();
  struct lwp_info *new_lwp;
  int pid;
  ptid_t ptid;

  {
    maybe_disable_address_space_randomization restore_personality
      (cs.disable_randomization);
    std::string str_program_args = construct_inferior_arguments (program_args);

    pid = fork_inferior (program,
			 str_program_args.c_str (),
			 get_environ ()->envp (), linux_ptrace_fun,
			 NULL, NULL, NULL, NULL);
  }

  add_linux_process (pid, 0);

  ptid = ptid_t (pid, pid, 0);
  new_lwp = add_lwp (ptid);
  new_lwp->must_set_ptrace_flags = 1;

  post_fork_inferior (pid, program);

  return pid;
}
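/* Note on the ptid above: for a freshly created process, gdbserver
   uses ptid_t (pid, pid, 0), i.e. the LWP id of the initial thread
   equals the process id, and the third (tid) field is unused by this
   target.  E.g., a hypothetical inferior with PID 1234 starts out as
   ptid (1234, 1234, 0).  */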
/* Implement the post_create_inferior target_ops method.  */

void
linux_process_target::post_create_inferior ()
{
  struct lwp_info *lwp = get_thread_lwp (current_thread);

  low_arch_setup ();

  if (lwp->must_set_ptrace_flags)
    {
      struct process_info *proc = current_process ();
      int options = linux_low_ptrace_options (proc->attached);

      linux_enable_event_reporting (lwpid_of (current_thread), options);
      lwp->must_set_ptrace_flags = 0;
    }
}

int
linux_process_target::attach_lwp (ptid_t ptid)
{
  struct lwp_info *new_lwp;
  int lwpid = ptid.lwp ();

  if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
      != 0)
    return errno;

  new_lwp = add_lwp (ptid);

  /* We need to wait for SIGSTOP before being able to make the next
     ptrace call on this LWP.  */
  new_lwp->must_set_ptrace_flags = 1;

  if (linux_proc_pid_is_stopped (lwpid))
    {
      if (debug_threads)
	debug_printf ("Attached to a stopped process\n");

      /* The process is definitely stopped.  It is in a job control
	 stop, unless the kernel predates the TASK_STOPPED /
	 TASK_TRACED distinction, in which case it might be in a
	 ptrace stop.  Make sure it is in a ptrace stop; from there we
	 can kill it, signal it, et cetera.

	 First make sure there is a pending SIGSTOP.  Since we are
	 already attached, the process can not transition from stopped
	 to running without a PTRACE_CONT; so we know this signal will
	 go into the queue.  The SIGSTOP generated by PTRACE_ATTACH is
	 probably already in the queue (unless this kernel is old
	 enough to use TASK_STOPPED for ptrace stops); but since
	 SIGSTOP is not an RT signal, it can only be queued once.  */
      kill_lwp (lwpid, SIGSTOP);

      /* Finally, resume the stopped process.  This will deliver the
	 SIGSTOP (or a higher priority signal, just like normal
	 PTRACE_ATTACH), which we'll catch later on.  */
      ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
    }

  /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
     brings it to a halt.

     There are several cases to consider here:

     1) gdbserver has already attached to the process and is being notified
	of a new thread that is being created.
	In this case we should ignore that SIGSTOP and resume the
	process.  This is handled below by setting stop_expected = 1,
	and the fact that add_thread sets last_resume_kind ==
	resume_continue.

     2) This is the first thread (the process thread), and we're attaching
	to it via attach_inferior.
	In this case we want the process thread to stop.
	This is handled by having linux_attach set last_resume_kind ==
	resume_stop after we return.

	If the pid we are attaching to is also the tgid, we attach to and
	stop all the existing threads.  Otherwise, we attach to pid and
	ignore any other threads in the same group as this pid.

     3) GDB is connecting to gdbserver and is requesting an enumeration of all
	existing threads.
	In this case we want the thread to stop.
	FIXME: This case is currently not properly handled.
	We should wait for the SIGSTOP but don't.  Things work apparently
	because enough time passes between when we ptrace (ATTACH) and when
	gdb makes the next ptrace call on the thread.

     On the other hand, if we are currently trying to stop all threads, we
     should treat the new thread as if we had sent it a SIGSTOP.  This works
     because we are guaranteed that the add_lwp call above added us to the
     end of the list, and so the new thread has not yet reached
     wait_for_sigstop (but will).  */
  new_lwp->stop_expected = 1;

  return 0;
}
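/* A sketch of the attach sequence above for a job-control-stopped LWP
   (calls abbreviated; gdbserver collects the final stop in its normal
   event loop rather than with a synchronous waitpid):

     ptrace (PTRACE_ATTACH, lwpid, 0, 0);  // queues a SIGSTOP
     kill_lwp (lwpid, SIGSTOP);            // guarantee one is pending
     ptrace (PTRACE_CONT, lwpid, 0, 0);    // move it into a ptrace stop
*/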
/* Callback for linux_proc_attach_tgid_threads.  Attach to PTID if not
   already attached.  Returns true if a new LWP is found, false
   otherwise.  */

static int
attach_proc_task_lwp_callback (ptid_t ptid)
{
  /* Is this a new thread?  */
  if (find_thread_ptid (ptid) == NULL)
    {
      int lwpid = ptid.lwp ();
      int err;

      if (debug_threads)
	debug_printf ("Found new lwp %d\n", lwpid);

      err = the_linux_target->attach_lwp (ptid);

      /* Be quiet if we simply raced with the thread exiting.  EPERM
	 is returned if the thread's task still exists, and is marked
	 as exited or zombie, as well as other conditions, so in that
	 case, confirm the status in /proc/PID/status.  */
      if (err == ESRCH
	  || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
	{
	  if (debug_threads)
	    {
	      debug_printf ("Cannot attach to lwp %d: "
			    "thread is gone (%d: %s)\n",
			    lwpid, err, safe_strerror (err));
	    }
	}
      else if (err != 0)
	{
	  std::string reason
	    = linux_ptrace_attach_fail_reason_string (ptid, err);

	  warning (_("Cannot attach to lwp %d: %s"), lwpid, reason.c_str ());
	}

      return 1;
    }
  return 0;
}

static void async_file_mark (void);

/* Attach to PID.  If PID is the tgid, attach to it and all
   of its threads.  */

int
linux_process_target::attach (unsigned long pid)
{
  struct process_info *proc;
  struct thread_info *initial_thread;
  ptid_t ptid = ptid_t (pid, pid, 0);
  int err;

  proc = add_linux_process (pid, 1);

  /* Attach to PID.  We will check for other threads
     soon.  */
  err = attach_lwp (ptid);
  if (err != 0)
    {
      remove_process (proc);

      std::string reason = linux_ptrace_attach_fail_reason_string (ptid, err);
      error ("Cannot attach to process %ld: %s", pid, reason.c_str ());
    }

  /* Don't ignore the initial SIGSTOP if we just attached to this
     process.  It will be collected by wait shortly.  */
  initial_thread = find_thread_ptid (ptid_t (pid, pid, 0));
  initial_thread->last_resume_kind = resume_stop;

  /* We must attach to every LWP.  If /proc is mounted, use that to
     find them now.  On the one hand, the inferior may be using raw
     clone instead of using pthreads.  On the other hand, even if it
     is using pthreads, GDB may not be connected yet (thread_db needs
     to do symbol lookups, through qSymbol).  Also, thread_db walks
     structures in the inferior's address space to find the list of
     threads/LWPs, and those structures may well be corrupted.  Note
     that once thread_db is loaded, we'll still use it to list threads
     and associate pthread info with each LWP.  */
  linux_proc_attach_tgid_threads (pid, attach_proc_task_lwp_callback);

  /* GDB will shortly read the xml target description for this
     process, to figure out the process' architecture.  But the target
     description is only filled in when the first process/thread in
     the thread group reports its initial PTRACE_ATTACH SIGSTOP.  Do
     that now, otherwise, if GDB is fast enough, it could read the
     target description _before_ that initial stop.  */
  if (non_stop)
    {
      struct lwp_info *lwp;
      int wstat, lwpid;
      ptid_t pid_ptid = ptid_t (pid);

      lwpid = wait_for_event_filtered (pid_ptid, pid_ptid, &wstat, __WALL);
      gdb_assert (lwpid > 0);

      lwp = find_lwp_pid (ptid_t (lwpid));

      if (!WIFSTOPPED (wstat) || WSTOPSIG (wstat) != SIGSTOP)
	{
	  lwp->status_pending_p = 1;
	  lwp->status_pending = wstat;
	}

      initial_thread->last_resume_kind = resume_continue;

      async_file_mark ();

      gdb_assert (proc->tdesc != NULL);
    }

  return 0;
}
static int
last_thread_of_process_p (int pid)
{
  bool seen_one = false;

  thread_info *thread = find_thread (pid, [&] (thread_info *thr_arg)
    {
      if (!seen_one)
	{
	  /* This is the first thread of this process we see.  */
	  seen_one = true;
	  return false;
	}
      else
	{
	  /* This is the second thread of this process we see.  */
	  return true;
	}
    });

  return thread == NULL;
}

/* Kill LWP.  */

static void
linux_kill_one_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = lwpid_of (thr);

  /* PTRACE_KILL is unreliable.  After stepping into a signal handler,
     there is no signal context, and ptrace(PTRACE_KILL) (or
     ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
     ptrace(CONT, pid, 0,0) and just resumes the tracee.  A better
     alternative is to kill with SIGKILL.  We only need one SIGKILL
     per process, not one for each thread.  But since we still support
     debugging programs using raw clone without CLONE_THREAD, we send
     one for each thread.  For years, we used PTRACE_KILL only, so
     we're being a bit paranoid about some old kernels where
     PTRACE_KILL might work better (dubious if there are any such, but
     that's why it's paranoia), so we try SIGKILL first, PTRACE_KILL
     second, and so we're fine everywhere.  */

  errno = 0;
  kill_lwp (pid, SIGKILL);
  if (debug_threads)
    {
      int save_errno = errno;

      debug_printf ("LKL: kill_lwp (SIGKILL) %s, 0, 0 (%s)\n",
		    target_pid_to_str (ptid_of (thr)),
		    save_errno ? safe_strerror (save_errno) : "OK");
    }

  errno = 0;
  ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
  if (debug_threads)
    {
      int save_errno = errno;

      debug_printf ("LKL: PTRACE_KILL %s, 0, 0 (%s)\n",
		    target_pid_to_str (ptid_of (thr)),
		    save_errno ? safe_strerror (save_errno) : "OK");
    }
}

/* Kill LWP and wait for it to die.  */

static void
kill_wait_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = ptid_of (thr).pid ();
  int lwpid = ptid_of (thr).lwp ();
  int wstat;
  int res;

  if (debug_threads)
    debug_printf ("kwl: killing lwp %d, for pid: %d\n", lwpid, pid);

  do
    {
      linux_kill_one_lwp (lwp);

      /* Make sure it died.  Notes:

	 - The loop is most likely unnecessary.

	 - We don't use wait_for_event as that could delete lwps
	   while we're iterating over them.  We're not interested in
	   any pending status at this point, only in making sure all
	   wait status on the kernel side are collected until the
	   process is reaped.

	 - We don't use __WALL here as the __WALL emulation relies on
	   SIGCHLD, and killing a stopped process doesn't generate
	   one, nor an exit status.  */
      res = my_waitpid (lwpid, &wstat, 0);
      if (res == -1 && errno == ECHILD)
	res = my_waitpid (lwpid, &wstat, __WCLONE);
    }
  while (res > 0 && WIFSTOPPED (wstat));

  /* Even if it was stopped, the child may have already disappeared.
     E.g., if it was killed by SIGKILL.  */
  if (res < 0 && errno != ECHILD)
    perror_with_name ("kill_wait_lwp");
}
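/* Note on the __WCLONE retry above: a plain waitpid only collects
   non-clone children, and fails with ECHILD for an LWP created with
   clone.  Retrying with __WCLONE covers that case without relying on
   the SIGCHLD-based __WALL emulation mentioned in the comment.  */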
/* Callback for `for_each_thread'.  Kills an lwp of a given process,
   except the leader.  */

static void
kill_one_lwp_callback (thread_info *thread, int pid)
{
  struct lwp_info *lwp = get_thread_lwp (thread);

  /* We avoid killing the first thread here, because of a Linux kernel (at
     least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
     the children get a chance to be reaped, it will remain a zombie
     forever.  */

  if (lwpid_of (thread) == pid)
    {
      if (debug_threads)
	debug_printf ("lkop: is last of process %s\n",
		      target_pid_to_str (thread->id));
      return;
    }

  kill_wait_lwp (lwp);
}

int
linux_process_target::kill (process_info *process)
{
  int pid = process->pid;

  /* If we're killing a running inferior, make sure it is stopped
     first, as PTRACE_KILL will not work otherwise.  */
  stop_all_lwps (0, NULL);

  for_each_thread (pid, [&] (thread_info *thread)
    {
      kill_one_lwp_callback (thread, pid);
    });

  /* See the comment in linux_kill_one_lwp.  We did not kill the first
     thread in the list, so do so now.  */
  lwp_info *lwp = find_lwp_pid (ptid_t (pid));

  if (lwp == NULL)
    {
      if (debug_threads)
	debug_printf ("lk_1: cannot find lwp for pid: %d\n",
		      pid);
    }
  else
    kill_wait_lwp (lwp);

  mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

/* Get pending signal of THREAD, for detaching purposes.  This is the
   signal the thread last stopped for, which we need to deliver to the
   thread when detaching, otherwise, it'd be suppressed/lost.  */

static int
get_detach_signal (struct thread_info *thread)
{
  client_state &cs = get_client_state ();
  enum gdb_signal signo = GDB_SIGNAL_0;
  int status;
  struct lwp_info *lp = get_thread_lwp (thread);

  if (lp->status_pending_p)
    status = lp->status_pending;
  else
    {
      /* If the thread had been suspended by gdbserver, and it stopped
	 cleanly, then it'll have stopped with SIGSTOP.  But we don't
	 want to deliver that SIGSTOP.  */
      if (thread->last_status.kind != TARGET_WAITKIND_STOPPED
	  || thread->last_status.value.sig == GDB_SIGNAL_0)
	return 0;

      /* Otherwise, we may need to deliver the signal we
	 intercepted.  */
      status = lp->last_status;
    }

  if (!WIFSTOPPED (status))
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s hasn't stopped: no pending signal\n",
		      target_pid_to_str (ptid_of (thread)));
      return 0;
    }

  /* Extended wait statuses aren't real SIGTRAPs.  */
  if (WSTOPSIG (status) == SIGTRAP && linux_is_extended_waitstatus (status))
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s had stopped with extended "
		      "status: no pending signal\n",
		      target_pid_to_str (ptid_of (thread)));
      return 0;
    }

  signo = gdb_signal_from_host (WSTOPSIG (status));

  if (cs.program_signals_p && !cs.program_signals[signo])
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s had signal %s, but it is in nopass state\n",
		      target_pid_to_str (ptid_of (thread)),
		      gdb_signal_to_string (signo));
      return 0;
    }
  else if (!cs.program_signals_p
	   /* If we have no way to know which signals GDB does not
	      want to have passed to the program, assume
	      SIGTRAP/SIGINT, which is GDB's default.  */
	   && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s had signal %s, "
		      "but we don't know if we should pass it.  "
		      "Default to not.\n",
		      target_pid_to_str (ptid_of (thread)),
		      gdb_signal_to_string (signo));
      return 0;
    }
  else
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s has pending signal %s: delivering it.\n",
		      target_pid_to_str (ptid_of (thread)),
		      gdb_signal_to_string (signo));

      return WSTOPSIG (status);
    }
}
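/* Example of the policy above (signals hypothetical): a thread that
   last stopped for SIGUSR1 in pass state has the raw host signal
   number returned, so PTRACE_DETACH re-delivers it on detach; a
   thread that stopped for SIGTRAP, with no program_signals info from
   GDB, gets 0, i.e. the signal is suppressed.  */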
void
linux_process_target::detach_one_lwp (lwp_info *lwp)
{
  struct thread_info *thread = get_lwp_thread (lwp);
  int sig;
  int lwpid;

  /* If there is a pending SIGSTOP, get rid of it.  */
  if (lwp->stop_expected)
    {
      if (debug_threads)
	debug_printf ("Sending SIGCONT to %s\n",
		      target_pid_to_str (ptid_of (thread)));

      kill_lwp (lwpid_of (thread), SIGCONT);
      lwp->stop_expected = 0;
    }

  /* Pass on any pending signal for this thread.  */
  sig = get_detach_signal (thread);

  /* Preparing to resume may try to write registers, and fail if the
     lwp is zombie.  If that happens, ignore the error.  We'll handle
     it below, when detach fails with ESRCH.  */
  try
    {
      /* Flush any pending changes to the process's registers.  */
      regcache_invalidate_thread (thread);

      /* Finally, let it resume.  */
      low_prepare_to_resume (lwp);
    }
  catch (const gdb_exception_error &ex)
    {
      if (!check_ptrace_stopped_lwp_gone (lwp))
	throw;
    }

  lwpid = lwpid_of (thread);
  if (ptrace (PTRACE_DETACH, lwpid, (PTRACE_TYPE_ARG3) 0,
	      (PTRACE_TYPE_ARG4) (long) sig) < 0)
    {
      int save_errno = errno;

      /* We know the thread exists, so ESRCH must mean the lwp is
	 zombie.  This can happen if one of the already-detached
	 threads exits the whole thread group.  In that case we're
	 still attached, and must reap the lwp.  */
      if (save_errno == ESRCH)
	{
	  int ret, status;

	  ret = my_waitpid (lwpid, &status, __WALL);
	  if (ret == -1)
	    {
	      warning (_("Couldn't reap LWP %d while detaching: %s"),
		       lwpid, safe_strerror (errno));
	    }
	  else if (!WIFEXITED (status) && !WIFSIGNALED (status))
	    {
	      warning (_("Reaping LWP %d while detaching "
			 "returned unexpected status 0x%x"),
		       lwpid, status);
	    }
	}
      else
	{
	  error (_("Can't detach %s: %s"),
		 target_pid_to_str (ptid_of (thread)),
		 safe_strerror (save_errno));
	}
    }
  else if (debug_threads)
    {
      debug_printf ("PTRACE_DETACH (%s, %s, 0) (OK)\n",
		    target_pid_to_str (ptid_of (thread)),
		    strsignal (sig));
    }

  delete_lwp (lwp);
}

int
linux_process_target::detach (process_info *process)
{
  struct lwp_info *main_lwp;

  /* If a step over is already in progress, let it finish first;
     otherwise nesting a stabilize_threads operation on top gets real
     messy.  */
  complete_ongoing_step_over ();

  /* Stop all threads before detaching.  First, ptrace requires that
     the thread is stopped to successfully detach.  Second, thread_db
     may need to uninstall thread event breakpoints from memory, which
     only works with a stopped process anyway.  */
  stop_all_lwps (0, NULL);

#ifdef USE_THREAD_DB
  thread_db_detach (process);
#endif

  /* Stabilize threads (move out of jump pads).  */
  target_stabilize_threads ();

  /* Detach from the clone lwps first.  If the thread group exits just
     while we're detaching, we must reap the clone lwps before we're
     able to reap the leader.  */
  for_each_thread (process->pid, [this] (thread_info *thread)
    {
      /* We don't actually detach from the thread group leader just yet.
	 If the thread group exits, we must reap the zombie clone lwps
	 before we're able to reap the leader.  */
      if (thread->id.pid () == thread->id.lwp ())
	return;

      lwp_info *lwp = get_thread_lwp (thread);
      detach_one_lwp (lwp);
    });

  main_lwp = find_lwp_pid (ptid_t (process->pid));
  detach_one_lwp (main_lwp);

  mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

/* Remove all LWPs that belong to process PROCESS from the lwp list.  */

void
linux_process_target::mourn (process_info *process)
{
  struct process_info_private *priv;

#ifdef USE_THREAD_DB
  thread_db_mourn (process);
#endif

  for_each_thread (process->pid, [this] (thread_info *thread)
    {
      delete_lwp (get_thread_lwp (thread));
    });

  /* Free all private data.  */
  priv = process->priv;
  low_delete_process (priv->arch_private);
  free (priv);
  process->priv = NULL;

  remove_process (process);
}

void
linux_process_target::join (int pid)
{
  int status, ret;

  do
    {
      ret = my_waitpid (pid, &status, 0);
      if (WIFEXITED (status) || WIFSIGNALED (status))
	break;
    }
  while (ret != -1 || errno != ECHILD);
}
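/* The loop in join above spins until the process either reports an
   exit or termination status, or has already been reaped and waitpid
   fails with ECHILD; either way the inferior is known to be gone.  */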
/* Return true if the given thread is still alive.  */

bool
linux_process_target::thread_alive (ptid_t ptid)
{
  struct lwp_info *lwp = find_lwp_pid (ptid);

  /* We assume we always know if a thread exits.  If a whole process
     exited but we still haven't been able to report it to GDB, we'll
     hold on to the last lwp of the dead process.  */
  if (lwp != NULL)
    return !lwp_is_marked_dead (lwp);
  else
    return 0;
}

bool
linux_process_target::thread_still_has_status_pending (thread_info *thread)
{
  struct lwp_info *lp = get_thread_lwp (thread);

  if (!lp->status_pending_p)
    return 0;

  if (thread->last_resume_kind != resume_stop
      && (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
	  || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
    {
      struct thread_info *saved_thread;
      CORE_ADDR pc;
      int discard = 0;

      gdb_assert (lp->last_status != 0);

      pc = get_pc (lp);

      saved_thread = current_thread;
      current_thread = thread;

      if (pc != lp->stop_pc)
	{
	  if (debug_threads)
	    debug_printf ("PC of %ld changed\n",
			  lwpid_of (thread));
	  discard = 1;
	}

#if !USE_SIGTRAP_SIGINFO
      else if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
	       && !low_breakpoint_at (pc))
	{
	  if (debug_threads)
	    debug_printf ("previous SW breakpoint of %ld gone\n",
			  lwpid_of (thread));
	  discard = 1;
	}
      else if (lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT
	       && !hardware_breakpoint_inserted_here (pc))
	{
	  if (debug_threads)
	    debug_printf ("previous HW breakpoint of %ld gone\n",
			  lwpid_of (thread));
	  discard = 1;
	}
#endif

      current_thread = saved_thread;

      if (discard)
	{
	  if (debug_threads)
	    debug_printf ("discarding pending breakpoint status\n");
	  lp->status_pending_p = 0;
	  return 0;
	}
    }

  return 1;
}

/* Returns true if LWP is resumed from the client's perspective.  */

static int
lwp_resumed (struct lwp_info *lwp)
{
  struct thread_info *thread = get_lwp_thread (lwp);

  if (thread->last_resume_kind != resume_stop)
    return 1;

  /* Did gdb send us a `vCont;t', but we haven't reported the
     corresponding stop to gdb yet?  If so, the thread is still
     resumed/running from gdb's perspective.  */
  if (thread->last_resume_kind == resume_stop
      && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
    return 1;

  return 0;
}

bool
linux_process_target::status_pending_p_callback (thread_info *thread,
						 ptid_t ptid)
{
  struct lwp_info *lp = get_thread_lwp (thread);

  /* Check if we're only interested in events from a specific process
     or a specific LWP.  */
  if (!thread->id.matches (ptid))
    return 0;

  if (!lwp_resumed (lp))
    return 0;

  if (lp->status_pending_p
      && !thread_still_has_status_pending (thread))
    {
      resume_one_lwp (lp, lp->stepping, GDB_SIGNAL_0, NULL);
      return 0;
    }

  return lp->status_pending_p;
}

struct lwp_info *
find_lwp_pid (ptid_t ptid)
{
  thread_info *thread = find_thread ([&] (thread_info *thr_arg)
    {
      int lwp = ptid.lwp () != 0 ? ptid.lwp () : ptid.pid ();
      return thr_arg->id.lwp () == lwp;
    });

  if (thread == NULL)
    return NULL;

  return get_thread_lwp (thread);
}
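/* Example of the lookup rule above: find_lwp_pid (ptid_t (1234)), a
   pid-only ptid whose lwp field is 0, falls back to the pid and so
   finds the LWP whose id is 1234, i.e. the thread group leader of a
   hypothetical process 1234.  */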
/* Return the number of known LWPs in the tgid given by PID.  */

static int
num_lwps (int pid)
{
  int count = 0;

  for_each_thread (pid, [&] (thread_info *thread)
    {
      count++;
    });

  return count;
}

/* See nat/linux-nat.h.  */

struct lwp_info *
iterate_over_lwps (ptid_t filter,
		   gdb::function_view<iterate_over_lwps_ftype> callback)
{
  thread_info *thread = find_thread (filter, [&] (thread_info *thr_arg)
    {
      lwp_info *lwp = get_thread_lwp (thr_arg);

      return callback (lwp);
    });

  if (thread == NULL)
    return NULL;

  return get_thread_lwp (thread);
}

void
linux_process_target::check_zombie_leaders ()
{
  for_each_process ([this] (process_info *proc) {
    pid_t leader_pid = pid_of (proc);
    struct lwp_info *leader_lp;

    leader_lp = find_lwp_pid (ptid_t (leader_pid));

    if (debug_threads)
      debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
		    "num_lwps=%d, zombie=%d\n",
		    leader_pid, leader_lp != NULL, num_lwps (leader_pid),
		    linux_proc_pid_is_zombie (leader_pid));

    if (leader_lp != NULL && !leader_lp->stopped
	/* Check if there are other threads in the group, as we may
	   have raced with the inferior simply exiting.  */
	&& !last_thread_of_process_p (leader_pid)
	&& linux_proc_pid_is_zombie (leader_pid))
      {
	/* A leader zombie can mean one of two things:

	   - It exited, and there's an exit status pending
	     available, or only the leader exited (not the whole
	     program).  In the latter case, we can't waitpid the
	     leader's exit status until all other threads are gone.

	   - There are 3 or more threads in the group, and a thread
	     other than the leader exec'd.  On an exec, the Linux
	     kernel destroys all other threads (except the execing
	     one) in the thread group, and resets the execing thread's
	     tid to the tgid.  No exit notification is sent for the
	     execing thread -- from the ptracer's perspective, it
	     appears as though the execing thread just vanishes.
	     Until we reap all other threads except the leader and the
	     execing thread, the leader will be zombie, and the
	     execing thread will be in `D (disc sleep)'.  As soon as
	     all other threads are reaped, the execing thread changes
	     its tid to the tgid, and the previous (zombie) leader
	     vanishes, giving place to the "new" leader.  We could try
	     distinguishing the exit and exec cases, by waiting once
	     more, and seeing if something comes out, but it doesn't
	     sound useful.  The previous leader _does_ go away, and
	     we'll re-add the new one once we see the exec event
	     (which is just the same as what would happen if the
	     previous leader did exit voluntarily before some other
	     thread execs).  */

	if (debug_threads)
	  debug_printf ("CZL: Thread group leader %d zombie "
			"(it exited, or another thread execd).\n",
			leader_pid);

	delete_lwp (leader_lp);
      }
  });
}

/* Callback for `find_thread'.  Returns the first LWP that is not
   stopped.  */

static bool
not_stopped_callback (thread_info *thread, ptid_t filter)
{
  if (!thread->id.matches (filter))
    return false;

  lwp_info *lwp = get_thread_lwp (thread);

  return !lwp->stopped;
}

/* Increment LWP's suspend count.  */

static void
lwp_suspended_inc (struct lwp_info *lwp)
{
  lwp->suspended++;

  if (debug_threads && lwp->suspended > 4)
    {
      struct thread_info *thread = get_lwp_thread (lwp);

      debug_printf ("LWP %ld has a suspiciously high suspend count,"
		    " suspended=%d\n", lwpid_of (thread), lwp->suspended);
    }
}

/* Decrement LWP's suspend count.  */

static void
lwp_suspended_decr (struct lwp_info *lwp)
{
  lwp->suspended--;

  if (lwp->suspended < 0)
    {
      struct thread_info *thread = get_lwp_thread (lwp);

      internal_error (__FILE__, __LINE__,
		      "unsuspend LWP %ld, suspended=%d\n", lwpid_of (thread),
		      lwp->suspended);
    }
}
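/* The suspend count is a nesting counter: every lwp_suspended_inc
   must be paired with a lwp_suspended_decr, and the LWP is only
   resumable once the count is back to zero.  A negative count means
   unbalanced calls, hence the internal_error above; a count above 4
   is merely suspicious and only warned about in debug mode.  */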
/* This function should only be called if the LWP got a SIGTRAP.

   Handle any tracepoint steps or hits.  Return 1 if a tracepoint
   event was handled, 0 otherwise.  */

static int
handle_tracepoints (struct lwp_info *lwp)
{
  struct thread_info *tinfo = get_lwp_thread (lwp);
  int tpoint_related_event = 0;

  gdb_assert (lwp->suspended == 0);

  /* If this tracepoint hit causes a tracing stop, we'll immediately
     uninsert tracepoints.  To do this, we temporarily pause all
     threads, unpatch away, and then unpause threads.  We need to make
     sure the unpausing doesn't resume LWP too.  */
  lwp_suspended_inc (lwp);

  /* And we need to be sure that any all-threads-stopping doesn't try
     to move threads out of the jump pads, as it could deadlock the
     inferior (LWP could be in the jump pad, maybe even holding the
     lock).  */

  /* Do any necessary step collect actions.  */
  tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);

  tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);

  /* See if we just hit a tracepoint and do its main collect
     actions.  */
  tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);

  lwp_suspended_decr (lwp);

  gdb_assert (lwp->suspended == 0);
  gdb_assert (!stabilizing_threads
	      || (lwp->collecting_fast_tracepoint
		  != fast_tpoint_collect_result::not_collecting));

  if (tpoint_related_event)
    {
      if (debug_threads)
	debug_printf ("got a tracepoint event\n");
      return 1;
    }

  return 0;
}

fast_tpoint_collect_result
linux_process_target::linux_fast_tracepoint_collecting
  (lwp_info *lwp, fast_tpoint_collect_status *status)
{
  CORE_ADDR thread_area;
  struct thread_info *thread = get_lwp_thread (lwp);

  /* Get the thread area address.  This is used to recognize which
     thread is which when tracing with the in-process agent library.
     We don't read anything from the address, and treat it as opaque;
     it's the address itself that we assume is unique per-thread.  */
  if (low_get_thread_area (lwpid_of (thread), &thread_area) == -1)
    return fast_tpoint_collect_result::not_collecting;

  return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
}

int
linux_process_target::low_get_thread_area (int lwpid, CORE_ADDR *addrp)
{
  return -1;
}
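/* The default low_get_thread_area above returns -1, meaning "not
   supported"; low targets that implement fast tracepoints override it
   to return the thread's TLS/thread-pointer address (for example, the
   x86 low target queries the kernel for the thread area).  This note
   describes the override convention, not a specific implementation.  */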
*/ 1973 if (low_get_thread_area (lwpid_of (thread), &thread_area) == -1) 1974 return fast_tpoint_collect_result::not_collecting; 1975 1976 return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status); 1977 } 1978 1979 int 1980 linux_process_target::low_get_thread_area (int lwpid, CORE_ADDR *addrp) 1981 { 1982 return -1; 1983 } 1984 1985 bool 1986 linux_process_target::maybe_move_out_of_jump_pad (lwp_info *lwp, int *wstat) 1987 { 1988 struct thread_info *saved_thread; 1989 1990 saved_thread = current_thread; 1991 current_thread = get_lwp_thread (lwp); 1992 1993 if ((wstat == NULL 1994 || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP)) 1995 && supports_fast_tracepoints () 1996 && agent_loaded_p ()) 1997 { 1998 struct fast_tpoint_collect_status status; 1999 2000 if (debug_threads) 2001 debug_printf ("Checking whether LWP %ld needs to move out of the " 2002 "jump pad.\n", 2003 lwpid_of (current_thread)); 2004 2005 fast_tpoint_collect_result r 2006 = linux_fast_tracepoint_collecting (lwp, &status); 2007 2008 if (wstat == NULL 2009 || (WSTOPSIG (*wstat) != SIGILL 2010 && WSTOPSIG (*wstat) != SIGFPE 2011 && WSTOPSIG (*wstat) != SIGSEGV 2012 && WSTOPSIG (*wstat) != SIGBUS)) 2013 { 2014 lwp->collecting_fast_tracepoint = r; 2015 2016 if (r != fast_tpoint_collect_result::not_collecting) 2017 { 2018 if (r == fast_tpoint_collect_result::before_insn 2019 && lwp->exit_jump_pad_bkpt == NULL) 2020 { 2021 /* Haven't executed the original instruction yet. 2022 Set breakpoint there, and wait till it's hit, 2023 then single-step until exiting the jump pad. */ 2024 lwp->exit_jump_pad_bkpt 2025 = set_breakpoint_at (status.adjusted_insn_addr, NULL); 2026 } 2027 2028 if (debug_threads) 2029 debug_printf ("Checking whether LWP %ld needs to move out of " 2030 "the jump pad...it does\n", 2031 lwpid_of (current_thread)); 2032 current_thread = saved_thread; 2033 2034 return true; 2035 } 2036 } 2037 else 2038 { 2039 /* If we get a synchronous signal while collecting, *and* 2040 while executing the (relocated) original instruction, 2041 reset the PC to point at the tpoint address, before 2042 reporting to GDB. Otherwise, it's an IPA lib bug: just 2043 report the signal to GDB, and pray for the best. */ 2044 2045 lwp->collecting_fast_tracepoint 2046 = fast_tpoint_collect_result::not_collecting; 2047 2048 if (r != fast_tpoint_collect_result::not_collecting 2049 && (status.adjusted_insn_addr <= lwp->stop_pc 2050 && lwp->stop_pc < status.adjusted_insn_addr_end)) 2051 { 2052 siginfo_t info; 2053 struct regcache *regcache; 2054 2055 /* The si_addr on a few signals references the address 2056 of the faulting instruction. Adjust that as 2057 well. */ 2058 if ((WSTOPSIG (*wstat) == SIGILL 2059 || WSTOPSIG (*wstat) == SIGFPE 2060 || WSTOPSIG (*wstat) == SIGBUS 2061 || WSTOPSIG (*wstat) == SIGSEGV) 2062 && ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread), 2063 (PTRACE_TYPE_ARG3) 0, &info) == 0 2064 /* Final check just to make sure we don't clobber 2065 the siginfo of non-kernel-sent signals. */ 2066 && (uintptr_t) info.si_addr == lwp->stop_pc) 2067 { 2068 info.si_addr = (void *) (uintptr_t) status.tpoint_addr; 2069 ptrace (PTRACE_SETSIGINFO, lwpid_of (current_thread), 2070 (PTRACE_TYPE_ARG3) 0, &info); 2071 } 2072 2073 regcache = get_thread_regcache (current_thread, 1); 2074 low_set_pc (regcache, status.tpoint_addr); 2075 lwp->stop_pc = status.tpoint_addr; 2076 2077 /* Cancel any fast tracepoint lock this thread was 2078 holding. 
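   Since the PC was just rewound to the tracepoint address, the
   thread will never reach the in-process agent code that would have
   released the trace buffer lock itself, so release it on the
   thread's behalf.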
*/ 2079 force_unlock_trace_buffer (); 2080 } 2081 2082 if (lwp->exit_jump_pad_bkpt != NULL) 2083 { 2084 if (debug_threads) 2085 debug_printf ("Cancelling fast exit-jump-pad: removing bkpt. " 2086 "stopping all threads momentarily.\n"); 2087 2088 stop_all_lwps (1, lwp); 2089 2090 delete_breakpoint (lwp->exit_jump_pad_bkpt); 2091 lwp->exit_jump_pad_bkpt = NULL; 2092 2093 unstop_all_lwps (1, lwp); 2094 2095 gdb_assert (lwp->suspended >= 0); 2096 } 2097 } 2098 } 2099 2100 if (debug_threads) 2101 debug_printf ("Checking whether LWP %ld needs to move out of the " 2102 "jump pad...no\n", 2103 lwpid_of (current_thread)); 2104 2105 current_thread = saved_thread; 2106 return false; 2107 } 2108 2109 /* Enqueue one signal in the "signals to report later when out of the 2110 jump pad" list. */ 2111 2112 static void 2113 enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat) 2114 { 2115 struct thread_info *thread = get_lwp_thread (lwp); 2116 2117 if (debug_threads) 2118 debug_printf ("Deferring signal %d for LWP %ld.\n", 2119 WSTOPSIG (*wstat), lwpid_of (thread)); 2120 2121 if (debug_threads) 2122 { 2123 for (const auto &sig : lwp->pending_signals_to_report) 2124 debug_printf (" Already queued %d\n", 2125 sig.signal); 2126 2127 debug_printf (" (no more currently queued signals)\n"); 2128 } 2129 2130 /* Don't enqueue non-RT signals if they are already in the deferred 2131 queue. (SIGSTOP being the easiest signal to see ending up here 2132 twice) */ 2133 if (WSTOPSIG (*wstat) < __SIGRTMIN) 2134 { 2135 for (const auto &sig : lwp->pending_signals_to_report) 2136 { 2137 if (sig.signal == WSTOPSIG (*wstat)) 2138 { 2139 if (debug_threads) 2140 debug_printf ("Not requeuing already queued non-RT signal %d" 2141 " for LWP %ld\n", 2142 sig.signal, 2143 lwpid_of (thread)); 2144 return; 2145 } 2146 } 2147 } 2148 2149 lwp->pending_signals_to_report.emplace_back (WSTOPSIG (*wstat)); 2150 2151 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0, 2152 &lwp->pending_signals_to_report.back ().info); 2153 } 2154 2155 /* Dequeue one signal from the "signals to report later when out of 2156 the jump pad" list. 
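
   The status handed back through WSTAT is synthesized with
   W_STOPCODE, which in glibc expands to roughly ((sig) << 8 | 0x7f),
   so WIFSTOPPED and WSTOPSIG decode it exactly like a status
   returned by waitpid.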
*/ 2157 2158 static int 2159 dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat) 2160 { 2161 struct thread_info *thread = get_lwp_thread (lwp); 2162 2163 if (!lwp->pending_signals_to_report.empty ()) 2164 { 2165 const pending_signal &p_sig = lwp->pending_signals_to_report.front (); 2166 2167 *wstat = W_STOPCODE (p_sig.signal); 2168 if (p_sig.info.si_signo != 0) 2169 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0, 2170 &p_sig.info); 2171 2172 lwp->pending_signals_to_report.pop_front (); 2173 2174 if (debug_threads) 2175 debug_printf ("Reporting deferred signal %d for LWP %ld.\n", 2176 WSTOPSIG (*wstat), lwpid_of (thread)); 2177 2178 if (debug_threads) 2179 { 2180 for (const auto &sig : lwp->pending_signals_to_report) 2181 debug_printf (" Still queued %d\n", 2182 sig.signal); 2183 2184 debug_printf (" (no more queued signals)\n"); 2185 } 2186 2187 return 1; 2188 } 2189 2190 return 0; 2191 } 2192 2193 bool 2194 linux_process_target::check_stopped_by_watchpoint (lwp_info *child) 2195 { 2196 struct thread_info *saved_thread = current_thread; 2197 current_thread = get_lwp_thread (child); 2198 2199 if (low_stopped_by_watchpoint ()) 2200 { 2201 child->stop_reason = TARGET_STOPPED_BY_WATCHPOINT; 2202 child->stopped_data_address = low_stopped_data_address (); 2203 } 2204 2205 current_thread = saved_thread; 2206 2207 return child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT; 2208 } 2209 2210 bool 2211 linux_process_target::low_stopped_by_watchpoint () 2212 { 2213 return false; 2214 } 2215 2216 CORE_ADDR 2217 linux_process_target::low_stopped_data_address () 2218 { 2219 return 0; 2220 } 2221 2222 /* Return the ptrace options that we want to try to enable. */ 2223 2224 static int 2225 linux_low_ptrace_options (int attached) 2226 { 2227 client_state &cs = get_client_state (); 2228 int options = 0; 2229 2230 if (!attached) 2231 options |= PTRACE_O_EXITKILL; 2232 2233 if (cs.report_fork_events) 2234 options |= PTRACE_O_TRACEFORK; 2235 2236 if (cs.report_vfork_events) 2237 options |= (PTRACE_O_TRACEVFORK | PTRACE_O_TRACEVFORKDONE); 2238 2239 if (cs.report_exec_events) 2240 options |= PTRACE_O_TRACEEXEC; 2241 2242 options |= PTRACE_O_TRACESYSGOOD; 2243 2244 return options; 2245 } 2246 2247 lwp_info * 2248 linux_process_target::filter_event (int lwpid, int wstat) 2249 { 2250 client_state &cs = get_client_state (); 2251 struct lwp_info *child; 2252 struct thread_info *thread; 2253 int have_stop_pc = 0; 2254 2255 child = find_lwp_pid (ptid_t (lwpid)); 2256 2257 /* Check for stop events reported by a process we didn't already 2258 know about - anything not already in our LWP list. 2259 2260 If we're expecting to receive stopped processes after 2261 fork, vfork, and clone events, then we'll just add the 2262 new one to our list and go back to waiting for the event 2263 to be reported - the stopped process might be returned 2264 from waitpid before or after the event is. 2265 2266 But note the case of a non-leader thread exec'ing after the 2267 leader having exited, and gone from our lists (because 2268 check_zombie_leaders deleted it). The non-leader thread 2269 changes its tid to the tgid. */ 2270 2271 if (WIFSTOPPED (wstat) && child == NULL && WSTOPSIG (wstat) == SIGTRAP 2272 && linux_ptrace_get_extended_event (wstat) == PTRACE_EVENT_EXEC) 2273 { 2274 ptid_t child_ptid; 2275 2276 /* A multi-thread exec after we had seen the leader exiting. 
*/
      if (debug_threads)
	{
	  debug_printf ("LLW: Re-adding thread group leader LWP %d "
			"after exec.\n", lwpid);
	}

      child_ptid = ptid_t (lwpid, lwpid, 0);
      child = add_lwp (child_ptid);
      child->stopped = 1;
      current_thread = child->thread;
    }

  /* If we didn't find a process, one of two things presumably happened:
     - A process we started and then detached from has exited.  Ignore it.
     - A process we are controlling has forked and the new child's stop
     was reported to us by the kernel.  Save its PID.  */
  if (child == NULL && WIFSTOPPED (wstat))
    {
      add_to_pid_list (&stopped_pids, lwpid, wstat);
      return NULL;
    }
  else if (child == NULL)
    return NULL;

  thread = get_lwp_thread (child);

  child->stopped = 1;

  child->last_status = wstat;

  /* Check if the thread has exited.  */
  if ((WIFEXITED (wstat) || WIFSIGNALED (wstat)))
    {
      if (debug_threads)
	debug_printf ("LLFE: %d exited.\n", lwpid);

      if (finish_step_over (child))
	{
	  /* Unsuspend all other LWPs, and set them back running again.  */
	  unsuspend_all_lwps (child);
	}

      /* If there is at least one more LWP, then the exit signal was
	 not the end of the debugged application and should be
	 ignored, unless GDB wants to hear about thread exits.  */
      if (cs.report_thread_events
	  || last_thread_of_process_p (pid_of (thread)))
	{
	  /* Events are serialized to the GDB core, and we can't
	     report this one right now.  Leave the status pending for
	     the next time we're able to report it.  */
	  mark_lwp_dead (child, wstat);
	  return child;
	}
      else
	{
	  delete_lwp (child);
	  return NULL;
	}
    }

  gdb_assert (WIFSTOPPED (wstat));

  if (WIFSTOPPED (wstat))
    {
      struct process_info *proc;

      /* Architecture-specific setup after inferior is running.  */
      proc = find_process_pid (pid_of (thread));
      if (proc->tdesc == NULL)
	{
	  if (proc->attached)
	    {
	      /* This needs to happen after we have attached to the
		 inferior and it is stopped for the first time, but
		 before we access any inferior registers.  */
	      arch_setup_thread (thread);
	    }
	  else
	    {
	      /* The process is started, but GDBserver will do
		 architecture-specific setup after the program stops at
		 the first instruction.  */
	      child->status_pending_p = 1;
	      child->status_pending = wstat;
	      return child;
	    }
	}
    }

  if (WIFSTOPPED (wstat) && child->must_set_ptrace_flags)
    {
      struct process_info *proc = find_process_pid (pid_of (thread));
      int options = linux_low_ptrace_options (proc->attached);

      linux_enable_event_reporting (lwpid, options);
      child->must_set_ptrace_flags = 0;
    }

  /* Always update syscall_state, even if it will be filtered later.  */
  if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SYSCALL_SIGTRAP)
    {
      child->syscall_state
	= (child->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
	   ? TARGET_WAITKIND_SYSCALL_RETURN
	   : TARGET_WAITKIND_SYSCALL_ENTRY);
    }
  else
    {
      /* Almost all other ptrace-stops are known to be outside of system
	 calls, with further exceptions in handle_extended_wait.  */
      child->syscall_state = TARGET_WAITKIND_IGNORE;
    }

  /* Be careful to not overwrite stop_pc until save_stop_reason is
     called.
*/ 2393 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP 2394 && linux_is_extended_waitstatus (wstat)) 2395 { 2396 child->stop_pc = get_pc (child); 2397 if (handle_extended_wait (&child, wstat)) 2398 { 2399 /* The event has been handled, so just return without 2400 reporting it. */ 2401 return NULL; 2402 } 2403 } 2404 2405 if (linux_wstatus_maybe_breakpoint (wstat)) 2406 { 2407 if (save_stop_reason (child)) 2408 have_stop_pc = 1; 2409 } 2410 2411 if (!have_stop_pc) 2412 child->stop_pc = get_pc (child); 2413 2414 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGSTOP 2415 && child->stop_expected) 2416 { 2417 if (debug_threads) 2418 debug_printf ("Expected stop.\n"); 2419 child->stop_expected = 0; 2420 2421 if (thread->last_resume_kind == resume_stop) 2422 { 2423 /* We want to report the stop to the core. Treat the 2424 SIGSTOP as a normal event. */ 2425 if (debug_threads) 2426 debug_printf ("LLW: resume_stop SIGSTOP caught for %s.\n", 2427 target_pid_to_str (ptid_of (thread))); 2428 } 2429 else if (stopping_threads != NOT_STOPPING_THREADS) 2430 { 2431 /* Stopping threads. We don't want this SIGSTOP to end up 2432 pending. */ 2433 if (debug_threads) 2434 debug_printf ("LLW: SIGSTOP caught for %s " 2435 "while stopping threads.\n", 2436 target_pid_to_str (ptid_of (thread))); 2437 return NULL; 2438 } 2439 else 2440 { 2441 /* This is a delayed SIGSTOP. Filter out the event. */ 2442 if (debug_threads) 2443 debug_printf ("LLW: %s %s, 0, 0 (discard delayed SIGSTOP)\n", 2444 child->stepping ? "step" : "continue", 2445 target_pid_to_str (ptid_of (thread))); 2446 2447 resume_one_lwp (child, child->stepping, 0, NULL); 2448 return NULL; 2449 } 2450 } 2451 2452 child->status_pending_p = 1; 2453 child->status_pending = wstat; 2454 return child; 2455 } 2456 2457 bool 2458 linux_process_target::maybe_hw_step (thread_info *thread) 2459 { 2460 if (supports_hardware_single_step ()) 2461 return true; 2462 else 2463 { 2464 /* GDBserver must insert single-step breakpoint for software 2465 single step. */ 2466 gdb_assert (has_single_step_breakpoints (thread)); 2467 return false; 2468 } 2469 } 2470 2471 void 2472 linux_process_target::resume_stopped_resumed_lwps (thread_info *thread) 2473 { 2474 struct lwp_info *lp = get_thread_lwp (thread); 2475 2476 if (lp->stopped 2477 && !lp->suspended 2478 && !lp->status_pending_p 2479 && thread->last_status.kind == TARGET_WAITKIND_IGNORE) 2480 { 2481 int step = 0; 2482 2483 if (thread->last_resume_kind == resume_step) 2484 step = maybe_hw_step (thread); 2485 2486 if (debug_threads) 2487 debug_printf ("RSRL: resuming stopped-resumed LWP %s at %s: step=%d\n", 2488 target_pid_to_str (ptid_of (thread)), 2489 paddress (lp->stop_pc), 2490 step); 2491 2492 resume_one_lwp (lp, step, GDB_SIGNAL_0, NULL); 2493 } 2494 } 2495 2496 int 2497 linux_process_target::wait_for_event_filtered (ptid_t wait_ptid, 2498 ptid_t filter_ptid, 2499 int *wstatp, int options) 2500 { 2501 struct thread_info *event_thread; 2502 struct lwp_info *event_child, *requested_child; 2503 sigset_t block_mask, prev_mask; 2504 2505 retry: 2506 /* N.B. event_thread points to the thread_info struct that contains 2507 event_child. Keep them in sync. */ 2508 event_thread = NULL; 2509 event_child = NULL; 2510 requested_child = NULL; 2511 2512 /* Check for a lwp with a pending status. 
*/

  if (filter_ptid == minus_one_ptid || filter_ptid.is_pid ())
    {
      event_thread = find_thread_in_random ([&] (thread_info *thread)
	{
	  return status_pending_p_callback (thread, filter_ptid);
	});

      if (event_thread != NULL)
	event_child = get_thread_lwp (event_thread);
      if (debug_threads && event_thread)
	debug_printf ("Got a pending child %ld\n", lwpid_of (event_thread));
    }
  else if (filter_ptid != null_ptid)
    {
      requested_child = find_lwp_pid (filter_ptid);

      if (stopping_threads == NOT_STOPPING_THREADS
	  && requested_child->status_pending_p
	  && (requested_child->collecting_fast_tracepoint
	      != fast_tpoint_collect_result::not_collecting))
	{
	  enqueue_one_deferred_signal (requested_child,
				       &requested_child->status_pending);
	  requested_child->status_pending_p = 0;
	  requested_child->status_pending = 0;
	  resume_one_lwp (requested_child, 0, 0, NULL);
	}

      if (requested_child->suspended
	  && requested_child->status_pending_p)
	{
	  internal_error (__FILE__, __LINE__,
			  "requesting an event out of a"
			  " suspended child?");
	}

      if (requested_child->status_pending_p)
	{
	  event_child = requested_child;
	  event_thread = get_lwp_thread (event_child);
	}
    }

  if (event_child != NULL)
    {
      if (debug_threads)
	debug_printf ("Got an event from pending child %ld (%04x)\n",
		      lwpid_of (event_thread), event_child->status_pending);
      *wstatp = event_child->status_pending;
      event_child->status_pending_p = 0;
      event_child->status_pending = 0;
      current_thread = event_thread;
      return lwpid_of (event_thread);
    }

  /* But if we don't find a pending event, we'll have to wait.

     We only enter this loop if no process has a pending wait status.
     Thus any action taken in response to a wait status inside this
     loop is responding as soon as we detect the status, not after any
     pending events.  */

  /* Make sure SIGCHLD is blocked until the sigsuspend below.  Block
     all signals while here.  */
  sigfillset (&block_mask);
  gdb_sigmask (SIG_BLOCK, &block_mask, &prev_mask);

  /* Always pull all events out of the kernel.  We'll randomly select
     an event LWP out of all that have events, to prevent
     starvation.  */
  while (event_child == NULL)
    {
      pid_t ret = 0;

      /* Always use -1 and WNOHANG, due to a couple of kernel/ptrace
	 quirks:

	 - If the thread group leader exits while other threads in the
	 thread group still exist, waitpid(TGID, ...) hangs.  That
	 waitpid won't return an exit status until the other threads
	 in the group are reaped.

	 - When a non-leader thread execs, that thread just vanishes
	 without reporting an exit (so we'd hang if we waited for it
	 explicitly in that case).  The exec event is reported to
	 the TGID pid.  */
      errno = 0;
      ret = my_waitpid (-1, wstatp, options | WNOHANG);

      if (debug_threads)
	debug_printf ("LWFE: waitpid(-1, ...) returned %d, %s\n",
		      ret, errno ? safe_strerror (errno) : "ERRNO-OK");

      if (ret > 0)
	{
	  if (debug_threads)
	    {
	      debug_printf ("LLW: waitpid %ld received %s\n",
			    (long) ret, status_to_str (*wstatp));
	    }

	  /* Filter all events.  IOW, leave all events pending.  We'll
	     randomly select an event LWP out of all that have events
	     below.
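
	     In effect, this is the standard drain pattern:

	       while ((ret = my_waitpid (-1, wstatp, options | WNOHANG)) > 0)
		 filter_event (ret, *wstatp);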
*/ 2618 filter_event (ret, *wstatp); 2619 /* Retry until nothing comes out of waitpid. A single 2620 SIGCHLD can indicate more than one child stopped. */ 2621 continue; 2622 } 2623 2624 /* Now that we've pulled all events out of the kernel, resume 2625 LWPs that don't have an interesting event to report. */ 2626 if (stopping_threads == NOT_STOPPING_THREADS) 2627 for_each_thread ([this] (thread_info *thread) 2628 { 2629 resume_stopped_resumed_lwps (thread); 2630 }); 2631 2632 /* ... and find an LWP with a status to report to the core, if 2633 any. */ 2634 event_thread = find_thread_in_random ([&] (thread_info *thread) 2635 { 2636 return status_pending_p_callback (thread, filter_ptid); 2637 }); 2638 2639 if (event_thread != NULL) 2640 { 2641 event_child = get_thread_lwp (event_thread); 2642 *wstatp = event_child->status_pending; 2643 event_child->status_pending_p = 0; 2644 event_child->status_pending = 0; 2645 break; 2646 } 2647 2648 /* Check for zombie thread group leaders. Those can't be reaped 2649 until all other threads in the thread group are. */ 2650 check_zombie_leaders (); 2651 2652 auto not_stopped = [&] (thread_info *thread) 2653 { 2654 return not_stopped_callback (thread, wait_ptid); 2655 }; 2656 2657 /* If there are no resumed children left in the set of LWPs we 2658 want to wait for, bail. We can't just block in 2659 waitpid/sigsuspend, because lwps might have been left stopped 2660 in trace-stop state, and we'd be stuck forever waiting for 2661 their status to change (which would only happen if we resumed 2662 them). Even if WNOHANG is set, this return code is preferred 2663 over 0 (below), as it is more detailed. */ 2664 if (find_thread (not_stopped) == NULL) 2665 { 2666 if (debug_threads) 2667 debug_printf ("LLW: exit (no unwaited-for LWP)\n"); 2668 gdb_sigmask (SIG_SETMASK, &prev_mask, NULL); 2669 return -1; 2670 } 2671 2672 /* No interesting event to report to the caller. */ 2673 if ((options & WNOHANG)) 2674 { 2675 if (debug_threads) 2676 debug_printf ("WNOHANG set, no event found\n"); 2677 2678 gdb_sigmask (SIG_SETMASK, &prev_mask, NULL); 2679 return 0; 2680 } 2681 2682 /* Block until we get an event reported with SIGCHLD. */ 2683 if (debug_threads) 2684 debug_printf ("sigsuspend'ing\n"); 2685 2686 sigsuspend (&prev_mask); 2687 gdb_sigmask (SIG_SETMASK, &prev_mask, NULL); 2688 goto retry; 2689 } 2690 2691 gdb_sigmask (SIG_SETMASK, &prev_mask, NULL); 2692 2693 current_thread = event_thread; 2694 2695 return lwpid_of (event_thread); 2696 } 2697 2698 int 2699 linux_process_target::wait_for_event (ptid_t ptid, int *wstatp, int options) 2700 { 2701 return wait_for_event_filtered (ptid, ptid, wstatp, options); 2702 } 2703 2704 /* Select one LWP out of those that have events pending. */ 2705 2706 static void 2707 select_event_lwp (struct lwp_info **orig_lp) 2708 { 2709 struct thread_info *event_thread = NULL; 2710 2711 /* In all-stop, give preference to the LWP that is being 2712 single-stepped. There will be at most one, and it's the LWP that 2713 the core is most interested in. If we didn't do this, then we'd 2714 have to handle pending step SIGTRAPs somehow in case the core 2715 later continues the previously-stepped thread, otherwise we'd 2716 report the pending SIGTRAP, and the core, not having stepped the 2717 thread, wouldn't understand what the trap was for, and therefore 2718 would report it to the user as a random signal. 
*/
  if (!non_stop)
    {
      event_thread = find_thread ([] (thread_info *thread)
	{
	  lwp_info *lp = get_thread_lwp (thread);

	  return (thread->last_status.kind == TARGET_WAITKIND_IGNORE
		  && thread->last_resume_kind == resume_step
		  && lp->status_pending_p);
	});

      if (event_thread != NULL)
	{
	  if (debug_threads)
	    debug_printf ("SEL: Select single-step %s\n",
			  target_pid_to_str (ptid_of (event_thread)));
	}
    }
  if (event_thread == NULL)
    {
      /* No single-stepping LWP.  Select one at random, out of those
	 which have had events.  */

      event_thread = find_thread_in_random ([&] (thread_info *thread)
	{
	  lwp_info *lp = get_thread_lwp (thread);

	  /* Only resumed LWPs that have an event pending.  */
	  return (thread->last_status.kind == TARGET_WAITKIND_IGNORE
		  && lp->status_pending_p);
	});
    }

  if (event_thread != NULL)
    {
      struct lwp_info *event_lp = get_thread_lwp (event_thread);

      /* Switch the event LWP.  */
      *orig_lp = event_lp;
    }
}

/* Decrement the suspend count of all LWPs, except EXCEPT, if
   non-NULL.  */

static void
unsuspend_all_lwps (struct lwp_info *except)
{
  for_each_thread ([&] (thread_info *thread)
    {
      lwp_info *lwp = get_thread_lwp (thread);

      if (lwp != except)
	lwp_suspended_decr (lwp);
    });
}

static bool lwp_running (thread_info *thread);

/* Stabilize threads (move out of jump pads).

   If a thread is midway through collecting a fast tracepoint, we
   need to finish the collection and move it out of the jump pad
   before reporting the signal.

   This avoids recursion while collecting (when a signal arrives
   midway, and the signal handler itself collects), which would trash
   the trace buffer.  In case the user set a breakpoint in a signal
   handler, this avoids the backtrace showing the jump pad, etc..
   Most importantly, there are certain things we can't do safely if
   threads are stopped in a jump pad (or in one of its callees).  For
   example:

   - starting a new trace run.  A thread still collecting the
   previous run could trash the trace buffer when resumed.  The trace
   buffer control structures would have been reset but the thread had
   no way to tell.  The thread could even be midway through
   memcpy'ing into the buffer, which would mean that when resumed, it
   would clobber the trace buffer that had been set up for a new run.

   - we can't rewrite/reuse the jump pads for new tracepoints
   safely.  Say you do tstart while a thread is stopped midway
   through collecting.  When the thread is later resumed, it finishes
   the collection, and returns to the jump pad, to execute the
   original instruction that was under the tracepoint jump at the
   time the older run had been started.  If the jump pad had been
   rewritten since for something else in the new run, the thread
   would now execute the wrong / random instructions.
*/

void
linux_process_target::stabilize_threads ()
{
  thread_info *thread_stuck = find_thread ([this] (thread_info *thread)
    {
      return stuck_in_jump_pad (thread);
    });

  if (thread_stuck != NULL)
    {
      if (debug_threads)
	debug_printf ("can't stabilize, LWP %ld is stuck in jump pad\n",
		      lwpid_of (thread_stuck));
      return;
    }

  thread_info *saved_thread = current_thread;

  stabilizing_threads = 1;

  /* Kick 'em all.  */
  for_each_thread ([this] (thread_info *thread)
    {
      move_out_of_jump_pad (thread);
    });

  /* Loop until all are stopped out of the jump pads.  */
  while (find_thread (lwp_running) != NULL)
    {
      struct target_waitstatus ourstatus;
      struct lwp_info *lwp;
      int wstat;

      /* Note that we go through the full wait event loop.  While
	 moving threads out of jump pad, we need to be able to step
	 over internal breakpoints and such.  */
      wait_1 (minus_one_ptid, &ourstatus, 0);

      if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
	{
	  lwp = get_thread_lwp (current_thread);

	  /* Lock it.  */
	  lwp_suspended_inc (lwp);

	  if (ourstatus.value.sig != GDB_SIGNAL_0
	      || current_thread->last_resume_kind == resume_stop)
	    {
	      wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.value.sig));
	      enqueue_one_deferred_signal (lwp, &wstat);
	    }
	}
    }

  unsuspend_all_lwps (NULL);

  stabilizing_threads = 0;

  current_thread = saved_thread;

  if (debug_threads)
    {
      thread_stuck = find_thread ([this] (thread_info *thread)
	{
	  return stuck_in_jump_pad (thread);
	});

      if (thread_stuck != NULL)
	debug_printf ("couldn't stabilize, LWP %ld got stuck in jump pad\n",
		      lwpid_of (thread_stuck));
    }
}

/* Convenience function that is called when the kernel reports an
   event that is not passed out to GDB.  */

static ptid_t
ignore_event (struct target_waitstatus *ourstatus)
{
  /* If we got an event, there may still be others, as a single
     SIGCHLD can indicate more than one child stopped.  This forces
     another target_wait call.  */
  async_file_mark ();

  ourstatus->kind = TARGET_WAITKIND_IGNORE;
  return null_ptid;
}

ptid_t
linux_process_target::filter_exit_event (lwp_info *event_child,
					 target_waitstatus *ourstatus)
{
  client_state &cs = get_client_state ();
  struct thread_info *thread = get_lwp_thread (event_child);
  ptid_t ptid = ptid_of (thread);

  if (!last_thread_of_process_p (pid_of (thread)))
    {
      if (cs.report_thread_events)
	ourstatus->kind = TARGET_WAITKIND_THREAD_EXITED;
      else
	ourstatus->kind = TARGET_WAITKIND_IGNORE;

      delete_lwp (event_child);
    }
  return ptid;
}

/* Returns 1 if GDB is interested in any event_child syscalls.
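
   PROC->syscalls_to_catch below holds the syscall numbers the client
   asked for (via the QCatchSyscalls packet); a blanket "catch
   syscall" request is represented by the single entry ANY_SYSCALL,
   which gdb_catch_this_syscall checks for first.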
*/ 2917 2918 static int 2919 gdb_catching_syscalls_p (struct lwp_info *event_child) 2920 { 2921 struct thread_info *thread = get_lwp_thread (event_child); 2922 struct process_info *proc = get_thread_process (thread); 2923 2924 return !proc->syscalls_to_catch.empty (); 2925 } 2926 2927 bool 2928 linux_process_target::gdb_catch_this_syscall (lwp_info *event_child) 2929 { 2930 int sysno; 2931 struct thread_info *thread = get_lwp_thread (event_child); 2932 struct process_info *proc = get_thread_process (thread); 2933 2934 if (proc->syscalls_to_catch.empty ()) 2935 return false; 2936 2937 if (proc->syscalls_to_catch[0] == ANY_SYSCALL) 2938 return true; 2939 2940 get_syscall_trapinfo (event_child, &sysno); 2941 2942 for (int iter : proc->syscalls_to_catch) 2943 if (iter == sysno) 2944 return true; 2945 2946 return false; 2947 } 2948 2949 ptid_t 2950 linux_process_target::wait_1 (ptid_t ptid, target_waitstatus *ourstatus, 2951 int target_options) 2952 { 2953 client_state &cs = get_client_state (); 2954 int w; 2955 struct lwp_info *event_child; 2956 int options; 2957 int pid; 2958 int step_over_finished; 2959 int bp_explains_trap; 2960 int maybe_internal_trap; 2961 int report_to_gdb; 2962 int trace_event; 2963 int in_step_range; 2964 int any_resumed; 2965 2966 if (debug_threads) 2967 { 2968 debug_enter (); 2969 debug_printf ("wait_1: [%s]\n", target_pid_to_str (ptid)); 2970 } 2971 2972 /* Translate generic target options into linux options. */ 2973 options = __WALL; 2974 if (target_options & TARGET_WNOHANG) 2975 options |= WNOHANG; 2976 2977 bp_explains_trap = 0; 2978 trace_event = 0; 2979 in_step_range = 0; 2980 ourstatus->kind = TARGET_WAITKIND_IGNORE; 2981 2982 auto status_pending_p_any = [&] (thread_info *thread) 2983 { 2984 return status_pending_p_callback (thread, minus_one_ptid); 2985 }; 2986 2987 auto not_stopped = [&] (thread_info *thread) 2988 { 2989 return not_stopped_callback (thread, minus_one_ptid); 2990 }; 2991 2992 /* Find a resumed LWP, if any. */ 2993 if (find_thread (status_pending_p_any) != NULL) 2994 any_resumed = 1; 2995 else if (find_thread (not_stopped) != NULL) 2996 any_resumed = 1; 2997 else 2998 any_resumed = 0; 2999 3000 if (step_over_bkpt == null_ptid) 3001 pid = wait_for_event (ptid, &w, options); 3002 else 3003 { 3004 if (debug_threads) 3005 debug_printf ("step_over_bkpt set [%s], doing a blocking wait\n", 3006 target_pid_to_str (step_over_bkpt)); 3007 pid = wait_for_event (step_over_bkpt, &w, options & ~WNOHANG); 3008 } 3009 3010 if (pid == 0 || (pid == -1 && !any_resumed)) 3011 { 3012 gdb_assert (target_options & TARGET_WNOHANG); 3013 3014 if (debug_threads) 3015 { 3016 debug_printf ("wait_1 ret = null_ptid, " 3017 "TARGET_WAITKIND_IGNORE\n"); 3018 debug_exit (); 3019 } 3020 3021 ourstatus->kind = TARGET_WAITKIND_IGNORE; 3022 return null_ptid; 3023 } 3024 else if (pid == -1) 3025 { 3026 if (debug_threads) 3027 { 3028 debug_printf ("wait_1 ret = null_ptid, " 3029 "TARGET_WAITKIND_NO_RESUMED\n"); 3030 debug_exit (); 3031 } 3032 3033 ourstatus->kind = TARGET_WAITKIND_NO_RESUMED; 3034 return null_ptid; 3035 } 3036 3037 event_child = get_thread_lwp (current_thread); 3038 3039 /* wait_for_event only returns an exit status for the last 3040 child of a process. Report it. 
*/
  if (WIFEXITED (w) || WIFSIGNALED (w))
    {
      if (WIFEXITED (w))
	{
	  ourstatus->kind = TARGET_WAITKIND_EXITED;
	  ourstatus->value.integer = WEXITSTATUS (w);

	  if (debug_threads)
	    {
	      debug_printf ("wait_1 ret = %s, exited with "
			    "retcode %d\n",
			    target_pid_to_str (ptid_of (current_thread)),
			    WEXITSTATUS (w));
	      debug_exit ();
	    }
	}
      else
	{
	  ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
	  ourstatus->value.sig = gdb_signal_from_host (WTERMSIG (w));

	  if (debug_threads)
	    {
	      debug_printf ("wait_1 ret = %s, terminated with "
			    "signal %d\n",
			    target_pid_to_str (ptid_of (current_thread)),
			    WTERMSIG (w));
	      debug_exit ();
	    }
	}

      if (ourstatus->kind == TARGET_WAITKIND_EXITED)
	return filter_exit_event (event_child, ourstatus);

      return ptid_of (current_thread);
    }

  /* If a step-over executes a breakpoint instruction, then in the
     case of a hardware single step it means a gdb/gdbserver
     breakpoint had been planted on top of a permanent breakpoint,
     while in the case of a software single step it may just mean
     that gdbserver hit the reinsert breakpoint.  The PC has been
     adjusted by save_stop_reason to point at the breakpoint address.
     So in the hardware single step case, advance the PC manually
     past the breakpoint; in the software single step case, advance
     it only if we are not hitting the single_step_breakpoint.  This
     prevents a program from trapping on a permanent breakpoint
     forever.  */
  if (step_over_bkpt != null_ptid
      && event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
      && (event_child->stepping
	  || !single_step_breakpoint_inserted_here (event_child->stop_pc)))
    {
      int increment_pc = 0;
      int breakpoint_kind = 0;
      CORE_ADDR stop_pc = event_child->stop_pc;

      breakpoint_kind = breakpoint_kind_from_current_state (&stop_pc);
      sw_breakpoint_from_kind (breakpoint_kind, &increment_pc);

      if (debug_threads)
	{
	  debug_printf ("step-over for %s executed software breakpoint\n",
			target_pid_to_str (ptid_of (current_thread)));
	}

      if (increment_pc != 0)
	{
	  struct regcache *regcache
	    = get_thread_regcache (current_thread, 1);

	  event_child->stop_pc += increment_pc;
	  low_set_pc (regcache, event_child->stop_pc);

	  if (!low_breakpoint_at (event_child->stop_pc))
	    event_child->stop_reason = TARGET_STOPPED_BY_NO_REASON;
	}
    }

  /* If this event was not handled before, and is not a SIGTRAP, we
     report it.  SIGILL and SIGSEGV are also treated as traps in case
     a breakpoint is inserted at the current PC.  If this target does
     not support internal breakpoints at all, we also report the
     SIGTRAP without further processing; it's of no concern to us.  */
  maybe_internal_trap
    = (low_supports_breakpoints ()
       && (WSTOPSIG (w) == SIGTRAP
	   || ((WSTOPSIG (w) == SIGILL
		|| WSTOPSIG (w) == SIGSEGV)
	       && low_breakpoint_at (event_child->stop_pc))));

  if (maybe_internal_trap)
    {
      /* Handle anything that requires bookkeeping before deciding to
	 report the event or continue waiting.  */

      /* First check if we can explain the SIGTRAP with an internal
	 breakpoint, or if we should possibly report the event to GDB.
	 Do this before anything that may remove or insert a
	 breakpoint.
*/
      bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);

      /* We have a SIGTRAP, possibly a step-over dance has just
	 finished.  If so, tweak the state machine accordingly,
	 reinsert breakpoints and delete any single-step
	 breakpoints.  */
      step_over_finished = finish_step_over (event_child);

      /* Now invoke the callbacks of any internal breakpoints there.  */
      check_breakpoints (event_child->stop_pc);

      /* Handle tracepoint data collecting.  This may overflow the
	 trace buffer, and cause a tracing stop, removing
	 breakpoints.  */
      trace_event = handle_tracepoints (event_child);

      if (bp_explains_trap)
	{
	  if (debug_threads)
	    debug_printf ("Hit a gdbserver breakpoint.\n");
	}
    }
  else
    {
      /* We have some other signal, possibly a step-over dance was in
	 progress, and it should be cancelled too.  */
      step_over_finished = finish_step_over (event_child);
    }

  /* We have all the data we need.  Either report the event to GDB, or
     resume threads and keep waiting for more.  */

  /* If we're collecting a fast tracepoint, finish the collection and
     move out of the jump pad before delivering a signal.  See
     linux_stabilize_threads.  */

  if (WIFSTOPPED (w)
      && WSTOPSIG (w) != SIGTRAP
      && supports_fast_tracepoints ()
      && agent_loaded_p ())
    {
      if (debug_threads)
	debug_printf ("Got signal %d for LWP %ld.  Check if we need "
		      "to defer or adjust it.\n",
		      WSTOPSIG (w), lwpid_of (current_thread));

      /* Allow debugging the jump pad itself.  */
      if (current_thread->last_resume_kind != resume_step
	  && maybe_move_out_of_jump_pad (event_child, &w))
	{
	  enqueue_one_deferred_signal (event_child, &w);

	  if (debug_threads)
	    debug_printf ("Signal %d for LWP %ld deferred (in jump pad)\n",
			  WSTOPSIG (w), lwpid_of (current_thread));

	  resume_one_lwp (event_child, 0, 0, NULL);

	  if (debug_threads)
	    debug_exit ();
	  return ignore_event (ourstatus);
	}
    }

  if (event_child->collecting_fast_tracepoint
      != fast_tpoint_collect_result::not_collecting)
    {
      if (debug_threads)
	debug_printf ("LWP %ld was trying to move out of the jump pad (%d). "
		      "Check if we're already there.\n",
		      lwpid_of (current_thread),
		      (int) event_child->collecting_fast_tracepoint);

      trace_event = 1;

      event_child->collecting_fast_tracepoint
	= linux_fast_tracepoint_collecting (event_child, NULL);

      if (event_child->collecting_fast_tracepoint
	  != fast_tpoint_collect_result::before_insn)
	{
	  /* No longer need this breakpoint.  */
	  if (event_child->exit_jump_pad_bkpt != NULL)
	    {
	      if (debug_threads)
		debug_printf ("No longer need exit-jump-pad bkpt; "
			      "removing it; stopping all threads "
			      "momentarily.\n");

	      /* Other running threads could hit this breakpoint.
		 We don't handle moribund locations like GDB does,
		 instead we always pause all threads when removing
		 breakpoints, so that any step-over or
		 decr_pc_after_break adjustment is always taken
		 care of while the breakpoint is still
		 inserted.
*/ 3236 stop_all_lwps (1, event_child); 3237 3238 delete_breakpoint (event_child->exit_jump_pad_bkpt); 3239 event_child->exit_jump_pad_bkpt = NULL; 3240 3241 unstop_all_lwps (1, event_child); 3242 3243 gdb_assert (event_child->suspended >= 0); 3244 } 3245 } 3246 3247 if (event_child->collecting_fast_tracepoint 3248 == fast_tpoint_collect_result::not_collecting) 3249 { 3250 if (debug_threads) 3251 debug_printf ("fast tracepoint finished " 3252 "collecting successfully.\n"); 3253 3254 /* We may have a deferred signal to report. */ 3255 if (dequeue_one_deferred_signal (event_child, &w)) 3256 { 3257 if (debug_threads) 3258 debug_printf ("dequeued one signal.\n"); 3259 } 3260 else 3261 { 3262 if (debug_threads) 3263 debug_printf ("no deferred signals.\n"); 3264 3265 if (stabilizing_threads) 3266 { 3267 ourstatus->kind = TARGET_WAITKIND_STOPPED; 3268 ourstatus->value.sig = GDB_SIGNAL_0; 3269 3270 if (debug_threads) 3271 { 3272 debug_printf ("wait_1 ret = %s, stopped " 3273 "while stabilizing threads\n", 3274 target_pid_to_str (ptid_of (current_thread))); 3275 debug_exit (); 3276 } 3277 3278 return ptid_of (current_thread); 3279 } 3280 } 3281 } 3282 } 3283 3284 /* Check whether GDB would be interested in this event. */ 3285 3286 /* Check if GDB is interested in this syscall. */ 3287 if (WIFSTOPPED (w) 3288 && WSTOPSIG (w) == SYSCALL_SIGTRAP 3289 && !gdb_catch_this_syscall (event_child)) 3290 { 3291 if (debug_threads) 3292 { 3293 debug_printf ("Ignored syscall for LWP %ld.\n", 3294 lwpid_of (current_thread)); 3295 } 3296 3297 resume_one_lwp (event_child, event_child->stepping, 0, NULL); 3298 3299 if (debug_threads) 3300 debug_exit (); 3301 return ignore_event (ourstatus); 3302 } 3303 3304 /* If GDB is not interested in this signal, don't stop other 3305 threads, and don't report it to GDB. Just resume the inferior 3306 right away. We do this for threading-related signals as well as 3307 any that GDB specifically requested we ignore. But never ignore 3308 SIGSTOP if we sent it ourselves, and do not ignore signals when 3309 stepping - they may require special handling to skip the signal 3310 handler. Also never ignore signals that could be caused by a 3311 breakpoint. */ 3312 if (WIFSTOPPED (w) 3313 && current_thread->last_resume_kind != resume_step 3314 && ( 3315 #if defined (USE_THREAD_DB) && !defined (__ANDROID__) 3316 (current_process ()->priv->thread_db != NULL 3317 && (WSTOPSIG (w) == __SIGRTMIN 3318 || WSTOPSIG (w) == __SIGRTMIN + 1)) 3319 || 3320 #endif 3321 (cs.pass_signals[gdb_signal_from_host (WSTOPSIG (w))] 3322 && !(WSTOPSIG (w) == SIGSTOP 3323 && current_thread->last_resume_kind == resume_stop) 3324 && !linux_wstatus_maybe_breakpoint (w)))) 3325 { 3326 siginfo_t info, *info_p; 3327 3328 if (debug_threads) 3329 debug_printf ("Ignored signal %d for LWP %ld.\n", 3330 WSTOPSIG (w), lwpid_of (current_thread)); 3331 3332 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread), 3333 (PTRACE_TYPE_ARG3) 0, &info) == 0) 3334 info_p = &info; 3335 else 3336 info_p = NULL; 3337 3338 if (step_over_finished) 3339 { 3340 /* We cancelled this thread's step-over above. We still 3341 need to unsuspend all other LWPs, and set them back 3342 running again while the signal handler runs. */ 3343 unsuspend_all_lwps (event_child); 3344 3345 /* Enqueue the pending signal info so that proceed_all_lwps 3346 doesn't lose it. 
*/
	  enqueue_pending_signal (event_child, WSTOPSIG (w), info_p);

	  proceed_all_lwps ();
	}
      else
	{
	  resume_one_lwp (event_child, event_child->stepping,
			  WSTOPSIG (w), info_p);
	}

      if (debug_threads)
	debug_exit ();

      return ignore_event (ourstatus);
    }

  /* Note that all addresses are always "out of the step range" when
     there's no range to begin with.  */
  in_step_range = lwp_in_step_range (event_child);

  /* If GDB wanted this thread to single step, and the thread is out
     of the step range, we always want to report the SIGTRAP, and let
     GDB handle it.  Watchpoints should always be reported.  So should
     signals we can't explain.  A SIGTRAP we can't explain could be a
     GDB breakpoint --- we may or may not support Z0 breakpoints.  If
     we do, we'd be able to handle GDB breakpoints on top of internal
     breakpoints, by handling the internal breakpoint and still
     reporting the event to GDB.  If we don't, we're out of luck, GDB
     won't see the breakpoint hit.  If we see a single-step event but
     the thread should be continuing, don't pass the trap to gdb.
     That indicates that we had previously finished a single-step but
     left the single-step pending -- see
     complete_ongoing_step_over.  */
  report_to_gdb = (!maybe_internal_trap
		   || (current_thread->last_resume_kind == resume_step
		       && !in_step_range)
		   || event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
		   || (!in_step_range
		       && !bp_explains_trap
		       && !trace_event
		       && !step_over_finished
		       && !(current_thread->last_resume_kind == resume_continue
			    && event_child->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP))
		   || (gdb_breakpoint_here (event_child->stop_pc)
		       && gdb_condition_true_at_breakpoint (event_child->stop_pc)
		       && gdb_no_commands_at_breakpoint (event_child->stop_pc))
		   || event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE);

  run_breakpoint_commands (event_child->stop_pc);

  /* We found no reason GDB would want us to stop.  We either hit one
     of our own breakpoints, or finished an internal step GDB
     shouldn't know about.  */
  if (!report_to_gdb)
    {
      if (debug_threads)
	{
	  if (bp_explains_trap)
	    debug_printf ("Hit a gdbserver breakpoint.\n");
	  if (step_over_finished)
	    debug_printf ("Step-over finished.\n");
	  if (trace_event)
	    debug_printf ("Tracepoint event.\n");
	  if (lwp_in_step_range (event_child))
	    debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).\n",
			  paddress (event_child->stop_pc),
			  paddress (event_child->step_range_start),
			  paddress (event_child->step_range_end));
	}

      /* We're not reporting this breakpoint to GDB, so apply the
	 decr_pc_after_break adjustment to the inferior's regcache
	 ourselves.  */

      if (low_supports_breakpoints ())
	{
	  struct regcache *regcache
	    = get_thread_regcache (current_thread, 1);
	  low_set_pc (regcache, event_child->stop_pc);
	}

      if (step_over_finished)
	{
	  /* If we have finished stepping over a breakpoint, we've
	     stopped and suspended all LWPs momentarily except the
	     stepping one.  This is where we resume them all again.
	     We're going to keep waiting, so use proceed, which
	     handles stepping over the next breakpoint.  */
	  unsuspend_all_lwps (event_child);
	}
      else
	{
	  /* Remove the single-step breakpoints if any.
Note that there isn't a single-step breakpoint if we've
	     finished stepping over.  */
	  if (supports_software_single_step ()
	      && has_single_step_breakpoints (current_thread))
	    {
	      stop_all_lwps (0, event_child);
	      delete_single_step_breakpoints (current_thread);
	      unstop_all_lwps (0, event_child);
	    }
	}

      if (debug_threads)
	debug_printf ("proceeding all threads.\n");
      proceed_all_lwps ();

      if (debug_threads)
	debug_exit ();

      return ignore_event (ourstatus);
    }

  if (debug_threads)
    {
      if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
	{
	  std::string str
	    = target_waitstatus_to_string (&event_child->waitstatus);

	  debug_printf ("LWP %ld: extended event with waitstatus %s\n",
			lwpid_of (get_lwp_thread (event_child)), str.c_str ());
	}
      if (current_thread->last_resume_kind == resume_step)
	{
	  if (event_child->step_range_start == event_child->step_range_end)
	    debug_printf ("GDB wanted to single-step, reporting event.\n");
	  else if (!lwp_in_step_range (event_child))
	    debug_printf ("Out of step range, reporting event.\n");
	}
      if (event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
	debug_printf ("Stopped by watchpoint.\n");
      else if (gdb_breakpoint_here (event_child->stop_pc))
	debug_printf ("Stopped by GDB breakpoint.\n");
      if (debug_threads)
	debug_printf ("Hit a non-gdbserver trap event.\n");
    }

  /* Alright, we're going to report a stop.  */

  /* Remove single-step breakpoints.  */
  if (supports_software_single_step ())
    {
      /* Decide whether to remove the single-step breakpoints.  If
	 so, stop all LWPs first, so that other threads won't hit a
	 breakpoint left behind in stale memory.  */
      int remove_single_step_breakpoints_p = 0;

      if (non_stop)
	{
	  remove_single_step_breakpoints_p
	    = has_single_step_breakpoints (current_thread);
	}
      else
	{
	  /* In all-stop, a stop reply cancels all previous resume
	     requests.  Delete all single-step breakpoints.  */

	  find_thread ([&] (thread_info *thread) {
	    if (has_single_step_breakpoints (thread))
	      {
		remove_single_step_breakpoints_p = 1;
		return true;
	      }

	    return false;
	  });
	}

      if (remove_single_step_breakpoints_p)
	{
	  /* If we remove single-step breakpoints from memory, stop
	     all LWPs, so that other threads won't hit the breakpoint
	     in the stale memory.  */
	  stop_all_lwps (0, event_child);

	  if (non_stop)
	    {
	      gdb_assert (has_single_step_breakpoints (current_thread));
	      delete_single_step_breakpoints (current_thread);
	    }
	  else
	    {
	      for_each_thread ([] (thread_info *thread){
		if (has_single_step_breakpoints (thread))
		  delete_single_step_breakpoints (thread);
	      });
	    }

	  unstop_all_lwps (0, event_child);
	}
    }

  if (!stabilizing_threads)
    {
      /* In all-stop, stop all threads.  */
      if (!non_stop)
	stop_all_lwps (0, NULL);

      if (step_over_finished)
	{
	  if (!non_stop)
	    {
	      /* If we were doing a step-over, all other threads but
		 the stepping one had been paused in start_step_over,
		 with their suspend counts incremented.  We don't want
		 to do a full unstop/unpause, because we're in
		 all-stop mode (so we want threads stopped), but we
		 still need to unsuspend the other threads, to
		 decrement their `suspended' count back.
*/
	      unsuspend_all_lwps (event_child);
	    }
	  else
	    {
	      /* If we just finished a step-over, then all threads had
		 been momentarily paused.  In all-stop, that's fine,
		 we want threads stopped by now anyway.  In non-stop,
		 we need to re-resume threads that GDB wanted to be
		 running.  */
	      unstop_all_lwps (1, event_child);
	    }
	}

      /* If we're not waiting for a specific LWP, choose an event LWP
	 from among those that have had events.  Giving equal priority
	 to all LWPs that have had events helps prevent
	 starvation.  */
      if (ptid == minus_one_ptid)
	{
	  event_child->status_pending_p = 1;
	  event_child->status_pending = w;

	  select_event_lwp (&event_child);

	  /* current_thread and event_child must stay in sync.  */
	  current_thread = get_lwp_thread (event_child);

	  event_child->status_pending_p = 0;
	  w = event_child->status_pending;
	}

      /* Stabilize threads (move out of jump pads).  */
      if (!non_stop)
	target_stabilize_threads ();
    }
  else
    {
      /* If we just finished a step-over, then all threads had been
	 momentarily paused.  In all-stop, that's fine, we want
	 threads stopped by now anyway.  In non-stop, we need to
	 re-resume threads that GDB wanted to be running.  */
      if (step_over_finished)
	unstop_all_lwps (1, event_child);
    }

  if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
    {
      /* If the reported event is an exit, fork, vfork or exec, let
	 GDB know.  */

      /* Break the unreported fork relationship chain.  */
      if (event_child->waitstatus.kind == TARGET_WAITKIND_FORKED
	  || event_child->waitstatus.kind == TARGET_WAITKIND_VFORKED)
	{
	  event_child->fork_relative->fork_relative = NULL;
	  event_child->fork_relative = NULL;
	}

      *ourstatus = event_child->waitstatus;
      /* Clear the event lwp's waitstatus since we handled it already.  */
      event_child->waitstatus.kind = TARGET_WAITKIND_IGNORE;
    }
  else
    ourstatus->kind = TARGET_WAITKIND_STOPPED;

  /* Now that we've selected our final event LWP, un-adjust its PC if
     it was a software breakpoint, and the client doesn't know we can
     adjust the breakpoint ourselves.  */
  if (event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
      && !cs.swbreak_feature)
    {
      int decr_pc = low_decr_pc_after_break ();

      if (decr_pc != 0)
	{
	  struct regcache *regcache
	    = get_thread_regcache (current_thread, 1);
	  low_set_pc (regcache, event_child->stop_pc + decr_pc);
	}
    }

  if (WSTOPSIG (w) == SYSCALL_SIGTRAP)
    {
      get_syscall_trapinfo (event_child,
			    &ourstatus->value.syscall_number);
      ourstatus->kind = event_child->syscall_state;
    }
  else if (current_thread->last_resume_kind == resume_stop
	   && WSTOPSIG (w) == SIGSTOP)
    {
      /* A thread that has been requested to stop by GDB with vCont;t,
	 and it stopped cleanly, so report it as SIG0.  The use of
	 SIGSTOP is an implementation detail.  */
      ourstatus->value.sig = GDB_SIGNAL_0;
    }
  else if (current_thread->last_resume_kind == resume_stop
	   && WSTOPSIG (w) != SIGSTOP)
    {
      /* A thread that has been requested to stop by GDB with vCont;t,
	 but it stopped for other reasons.
*/ 3659 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w)); 3660 } 3661 else if (ourstatus->kind == TARGET_WAITKIND_STOPPED) 3662 { 3663 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w)); 3664 } 3665 3666 gdb_assert (step_over_bkpt == null_ptid); 3667 3668 if (debug_threads) 3669 { 3670 debug_printf ("wait_1 ret = %s, %d, %d\n", 3671 target_pid_to_str (ptid_of (current_thread)), 3672 ourstatus->kind, ourstatus->value.sig); 3673 debug_exit (); 3674 } 3675 3676 if (ourstatus->kind == TARGET_WAITKIND_EXITED) 3677 return filter_exit_event (event_child, ourstatus); 3678 3679 return ptid_of (current_thread); 3680 } 3681 3682 /* Get rid of any pending event in the pipe. */ 3683 static void 3684 async_file_flush (void) 3685 { 3686 int ret; 3687 char buf; 3688 3689 do 3690 ret = read (linux_event_pipe[0], &buf, 1); 3691 while (ret >= 0 || (ret == -1 && errno == EINTR)); 3692 } 3693 3694 /* Put something in the pipe, so the event loop wakes up. */ 3695 static void 3696 async_file_mark (void) 3697 { 3698 int ret; 3699 3700 async_file_flush (); 3701 3702 do 3703 ret = write (linux_event_pipe[1], "+", 1); 3704 while (ret == 0 || (ret == -1 && errno == EINTR)); 3705 3706 /* Ignore EAGAIN. If the pipe is full, the event loop will already 3707 be awakened anyway. */ 3708 } 3709 3710 ptid_t 3711 linux_process_target::wait (ptid_t ptid, 3712 target_waitstatus *ourstatus, 3713 int target_options) 3714 { 3715 ptid_t event_ptid; 3716 3717 /* Flush the async file first. */ 3718 if (target_is_async_p ()) 3719 async_file_flush (); 3720 3721 do 3722 { 3723 event_ptid = wait_1 (ptid, ourstatus, target_options); 3724 } 3725 while ((target_options & TARGET_WNOHANG) == 0 3726 && event_ptid == null_ptid 3727 && ourstatus->kind == TARGET_WAITKIND_IGNORE); 3728 3729 /* If at least one stop was reported, there may be more. A single 3730 SIGCHLD can signal more than one child stop. */ 3731 if (target_is_async_p () 3732 && (target_options & TARGET_WNOHANG) != 0 3733 && event_ptid != null_ptid) 3734 async_file_mark (); 3735 3736 return event_ptid; 3737 } 3738 3739 /* Send a signal to an LWP. */ 3740 3741 static int 3742 kill_lwp (unsigned long lwpid, int signo) 3743 { 3744 int ret; 3745 3746 errno = 0; 3747 ret = syscall (__NR_tkill, lwpid, signo); 3748 if (errno == ENOSYS) 3749 { 3750 /* If tkill fails, then we are not using nptl threads, a 3751 configuration we no longer support. */ 3752 perror_with_name (("tkill")); 3753 } 3754 return ret; 3755 } 3756 3757 void 3758 linux_stop_lwp (struct lwp_info *lwp) 3759 { 3760 send_sigstop (lwp); 3761 } 3762 3763 static void 3764 send_sigstop (struct lwp_info *lwp) 3765 { 3766 int pid; 3767 3768 pid = lwpid_of (get_lwp_thread (lwp)); 3769 3770 /* If we already have a pending stop signal for this process, don't 3771 send another. */ 3772 if (lwp->stop_expected) 3773 { 3774 if (debug_threads) 3775 debug_printf ("Have pending sigstop for lwp %d\n", pid); 3776 3777 return; 3778 } 3779 3780 if (debug_threads) 3781 debug_printf ("Sending sigstop to lwp %d\n", pid); 3782 3783 lwp->stop_expected = 1; 3784 kill_lwp (pid, SIGSTOP); 3785 } 3786 3787 static void 3788 send_sigstop (thread_info *thread, lwp_info *except) 3789 { 3790 struct lwp_info *lwp = get_thread_lwp (thread); 3791 3792 /* Ignore EXCEPT. */ 3793 if (lwp == except) 3794 return; 3795 3796 if (lwp->stopped) 3797 return; 3798 3799 send_sigstop (lwp); 3800 } 3801 3802 /* Increment the suspend count of an LWP, and stop it, if not stopped 3803 yet. 
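
   The stop request itself goes through kill_lwp above, i.e. a
   thread-directed tkill(2) rather than a process-wide kill(2),
   roughly:

     syscall (__NR_tkill, lwpid, SIGSTOP);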
*/ 3804 static void 3805 suspend_and_send_sigstop (thread_info *thread, lwp_info *except) 3806 { 3807 struct lwp_info *lwp = get_thread_lwp (thread); 3808 3809 /* Ignore EXCEPT. */ 3810 if (lwp == except) 3811 return; 3812 3813 lwp_suspended_inc (lwp); 3814 3815 send_sigstop (thread, except); 3816 } 3817 3818 static void 3819 mark_lwp_dead (struct lwp_info *lwp, int wstat) 3820 { 3821 /* Store the exit status for later. */ 3822 lwp->status_pending_p = 1; 3823 lwp->status_pending = wstat; 3824 3825 /* Store in waitstatus as well, as there's nothing else to process 3826 for this event. */ 3827 if (WIFEXITED (wstat)) 3828 { 3829 lwp->waitstatus.kind = TARGET_WAITKIND_EXITED; 3830 lwp->waitstatus.value.integer = WEXITSTATUS (wstat); 3831 } 3832 else if (WIFSIGNALED (wstat)) 3833 { 3834 lwp->waitstatus.kind = TARGET_WAITKIND_SIGNALLED; 3835 lwp->waitstatus.value.sig = gdb_signal_from_host (WTERMSIG (wstat)); 3836 } 3837 3838 /* Prevent trying to stop it. */ 3839 lwp->stopped = 1; 3840 3841 /* No further stops are expected from a dead lwp. */ 3842 lwp->stop_expected = 0; 3843 } 3844 3845 /* Return true if LWP has exited already, and has a pending exit event 3846 to report to GDB. */ 3847 3848 static int 3849 lwp_is_marked_dead (struct lwp_info *lwp) 3850 { 3851 return (lwp->status_pending_p 3852 && (WIFEXITED (lwp->status_pending) 3853 || WIFSIGNALED (lwp->status_pending))); 3854 } 3855 3856 void 3857 linux_process_target::wait_for_sigstop () 3858 { 3859 struct thread_info *saved_thread; 3860 ptid_t saved_tid; 3861 int wstat; 3862 int ret; 3863 3864 saved_thread = current_thread; 3865 if (saved_thread != NULL) 3866 saved_tid = saved_thread->id; 3867 else 3868 saved_tid = null_ptid; /* avoid bogus unused warning */ 3869 3870 if (debug_threads) 3871 debug_printf ("wait_for_sigstop: pulling events\n"); 3872 3873 /* Passing NULL_PTID as filter indicates we want all events to be 3874 left pending. Eventually this returns when there are no 3875 unwaited-for children left. */ 3876 ret = wait_for_event_filtered (minus_one_ptid, null_ptid, &wstat, __WALL); 3877 gdb_assert (ret == -1); 3878 3879 if (saved_thread == NULL || mythread_alive (saved_tid)) 3880 current_thread = saved_thread; 3881 else 3882 { 3883 if (debug_threads) 3884 debug_printf ("Previously current thread died.\n"); 3885 3886 /* We can't change the current inferior behind GDB's back, 3887 otherwise, a subsequent command may apply to the wrong 3888 process. */ 3889 current_thread = NULL; 3890 } 3891 } 3892 3893 bool 3894 linux_process_target::stuck_in_jump_pad (thread_info *thread) 3895 { 3896 struct lwp_info *lwp = get_thread_lwp (thread); 3897 3898 if (lwp->suspended != 0) 3899 { 3900 internal_error (__FILE__, __LINE__, 3901 "LWP %ld is suspended, suspended=%d\n", 3902 lwpid_of (thread), lwp->suspended); 3903 } 3904 gdb_assert (lwp->stopped); 3905 3906 /* Allow debugging the jump pad, gdb_collect, etc.. 
*/ 3907 return (supports_fast_tracepoints () 3908 && agent_loaded_p () 3909 && (gdb_breakpoint_here (lwp->stop_pc) 3910 || lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT 3911 || thread->last_resume_kind == resume_step) 3912 && (linux_fast_tracepoint_collecting (lwp, NULL) 3913 != fast_tpoint_collect_result::not_collecting)); 3914 } 3915 3916 void 3917 linux_process_target::move_out_of_jump_pad (thread_info *thread) 3918 { 3919 struct thread_info *saved_thread; 3920 struct lwp_info *lwp = get_thread_lwp (thread); 3921 int *wstat; 3922 3923 if (lwp->suspended != 0) 3924 { 3925 internal_error (__FILE__, __LINE__, 3926 "LWP %ld is suspended, suspended=%d\n", 3927 lwpid_of (thread), lwp->suspended); 3928 } 3929 gdb_assert (lwp->stopped); 3930 3931 /* For gdb_breakpoint_here. */ 3932 saved_thread = current_thread; 3933 current_thread = thread; 3934 3935 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL; 3936 3937 /* Allow debugging the jump pad, gdb_collect, etc. */ 3938 if (!gdb_breakpoint_here (lwp->stop_pc) 3939 && lwp->stop_reason != TARGET_STOPPED_BY_WATCHPOINT 3940 && thread->last_resume_kind != resume_step 3941 && maybe_move_out_of_jump_pad (lwp, wstat)) 3942 { 3943 if (debug_threads) 3944 debug_printf ("LWP %ld needs stabilizing (in jump pad)\n", 3945 lwpid_of (thread)); 3946 3947 if (wstat) 3948 { 3949 lwp->status_pending_p = 0; 3950 enqueue_one_deferred_signal (lwp, wstat); 3951 3952 if (debug_threads) 3953 debug_printf ("Signal %d for LWP %ld deferred " 3954 "(in jump pad)\n", 3955 WSTOPSIG (*wstat), lwpid_of (thread)); 3956 } 3957 3958 resume_one_lwp (lwp, 0, 0, NULL); 3959 } 3960 else 3961 lwp_suspended_inc (lwp); 3962 3963 current_thread = saved_thread; 3964 } 3965 3966 static bool 3967 lwp_running (thread_info *thread) 3968 { 3969 struct lwp_info *lwp = get_thread_lwp (thread); 3970 3971 if (lwp_is_marked_dead (lwp)) 3972 return false; 3973 3974 return !lwp->stopped; 3975 } 3976 3977 void 3978 linux_process_target::stop_all_lwps (int suspend, lwp_info *except) 3979 { 3980 /* Should not be called recursively. */ 3981 gdb_assert (stopping_threads == NOT_STOPPING_THREADS); 3982 3983 if (debug_threads) 3984 { 3985 debug_enter (); 3986 debug_printf ("stop_all_lwps (%s, except=%s)\n", 3987 suspend ? "stop-and-suspend" : "stop", 3988 except != NULL 3989 ? target_pid_to_str (ptid_of (get_lwp_thread (except))) 3990 : "none"); 3991 } 3992 3993 stopping_threads = (suspend 3994 ? STOPPING_AND_SUSPENDING_THREADS 3995 : STOPPING_THREADS); 3996 3997 if (suspend) 3998 for_each_thread ([&] (thread_info *thread) 3999 { 4000 suspend_and_send_sigstop (thread, except); 4001 }); 4002 else 4003 for_each_thread ([&] (thread_info *thread) 4004 { 4005 send_sigstop (thread, except); 4006 }); 4007 4008 wait_for_sigstop (); 4009 stopping_threads = NOT_STOPPING_THREADS; 4010 4011 if (debug_threads) 4012 { 4013 debug_printf ("stop_all_lwps done, setting stopping_threads " 4014 "back to !stopping\n"); 4015 debug_exit (); 4016 } 4017 } 4018 4019 /* Enqueue one signal in the chain of signals which need to be 4020 delivered to this process on next resume. 
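If INFO is NULL the stored siginfo is zeroed; otherwise a copy is kept so that resume_one_lwp_throw can hand it back to the kernel with PTRACE_SETSIGINFO just before the signal is delivered.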
*/ 4021 4022 static void 4023 enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info) 4024 { 4025 lwp->pending_signals.emplace_back (signal); 4026 if (info == nullptr) 4027 memset (&lwp->pending_signals.back ().info, 0, sizeof (siginfo_t)); 4028 else 4029 lwp->pending_signals.back ().info = *info; 4030 } 4031 4032 void 4033 linux_process_target::install_software_single_step_breakpoints (lwp_info *lwp) 4034 { 4035 struct thread_info *thread = get_lwp_thread (lwp); 4036 struct regcache *regcache = get_thread_regcache (thread, 1); 4037 4038 scoped_restore save_current_thread = make_scoped_restore (&current_thread); 4039 4040 current_thread = thread; 4041 std::vector<CORE_ADDR> next_pcs = low_get_next_pcs (regcache); 4042 4043 for (CORE_ADDR pc : next_pcs) 4044 set_single_step_breakpoint (pc, current_ptid); 4045 } 4046 4047 int 4048 linux_process_target::single_step (lwp_info* lwp) 4049 { 4050 int step = 0; 4051 4052 if (supports_hardware_single_step ()) 4053 { 4054 step = 1; 4055 } 4056 else if (supports_software_single_step ()) 4057 { 4058 install_software_single_step_breakpoints (lwp); 4059 step = 0; 4060 } 4061 else 4062 { 4063 if (debug_threads) 4064 debug_printf ("stepping is not implemented on this target"); 4065 } 4066 4067 return step; 4068 } 4069 4070 /* The signal can be delivered to the inferior if we are not trying to 4071 finish a fast tracepoint collect. Since a signal can be delivered during 4072 the step-over, the program may enter the signal handler and trap again 4073 after returning from it. We can live with the spurious 4074 double traps. */ 4075 4076 static int 4077 lwp_signal_can_be_delivered (struct lwp_info *lwp) 4078 { 4079 return (lwp->collecting_fast_tracepoint 4080 == fast_tpoint_collect_result::not_collecting); 4081 } 4082 4083 void 4084 linux_process_target::resume_one_lwp_throw (lwp_info *lwp, int step, 4085 int signal, siginfo_t *info) 4086 { 4087 struct thread_info *thread = get_lwp_thread (lwp); 4088 struct thread_info *saved_thread; 4089 int ptrace_request; 4090 struct process_info *proc = get_thread_process (thread); 4091 4092 /* Note that the target description may not be initialised 4093 (proc->tdesc == NULL) at this point because the program hasn't 4094 stopped at the first instruction yet. It means GDBserver skips 4095 the extra traps from the wrapper program (see option --wrapper). 4096 Code in this function that requires register access should be 4097 guarded by a check such as proc->tdesc != NULL. */ 4098 4099 if (lwp->stopped == 0) 4100 return; 4101 4102 gdb_assert (lwp->waitstatus.kind == TARGET_WAITKIND_IGNORE); 4103 4104 fast_tpoint_collect_result fast_tp_collecting 4105 = lwp->collecting_fast_tracepoint; 4106 4107 gdb_assert (!stabilizing_threads 4108 || (fast_tp_collecting 4109 != fast_tpoint_collect_result::not_collecting)); 4110 4111 /* Cancel actions that rely on GDB not changing the PC (e.g., the 4112 user used the "jump" command, or "set $pc = foo"). */ 4113 if (thread->while_stepping != NULL && lwp->stop_pc != get_pc (lwp)) 4114 { 4115 /* Collecting 'while-stepping' actions doesn't make sense 4116 anymore. */ 4117 release_while_stepping_state_list (thread); 4118 } 4119 4120 /* If we have pending signals or status, and a new signal, enqueue the 4121 signal. Also enqueue the signal if it can't be delivered to the 4122 inferior right now.
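Queued signals are later consumed in FIFO order, at most one per actual resume, further down in this function.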
*/ 4123 if (signal != 0 4124 && (lwp->status_pending_p 4125 || !lwp->pending_signals.empty () 4126 || !lwp_signal_can_be_delivered (lwp))) 4127 { 4128 enqueue_pending_signal (lwp, signal, info); 4129 4130 /* Postpone any pending signal. It was enqueued above. */ 4131 signal = 0; 4132 } 4133 4134 if (lwp->status_pending_p) 4135 { 4136 if (debug_threads) 4137 debug_printf ("Not resuming lwp %ld (%s, stop %s);" 4138 " has pending status\n", 4139 lwpid_of (thread), step ? "step" : "continue", 4140 lwp->stop_expected ? "expected" : "not expected"); 4141 return; 4142 } 4143 4144 saved_thread = current_thread; 4145 current_thread = thread; 4146 4147 /* This bit needs some thinking about. If we get a signal that 4148 we must report while a single-step reinsert is still pending, 4149 we often end up resuming the thread. It might be better to 4150 (ew) allow a stack of pending events; then we could be sure that 4151 the reinsert happened right away and not lose any signals. 4152 4153 Making this stack would also shrink the window in which breakpoints are 4154 uninserted (see comment in linux_wait_for_lwp) but not enough for 4155 complete correctness, so it won't solve that problem. It may be 4156 worthwhile just to solve this one, however. */ 4157 if (lwp->bp_reinsert != 0) 4158 { 4159 if (debug_threads) 4160 debug_printf (" pending reinsert at 0x%s\n", 4161 paddress (lwp->bp_reinsert)); 4162 4163 if (supports_hardware_single_step ()) 4164 { 4165 if (fast_tp_collecting == fast_tpoint_collect_result::not_collecting) 4166 { 4167 if (step == 0) 4168 warning ("BAD - reinserting but not stepping."); 4169 if (lwp->suspended) 4170 warning ("BAD - reinserting and suspended(%d).", 4171 lwp->suspended); 4172 } 4173 } 4174 4175 step = maybe_hw_step (thread); 4176 } 4177 4178 if (fast_tp_collecting == fast_tpoint_collect_result::before_insn) 4179 { 4180 if (debug_threads) 4181 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad" 4182 " (exit-jump-pad-bkpt)\n", 4183 lwpid_of (thread)); 4184 } 4185 else if (fast_tp_collecting == fast_tpoint_collect_result::at_insn) 4186 { 4187 if (debug_threads) 4188 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad" 4189 " single-stepping\n", 4190 lwpid_of (thread)); 4191 4192 if (supports_hardware_single_step ()) 4193 step = 1; 4194 else 4195 { 4196 internal_error (__FILE__, __LINE__, 4197 "moving out of jump pad single-stepping" 4198 " not implemented on this target"); 4199 } 4200 } 4201 4202 /* If we have while-stepping actions in this thread set it stepping. 4203 If we have a signal to deliver, it may or may not be set to 4204 SIG_IGN, we don't know. Assume so, and allow collecting 4205 while-stepping into a signal handler. A possible smart thing to 4206 do would be to set an internal breakpoint at the signal return 4207 address, continue, and carry on catching this while-stepping 4208 action only when that breakpoint is hit. A future 4209 enhancement. */ 4210 if (thread->while_stepping != NULL) 4211 { 4212 if (debug_threads) 4213 debug_printf ("lwp %ld has a while-stepping action -> forcing step.\n", 4214 lwpid_of (thread)); 4215 4216 step = single_step (lwp); 4217 } 4218 4219 if (proc->tdesc != NULL && low_supports_breakpoints ()) 4220 { 4221 struct regcache *regcache = get_thread_regcache (current_thread, 1); 4222 4223 lwp->stop_pc = low_get_pc (regcache); 4224 4225 if (debug_threads) 4226 { 4227 debug_printf (" %s from pc 0x%lx\n", step ? 
"step" : "continue", 4228 (long) lwp->stop_pc); 4229 } 4230 } 4231 4232 /* If we have pending signals, consume one if it can be delivered to 4233 the inferior. */ 4234 if (!lwp->pending_signals.empty () && lwp_signal_can_be_delivered (lwp)) 4235 { 4236 const pending_signal &p_sig = lwp->pending_signals.front (); 4237 4238 signal = p_sig.signal; 4239 if (p_sig.info.si_signo != 0) 4240 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0, 4241 &p_sig.info); 4242 4243 lwp->pending_signals.pop_front (); 4244 } 4245 4246 if (debug_threads) 4247 debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)\n", 4248 lwpid_of (thread), step ? "step" : "continue", signal, 4249 lwp->stop_expected ? "expected" : "not expected"); 4250 4251 low_prepare_to_resume (lwp); 4252 4253 regcache_invalidate_thread (thread); 4254 errno = 0; 4255 lwp->stepping = step; 4256 if (step) 4257 ptrace_request = PTRACE_SINGLESTEP; 4258 else if (gdb_catching_syscalls_p (lwp)) 4259 ptrace_request = PTRACE_SYSCALL; 4260 else 4261 ptrace_request = PTRACE_CONT; 4262 ptrace (ptrace_request, 4263 lwpid_of (thread), 4264 (PTRACE_TYPE_ARG3) 0, 4265 /* Coerce to a uintptr_t first to avoid potential gcc warning 4266 of coercing an 8 byte integer to a 4 byte pointer. */ 4267 (PTRACE_TYPE_ARG4) (uintptr_t) signal); 4268 4269 current_thread = saved_thread; 4270 if (errno) 4271 perror_with_name ("resuming thread"); 4272 4273 /* Successfully resumed. Clear state that no longer makes sense, 4274 and mark the LWP as running. Must not do this before resuming 4275 otherwise if that fails other code will be confused. E.g., we'd 4276 later try to stop the LWP and hang forever waiting for a stop 4277 status. Note that we must not throw after this is cleared, 4278 otherwise handle_zombie_lwp_error would get confused. */ 4279 lwp->stopped = 0; 4280 lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON; 4281 } 4282 4283 void 4284 linux_process_target::low_prepare_to_resume (lwp_info *lwp) 4285 { 4286 /* Nop. */ 4287 } 4288 4289 /* Called when we try to resume a stopped LWP and that errors out. If 4290 the LWP is no longer in ptrace-stopped state (meaning it's zombie, 4291 or about to become), discard the error, clear any pending status 4292 the LWP may have, and return true (we'll collect the exit status 4293 soon enough). Otherwise, return false. */ 4294 4295 static int 4296 check_ptrace_stopped_lwp_gone (struct lwp_info *lp) 4297 { 4298 struct thread_info *thread = get_lwp_thread (lp); 4299 4300 /* If we get an error after resuming the LWP successfully, we'd 4301 confuse !T state for the LWP being gone. */ 4302 gdb_assert (lp->stopped); 4303 4304 /* We can't just check whether the LWP is in 'Z (Zombie)' state, 4305 because even if ptrace failed with ESRCH, the tracee may be "not 4306 yet fully dead", but already refusing ptrace requests. In that 4307 case the tracee has 'R (Running)' state for a little bit 4308 (observed in Linux 3.18). See also the note on ESRCH in the 4309 ptrace(2) man page. Instead, check whether the LWP has any state 4310 other than ptrace-stopped. */ 4311 4312 /* Don't assume anything if /proc/PID/status can't be read. 
*/ 4313 if (linux_proc_pid_is_trace_stopped_nowarn (lwpid_of (thread)) == 0) 4314 { 4315 lp->stop_reason = TARGET_STOPPED_BY_NO_REASON; 4316 lp->status_pending_p = 0; 4317 return 1; 4318 } 4319 return 0; 4320 } 4321 4322 void 4323 linux_process_target::resume_one_lwp (lwp_info *lwp, int step, int signal, 4324 siginfo_t *info) 4325 { 4326 try 4327 { 4328 resume_one_lwp_throw (lwp, step, signal, info); 4329 } 4330 catch (const gdb_exception_error &ex) 4331 { 4332 if (!check_ptrace_stopped_lwp_gone (lwp)) 4333 throw; 4334 } 4335 } 4336 4337 /* This function is called once per thread via for_each_thread. 4338 We look up which resume request applies to THREAD and mark it with a 4339 pointer to the appropriate resume request. 4340 4341 This algorithm is O(threads * resume elements), but resume elements 4342 is small (and will remain small at least until GDB supports thread 4343 suspension). */ 4344 4345 static void 4346 linux_set_resume_request (thread_info *thread, thread_resume *resume, size_t n) 4347 { 4348 struct lwp_info *lwp = get_thread_lwp (thread); 4349 4350 for (int ndx = 0; ndx < n; ndx++) 4351 { 4352 ptid_t ptid = resume[ndx].thread; 4353 if (ptid == minus_one_ptid 4354 || ptid == thread->id 4355 /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads 4356 of PID'. */ 4357 || (ptid.pid () == pid_of (thread) 4358 && (ptid.is_pid () 4359 || ptid.lwp () == -1))) 4360 { 4361 if (resume[ndx].kind == resume_stop 4362 && thread->last_resume_kind == resume_stop) 4363 { 4364 if (debug_threads) 4365 debug_printf ("already %s LWP %ld at GDB's request\n", 4366 (thread->last_status.kind 4367 == TARGET_WAITKIND_STOPPED) 4368 ? "stopped" 4369 : "stopping", 4370 lwpid_of (thread)); 4371 4372 continue; 4373 } 4374 4375 /* Ignore (wildcard) resume requests for already-resumed 4376 threads. */ 4377 if (resume[ndx].kind != resume_stop 4378 && thread->last_resume_kind != resume_stop) 4379 { 4380 if (debug_threads) 4381 debug_printf ("already %s LWP %ld at GDB's request\n", 4382 (thread->last_resume_kind 4383 == resume_step) 4384 ? "stepping" 4385 : "continuing", 4386 lwpid_of (thread)); 4387 continue; 4388 } 4389 4390 /* Don't let wildcard resumes resume fork children that GDB 4391 does not yet know are new fork children. */ 4392 if (lwp->fork_relative != NULL) 4393 { 4394 struct lwp_info *rel = lwp->fork_relative; 4395 4396 if (rel->status_pending_p 4397 && (rel->waitstatus.kind == TARGET_WAITKIND_FORKED 4398 || rel->waitstatus.kind == TARGET_WAITKIND_VFORKED)) 4399 { 4400 if (debug_threads) 4401 debug_printf ("not resuming LWP %ld: has queued stop reply\n", 4402 lwpid_of (thread)); 4403 continue; 4404 } 4405 } 4406 4407 /* If the thread has a pending event that has already been 4408 reported to GDBserver core, but GDB has not pulled the 4409 event out of the vStopped queue yet, likewise, ignore the 4410 (wildcard) resume request. */ 4411 if (in_queued_stop_replies (thread->id)) 4412 { 4413 if (debug_threads) 4414 debug_printf ("not resuming LWP %ld: has queued stop reply\n", 4415 lwpid_of (thread)); 4416 continue; 4417 } 4418 4419 lwp->resume = &resume[ndx]; 4420 thread->last_resume_kind = lwp->resume->kind; 4421 4422 lwp->step_range_start = lwp->resume->step_range_start; 4423 lwp->step_range_end = lwp->resume->step_range_end; 4424 4425 /* If we had a deferred signal to report, dequeue one now. 4426 This can happen if LWP gets more than one signal while 4427 trying to get out of a jump pad. 
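Only one deferred signal is dequeued here; any remaining ones stay queued and surface in later stops, one at a time.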
*/ 4428 if (lwp->stopped 4429 && !lwp->status_pending_p 4430 && dequeue_one_deferred_signal (lwp, &lwp->status_pending)) 4431 { 4432 lwp->status_pending_p = 1; 4433 4434 if (debug_threads) 4435 debug_printf ("Dequeueing deferred signal %d for LWP %ld, " 4436 "leaving status pending.\n", 4437 WSTOPSIG (lwp->status_pending), 4438 lwpid_of (thread)); 4439 } 4440 4441 return; 4442 } 4443 } 4444 4445 /* No resume action for this thread. */ 4446 lwp->resume = NULL; 4447 } 4448 4449 bool 4450 linux_process_target::resume_status_pending (thread_info *thread) 4451 { 4452 struct lwp_info *lwp = get_thread_lwp (thread); 4453 4454 /* LWPs which will not be resumed are not interesting, because 4455 we might not wait for them next time through linux_wait. */ 4456 if (lwp->resume == NULL) 4457 return false; 4458 4459 return thread_still_has_status_pending (thread); 4460 } 4461 4462 bool 4463 linux_process_target::thread_needs_step_over (thread_info *thread) 4464 { 4465 struct lwp_info *lwp = get_thread_lwp (thread); 4466 struct thread_info *saved_thread; 4467 CORE_ADDR pc; 4468 struct process_info *proc = get_thread_process (thread); 4469 4470 /* GDBserver is skipping the extra traps from the wrapper program, 4471 so we don't have to do a step-over. */ 4472 if (proc->tdesc == NULL) 4473 return false; 4474 4475 /* LWPs which will not be resumed are not interesting, because we 4476 might not wait for them next time through linux_wait. */ 4477 4478 if (!lwp->stopped) 4479 { 4480 if (debug_threads) 4481 debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped\n", 4482 lwpid_of (thread)); 4483 return false; 4484 } 4485 4486 if (thread->last_resume_kind == resume_stop) 4487 { 4488 if (debug_threads) 4489 debug_printf ("Need step over [LWP %ld]? Ignoring, should remain" 4490 " stopped\n", 4491 lwpid_of (thread)); 4492 return false; 4493 } 4494 4495 gdb_assert (lwp->suspended >= 0); 4496 4497 if (lwp->suspended) 4498 { 4499 if (debug_threads) 4500 debug_printf ("Need step over [LWP %ld]? Ignoring, suspended\n", 4501 lwpid_of (thread)); 4502 return false; 4503 } 4504 4505 if (lwp->status_pending_p) 4506 { 4507 if (debug_threads) 4508 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending" 4509 " status.\n", 4510 lwpid_of (thread)); 4511 return false; 4512 } 4513 4514 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already, 4515 or we have. */ 4516 pc = get_pc (lwp); 4517 4518 /* If the PC has changed since we stopped, then don't do anything, 4519 and let the breakpoint/tracepoint be hit. This happens if, for 4520 instance, GDB handled the decr_pc_after_break subtraction itself, 4521 GDB is OOL stepping this thread, or the user has issued a "jump" 4522 command, or poked the thread's registers themselves. */ 4523 if (pc != lwp->stop_pc) 4524 { 4525 if (debug_threads) 4526 debug_printf ("Need step over [LWP %ld]? Cancelling, PC was changed. " 4527 "Old stop_pc was 0x%s, PC is now 0x%s\n", 4528 lwpid_of (thread), 4529 paddress (lwp->stop_pc), paddress (pc)); 4530 return false; 4531 } 4532 4533 /* On software single-step targets, resume the inferior with a signal 4534 rather than stepping over. */ 4535 if (supports_software_single_step () 4536 && !lwp->pending_signals.empty () 4537 && lwp_signal_can_be_delivered (lwp)) 4538 { 4539 if (debug_threads) 4540 debug_printf ("Need step over [LWP %ld]?
Ignoring, has pending" 4541 " signals.\n", 4542 lwpid_of (thread)); 4543 4544 return false; 4545 } 4546 4547 saved_thread = current_thread; 4548 current_thread = thread; 4549 4550 /* We can only step over breakpoints we know about. */ 4551 if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc)) 4552 { 4553 /* Don't step over a breakpoint that GDB expects to hit 4554 though. If the condition is being evaluated on the target's side 4555 and it evaluates to false, step over this breakpoint as well. */ 4556 if (gdb_breakpoint_here (pc) 4557 && gdb_condition_true_at_breakpoint (pc) 4558 && gdb_no_commands_at_breakpoint (pc)) 4559 { 4560 if (debug_threads) 4561 debug_printf ("Need step over [LWP %ld]? yes, but found" 4562 " GDB breakpoint at 0x%s; skipping step over\n", 4563 lwpid_of (thread), paddress (pc)); 4564 4565 current_thread = saved_thread; 4566 return false; 4567 } 4568 else 4569 { 4570 if (debug_threads) 4571 debug_printf ("Need step over [LWP %ld]? yes, " 4572 "found breakpoint at 0x%s\n", 4573 lwpid_of (thread), paddress (pc)); 4574 4575 /* We've found an lwp that needs stepping over --- return true so 4576 that find_thread stops looking. */ 4577 current_thread = saved_thread; 4578 4579 return true; 4580 } 4581 } 4582 4583 current_thread = saved_thread; 4584 4585 if (debug_threads) 4586 debug_printf ("Need step over [LWP %ld]? No, no breakpoint found" 4587 " at 0x%s\n", 4588 lwpid_of (thread), paddress (pc)); 4589 4590 return false; 4591 } 4592 4593 void 4594 linux_process_target::start_step_over (lwp_info *lwp) 4595 { 4596 struct thread_info *thread = get_lwp_thread (lwp); 4597 struct thread_info *saved_thread; 4598 CORE_ADDR pc; 4599 int step; 4600 4601 if (debug_threads) 4602 debug_printf ("Starting step-over on LWP %ld. Stopping all threads\n", 4603 lwpid_of (thread)); 4604 4605 stop_all_lwps (1, lwp); 4606 4607 if (lwp->suspended != 0) 4608 { 4609 internal_error (__FILE__, __LINE__, 4610 "LWP %ld suspended=%d\n", lwpid_of (thread), 4611 lwp->suspended); 4612 } 4613 4614 if (debug_threads) 4615 debug_printf ("Done stopping all threads for step-over.\n"); 4616 4617 /* Note, we should always reach here with an already adjusted PC, 4618 either by GDB (if we're resuming due to GDB's request), or by our 4619 caller, if we just finished handling an internal breakpoint GDB 4620 shouldn't care about. */ 4621 pc = get_pc (lwp); 4622 4623 saved_thread = current_thread; 4624 current_thread = thread; 4625 4626 lwp->bp_reinsert = pc; 4627 uninsert_breakpoints_at (pc); 4628 uninsert_fast_tracepoint_jumps_at (pc); 4629 4630 step = single_step (lwp); 4631 4632 current_thread = saved_thread; 4633 4634 resume_one_lwp (lwp, step, 0, NULL); 4635 4636 /* Require next event from this LWP. */ 4637 step_over_bkpt = thread->id; 4638 } 4639 4640 bool 4641 linux_process_target::finish_step_over (lwp_info *lwp) 4642 { 4643 if (lwp->bp_reinsert != 0) 4644 { 4645 struct thread_info *saved_thread = current_thread; 4646 4647 if (debug_threads) 4648 debug_printf ("Finished step over.\n"); 4649 4650 current_thread = get_lwp_thread (lwp); 4651 4652 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there 4653 may be no breakpoint to reinsert there by now. */ 4654 reinsert_breakpoints_at (lwp->bp_reinsert); 4655 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert); 4656 4657 lwp->bp_reinsert = 0; 4658 4659 /* Delete any single-step breakpoints. No longer needed.
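(They only exist on targets without hardware single-step.)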
We 4660 don't have to worry about other threads hitting this trap, 4661 and later not being able to explain it, because we were 4662 stepping over a breakpoint, and we hold all threads but 4663 LWP stopped while doing that. */ 4664 if (!supports_hardware_single_step ()) 4665 { 4666 gdb_assert (has_single_step_breakpoints (current_thread)); 4667 delete_single_step_breakpoints (current_thread); 4668 } 4669 4670 step_over_bkpt = null_ptid; 4671 current_thread = saved_thread; 4672 return true; 4673 } 4674 else 4675 return false; 4676 } 4677 4678 void 4679 linux_process_target::complete_ongoing_step_over () 4680 { 4681 if (step_over_bkpt != null_ptid) 4682 { 4683 struct lwp_info *lwp; 4684 int wstat; 4685 int ret; 4686 4687 if (debug_threads) 4688 debug_printf ("detach: step over in progress, finish it first\n"); 4689 4690 /* Passing NULL_PTID as filter indicates we want all events to 4691 be left pending. Eventually this returns when there are no 4692 unwaited-for children left. */ 4693 ret = wait_for_event_filtered (minus_one_ptid, null_ptid, &wstat, 4694 __WALL); 4695 gdb_assert (ret == -1); 4696 4697 lwp = find_lwp_pid (step_over_bkpt); 4698 if (lwp != NULL) 4699 finish_step_over (lwp); 4700 step_over_bkpt = null_ptid; 4701 unsuspend_all_lwps (lwp); 4702 } 4703 } 4704 4705 void 4706 linux_process_target::resume_one_thread (thread_info *thread, 4707 bool leave_all_stopped) 4708 { 4709 struct lwp_info *lwp = get_thread_lwp (thread); 4710 int leave_pending; 4711 4712 if (lwp->resume == NULL) 4713 return; 4714 4715 if (lwp->resume->kind == resume_stop) 4716 { 4717 if (debug_threads) 4718 debug_printf ("resume_stop request for LWP %ld\n", lwpid_of (thread)); 4719 4720 if (!lwp->stopped) 4721 { 4722 if (debug_threads) 4723 debug_printf ("stopping LWP %ld\n", lwpid_of (thread)); 4724 4725 /* Stop the thread, and wait for the event asynchronously, 4726 through the event loop. */ 4727 send_sigstop (lwp); 4728 } 4729 else 4730 { 4731 if (debug_threads) 4732 debug_printf ("already stopped LWP %ld\n", 4733 lwpid_of (thread)); 4734 4735 /* The LWP may have been stopped in an internal event that 4736 was not meant to be notified back to GDB (e.g., gdbserver 4737 breakpoint), so we should be reporting a stop event in 4738 this case too. */ 4739 4740 /* If the thread already has a pending SIGSTOP, this is a 4741 no-op. Otherwise, something later will presumably resume 4742 the thread and this will cause it to cancel any pending 4743 operation, due to last_resume_kind == resume_stop. If 4744 the thread already has a pending status to report, we 4745 will still report it the next time we wait - see 4746 status_pending_p_callback. */ 4747 4748 /* If we already have a pending signal to report, then 4749 there's no need to queue a SIGSTOP, as this means we're 4750 midway through moving the LWP out of the jumppad, and we 4751 will report the pending signal as soon as that is 4752 finished. */ 4753 if (lwp->pending_signals_to_report.empty ()) 4754 send_sigstop (lwp); 4755 } 4756 4757 /* For stop requests, we're done. */ 4758 lwp->resume = NULL; 4759 thread->last_status.kind = TARGET_WAITKIND_IGNORE; 4760 return; 4761 } 4762 4763 /* If this thread which is about to be resumed has a pending status, 4764 then don't resume it - we can just report the pending status. 4765 Likewise if it is suspended, because e.g., another thread is 4766 stepping past a breakpoint. Make sure to queue any signals that 4767 would otherwise be sent. In all-stop mode, we make this decision 4768 based on whether *any* thread has a pending status.
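(A pending status means linux_wait can report something to GDB immediately, without resuming anything.)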
If there's a 4769 thread that needs the step-over-breakpoint dance, then don't 4770 resume any other thread but that particular one. */ 4771 leave_pending = (lwp->suspended 4772 || lwp->status_pending_p 4773 || leave_all_stopped); 4774 4775 /* If we have a new signal, enqueue the signal. */ 4776 if (lwp->resume->sig != 0) 4777 { 4778 siginfo_t info, *info_p; 4779 4780 /* If this is the same signal we were previously stopped by, 4781 make sure to queue its siginfo. */ 4782 if (WIFSTOPPED (lwp->last_status) 4783 && WSTOPSIG (lwp->last_status) == lwp->resume->sig 4784 && ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), 4785 (PTRACE_TYPE_ARG3) 0, &info) == 0) 4786 info_p = &info; 4787 else 4788 info_p = NULL; 4789 4790 enqueue_pending_signal (lwp, lwp->resume->sig, info_p); 4791 } 4792 4793 if (!leave_pending) 4794 { 4795 if (debug_threads) 4796 debug_printf ("resuming LWP %ld\n", lwpid_of (thread)); 4797 4798 proceed_one_lwp (thread, NULL); 4799 } 4800 else 4801 { 4802 if (debug_threads) 4803 debug_printf ("leaving LWP %ld stopped\n", lwpid_of (thread)); 4804 } 4805 4806 thread->last_status.kind = TARGET_WAITKIND_IGNORE; 4807 lwp->resume = NULL; 4808 } 4809 4810 void 4811 linux_process_target::resume (thread_resume *resume_info, size_t n) 4812 { 4813 struct thread_info *need_step_over = NULL; 4814 4815 if (debug_threads) 4816 { 4817 debug_enter (); 4818 debug_printf ("linux_resume:\n"); 4819 } 4820 4821 for_each_thread ([&] (thread_info *thread) 4822 { 4823 linux_set_resume_request (thread, resume_info, n); 4824 }); 4825 4826 /* If there is a thread which would otherwise be resumed, which has 4827 a pending status, then don't resume any threads - we can just 4828 report the pending status. Make sure to queue any signals that 4829 would otherwise be sent. In non-stop mode, we'll apply this 4830 logic to each thread individually. We consume all pending events 4831 before considering starting a step-over (in all-stop). */ 4832 bool any_pending = false; 4833 if (!non_stop) 4834 any_pending = find_thread ([this] (thread_info *thread) 4835 { 4836 return resume_status_pending (thread); 4837 }) != nullptr; 4838 4839 /* If there is a thread which would otherwise be resumed, which is 4840 stopped at a breakpoint that needs stepping over, then don't 4841 resume any threads - have it step over the breakpoint with all 4842 other threads stopped, then resume all threads again. Make sure 4843 to queue any signals that would otherwise be delivered or 4844 queued. */ 4845 if (!any_pending && low_supports_breakpoints ()) 4846 need_step_over = find_thread ([this] (thread_info *thread) 4847 { 4848 return thread_needs_step_over (thread); 4849 }); 4850 4851 bool leave_all_stopped = (need_step_over != NULL || any_pending); 4852 4853 if (debug_threads) 4854 { 4855 if (need_step_over != NULL) 4856 debug_printf ("Not resuming all, need step over\n"); 4857 else if (any_pending) 4858 debug_printf ("Not resuming, all-stop and found " 4859 "an LWP with pending status\n"); 4860 else 4861 debug_printf ("Resuming, no pending status or step over needed\n"); 4862 } 4863 4864 /* Even if we're leaving threads stopped, queue all signals we'd 4865 otherwise deliver.
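resume_one_thread does that queuing via enqueue_pending_signal, so nothing is lost: the signals go out whenever each LWP is eventually resumed.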
*/ 4866 for_each_thread ([&] (thread_info *thread) 4867 { 4868 resume_one_thread (thread, leave_all_stopped); 4869 }); 4870 4871 if (need_step_over) 4872 start_step_over (get_thread_lwp (need_step_over)); 4873 4874 if (debug_threads) 4875 { 4876 debug_printf ("linux_resume done\n"); 4877 debug_exit (); 4878 } 4879 4880 /* We may have events that were pending that can/should be sent to 4881 the client now. Trigger a linux_wait call. */ 4882 if (target_is_async_p ()) 4883 async_file_mark (); 4884 } 4885 4886 void 4887 linux_process_target::proceed_one_lwp (thread_info *thread, lwp_info *except) 4888 { 4889 struct lwp_info *lwp = get_thread_lwp (thread); 4890 int step; 4891 4892 if (lwp == except) 4893 return; 4894 4895 if (debug_threads) 4896 debug_printf ("proceed_one_lwp: lwp %ld\n", lwpid_of (thread)); 4897 4898 if (!lwp->stopped) 4899 { 4900 if (debug_threads) 4901 debug_printf (" LWP %ld already running\n", lwpid_of (thread)); 4902 return; 4903 } 4904 4905 if (thread->last_resume_kind == resume_stop 4906 && thread->last_status.kind != TARGET_WAITKIND_IGNORE) 4907 { 4908 if (debug_threads) 4909 debug_printf (" client wants LWP %ld to remain stopped\n", 4910 lwpid_of (thread)); 4911 return; 4912 } 4913 4914 if (lwp->status_pending_p) 4915 { 4916 if (debug_threads) 4917 debug_printf (" LWP %ld has pending status, leaving stopped\n", 4918 lwpid_of (thread)); 4919 return; 4920 } 4921 4922 gdb_assert (lwp->suspended >= 0); 4923 4924 if (lwp->suspended) 4925 { 4926 if (debug_threads) 4927 debug_printf (" LWP %ld is suspended\n", lwpid_of (thread)); 4928 return; 4929 } 4930 4931 if (thread->last_resume_kind == resume_stop 4932 && lwp->pending_signals_to_report.empty () 4933 && (lwp->collecting_fast_tracepoint 4934 == fast_tpoint_collect_result::not_collecting)) 4935 { 4936 /* We haven't reported this LWP as stopped yet (otherwise, the 4937 last_status.kind check above would catch it, and we wouldn't 4938 reach here). This LWP may have been momentarily paused by a 4939 stop_all_lwps call while handling, for example, another LWP's 4940 step-over. In that case, the pending expected SIGSTOP signal 4941 that was queued at vCont;t handling time will have already 4942 been consumed by wait_for_sigstop, and so we need to requeue 4943 another one here. Note that if the LWP already has a SIGSTOP 4944 pending, this is a no-op. */ 4945 4946 if (debug_threads) 4947 debug_printf ("Client wants LWP %ld to stop. " 4948 "Making sure it has a SIGSTOP pending\n", 4949 lwpid_of (thread)); 4950 4951 send_sigstop (lwp); 4952 } 4953 4954 if (thread->last_resume_kind == resume_step) 4955 { 4956 if (debug_threads) 4957 debug_printf (" stepping LWP %ld, client wants it stepping\n", 4958 lwpid_of (thread)); 4959 4960 /* If resume_step is requested by GDB, install single-step 4961 breakpoints when the thread is about to be actually resumed, if 4962 they aren't already in place.
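On hardware single-step targets no such breakpoints are needed; maybe_hw_step below requests a real PTRACE_SINGLESTEP instead.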
*/ 4963 if (supports_software_single_step () 4964 && !has_single_step_breakpoints (thread)) 4965 install_software_single_step_breakpoints (lwp); 4966 4967 step = maybe_hw_step (thread); 4968 } 4969 else if (lwp->bp_reinsert != 0) 4970 { 4971 if (debug_threads) 4972 debug_printf (" stepping LWP %ld, reinsert set\n", 4973 lwpid_of (thread)); 4974 4975 step = maybe_hw_step (thread); 4976 } 4977 else 4978 step = 0; 4979 4980 resume_one_lwp (lwp, step, 0, NULL); 4981 } 4982 4983 void 4984 linux_process_target::unsuspend_and_proceed_one_lwp (thread_info *thread, 4985 lwp_info *except) 4986 { 4987 struct lwp_info *lwp = get_thread_lwp (thread); 4988 4989 if (lwp == except) 4990 return; 4991 4992 lwp_suspended_decr (lwp); 4993 4994 proceed_one_lwp (thread, except); 4995 } 4996 4997 void 4998 linux_process_target::proceed_all_lwps () 4999 { 5000 struct thread_info *need_step_over; 5001 5002 /* If there is a thread which would otherwise be resumed, which is 5003 stopped at a breakpoint that needs stepping over, then don't 5004 resume any threads - have it step over the breakpoint with all 5005 other threads stopped, then resume all threads again. */ 5006 5007 if (low_supports_breakpoints ()) 5008 { 5009 need_step_over = find_thread ([this] (thread_info *thread) 5010 { 5011 return thread_needs_step_over (thread); 5012 }); 5013 5014 if (need_step_over != NULL) 5015 { 5016 if (debug_threads) 5017 debug_printf ("proceed_all_lwps: found " 5018 "thread %ld needing a step-over\n", 5019 lwpid_of (need_step_over)); 5020 5021 start_step_over (get_thread_lwp (need_step_over)); 5022 return; 5023 } 5024 } 5025 5026 if (debug_threads) 5027 debug_printf ("Proceeding, no step-over needed\n"); 5028 5029 for_each_thread ([this] (thread_info *thread) 5030 { 5031 proceed_one_lwp (thread, NULL); 5032 }); 5033 } 5034 5035 void 5036 linux_process_target::unstop_all_lwps (int unsuspend, lwp_info *except) 5037 { 5038 if (debug_threads) 5039 { 5040 debug_enter (); 5041 if (except) 5042 debug_printf ("unstopping all lwps, except=(LWP %ld)\n", 5043 lwpid_of (get_lwp_thread (except))); 5044 else 5045 debug_printf ("unstopping all lwps\n"); 5046 } 5047 5048 if (unsuspend) 5049 for_each_thread ([&] (thread_info *thread) 5050 { 5051 unsuspend_and_proceed_one_lwp (thread, except); 5052 }); 5053 else 5054 for_each_thread ([&] (thread_info *thread) 5055 { 5056 proceed_one_lwp (thread, except); 5057 }); 5058 5059 if (debug_threads) 5060 { 5061 debug_printf ("unstop_all_lwps done\n"); 5062 debug_exit (); 5063 } 5064 } 5065 5066 5067 #ifdef HAVE_LINUX_REGSETS 5068 5069 #define use_linux_regsets 1 5070 5071 /* Returns true if REGSET has been disabled. */ 5072 5073 static int 5074 regset_disabled (struct regsets_info *info, struct regset_info *regset) 5075 { 5076 return (info->disabled_regsets != NULL 5077 && info->disabled_regsets[regset - info->regsets]); 5078 } 5079 5080 /* Disable REGSET. 
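Disabling is sticky for this process mode: once a regset fails with EIO (or with EINVAL when the regset is optional), it is never retried, and we fall back to the other register transfer methods.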
*/ 5081 5082 static void 5083 disable_regset (struct regsets_info *info, struct regset_info *regset) 5084 { 5085 int dr_offset; 5086 5087 dr_offset = regset - info->regsets; 5088 if (info->disabled_regsets == NULL) 5089 info->disabled_regsets = (char *) xcalloc (1, info->num_regsets); 5090 info->disabled_regsets[dr_offset] = 1; 5091 } 5092 5093 static int 5094 regsets_fetch_inferior_registers (struct regsets_info *regsets_info, 5095 struct regcache *regcache) 5096 { 5097 struct regset_info *regset; 5098 int saw_general_regs = 0; 5099 int pid; 5100 struct iovec iov; 5101 5102 pid = lwpid_of (current_thread); 5103 for (regset = regsets_info->regsets; regset->size >= 0; regset++) 5104 { 5105 void *buf, *data; 5106 int nt_type, res; 5107 5108 if (regset->size == 0 || regset_disabled (regsets_info, regset)) 5109 continue; 5110 5111 buf = xmalloc (regset->size); 5112 5113 nt_type = regset->nt_type; 5114 if (nt_type) 5115 { 5116 iov.iov_base = buf; 5117 iov.iov_len = regset->size; 5118 data = (void *) &iov; 5119 } 5120 else 5121 data = buf; 5122 5123 #ifndef __sparc__ 5124 res = ptrace (regset->get_request, pid, 5125 (PTRACE_TYPE_ARG3) (long) nt_type, data); 5126 #else 5127 res = ptrace (regset->get_request, pid, data, nt_type); 5128 #endif 5129 if (res < 0) 5130 { 5131 if (errno == EIO 5132 || (errno == EINVAL && regset->type == OPTIONAL_REGS)) 5133 { 5134 /* If we get EIO on a regset, or an EINVAL and the regset is 5135 optional, do not try it again for this process mode. */ 5136 disable_regset (regsets_info, regset); 5137 } 5138 else if (errno == ENODATA) 5139 { 5140 /* ENODATA may be returned if the regset is currently 5141 not "active". This can happen in normal operation, 5142 so suppress the warning in this case. */ 5143 } 5144 else if (errno == ESRCH) 5145 { 5146 /* At this point, ESRCH should mean the process is 5147 already gone, in which case we simply ignore attempts 5148 to read its registers. */ 5149 } 5150 else 5151 { 5152 char s[256]; 5153 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d", 5154 pid); 5155 perror (s); 5156 } 5157 } 5158 else 5159 { 5160 if (regset->type == GENERAL_REGS) 5161 saw_general_regs = 1; 5162 regset->store_function (regcache, buf); 5163 } 5164 free (buf); 5165 } 5166 if (saw_general_regs) 5167 return 0; 5168 else 5169 return 1; 5170 } 5171 5172 static int 5173 regsets_store_inferior_registers (struct regsets_info *regsets_info, 5174 struct regcache *regcache) 5175 { 5176 struct regset_info *regset; 5177 int saw_general_regs = 0; 5178 int pid; 5179 struct iovec iov; 5180 5181 pid = lwpid_of (current_thread); 5182 for (regset = regsets_info->regsets; regset->size >= 0; regset++) 5183 { 5184 void *buf, *data; 5185 int nt_type, res; 5186 5187 if (regset->size == 0 || regset_disabled (regsets_info, regset) 5188 || regset->fill_function == NULL) 5189 continue; 5190 5191 buf = xmalloc (regset->size); 5192 5193 /* First fill the buffer with the current register set contents, 5194 in case there are any items in the kernel's regset that are 5195 not in gdbserver's regcache. */ 5196 5197 nt_type = regset->nt_type; 5198 if (nt_type) 5199 { 5200 iov.iov_base = buf; 5201 iov.iov_len = regset->size; 5202 data = (void *) &iov; 5203 } 5204 else 5205 data = buf; 5206 5207 #ifndef __sparc__ 5208 res = ptrace (regset->get_request, pid, 5209 (PTRACE_TYPE_ARG3) (long) nt_type, data); 5210 #else 5211 res = ptrace (regset->get_request, pid, data, nt_type); 5212 #endif 5213 5214 if (res == 0) 5215 { 5216 /* Then overlay our cached registers on that. 
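The store is thus a read-modify-write cycle: read the kernel's current regset contents, overwrite just the registers the regcache tracks, and push the whole buffer back, so kernel-side fields we do not know about survive unchanged.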
*/ 5217 regset->fill_function (regcache, buf); 5218 5219 /* Only now do we write the register set. */ 5220 #ifndef __sparc__ 5221 res = ptrace (regset->set_request, pid, 5222 (PTRACE_TYPE_ARG3) (long) nt_type, data); 5223 #else 5224 res = ptrace (regset->set_request, pid, data, nt_type); 5225 #endif 5226 } 5227 5228 if (res < 0) 5229 { 5230 if (errno == EIO 5231 || (errno == EINVAL && regset->type == OPTIONAL_REGS)) 5232 { 5233 /* If we get EIO on a regset, or an EINVAL and the regset is 5234 optional, do not try it again for this process mode. */ 5235 disable_regset (regsets_info, regset); 5236 } 5237 else if (errno == ESRCH) 5238 { 5239 /* At this point, ESRCH should mean the process is 5240 already gone, in which case we simply ignore attempts 5241 to change its registers. See also the related 5242 comment in resume_one_lwp. */ 5243 free (buf); 5244 return 0; 5245 } 5246 else 5247 { 5248 perror ("Warning: ptrace(regsets_store_inferior_registers)"); 5249 } 5250 } 5251 else if (regset->type == GENERAL_REGS) 5252 saw_general_regs = 1; 5253 free (buf); 5254 } 5255 if (saw_general_regs) 5256 return 0; 5257 else 5258 return 1; 5259 } 5260 5261 #else /* !HAVE_LINUX_REGSETS */ 5262 5263 #define use_linux_regsets 0 5264 #define regsets_fetch_inferior_registers(regsets_info, regcache) 1 5265 #define regsets_store_inferior_registers(regsets_info, regcache) 1 5266 5267 #endif 5268 5269 /* Return 1 if register REGNO is supported by one of the regset ptrace 5270 calls or 0 if it has to be transferred individually. */ 5271 5272 static int 5273 linux_register_in_regsets (const struct regs_info *regs_info, int regno) 5274 { 5275 unsigned char mask = 1 << (regno % 8); 5276 size_t index = regno / 8; 5277 5278 return (use_linux_regsets 5279 && (regs_info->regset_bitmap == NULL 5280 || (regs_info->regset_bitmap[index] & mask) != 0)); 5281 } 5282 5283 #ifdef HAVE_LINUX_USRREGS 5284 5285 static int 5286 register_addr (const struct usrregs_info *usrregs, int regnum) 5287 { 5288 int addr; 5289 5290 if (regnum < 0 || regnum >= usrregs->num_regs) 5291 error ("Invalid register number %d.", regnum); 5292 5293 addr = usrregs->regmap[regnum]; 5294 5295 return addr; 5296 } 5297 5298 5299 void 5300 linux_process_target::fetch_register (const usrregs_info *usrregs, 5301 regcache *regcache, int regno) 5302 { 5303 CORE_ADDR regaddr; 5304 int i, size; 5305 char *buf; 5306 int pid; 5307 5308 if (regno >= usrregs->num_regs) 5309 return; 5310 if (low_cannot_fetch_register (regno)) 5311 return; 5312 5313 regaddr = register_addr (usrregs, regno); 5314 if (regaddr == -1) 5315 return; 5316 5317 size = ((register_size (regcache->tdesc, regno) 5318 + sizeof (PTRACE_XFER_TYPE) - 1) 5319 & -sizeof (PTRACE_XFER_TYPE)); 5320 buf = (char *) alloca (size); 5321 5322 pid = lwpid_of (current_thread); 5323 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE)) 5324 { 5325 errno = 0; 5326 *(PTRACE_XFER_TYPE *) (buf + i) = 5327 ptrace (PTRACE_PEEKUSER, pid, 5328 /* Coerce to a uintptr_t first to avoid potential gcc warning 5329 of coercing an 8 byte integer to a 4 byte pointer. */ 5330 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0); 5331 regaddr += sizeof (PTRACE_XFER_TYPE); 5332 if (errno != 0) 5333 { 5334 /* Mark register REGNO unavailable. 
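Passing a NULL buffer to supply_register records the register as unavailable in the regcache rather than supplying garbage from the failed peek.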
*/ 5335 supply_register (regcache, regno, NULL); 5336 return; 5337 } 5338 } 5339 5340 low_supply_ptrace_register (regcache, regno, buf); 5341 } 5342 5343 void 5344 linux_process_target::store_register (const usrregs_info *usrregs, 5345 regcache *regcache, int regno) 5346 { 5347 CORE_ADDR regaddr; 5348 int i, size; 5349 char *buf; 5350 int pid; 5351 5352 if (regno >= usrregs->num_regs) 5353 return; 5354 if (low_cannot_store_register (regno)) 5355 return; 5356 5357 regaddr = register_addr (usrregs, regno); 5358 if (regaddr == -1) 5359 return; 5360 5361 size = ((register_size (regcache->tdesc, regno) 5362 + sizeof (PTRACE_XFER_TYPE) - 1) 5363 & -sizeof (PTRACE_XFER_TYPE)); 5364 buf = (char *) alloca (size); 5365 memset (buf, 0, size); 5366 5367 low_collect_ptrace_register (regcache, regno, buf); 5368 5369 pid = lwpid_of (current_thread); 5370 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE)) 5371 { 5372 errno = 0; 5373 ptrace (PTRACE_POKEUSER, pid, 5374 /* Coerce to a uintptr_t first to avoid potential gcc warning 5375 about coercing an 8 byte integer to a 4 byte pointer. */ 5376 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr, 5377 (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i)); 5378 if (errno != 0) 5379 { 5380 /* At this point, ESRCH should mean the process is 5381 already gone, in which case we simply ignore attempts 5382 to change its registers. See also the related 5383 comment in resume_one_lwp. */ 5384 if (errno == ESRCH) 5385 return; 5386 5387 5388 if (!low_cannot_store_register (regno)) 5389 error ("writing register %d: %s", regno, safe_strerror (errno)); 5390 } 5391 regaddr += sizeof (PTRACE_XFER_TYPE); 5392 } 5393 } 5394 #endif /* HAVE_LINUX_USRREGS */ 5395 5396 void 5397 linux_process_target::low_collect_ptrace_register (regcache *regcache, 5398 int regno, char *buf) 5399 { 5400 collect_register (regcache, regno, buf); 5401 } 5402 5403 void 5404 linux_process_target::low_supply_ptrace_register (regcache *regcache, 5405 int regno, const char *buf) 5406 { 5407 supply_register (regcache, regno, buf); 5408 } 5409 5410 void 5411 linux_process_target::usr_fetch_inferior_registers (const regs_info *regs_info, 5412 regcache *regcache, 5413 int regno, int all) 5414 { 5415 #ifdef HAVE_LINUX_USRREGS 5416 struct usrregs_info *usr = regs_info->usrregs; 5417 5418 if (regno == -1) 5419 { 5420 for (regno = 0; regno < usr->num_regs; regno++) 5421 if (all || !linux_register_in_regsets (regs_info, regno)) 5422 fetch_register (usr, regcache, regno); 5423 } 5424 else 5425 fetch_register (usr, regcache, regno); 5426 #endif 5427 } 5428 5429 void 5430 linux_process_target::usr_store_inferior_registers (const regs_info *regs_info, 5431 regcache *regcache, 5432 int regno, int all) 5433 { 5434 #ifdef HAVE_LINUX_USRREGS 5435 struct usrregs_info *usr = regs_info->usrregs; 5436 5437 if (regno == -1) 5438 { 5439 for (regno = 0; regno < usr->num_regs; regno++) 5440 if (all || !linux_register_in_regsets (regs_info, regno)) 5441 store_register (usr, regcache, regno); 5442 } 5443 else 5444 store_register (usr, regcache, regno); 5445 #endif 5446 } 5447 5448 void 5449 linux_process_target::fetch_registers (regcache *regcache, int regno) 5450 { 5451 int use_regsets; 5452 int all = 0; 5453 const regs_info *regs_info = get_regs_info (); 5454 5455 if (regno == -1) 5456 { 5457 if (regs_info->usrregs != NULL) 5458 for (regno = 0; regno < regs_info->usrregs->num_regs; regno++) 5459 low_fetch_register (regcache, regno); 5460 5461 all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache); 5462 if 
(regs_info->usrregs != NULL) 5463 usr_fetch_inferior_registers (regs_info, regcache, -1, all); 5464 } 5465 else 5466 { 5467 if (low_fetch_register (regcache, regno)) 5468 return; 5469 5470 use_regsets = linux_register_in_regsets (regs_info, regno); 5471 if (use_regsets) 5472 all = regsets_fetch_inferior_registers (regs_info->regsets_info, 5473 regcache); 5474 if ((!use_regsets || all) && regs_info->usrregs != NULL) 5475 usr_fetch_inferior_registers (regs_info, regcache, regno, 1); 5476 } 5477 } 5478 5479 void 5480 linux_process_target::store_registers (regcache *regcache, int regno) 5481 { 5482 int use_regsets; 5483 int all = 0; 5484 const regs_info *regs_info = get_regs_info (); 5485 5486 if (regno == -1) 5487 { 5488 all = regsets_store_inferior_registers (regs_info->regsets_info, 5489 regcache); 5490 if (regs_info->usrregs != NULL) 5491 usr_store_inferior_registers (regs_info, regcache, regno, all); 5492 } 5493 else 5494 { 5495 use_regsets = linux_register_in_regsets (regs_info, regno); 5496 if (use_regsets) 5497 all = regsets_store_inferior_registers (regs_info->regsets_info, 5498 regcache); 5499 if ((!use_regsets || all) && regs_info->usrregs != NULL) 5500 usr_store_inferior_registers (regs_info, regcache, regno, 1); 5501 } 5502 } 5503 5504 bool 5505 linux_process_target::low_fetch_register (regcache *regcache, int regno) 5506 { 5507 return false; 5508 } 5509 5510 /* A wrapper for the read_memory target op. */ 5511 5512 static int 5513 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len) 5514 { 5515 return the_target->read_memory (memaddr, myaddr, len); 5516 } 5517 5518 /* Copy LEN bytes from inferior's memory starting at MEMADDR 5519 to debugger memory starting at MYADDR. */ 5520 5521 int 5522 linux_process_target::read_memory (CORE_ADDR memaddr, 5523 unsigned char *myaddr, int len) 5524 { 5525 int pid = lwpid_of (current_thread); 5526 PTRACE_XFER_TYPE *buffer; 5527 CORE_ADDR addr; 5528 int count; 5529 char filename[64]; 5530 int i; 5531 int ret; 5532 int fd; 5533 5534 /* Try using /proc. Don't bother for one word. */ 5535 if (len >= 3 * sizeof (long)) 5536 { 5537 int bytes; 5538 5539 /* We could keep this file open and cache it - possibly one per 5540 thread. That requires some juggling, but is even faster. */ 5541 sprintf (filename, "/proc/%d/mem", pid); 5542 fd = open (filename, O_RDONLY | O_LARGEFILE); 5543 if (fd == -1) 5544 goto no_proc; 5545 5546 /* If pread64 is available, use it. It's faster if the kernel 5547 supports it (only one syscall), and it's 64-bit safe even on 5548 32-bit platforms (for instance, SPARC debugging a SPARC64 5549 application). */ 5550 #ifdef HAVE_PREAD64 5551 bytes = pread64 (fd, myaddr, len, memaddr); 5552 #else 5553 bytes = -1; 5554 if (lseek (fd, memaddr, SEEK_SET) != -1) 5555 bytes = read (fd, myaddr, len); 5556 #endif 5557 5558 close (fd); 5559 if (bytes == len) 5560 return 0; 5561 5562 /* Some data was read, we'll try to get the rest with ptrace. */ 5563 if (bytes > 0) 5564 { 5565 memaddr += bytes; 5566 myaddr += bytes; 5567 len -= bytes; 5568 } 5569 } 5570 5571 no_proc: 5572 /* Round starting address down to longword boundary. */ 5573 addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE); 5574 /* Round ending address up; get number of longwords that makes. */ 5575 count = ((((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1) 5576 / sizeof (PTRACE_XFER_TYPE)); 5577 /* Allocate buffer of that many longwords. 
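For example, with 8-byte transfer words, a 6-byte read at memaddr 0x1003 rounds addr down to 0x1000 and count up to 2; we then peek 16 bytes and copy bytes 3 through 8 out of the buffer below.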
*/ 5578 buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count); 5579 5580 /* Read all the longwords */ 5581 errno = 0; 5582 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE)) 5583 { 5584 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning 5585 about coercing an 8 byte integer to a 4 byte pointer. */ 5586 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid, 5587 (PTRACE_TYPE_ARG3) (uintptr_t) addr, 5588 (PTRACE_TYPE_ARG4) 0); 5589 if (errno) 5590 break; 5591 } 5592 ret = errno; 5593 5594 /* Copy appropriate bytes out of the buffer. */ 5595 if (i > 0) 5596 { 5597 i *= sizeof (PTRACE_XFER_TYPE); 5598 i -= memaddr & (sizeof (PTRACE_XFER_TYPE) - 1); 5599 memcpy (myaddr, 5600 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)), 5601 i < len ? i : len); 5602 } 5603 5604 return ret; 5605 } 5606 5607 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's 5608 memory at MEMADDR. On failure (cannot write to the inferior) 5609 returns the value of errno. Always succeeds if LEN is zero. */ 5610 5611 int 5612 linux_process_target::write_memory (CORE_ADDR memaddr, 5613 const unsigned char *myaddr, int len) 5614 { 5615 int i; 5616 /* Round starting address down to longword boundary. */ 5617 CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE); 5618 /* Round ending address up; get number of longwords that makes. */ 5619 int count 5620 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1) 5621 / sizeof (PTRACE_XFER_TYPE); 5622 5623 /* Allocate buffer of that many longwords. */ 5624 PTRACE_XFER_TYPE *buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count); 5625 5626 int pid = lwpid_of (current_thread); 5627 5628 if (len == 0) 5629 { 5630 /* Zero length write always succeeds. */ 5631 return 0; 5632 } 5633 5634 if (debug_threads) 5635 { 5636 /* Dump up to four bytes. */ 5637 char str[4 * 2 + 1]; 5638 char *p = str; 5639 int dump = len < 4 ? len : 4; 5640 5641 for (i = 0; i < dump; i++) 5642 { 5643 sprintf (p, "%02x", myaddr[i]); 5644 p += 2; 5645 } 5646 *p = '\0'; 5647 5648 debug_printf ("Writing %s to 0x%08lx in process %d\n", 5649 str, (long) memaddr, pid); 5650 } 5651 5652 /* Fill start and end extra bytes of buffer with existing memory data. */ 5653 5654 errno = 0; 5655 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning 5656 about coercing an 8 byte integer to a 4 byte pointer. */ 5657 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid, 5658 (PTRACE_TYPE_ARG3) (uintptr_t) addr, 5659 (PTRACE_TYPE_ARG4) 0); 5660 if (errno) 5661 return errno; 5662 5663 if (count > 1) 5664 { 5665 errno = 0; 5666 buffer[count - 1] 5667 = ptrace (PTRACE_PEEKTEXT, pid, 5668 /* Coerce to a uintptr_t first to avoid potential gcc warning 5669 about coercing an 8 byte integer to a 4 byte pointer. */ 5670 (PTRACE_TYPE_ARG3) (uintptr_t) (addr + (count - 1) 5671 * sizeof (PTRACE_XFER_TYPE)), 5672 (PTRACE_TYPE_ARG4) 0); 5673 if (errno) 5674 return errno; 5675 } 5676 5677 /* Copy data to be written over corresponding part of buffer. */ 5678 5679 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)), 5680 myaddr, len); 5681 5682 /* Write the entire buffer. */ 5683 5684 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE)) 5685 { 5686 errno = 0; 5687 ptrace (PTRACE_POKETEXT, pid, 5688 /* Coerce to a uintptr_t first to avoid potential gcc warning 5689 about coercing an 8 byte integer to a 4 byte pointer. 
*/ 5690 (PTRACE_TYPE_ARG3) (uintptr_t) addr, 5691 (PTRACE_TYPE_ARG4) buffer[i]); 5692 if (errno) 5693 return errno; 5694 } 5695 5696 return 0; 5697 } 5698 5699 void 5700 linux_process_target::look_up_symbols () 5701 { 5702 #ifdef USE_THREAD_DB 5703 struct process_info *proc = current_process (); 5704 5705 if (proc->priv->thread_db != NULL) 5706 return; 5707 5708 thread_db_init (); 5709 #endif 5710 } 5711 5712 void 5713 linux_process_target::request_interrupt () 5714 { 5715 /* Send a SIGINT to the process group. This acts just like the user 5716 typed a ^C on the controlling terminal. */ 5717 ::kill (-signal_pid, SIGINT); 5718 } 5719 5720 bool 5721 linux_process_target::supports_read_auxv () 5722 { 5723 return true; 5724 } 5725 5726 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET 5727 to debugger memory starting at MYADDR. */ 5728 5729 int 5730 linux_process_target::read_auxv (CORE_ADDR offset, unsigned char *myaddr, 5731 unsigned int len) 5732 { 5733 char filename[PATH_MAX]; 5734 int fd, n; 5735 int pid = lwpid_of (current_thread); 5736 5737 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid); 5738 5739 fd = open (filename, O_RDONLY); 5740 if (fd < 0) 5741 return -1; 5742 5743 if (offset != (CORE_ADDR) 0 5744 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset) 5745 n = -1; 5746 else 5747 n = read (fd, myaddr, len); 5748 5749 close (fd); 5750 5751 return n; 5752 } 5753 5754 int 5755 linux_process_target::insert_point (enum raw_bkpt_type type, CORE_ADDR addr, 5756 int size, raw_breakpoint *bp) 5757 { 5758 if (type == raw_bkpt_type_sw) 5759 return insert_memory_breakpoint (bp); 5760 else 5761 return low_insert_point (type, addr, size, bp); 5762 } 5763 5764 int 5765 linux_process_target::low_insert_point (raw_bkpt_type type, CORE_ADDR addr, 5766 int size, raw_breakpoint *bp) 5767 { 5768 /* Unsupported (see target.h). */ 5769 return 1; 5770 } 5771 5772 int 5773 linux_process_target::remove_point (enum raw_bkpt_type type, CORE_ADDR addr, 5774 int size, raw_breakpoint *bp) 5775 { 5776 if (type == raw_bkpt_type_sw) 5777 return remove_memory_breakpoint (bp); 5778 else 5779 return low_remove_point (type, addr, size, bp); 5780 } 5781 5782 int 5783 linux_process_target::low_remove_point (raw_bkpt_type type, CORE_ADDR addr, 5784 int size, raw_breakpoint *bp) 5785 { 5786 /* Unsupported (see target.h). */ 5787 return 1; 5788 } 5789 5790 /* Implement the stopped_by_sw_breakpoint target_ops 5791 method. */ 5792 5793 bool 5794 linux_process_target::stopped_by_sw_breakpoint () 5795 { 5796 struct lwp_info *lwp = get_thread_lwp (current_thread); 5797 5798 return (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT); 5799 } 5800 5801 /* Implement the supports_stopped_by_sw_breakpoint target_ops 5802 method. */ 5803 5804 bool 5805 linux_process_target::supports_stopped_by_sw_breakpoint () 5806 { 5807 return USE_SIGTRAP_SIGINFO; 5808 } 5809 5810 /* Implement the stopped_by_hw_breakpoint target_ops 5811 method. */ 5812 5813 bool 5814 linux_process_target::stopped_by_hw_breakpoint () 5815 { 5816 struct lwp_info *lwp = get_thread_lwp (current_thread); 5817 5818 return (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT); 5819 } 5820 5821 /* Implement the supports_stopped_by_hw_breakpoint target_ops 5822 method. */ 5823 5824 bool 5825 linux_process_target::supports_stopped_by_hw_breakpoint () 5826 { 5827 return USE_SIGTRAP_SIGINFO; 5828 } 5829 5830 /* Implement the supports_hardware_single_step target_ops method. 
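The Linux target answers true by default; architectures for which this does not hold are expected to override this virtual method.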
*/ 5831 5832 bool 5833 linux_process_target::supports_hardware_single_step () 5834 { 5835 return true; 5836 } 5837 5838 bool 5839 linux_process_target::stopped_by_watchpoint () 5840 { 5841 struct lwp_info *lwp = get_thread_lwp (current_thread); 5842 5843 return lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT; 5844 } 5845 5846 CORE_ADDR 5847 linux_process_target::stopped_data_address () 5848 { 5849 struct lwp_info *lwp = get_thread_lwp (current_thread); 5850 5851 return lwp->stopped_data_address; 5852 } 5853 5854 /* This is only used for targets that define PT_TEXT_ADDR, 5855 PT_DATA_ADDR and PT_TEXT_END_ADDR. If those are not defined, supposedly 5856 the target has different ways of acquiring this information, like 5857 loadmaps. */ 5858 5859 bool 5860 linux_process_target::supports_read_offsets () 5861 { 5862 #ifdef SUPPORTS_READ_OFFSETS 5863 return true; 5864 #else 5865 return false; 5866 #endif 5867 } 5868 5869 /* Under uClinux, programs are loaded at non-zero offsets, which we need 5870 to tell gdb about. */ 5871 5872 int 5873 linux_process_target::read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p) 5874 { 5875 #ifdef SUPPORTS_READ_OFFSETS 5876 unsigned long text, text_end, data; 5877 int pid = lwpid_of (current_thread); 5878 5879 errno = 0; 5880 5881 text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR, 5882 (PTRACE_TYPE_ARG4) 0); 5883 text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR, 5884 (PTRACE_TYPE_ARG4) 0); 5885 data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR, 5886 (PTRACE_TYPE_ARG4) 0); 5887 5888 if (errno == 0) 5889 { 5890 /* Both text and data offsets produced at compile-time (and so 5891 used by gdb) are relative to the beginning of the program, 5892 with the data segment immediately following the text segment. 5893 However, the actual runtime layout in memory may put the data 5894 somewhere else, so when we send gdb a data base-address, we 5895 use the real data base address and subtract the compile-time 5896 data base-address from it (which is just the length of the 5897 text segment). BSS immediately follows data in both 5898 cases. */ 5899 *text_p = text; 5900 *data_p = data - (text_end - text); 5901 5902 return 1; 5903 } 5904 return 0; 5905 #else 5906 gdb_assert_not_reached ("target op read_offsets not supported"); 5907 #endif 5908 } 5909 5910 bool 5911 linux_process_target::supports_get_tls_address () 5912 { 5913 #ifdef USE_THREAD_DB 5914 return true; 5915 #else 5916 return false; 5917 #endif 5918 } 5919 5920 int 5921 linux_process_target::get_tls_address (thread_info *thread, 5922 CORE_ADDR offset, 5923 CORE_ADDR load_module, 5924 CORE_ADDR *address) 5925 { 5926 #ifdef USE_THREAD_DB 5927 return thread_db_get_tls_address (thread, offset, load_module, address); 5928 #else 5929 return -1; 5930 #endif 5931 } 5932 5933 bool 5934 linux_process_target::supports_qxfer_osdata () 5935 { 5936 return true; 5937 } 5938 5939 int 5940 linux_process_target::qxfer_osdata (const char *annex, 5941 unsigned char *readbuf, 5942 unsigned const char *writebuf, 5943 CORE_ADDR offset, int len) 5944 { 5945 return linux_common_xfer_osdata (annex, readbuf, offset, len); 5946 } 5947 5948 void 5949 linux_process_target::siginfo_fixup (siginfo_t *siginfo, 5950 gdb_byte *inf_siginfo, int direction) 5951 { 5952 bool done = low_siginfo_fixup (siginfo, inf_siginfo, direction); 5953 5954 /* If there was no callback, or the callback didn't do anything, 5955 then just do a straight memcpy. 
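DIRECTION 0 converts the native ptrace layout into the inferior's layout and 1 converts back; qxfer_siginfo below calls this with 0 right after PTRACE_GETSIGINFO, and with 1 just before PTRACE_SETSIGINFO.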
*/
5956 if (!done)
5957 {
5958 if (direction == 1)
5959 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
5960 else
5961 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
5962 }
5963 }
5964
5965 bool
5966 linux_process_target::low_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
5967 int direction)
5968 {
5969 return false;
5970 }
5971
5972 bool
5973 linux_process_target::supports_qxfer_siginfo ()
5974 {
5975 return true;
5976 }
5977
5978 int
5979 linux_process_target::qxfer_siginfo (const char *annex,
5980 unsigned char *readbuf,
5981 unsigned const char *writebuf,
5982 CORE_ADDR offset, int len)
5983 {
5984 int pid;
5985 siginfo_t siginfo;
5986 gdb_byte inf_siginfo[sizeof (siginfo_t)];
5987
5988 if (current_thread == NULL)
5989 return -1;
5990
5991 pid = lwpid_of (current_thread);
5992
5993 if (debug_threads)
5994 debug_printf ("%s siginfo for lwp %d.\n",
5995 readbuf != NULL ? "Reading" : "Writing",
5996 pid);
5997
5998 if (offset >= sizeof (siginfo))
5999 return -1;
6000
6001 if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
6002 return -1;
6003
6004 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
6005 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
6006 inferior with a 64-bit GDBSERVER should look the same as debugging it
6007 with a 32-bit GDBSERVER, we need to convert it. */
6008 siginfo_fixup (&siginfo, inf_siginfo, 0);
6009
6010 if (offset + len > sizeof (siginfo))
6011 len = sizeof (siginfo) - offset;
6012
6013 if (readbuf != NULL)
6014 memcpy (readbuf, inf_siginfo + offset, len);
6015 else
6016 {
6017 memcpy (inf_siginfo + offset, writebuf, len);
6018
6019 /* Convert back to ptrace layout before flushing it out. */
6020 siginfo_fixup (&siginfo, inf_siginfo, 1);
6021
6022 if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
6023 return -1;
6024 }
6025
6026 return len;
6027 }
6028
6029 /* SIGCHLD handler that serves two purposes: in non-stop/async mode,
6030 it lets us notice when children change state; and it acts as the
6031 handler for the sigsuspend in my_waitpid. */
6032
6033 static void
6034 sigchld_handler (int signo)
6035 {
6036 int old_errno = errno;
6037
6038 if (debug_threads)
6039 {
6040 do
6041 {
6042 /* Use the async signal safe debug function. */
6043 if (debug_write ("sigchld_handler\n",
6044 sizeof ("sigchld_handler\n") - 1) < 0)
6045 break; /* just ignore */
6046 } while (0);
6047 }
6048
6049 if (target_is_async_p ())
6050 async_file_mark (); /* trigger a linux_wait */
6051
6052 errno = old_errno;
6053 }
6054
6055 bool
6056 linux_process_target::supports_non_stop ()
6057 {
6058 return true;
6059 }
6060
6061 bool
6062 linux_process_target::async (bool enable)
6063 {
6064 bool previous = target_is_async_p ();
6065
6066 if (debug_threads)
6067 debug_printf ("linux_async (%d), previous=%d\n",
6068 enable, previous);
6069
6070 if (previous != enable)
6071 {
6072 sigset_t mask;
6073 sigemptyset (&mask);
6074 sigaddset (&mask, SIGCHLD);
6075
6076 gdb_sigmask (SIG_BLOCK, &mask, NULL);
6077
6078 if (enable)
6079 {
6080 if (pipe (linux_event_pipe) == -1)
6081 {
6082 linux_event_pipe[0] = -1;
6083 linux_event_pipe[1] = -1;
6084 gdb_sigmask (SIG_UNBLOCK, &mask, NULL);
6085
6086 warning ("creating event pipe failed.");
6087 return previous;
6088 }
6089
6090 fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
6091 fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
6092
6093 /* Register the event loop handler.
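   This is in effect the classic self-pipe trick: sigchld_handler
   (via async_file_mark) makes the read end of LINUX_EVENT_PIPE
   readable, which wakes the event loop and ends up invoking
   handle_target_event.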
*/ 6094 add_file_handler (linux_event_pipe[0], 6095 handle_target_event, NULL); 6096 6097 /* Always trigger a linux_wait. */ 6098 async_file_mark (); 6099 } 6100 else 6101 { 6102 delete_file_handler (linux_event_pipe[0]); 6103 6104 close (linux_event_pipe[0]); 6105 close (linux_event_pipe[1]); 6106 linux_event_pipe[0] = -1; 6107 linux_event_pipe[1] = -1; 6108 } 6109 6110 gdb_sigmask (SIG_UNBLOCK, &mask, NULL); 6111 } 6112 6113 return previous; 6114 } 6115 6116 int 6117 linux_process_target::start_non_stop (bool nonstop) 6118 { 6119 /* Register or unregister from event-loop accordingly. */ 6120 target_async (nonstop); 6121 6122 if (target_is_async_p () != (nonstop != false)) 6123 return -1; 6124 6125 return 0; 6126 } 6127 6128 bool 6129 linux_process_target::supports_multi_process () 6130 { 6131 return true; 6132 } 6133 6134 /* Check if fork events are supported. */ 6135 6136 bool 6137 linux_process_target::supports_fork_events () 6138 { 6139 return linux_supports_tracefork (); 6140 } 6141 6142 /* Check if vfork events are supported. */ 6143 6144 bool 6145 linux_process_target::supports_vfork_events () 6146 { 6147 return linux_supports_tracefork (); 6148 } 6149 6150 /* Check if exec events are supported. */ 6151 6152 bool 6153 linux_process_target::supports_exec_events () 6154 { 6155 return linux_supports_traceexec (); 6156 } 6157 6158 /* Target hook for 'handle_new_gdb_connection'. Causes a reset of the 6159 ptrace flags for all inferiors. This is in case the new GDB connection 6160 doesn't support the same set of events that the previous one did. */ 6161 6162 void 6163 linux_process_target::handle_new_gdb_connection () 6164 { 6165 /* Request that all the lwps reset their ptrace options. */ 6166 for_each_thread ([] (thread_info *thread) 6167 { 6168 struct lwp_info *lwp = get_thread_lwp (thread); 6169 6170 if (!lwp->stopped) 6171 { 6172 /* Stop the lwp so we can modify its ptrace options. */ 6173 lwp->must_set_ptrace_flags = 1; 6174 linux_stop_lwp (lwp); 6175 } 6176 else 6177 { 6178 /* Already stopped; go ahead and set the ptrace options. 
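   (The not-stopped branch above instead defers this work to the
   moment the lwp next stops, via MUST_SET_PTRACE_FLAGS.)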
*/ 6179 struct process_info *proc = find_process_pid (pid_of (thread)); 6180 int options = linux_low_ptrace_options (proc->attached); 6181 6182 linux_enable_event_reporting (lwpid_of (thread), options); 6183 lwp->must_set_ptrace_flags = 0; 6184 } 6185 }); 6186 } 6187 6188 int 6189 linux_process_target::handle_monitor_command (char *mon) 6190 { 6191 #ifdef USE_THREAD_DB 6192 return thread_db_handle_monitor_command (mon); 6193 #else 6194 return 0; 6195 #endif 6196 } 6197 6198 int 6199 linux_process_target::core_of_thread (ptid_t ptid) 6200 { 6201 return linux_common_core_of_thread (ptid); 6202 } 6203 6204 bool 6205 linux_process_target::supports_disable_randomization () 6206 { 6207 #ifdef HAVE_PERSONALITY 6208 return true; 6209 #else 6210 return false; 6211 #endif 6212 } 6213 6214 bool 6215 linux_process_target::supports_agent () 6216 { 6217 return true; 6218 } 6219 6220 bool 6221 linux_process_target::supports_range_stepping () 6222 { 6223 if (supports_software_single_step ()) 6224 return true; 6225 6226 return low_supports_range_stepping (); 6227 } 6228 6229 bool 6230 linux_process_target::low_supports_range_stepping () 6231 { 6232 return false; 6233 } 6234 6235 bool 6236 linux_process_target::supports_pid_to_exec_file () 6237 { 6238 return true; 6239 } 6240 6241 char * 6242 linux_process_target::pid_to_exec_file (int pid) 6243 { 6244 return linux_proc_pid_to_exec_file (pid); 6245 } 6246 6247 bool 6248 linux_process_target::supports_multifs () 6249 { 6250 return true; 6251 } 6252 6253 int 6254 linux_process_target::multifs_open (int pid, const char *filename, 6255 int flags, mode_t mode) 6256 { 6257 return linux_mntns_open_cloexec (pid, filename, flags, mode); 6258 } 6259 6260 int 6261 linux_process_target::multifs_unlink (int pid, const char *filename) 6262 { 6263 return linux_mntns_unlink (pid, filename); 6264 } 6265 6266 ssize_t 6267 linux_process_target::multifs_readlink (int pid, const char *filename, 6268 char *buf, size_t bufsiz) 6269 { 6270 return linux_mntns_readlink (pid, filename, buf, bufsiz); 6271 } 6272 6273 #if defined PT_GETDSBT || defined PTRACE_GETFDPIC 6274 struct target_loadseg 6275 { 6276 /* Core address to which the segment is mapped. */ 6277 Elf32_Addr addr; 6278 /* VMA recorded in the program header. */ 6279 Elf32_Addr p_vaddr; 6280 /* Size of this segment in memory. */ 6281 Elf32_Word p_memsz; 6282 }; 6283 6284 # if defined PT_GETDSBT 6285 struct target_loadmap 6286 { 6287 /* Protocol version number, must be zero. */ 6288 Elf32_Word version; 6289 /* Pointer to the DSBT table, its size, and the DSBT index. */ 6290 unsigned *dsbt_table; 6291 unsigned dsbt_size, dsbt_index; 6292 /* Number of segments in this map. */ 6293 Elf32_Word nsegs; 6294 /* The actual memory map. */ 6295 struct target_loadseg segs[/*nsegs*/]; 6296 }; 6297 # define LINUX_LOADMAP PT_GETDSBT 6298 # define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC 6299 # define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP 6300 # else 6301 struct target_loadmap 6302 { 6303 /* Protocol version number, must be zero. */ 6304 Elf32_Half version; 6305 /* Number of segments in this map. */ 6306 Elf32_Half nsegs; 6307 /* The actual memory map. 
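   This is a flexible array member: the map is NSEGS entries long,
   which is how read_loadmap below computes the number of bytes to
   copy out.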
*/ 6308 struct target_loadseg segs[/*nsegs*/]; 6309 }; 6310 # define LINUX_LOADMAP PTRACE_GETFDPIC 6311 # define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC 6312 # define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP 6313 # endif 6314 6315 bool 6316 linux_process_target::supports_read_loadmap () 6317 { 6318 return true; 6319 } 6320 6321 int 6322 linux_process_target::read_loadmap (const char *annex, CORE_ADDR offset, 6323 unsigned char *myaddr, unsigned int len) 6324 { 6325 int pid = lwpid_of (current_thread); 6326 int addr = -1; 6327 struct target_loadmap *data = NULL; 6328 unsigned int actual_length, copy_length; 6329 6330 if (strcmp (annex, "exec") == 0) 6331 addr = (int) LINUX_LOADMAP_EXEC; 6332 else if (strcmp (annex, "interp") == 0) 6333 addr = (int) LINUX_LOADMAP_INTERP; 6334 else 6335 return -1; 6336 6337 if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0) 6338 return -1; 6339 6340 if (data == NULL) 6341 return -1; 6342 6343 actual_length = sizeof (struct target_loadmap) 6344 + sizeof (struct target_loadseg) * data->nsegs; 6345 6346 if (offset < 0 || offset > actual_length) 6347 return -1; 6348 6349 copy_length = actual_length - offset < len ? actual_length - offset : len; 6350 memcpy (myaddr, (char *) data + offset, copy_length); 6351 return copy_length; 6352 } 6353 #endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */ 6354 6355 bool 6356 linux_process_target::supports_catch_syscall () 6357 { 6358 return (low_supports_catch_syscall () 6359 && linux_supports_tracesysgood ()); 6360 } 6361 6362 bool 6363 linux_process_target::low_supports_catch_syscall () 6364 { 6365 return false; 6366 } 6367 6368 CORE_ADDR 6369 linux_process_target::read_pc (regcache *regcache) 6370 { 6371 if (!low_supports_breakpoints ()) 6372 return 0; 6373 6374 return low_get_pc (regcache); 6375 } 6376 6377 void 6378 linux_process_target::write_pc (regcache *regcache, CORE_ADDR pc) 6379 { 6380 gdb_assert (low_supports_breakpoints ()); 6381 6382 low_set_pc (regcache, pc); 6383 } 6384 6385 bool 6386 linux_process_target::supports_thread_stopped () 6387 { 6388 return true; 6389 } 6390 6391 bool 6392 linux_process_target::thread_stopped (thread_info *thread) 6393 { 6394 return get_thread_lwp (thread)->stopped; 6395 } 6396 6397 /* This exposes stop-all-threads functionality to other modules. */ 6398 6399 void 6400 linux_process_target::pause_all (bool freeze) 6401 { 6402 stop_all_lwps (freeze, NULL); 6403 } 6404 6405 /* This exposes unstop-all-threads functionality to other gdbserver 6406 modules. */ 6407 6408 void 6409 linux_process_target::unpause_all (bool unfreeze) 6410 { 6411 unstop_all_lwps (unfreeze, NULL); 6412 } 6413 6414 int 6415 linux_process_target::prepare_to_access_memory () 6416 { 6417 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a 6418 running LWP. */ 6419 if (non_stop) 6420 target_pause_all (true); 6421 return 0; 6422 } 6423 6424 void 6425 linux_process_target::done_accessing_memory () 6426 { 6427 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a 6428 running LWP. */ 6429 if (non_stop) 6430 target_unpause_all (true); 6431 } 6432 6433 /* Extract &phdr and num_phdr in the inferior. Return 0 on success. */ 6434 6435 static int 6436 get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64, 6437 CORE_ADDR *phdr_memaddr, int *num_phdr) 6438 { 6439 char filename[PATH_MAX]; 6440 int fd; 6441 const int auxv_size = is_elf64 6442 ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t); 6443 char buf[sizeof (Elf64_auxv_t)]; /* The larger of the two. 
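   Each auxv record read below is an (a_type, a_un.a_val) pair, so
   reading AUXV_SIZE bytes at a time yields exactly one record per
   iteration of the loop.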
*/
6444
6445 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
6446
6447 fd = open (filename, O_RDONLY);
6448 if (fd < 0)
6449 return 1;
6450
6451 *phdr_memaddr = 0;
6452 *num_phdr = 0;
6453 while (read (fd, buf, auxv_size) == auxv_size
6454 && (*phdr_memaddr == 0 || *num_phdr == 0))
6455 {
6456 if (is_elf64)
6457 {
6458 Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;
6459
6460 switch (aux->a_type)
6461 {
6462 case AT_PHDR:
6463 *phdr_memaddr = aux->a_un.a_val;
6464 break;
6465 case AT_PHNUM:
6466 *num_phdr = aux->a_un.a_val;
6467 break;
6468 }
6469 }
6470 else
6471 {
6472 Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;
6473
6474 switch (aux->a_type)
6475 {
6476 case AT_PHDR:
6477 *phdr_memaddr = aux->a_un.a_val;
6478 break;
6479 case AT_PHNUM:
6480 *num_phdr = aux->a_un.a_val;
6481 break;
6482 }
6483 }
6484 }
6485
6486 close (fd);
6487
6488 if (*phdr_memaddr == 0 || *num_phdr == 0)
6489 {
6490 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
6491 "phdr_memaddr = %ld, phdr_num = %d",
6492 (long) *phdr_memaddr, *num_phdr);
6493 return 2;
6494 }
6495
6496 return 0;
6497 }
6498
6499 /* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
6500
6501 static CORE_ADDR
6502 get_dynamic (const int pid, const int is_elf64)
6503 {
6504 CORE_ADDR phdr_memaddr, relocation;
6505 int num_phdr, i;
6506 unsigned char *phdr_buf;
6507 const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);
6508
6509 if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
6510 return 0;
6511
6512 gdb_assert (num_phdr < 100); /* Basic sanity check. */
6513 phdr_buf = (unsigned char *) alloca (num_phdr * phdr_size);
6514
6515 if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
6516 return 0;
6517
6518 /* Compute relocation: it is expected to be 0 for "regular" executables,
6519 non-zero for PIE ones. */
6520 relocation = -1;
6521 for (i = 0; relocation == -1 && i < num_phdr; i++)
6522 if (is_elf64)
6523 {
6524 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6525
6526 if (p->p_type == PT_PHDR)
6527 relocation = phdr_memaddr - p->p_vaddr;
6528 }
6529 else
6530 {
6531 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6532
6533 if (p->p_type == PT_PHDR)
6534 relocation = phdr_memaddr - p->p_vaddr;
6535 }
6536
6537 if (relocation == -1)
6538 {
6539 /* PT_PHDR is optional, but necessary for PIE in general. Fortunately,
6540 real-world executables, including PIE executables, always have
6541 PT_PHDR present. PT_PHDR is absent from some shared libraries and
6542 from fpc (Free Pascal 2.4) binaries, but neither of those needs or
6543 provides DT_DEBUG anyway (fpc binaries are statically linked).
6544
6545 Therefore, wherever DT_DEBUG exists, PT_PHDR exists as well.
6546
6547 GDB could also find RELOCATION from AT_ENTRY - e_entry. */
6548
6549 return 0;
6550 }
6551
6552 for (i = 0; i < num_phdr; i++)
6553 {
6554 if (is_elf64)
6555 {
6556 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6557
6558 if (p->p_type == PT_DYNAMIC)
6559 return p->p_vaddr + relocation;
6560 }
6561 else
6562 {
6563 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6564
6565 if (p->p_type == PT_DYNAMIC)
6566 return p->p_vaddr + relocation;
6567 }
6568 }
6569
6570 return 0;
6571 }
6572
6573 /* Return &_r_debug in the inferior, or -1 if not present. Return value
6574 can be 0 if the inferior does not yet have the library list initialized.
6575 We look for DT_MIPS_RLD_MAP first.
MIPS executables use this instead of
6576 DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too. */
6577
6578 static CORE_ADDR
6579 get_r_debug (const int pid, const int is_elf64)
6580 {
6581 CORE_ADDR dynamic_memaddr;
6582 const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
6583 unsigned char buf[sizeof (Elf64_Dyn)]; /* The larger of the two. */
6584 CORE_ADDR map = -1;
6585
6586 dynamic_memaddr = get_dynamic (pid, is_elf64);
6587 if (dynamic_memaddr == 0)
6588 return map;
6589
6590 while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
6591 {
6592 if (is_elf64)
6593 {
6594 Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
6595 #if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
6596 union
6597 {
6598 Elf64_Xword map;
6599 unsigned char buf[sizeof (Elf64_Xword)];
6600 }
6601 rld_map;
6602 #endif
6603 #ifdef DT_MIPS_RLD_MAP
6604 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6605 {
6606 if (linux_read_memory (dyn->d_un.d_val,
6607 rld_map.buf, sizeof (rld_map.buf)) == 0)
6608 return rld_map.map;
6609 else
6610 break;
6611 }
6612 #endif /* DT_MIPS_RLD_MAP */
6613 #ifdef DT_MIPS_RLD_MAP_REL
6614 if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
6615 {
6616 if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
6617 rld_map.buf, sizeof (rld_map.buf)) == 0)
6618 return rld_map.map;
6619 else
6620 break;
6621 }
6622 #endif /* DT_MIPS_RLD_MAP_REL */
6623
6624 if (dyn->d_tag == DT_DEBUG && map == -1)
6625 map = dyn->d_un.d_val;
6626
6627 if (dyn->d_tag == DT_NULL)
6628 break;
6629 }
6630 else
6631 {
6632 Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
6633 #if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
6634 union
6635 {
6636 Elf32_Word map;
6637 unsigned char buf[sizeof (Elf32_Word)];
6638 }
6639 rld_map;
6640 #endif
6641 #ifdef DT_MIPS_RLD_MAP
6642 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6643 {
6644 if (linux_read_memory (dyn->d_un.d_val,
6645 rld_map.buf, sizeof (rld_map.buf)) == 0)
6646 return rld_map.map;
6647 else
6648 break;
6649 }
6650 #endif /* DT_MIPS_RLD_MAP */
6651 #ifdef DT_MIPS_RLD_MAP_REL
6652 if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
6653 {
6654 if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
6655 rld_map.buf, sizeof (rld_map.buf)) == 0)
6656 return rld_map.map;
6657 else
6658 break;
6659 }
6660 #endif /* DT_MIPS_RLD_MAP_REL */
6661
6662 if (dyn->d_tag == DT_DEBUG && map == -1)
6663 map = dyn->d_un.d_val;
6664
6665 if (dyn->d_tag == DT_NULL)
6666 break;
6667 }
6668
6669 dynamic_memaddr += dyn_size;
6670 }
6671
6672 return map;
6673 }
6674
6675 /* Read one pointer from MEMADDR in the inferior. */
6676
6677 static int
6678 read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
6679 {
6680 int ret;
6681
6682 /* Go through a union so this works on either big or little endian
6683 hosts, when the inferior's pointer size is smaller than the size
6684 of CORE_ADDR. It is assumed the inferior's endianness is the
6685 same as the superior's.
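   For example, when PTR_SIZE is 4 on a 64-bit host, the 4 bytes land
   in ADDR.UI and are widened into *PTR without sign extension.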
*/ 6686 union 6687 { 6688 CORE_ADDR core_addr; 6689 unsigned int ui; 6690 unsigned char uc; 6691 } addr; 6692 6693 ret = linux_read_memory (memaddr, &addr.uc, ptr_size); 6694 if (ret == 0) 6695 { 6696 if (ptr_size == sizeof (CORE_ADDR)) 6697 *ptr = addr.core_addr; 6698 else if (ptr_size == sizeof (unsigned int)) 6699 *ptr = addr.ui; 6700 else 6701 gdb_assert_not_reached ("unhandled pointer size"); 6702 } 6703 return ret; 6704 } 6705 6706 bool 6707 linux_process_target::supports_qxfer_libraries_svr4 () 6708 { 6709 return true; 6710 } 6711 6712 struct link_map_offsets 6713 { 6714 /* Offset and size of r_debug.r_version. */ 6715 int r_version_offset; 6716 6717 /* Offset and size of r_debug.r_map. */ 6718 int r_map_offset; 6719 6720 /* Offset to l_addr field in struct link_map. */ 6721 int l_addr_offset; 6722 6723 /* Offset to l_name field in struct link_map. */ 6724 int l_name_offset; 6725 6726 /* Offset to l_ld field in struct link_map. */ 6727 int l_ld_offset; 6728 6729 /* Offset to l_next field in struct link_map. */ 6730 int l_next_offset; 6731 6732 /* Offset to l_prev field in struct link_map. */ 6733 int l_prev_offset; 6734 }; 6735 6736 /* Construct qXfer:libraries-svr4:read reply. */ 6737 6738 int 6739 linux_process_target::qxfer_libraries_svr4 (const char *annex, 6740 unsigned char *readbuf, 6741 unsigned const char *writebuf, 6742 CORE_ADDR offset, int len) 6743 { 6744 struct process_info_private *const priv = current_process ()->priv; 6745 char filename[PATH_MAX]; 6746 int pid, is_elf64; 6747 6748 static const struct link_map_offsets lmo_32bit_offsets = 6749 { 6750 0, /* r_version offset. */ 6751 4, /* r_debug.r_map offset. */ 6752 0, /* l_addr offset in link_map. */ 6753 4, /* l_name offset in link_map. */ 6754 8, /* l_ld offset in link_map. */ 6755 12, /* l_next offset in link_map. */ 6756 16 /* l_prev offset in link_map. */ 6757 }; 6758 6759 static const struct link_map_offsets lmo_64bit_offsets = 6760 { 6761 0, /* r_version offset. */ 6762 8, /* r_debug.r_map offset. */ 6763 0, /* l_addr offset in link_map. */ 6764 8, /* l_name offset in link_map. */ 6765 16, /* l_ld offset in link_map. */ 6766 24, /* l_next offset in link_map. */ 6767 32 /* l_prev offset in link_map. */ 6768 }; 6769 const struct link_map_offsets *lmo; 6770 unsigned int machine; 6771 int ptr_size; 6772 CORE_ADDR lm_addr = 0, lm_prev = 0; 6773 CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev; 6774 int header_done = 0; 6775 6776 if (writebuf != NULL) 6777 return -2; 6778 if (readbuf == NULL) 6779 return -1; 6780 6781 pid = lwpid_of (current_thread); 6782 xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid); 6783 is_elf64 = elf_64_file_p (filename, &machine); 6784 lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets; 6785 ptr_size = is_elf64 ? 8 : 4; 6786 6787 while (annex[0] != '\0') 6788 { 6789 const char *sep; 6790 CORE_ADDR *addrp; 6791 int name_len; 6792 6793 sep = strchr (annex, '='); 6794 if (sep == NULL) 6795 break; 6796 6797 name_len = sep - annex; 6798 if (name_len == 5 && startswith (annex, "start")) 6799 addrp = &lm_addr; 6800 else if (name_len == 4 && startswith (annex, "prev")) 6801 addrp = &lm_prev; 6802 else 6803 { 6804 annex = strchr (sep, ';'); 6805 if (annex == NULL) 6806 break; 6807 annex++; 6808 continue; 6809 } 6810 6811 annex = decode_address_to_semicolon (addrp, sep + 1); 6812 } 6813 6814 if (lm_addr == 0) 6815 { 6816 int r_version = 0; 6817 6818 if (priv->r_debug == 0) 6819 priv->r_debug = get_r_debug (pid, is_elf64); 6820 6821 /* We failed to find DT_DEBUG. 
Such a situation will not change
6822 for this inferior - do not retry it. Report it to GDB as
6823 E01; see GDB's solib-svr4.c for the reasons. */
6824 if (priv->r_debug == (CORE_ADDR) -1)
6825 return -1;
6826
6827 if (priv->r_debug != 0)
6828 {
6829 if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
6830 (unsigned char *) &r_version,
6831 sizeof (r_version)) != 0
6832 || r_version != 1)
6833 {
6834 warning ("unexpected r_debug version %d", r_version);
6835 }
6836 else if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
6837 &lm_addr, ptr_size) != 0)
6838 {
6839 warning ("unable to read r_map from 0x%lx",
6840 (long) priv->r_debug + lmo->r_map_offset);
6841 }
6842 }
6843 }
6844
6845 std::string document = "<library-list-svr4 version=\"1.0\"";
6846
6847 while (lm_addr
6848 && read_one_ptr (lm_addr + lmo->l_name_offset,
6849 &l_name, ptr_size) == 0
6850 && read_one_ptr (lm_addr + lmo->l_addr_offset,
6851 &l_addr, ptr_size) == 0
6852 && read_one_ptr (lm_addr + lmo->l_ld_offset,
6853 &l_ld, ptr_size) == 0
6854 && read_one_ptr (lm_addr + lmo->l_prev_offset,
6855 &l_prev, ptr_size) == 0
6856 && read_one_ptr (lm_addr + lmo->l_next_offset,
6857 &l_next, ptr_size) == 0)
6858 {
6859 unsigned char libname[PATH_MAX];
6860
6861 if (lm_prev != l_prev)
6862 {
6863 warning ("Corrupted shared library list: 0x%lx != 0x%lx",
6864 (long) lm_prev, (long) l_prev);
6865 break;
6866 }
6867
6868 /* Ignore the first entry even if it has a valid name, as the first
6869 entry corresponds to the main executable. The first entry should not
6870 be skipped if the dynamic loader was loaded late by a static executable
6871 (see solib-svr4.c, parameter ignore_first). But in that case the main
6872 executable does not have PT_DYNAMIC present, and this function has
6873 already returned above because get_r_debug failed. */
6874 if (lm_prev == 0)
6875 string_appendf (document, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
6876 else
6877 {
6878 /* Not checking for error because reading may stop before
6879 we've got PATH_MAX worth of characters. */
6880 libname[0] = '\0';
6881 linux_read_memory (l_name, libname, sizeof (libname) - 1);
6882 libname[sizeof (libname) - 1] = '\0';
6883 if (libname[0] != '\0')
6884 {
6885 if (!header_done)
6886 {
6887 /* Terminate `<library-list-svr4'. */
6888 document += '>';
6889 header_done = 1;
6890 }
6891
6892 string_appendf (document, "<library name=\"");
6893 xml_escape_text_append (&document, (char *) libname);
6894 string_appendf (document, "\" lm=\"0x%lx\" "
6895 "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
6896 (unsigned long) lm_addr, (unsigned long) l_addr,
6897 (unsigned long) l_ld);
6898 }
6899 }
6900
6901 lm_prev = lm_addr;
6902 lm_addr = l_next;
6903 }
6904
6905 if (!header_done)
6906 {
6907 /* Empty list; terminate `<library-list-svr4'. */
6908 document += "/>";
6909 }
6910 else
6911 document += "</library-list-svr4>";
6912
6913 int document_len = document.length ();
6914 if (offset < document_len)
6915 document_len -= offset;
6916 else
6917 document_len = 0;
6918 if (len > document_len)
6919 len = document_len;
6920
6921 memcpy (readbuf, document.data () + offset, len);
6922
6923 return len;
6924 }
6925
6926 #ifdef HAVE_LINUX_BTRACE
6927
6928 btrace_target_info *
6929 linux_process_target::enable_btrace (ptid_t ptid,
6930 const btrace_config *conf)
6931 {
6932 return linux_enable_btrace (ptid, conf);
6933 }
6934
6935 /* See to_disable_btrace target method.
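   Any btrace_error other than BTRACE_ERR_NONE is collapsed to -1
   below; the caller only learns that disabling failed, not why.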
*/ 6936 6937 int 6938 linux_process_target::disable_btrace (btrace_target_info *tinfo) 6939 { 6940 enum btrace_error err; 6941 6942 err = linux_disable_btrace (tinfo); 6943 return (err == BTRACE_ERR_NONE ? 0 : -1); 6944 } 6945 6946 /* Encode an Intel Processor Trace configuration. */ 6947 6948 static void 6949 linux_low_encode_pt_config (struct buffer *buffer, 6950 const struct btrace_data_pt_config *config) 6951 { 6952 buffer_grow_str (buffer, "<pt-config>\n"); 6953 6954 switch (config->cpu.vendor) 6955 { 6956 case CV_INTEL: 6957 buffer_xml_printf (buffer, "<cpu vendor=\"GenuineIntel\" family=\"%u\" " 6958 "model=\"%u\" stepping=\"%u\"/>\n", 6959 config->cpu.family, config->cpu.model, 6960 config->cpu.stepping); 6961 break; 6962 6963 default: 6964 break; 6965 } 6966 6967 buffer_grow_str (buffer, "</pt-config>\n"); 6968 } 6969 6970 /* Encode a raw buffer. */ 6971 6972 static void 6973 linux_low_encode_raw (struct buffer *buffer, const gdb_byte *data, 6974 unsigned int size) 6975 { 6976 if (size == 0) 6977 return; 6978 6979 /* We use hex encoding - see gdbsupport/rsp-low.h. */ 6980 buffer_grow_str (buffer, "<raw>\n"); 6981 6982 while (size-- > 0) 6983 { 6984 char elem[2]; 6985 6986 elem[0] = tohex ((*data >> 4) & 0xf); 6987 elem[1] = tohex (*data++ & 0xf); 6988 6989 buffer_grow (buffer, elem, 2); 6990 } 6991 6992 buffer_grow_str (buffer, "</raw>\n"); 6993 } 6994 6995 /* See to_read_btrace target method. */ 6996 6997 int 6998 linux_process_target::read_btrace (btrace_target_info *tinfo, 6999 buffer *buffer, 7000 enum btrace_read_type type) 7001 { 7002 struct btrace_data btrace; 7003 enum btrace_error err; 7004 7005 err = linux_read_btrace (&btrace, tinfo, type); 7006 if (err != BTRACE_ERR_NONE) 7007 { 7008 if (err == BTRACE_ERR_OVERFLOW) 7009 buffer_grow_str0 (buffer, "E.Overflow."); 7010 else 7011 buffer_grow_str0 (buffer, "E.Generic Error."); 7012 7013 return -1; 7014 } 7015 7016 switch (btrace.format) 7017 { 7018 case BTRACE_FORMAT_NONE: 7019 buffer_grow_str0 (buffer, "E.No Trace."); 7020 return -1; 7021 7022 case BTRACE_FORMAT_BTS: 7023 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n"); 7024 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n"); 7025 7026 for (const btrace_block &block : *btrace.variant.bts.blocks) 7027 buffer_xml_printf (buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n", 7028 paddress (block.begin), paddress (block.end)); 7029 7030 buffer_grow_str0 (buffer, "</btrace>\n"); 7031 break; 7032 7033 case BTRACE_FORMAT_PT: 7034 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n"); 7035 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n"); 7036 buffer_grow_str (buffer, "<pt>\n"); 7037 7038 linux_low_encode_pt_config (buffer, &btrace.variant.pt.config); 7039 7040 linux_low_encode_raw (buffer, btrace.variant.pt.data, 7041 btrace.variant.pt.size); 7042 7043 buffer_grow_str (buffer, "</pt>\n"); 7044 buffer_grow_str0 (buffer, "</btrace>\n"); 7045 break; 7046 7047 default: 7048 buffer_grow_str0 (buffer, "E.Unsupported Trace Format."); 7049 return -1; 7050 } 7051 7052 return 0; 7053 } 7054 7055 /* See to_btrace_conf target method. 
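   The reply built below has this overall shape (the size value is
   illustrative only):

     <!DOCTYPE btrace-conf SYSTEM "btrace-conf.dtd">
     <btrace-conf version="1.0">
       <bts size="0x10000" />
     </btrace-conf>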
*/ 7056 7057 int 7058 linux_process_target::read_btrace_conf (const btrace_target_info *tinfo, 7059 buffer *buffer) 7060 { 7061 const struct btrace_config *conf; 7062 7063 buffer_grow_str (buffer, "<!DOCTYPE btrace-conf SYSTEM \"btrace-conf.dtd\">\n"); 7064 buffer_grow_str (buffer, "<btrace-conf version=\"1.0\">\n"); 7065 7066 conf = linux_btrace_conf (tinfo); 7067 if (conf != NULL) 7068 { 7069 switch (conf->format) 7070 { 7071 case BTRACE_FORMAT_NONE: 7072 break; 7073 7074 case BTRACE_FORMAT_BTS: 7075 buffer_xml_printf (buffer, "<bts"); 7076 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->bts.size); 7077 buffer_xml_printf (buffer, " />\n"); 7078 break; 7079 7080 case BTRACE_FORMAT_PT: 7081 buffer_xml_printf (buffer, "<pt"); 7082 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->pt.size); 7083 buffer_xml_printf (buffer, "/>\n"); 7084 break; 7085 } 7086 } 7087 7088 buffer_grow_str0 (buffer, "</btrace-conf>\n"); 7089 return 0; 7090 } 7091 #endif /* HAVE_LINUX_BTRACE */ 7092 7093 /* See nat/linux-nat.h. */ 7094 7095 ptid_t 7096 current_lwp_ptid (void) 7097 { 7098 return ptid_of (current_thread); 7099 } 7100 7101 const char * 7102 linux_process_target::thread_name (ptid_t thread) 7103 { 7104 return linux_proc_tid_get_name (thread); 7105 } 7106 7107 #if USE_THREAD_DB 7108 bool 7109 linux_process_target::thread_handle (ptid_t ptid, gdb_byte **handle, 7110 int *handle_len) 7111 { 7112 return thread_db_thread_handle (ptid, handle, handle_len); 7113 } 7114 #endif 7115 7116 /* Default implementation of linux_target_ops method "set_pc" for 7117 32-bit pc register which is literally named "pc". */ 7118 7119 void 7120 linux_set_pc_32bit (struct regcache *regcache, CORE_ADDR pc) 7121 { 7122 uint32_t newpc = pc; 7123 7124 supply_register_by_name (regcache, "pc", &newpc); 7125 } 7126 7127 /* Default implementation of linux_target_ops method "get_pc" for 7128 32-bit pc register which is literally named "pc". */ 7129 7130 CORE_ADDR 7131 linux_get_pc_32bit (struct regcache *regcache) 7132 { 7133 uint32_t pc; 7134 7135 collect_register_by_name (regcache, "pc", &pc); 7136 if (debug_threads) 7137 debug_printf ("stop pc is 0x%" PRIx32 "\n", pc); 7138 return pc; 7139 } 7140 7141 /* Default implementation of linux_target_ops method "set_pc" for 7142 64-bit pc register which is literally named "pc". */ 7143 7144 void 7145 linux_set_pc_64bit (struct regcache *regcache, CORE_ADDR pc) 7146 { 7147 uint64_t newpc = pc; 7148 7149 supply_register_by_name (regcache, "pc", &newpc); 7150 } 7151 7152 /* Default implementation of linux_target_ops method "get_pc" for 7153 64-bit pc register which is literally named "pc". */ 7154 7155 CORE_ADDR 7156 linux_get_pc_64bit (struct regcache *regcache) 7157 { 7158 uint64_t pc; 7159 7160 collect_register_by_name (regcache, "pc", &pc); 7161 if (debug_threads) 7162 debug_printf ("stop pc is 0x%" PRIx64 "\n", pc); 7163 return pc; 7164 } 7165 7166 /* See linux-low.h. 
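   A typical use by an arch backend might look like this (illustrative
   only; AT_ENTRY and the ENTRY variable are just examples):

     CORE_ADDR entry;
     if (linux_get_auxv (8, AT_ENTRY, &entry))
       ... use ENTRY ...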
*/ 7167 7168 int 7169 linux_get_auxv (int wordsize, CORE_ADDR match, CORE_ADDR *valp) 7170 { 7171 gdb_byte *data = (gdb_byte *) alloca (2 * wordsize); 7172 int offset = 0; 7173 7174 gdb_assert (wordsize == 4 || wordsize == 8); 7175 7176 while (the_target->read_auxv (offset, data, 2 * wordsize) == 2 * wordsize) 7177 { 7178 if (wordsize == 4) 7179 { 7180 uint32_t *data_p = (uint32_t *) data; 7181 if (data_p[0] == match) 7182 { 7183 *valp = data_p[1]; 7184 return 1; 7185 } 7186 } 7187 else 7188 { 7189 uint64_t *data_p = (uint64_t *) data; 7190 if (data_p[0] == match) 7191 { 7192 *valp = data_p[1]; 7193 return 1; 7194 } 7195 } 7196 7197 offset += 2 * wordsize; 7198 } 7199 7200 return 0; 7201 } 7202 7203 /* See linux-low.h. */ 7204 7205 CORE_ADDR 7206 linux_get_hwcap (int wordsize) 7207 { 7208 CORE_ADDR hwcap = 0; 7209 linux_get_auxv (wordsize, AT_HWCAP, &hwcap); 7210 return hwcap; 7211 } 7212 7213 /* See linux-low.h. */ 7214 7215 CORE_ADDR 7216 linux_get_hwcap2 (int wordsize) 7217 { 7218 CORE_ADDR hwcap2 = 0; 7219 linux_get_auxv (wordsize, AT_HWCAP2, &hwcap2); 7220 return hwcap2; 7221 } 7222 7223 #ifdef HAVE_LINUX_REGSETS 7224 void 7225 initialize_regsets_info (struct regsets_info *info) 7226 { 7227 for (info->num_regsets = 0; 7228 info->regsets[info->num_regsets].size >= 0; 7229 info->num_regsets++) 7230 ; 7231 } 7232 #endif 7233 7234 void 7235 initialize_low (void) 7236 { 7237 struct sigaction sigchld_action; 7238 7239 memset (&sigchld_action, 0, sizeof (sigchld_action)); 7240 set_target_ops (the_linux_target); 7241 7242 linux_ptrace_init_warnings (); 7243 linux_proc_init_warnings (); 7244 7245 sigchld_action.sa_handler = sigchld_handler; 7246 sigemptyset (&sigchld_action.sa_mask); 7247 sigchld_action.sa_flags = SA_RESTART; 7248 sigaction (SIGCHLD, &sigchld_action, NULL); 7249 7250 initialize_low_arch (); 7251 7252 linux_check_ptrace_features (); 7253 } 7254