/* Internal interfaces for the GNU/Linux specific target code for gdbserver.
   Copyright (C) 2002-2023 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#ifndef GDBSERVER_LINUX_LOW_H
#define GDBSERVER_LINUX_LOW_H

#include "nat/linux-nat.h"
#include "nat/gdb_thread_db.h"
#include <signal.h>

#include "gdbthread.h"
#include "gdb_proc_service.h"

/* Included for ptrace type definitions.  */
#include "nat/linux-ptrace.h"
#include "target/waitstatus.h" /* For enum target_stop_reason.  */
#include "tracepoint.h"

#include <list>

#define PTRACE_XFER_TYPE long

#ifdef HAVE_LINUX_REGSETS
typedef void (*regset_fill_func) (struct regcache *, void *);
typedef void (*regset_store_func) (struct regcache *, const void *);
enum regset_type {
  GENERAL_REGS,
  FP_REGS,
  EXTENDED_REGS,
  OPTIONAL_REGS, /* Do not error if the regset cannot be accessed.  */
};

/* The arch's regsets array initializer must be terminated with a NULL
   regset.  */
#define NULL_REGSET \
  { 0, 0, 0, -1, (enum regset_type) -1, NULL, NULL }

struct regset_info
{
  int get_request, set_request;
  /* If NT_TYPE isn't 0, it will be passed to ptrace as the 3rd
     argument and the 4th argument should be "const struct iovec *".  */
  int nt_type;
  int size;
  enum regset_type type;
  regset_fill_func fill_function;
  regset_store_func store_function;
};

/* Aggregation of all the supported regsets of a given
   architecture/mode.  */

struct regsets_info
{
  /* The regsets array.  */
  struct regset_info *regsets;

  /* The number of regsets in the REGSETS array.  */
  int num_regsets;

  /* If we get EIO on a regset, do not try it again.  Note the set of
     supported regsets may depend on processor mode on biarch
     machines.  This is a (lazily allocated) array holding one boolean
     byte (0/1) per regset, with each element corresponding to the
     regset in the REGSETS array above at the same offset.  */
  char *disabled_regsets;
};

#endif

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.  */

struct usrregs_info
{
  /* The number of registers accessible.  */
  int num_regs;

  /* The registers map.  */
  int *regmap;
};

/* All info needed to access an architecture/mode's registers.  */

struct regs_info
{
  /* Regset support bitmap: 1 for registers that are transferred as a part
     of a regset, 0 for ones that need to be handled individually.  This
     can be NULL if all registers are transferred with regsets or regsets
     are not supported.  */
  unsigned char *regset_bitmap;

  /* Info used when accessing registers with PTRACE_PEEKUSER /
     PTRACE_POKEUSER.
     This can be NULL if all registers are
     transferred with regsets.  */
  struct usrregs_info *usrregs;

#ifdef HAVE_LINUX_REGSETS
  /* Info used when accessing registers with regsets.  */
  struct regsets_info *regsets_info;
#endif
};
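
/* As an illustration (a hypothetical backend; the names below are made
   up, real backends live in the linux-*-low.cc files), an architecture
   typically defines a NULL_REGSET-terminated regsets array, a
   regsets_info and usrregs_info describing it, and a regs_info tying
   the pieces together, which its get_regs_info method then returns.
   The num_regsets field starts at 0 and is computed later by
   initialize_regsets_info:

     static struct regset_info arch_regsets[] = {
       { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
         GENERAL_REGS, arch_fill_gregset, arch_store_gregset },
       NULL_REGSET
     };

     static struct regsets_info arch_regsets_info = {
       arch_regsets,
       0,
       NULL,
     };

     static struct usrregs_info arch_usrregs_info = {
       arch_num_regs,
       arch_regmap,
     };

     static struct regs_info arch_regs_info = {
       NULL,
       &arch_usrregs_info,
       &arch_regsets_info,
     };
*/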

struct process_info_private
{
  /* Arch-specific additions.  */
  struct arch_process_info *arch_private;

  /* libthread_db-specific additions.  Not NULL if this process has loaded
     thread_db, and it is active.  */
  struct thread_db *thread_db;

  /* &_r_debug.  0 if not yet determined.  -1 if no PT_DYNAMIC in Phdrs.  */
  CORE_ADDR r_debug;

  /* The /proc/pid/mem file used for reading/writing memory.  */
  int mem_fd;
};

struct lwp_info;

/* Target ops definitions for a Linux target.  */

class linux_process_target : public process_stratum_target
{
public:

  int create_inferior (const char *program,
		       const std::vector<char *> &program_args) override;

  void post_create_inferior () override;

  int attach (unsigned long pid) override;

  int kill (process_info *proc) override;

  int detach (process_info *proc) override;

  void mourn (process_info *proc) override;

  void join (int pid) override;

  bool thread_alive (ptid_t pid) override;

  void resume (thread_resume *resume_info, size_t n) override;

  ptid_t wait (ptid_t ptid, target_waitstatus *status,
	       target_wait_flags options) override;

  void fetch_registers (regcache *regcache, int regno) override;

  void store_registers (regcache *regcache, int regno) override;

  int read_memory (CORE_ADDR memaddr, unsigned char *myaddr,
		   int len) override;

  int write_memory (CORE_ADDR memaddr, const unsigned char *myaddr,
		    int len) override;

  void look_up_symbols () override;

  void request_interrupt () override;

  bool supports_read_auxv () override;

  int read_auxv (CORE_ADDR offset, unsigned char *myaddr,
		 unsigned int len) override;

  int insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
		    int size, raw_breakpoint *bp) override;

  int remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
		    int size, raw_breakpoint *bp) override;

  bool stopped_by_sw_breakpoint () override;

  bool supports_stopped_by_sw_breakpoint () override;

  bool stopped_by_hw_breakpoint () override;

  bool supports_stopped_by_hw_breakpoint () override;

  bool supports_hardware_single_step () override;

  bool stopped_by_watchpoint () override;

  CORE_ADDR stopped_data_address () override;

  bool supports_read_offsets () override;

  int read_offsets (CORE_ADDR *text, CORE_ADDR *data) override;

  bool supports_get_tls_address () override;

  int get_tls_address (thread_info *thread, CORE_ADDR offset,
		       CORE_ADDR load_module, CORE_ADDR *address) override;

  bool supports_qxfer_osdata () override;

  int qxfer_osdata (const char *annex, unsigned char *readbuf,
		    unsigned const char *writebuf,
		    CORE_ADDR offset, int len) override;

  bool supports_qxfer_siginfo () override;

  int qxfer_siginfo (const char *annex, unsigned char *readbuf,
		     unsigned const char *writebuf,
		     CORE_ADDR offset, int len) override;

  bool supports_non_stop () override;

  bool async (bool enable) override;

  int start_non_stop (bool enable) override;

  bool supports_multi_process () override;

  bool supports_fork_events () override;

  bool supports_vfork_events () override;

  bool supports_exec_events () override;

  void handle_new_gdb_connection () override;

  int handle_monitor_command (char *mon) override;

  int core_of_thread (ptid_t ptid) override;

#if defined PT_GETDSBT || defined PTRACE_GETFDPIC
  bool supports_read_loadmap () override;

  int read_loadmap (const char *annex, CORE_ADDR offset,
		    unsigned char *myaddr, unsigned int len) override;
#endif

  CORE_ADDR read_pc (regcache *regcache) override;

  void write_pc (regcache *regcache, CORE_ADDR pc) override;

  bool supports_thread_stopped () override;

  bool thread_stopped (thread_info *thread) override;

  void pause_all (bool freeze) override;

  void unpause_all (bool unfreeze) override;

  void stabilize_threads () override;

  bool supports_disable_randomization () override;

  bool supports_qxfer_libraries_svr4 () override;

  int qxfer_libraries_svr4 (const char *annex,
			    unsigned char *readbuf,
			    unsigned const char *writebuf,
			    CORE_ADDR offset, int len) override;

  bool supports_agent () override;

#ifdef HAVE_LINUX_BTRACE
  bool supports_btrace () override;

  btrace_target_info *enable_btrace (thread_info *tp,
				     const btrace_config *conf) override;

  int disable_btrace (btrace_target_info *tinfo) override;

  int read_btrace (btrace_target_info *tinfo, buffer *buf,
		   enum btrace_read_type type) override;

  int read_btrace_conf (const btrace_target_info *tinfo,
			buffer *buf) override;
#endif

  bool supports_range_stepping () override;

  bool supports_pid_to_exec_file () override;

  const char *pid_to_exec_file (int pid) override;

  bool supports_multifs () override;

  int multifs_open (int pid, const char *filename, int flags,
		    mode_t mode) override;

  int multifs_unlink (int pid, const char *filename) override;

  ssize_t multifs_readlink (int pid, const char *filename, char *buf,
			    size_t bufsiz) override;

  const char *thread_name (ptid_t thread) override;

#if USE_THREAD_DB
  bool thread_handle (ptid_t ptid, gdb_byte **handle,
		      int *handle_len) override;
#endif

  thread_info *thread_pending_parent (thread_info *thread) override;
  thread_info *thread_pending_child (thread_info *thread) override;

  bool supports_catch_syscall () override;

  /* Return the information to access registers.  This has public
     visibility because proc-service uses it.  */
  virtual const regs_info *get_regs_info () = 0;

private:

  /* Handle a GNU/Linux extended wait response.  If we see a clone,
     fork, or vfork event, we need to add the new LWP to our list
     (and return 0 so as not to report the trap to higher layers).
     If we see an exec event, we will modify ORIG_EVENT_LWP to point
     to a new LWP representing the new program.  */
  int handle_extended_wait (lwp_info **orig_event_lwp, int wstat);

  /* Do low-level handling of the event, and check if this is an event we want
     to report.  If so, store it as a pending status in the lwp_info structure
     corresponding to LWPID.  */
  void filter_event (int lwpid, int wstat);

  /* Wait for an event from child(ren) WAIT_PTID, and return any that
     match FILTER_PTID (leaving others pending).
     The PTIDs can be:
     minus_one_ptid, to specify any child; a pid PTID, specifying all
     lwps of a thread group; or a PTID representing a single lwp.  Store
     the stop status through the status pointer WSTAT.  OPTIONS is
     passed to the waitpid call.  Return 0 if no event was found and
     OPTIONS contains WNOHANG.  Return -1 if no unwaited-for children
     were found.  Return the PID of the stopped child otherwise.  */
  int wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
			       int *wstatp, int options);

  /* Wait for an event from child(ren) PTID.  PTIDs can be:
     minus_one_ptid, to specify any child; a pid PTID, specifying all
     lwps of a thread group; or a PTID representing a single lwp.  Store
     the stop status through the status pointer WSTAT.  OPTIONS is
     passed to the waitpid call.  Return 0 if no event was found and
     OPTIONS contains WNOHANG.  Return -1 if no unwaited-for children
     were found.  Return the PID of the stopped child otherwise.  */
  int wait_for_event (ptid_t ptid, int *wstatp, int options);

  /* Wait for all children to stop for the SIGSTOPs we just queued.  */
  void wait_for_sigstop ();

  /* Wait for the process, and return its status.  */
  ptid_t wait_1 (ptid_t ptid, target_waitstatus *ourstatus,
		 target_wait_flags target_options);

  /* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
     If SUSPEND, then also increase the suspend count of every LWP,
     except EXCEPT.  */
  void stop_all_lwps (int suspend, lwp_info *except);

  /* Stopped LWPs that the client wanted to be running and that don't
     have pending statuses are set to run again, except for EXCEPT, if
     not NULL.  This undoes a stop_all_lwps call.  */
  void unstop_all_lwps (int unsuspend, lwp_info *except);

  /* Start a step-over operation on LWP.  When LWP is stopped at a
     breakpoint, to make progress, we need to move the breakpoint out
     of the way.  If we let other threads run while we do that, they may
     pass by the breakpoint location and miss hitting it.  To avoid
     that, a step-over momentarily stops all threads while LWP is
     single-stepped by either hardware or software while the breakpoint
     is temporarily uninserted from the inferior.  When the single-step
     finishes, we reinsert the breakpoint, and let all threads that are
     supposed to be running, run again.  */
  void start_step_over (lwp_info *lwp);

  /* If there's a step over in progress, wait until all threads stop
     (that is, until the stepping thread finishes its step), and
     unsuspend all lwps.  The stepping thread ends with its status
     pending, which is processed later when we get back to processing
     events.  */
  void complete_ongoing_step_over ();

  /* Finish a step-over.  Reinsert the breakpoint we had uninserted in
     start_step_over, if still there, and delete any single-step
     breakpoints we've set, on non-hardware single-step targets.
     Return true if the step over finished.  */
  bool finish_step_over (lwp_info *lwp);

  /* When we finish a step-over, set threads running again.  If there's
     another thread that may need a step-over, now's the time to start
     it.  Eventually, we'll move all threads past their breakpoints.  */
  void proceed_all_lwps ();

  /* The reason we resume in the caller is that we want to be able
     to pass lwp->status_pending as WSTAT, and we need to clear
     status_pending_p before resuming; otherwise, resume_one_lwp
     refuses to resume.  */
  bool maybe_move_out_of_jump_pad (lwp_info *lwp, int *wstat);

  /* Move THREAD out of the jump pad.  */
  void move_out_of_jump_pad (thread_info *thread);

  /* Call low_arch_setup on THREAD.  */
  void arch_setup_thread (thread_info *thread);

#ifdef HAVE_LINUX_USRREGS
  /* Fetch one register.  */
  void fetch_register (const usrregs_info *usrregs, regcache *regcache,
		       int regno);

  /* Store one register.  */
  void store_register (const usrregs_info *usrregs, regcache *regcache,
		       int regno);
#endif

  /* Fetch all registers, or just one, from the child process.
     If REGNO is -1, do this for all registers, skipping any that are
     assumed to have been retrieved by regsets_fetch_inferior_registers,
     unless ALL is non-zero.
     Otherwise, REGNO specifies which register (so we can save time).  */
  void usr_fetch_inferior_registers (const regs_info *regs_info,
				     regcache *regcache, int regno, int all);

  /* Store our register values back into the inferior.
     If REGNO is -1, do this for all registers, skipping any that are
     assumed to have been saved by regsets_store_inferior_registers,
     unless ALL is non-zero.
     Otherwise, REGNO specifies which register (so we can save time).  */
  void usr_store_inferior_registers (const regs_info *regs_info,
				     regcache *regcache, int regno, int all);

  /* Return the PC as read from the regcache of LWP, without any
     adjustment.  */
  CORE_ADDR get_pc (lwp_info *lwp);

  /* Called when the LWP stopped for a signal/trap.  If it stopped for a
     trap, check what caused it (breakpoint, watchpoint, trace, etc.),
     and save the result in the LWP's stop_reason field.  If it stopped
     for a breakpoint, decrement the PC if necessary on the lwp's
     architecture.  Returns true if we now have the LWP's stop PC.  */
  bool save_stop_reason (lwp_info *lwp);

  /* Resume execution of LWP.  If STEP is nonzero, single-step it.  If
     SIGNAL is nonzero, give it that signal.  */
  void resume_one_lwp_throw (lwp_info *lwp, int step, int signal,
			     siginfo_t *info);

  /* Like resume_one_lwp_throw, but no error is thrown if the LWP
     disappears while we try to resume it.  */
  void resume_one_lwp (lwp_info *lwp, int step, int signal, siginfo_t *info);

  /* This function is called once per thread.  We check the thread's
     last resume request, which will tell us whether to resume, step, or
     leave the thread stopped.  Any signal the client requested to be
     delivered has already been enqueued at this point.

     If any thread that GDB wants running is stopped at an internal
     breakpoint that needs stepping over, we start a step-over operation
     on that particular thread, and leave all others stopped.  */
  void proceed_one_lwp (thread_info *thread, lwp_info *except);

  /* This function is called once per thread.  We check the thread's
     resume request, which will tell us whether to resume, step, or
     leave the thread stopped; and what signal, if any, it should be
     sent.

     For threads which we aren't explicitly told otherwise, we preserve
     the stepping flag; this is used for stepping over gdbserver-placed
     breakpoints.

     If pending_flags was set in any thread, we queue any needed
     signals, since we won't actually resume.  We already have a pending
     event to report, so we don't need to preserve any step requests;
     they should be re-issued if necessary.  */
  void resume_one_thread (thread_info *thread, bool leave_all_stopped);

  /* Return true if this lwp has an interesting status pending.  */
  bool status_pending_p_callback (thread_info *thread, ptid_t ptid);

  /* Resume LWPs that are currently stopped without any pending status
     to report, but are resumed from the core's perspective.  */
  void resume_stopped_resumed_lwps (thread_info *thread);

  /* Unsuspend THREAD, except EXCEPT, and proceed.  */
  void unsuspend_and_proceed_one_lwp (thread_info *thread, lwp_info *except);

  /* Return true if this lwp still has an interesting status pending.
     If not (e.g., it had stopped for a breakpoint that is gone), return
     false.  */
  bool thread_still_has_status_pending (thread_info *thread);

  /* Return true if this lwp is to-be-resumed and has an interesting
     status pending.  */
  bool resume_status_pending (thread_info *thread);

  /* Return true if this lwp that GDB wants running is stopped at an
     internal breakpoint that we need to step over.  It assumes that
     any required STOP_PC adjustment has already been propagated to
     the inferior's regcache.  */
  bool thread_needs_step_over (thread_info *thread);

  /* Single step via hardware or software single step.
     Return 1 if hardware single-stepping, 0 if software single-stepping
     or unable to single-step.  */
  int single_step (lwp_info* lwp);

  /* Return true if THREAD is doing hardware single step.  */
  bool maybe_hw_step (thread_info *thread);

  /* Install breakpoints for software single stepping.  */
  void install_software_single_step_breakpoints (lwp_info *lwp);

  /* Fetch the possibly triggered data watchpoint info and store it in
     CHILD.

     On some archs, like x86, that use debug registers to set
     watchpoints, it's possible that the way to know which watched
     address trapped is to check the register that is used to select
     which address to watch.  Problem is, between setting the watchpoint
     and reading back which data address trapped, the user may change
     the set of watchpoints, and, as a consequence, GDB changes the
     debug registers in the inferior.  To avoid reading back a stale
     stopped-data-address when that happens, we cache in LP the fact
     that a watchpoint trapped, and the corresponding data address, as
     soon as we see CHILD stop with a SIGTRAP.  If GDB changes the debug
     registers meanwhile, we have the cached data we can rely on.  */
  bool check_stopped_by_watchpoint (lwp_info *child);

  /* Convert a native/host siginfo object, into/from the siginfo in the
     layout of the inferior's architecture.  */
  void siginfo_fixup (siginfo_t *siginfo, gdb_byte *inf_siginfo,
		      int direction);

  /* Add a process to the common process list, and set its private
     data.  */
  process_info *add_linux_process (int pid, int attached);

  /* Same as add_linux_process, but don't open the /proc/PID/mem file
     yet.  */
  process_info *add_linux_process_no_mem_file (int pid, int attached);

  /* Free resources associated with PROC and remove it.  */
  void remove_linux_process (process_info *proc);

  /* Add a new thread.  */
  lwp_info *add_lwp (ptid_t ptid);

  /* Delete a thread.  */
  void delete_lwp (lwp_info *lwp);

public: /* Make this public because it's used from outside.  */
  /* Attach to an inferior process.  Returns 0 on success, ERRNO on
     error.  */
  int attach_lwp (ptid_t ptid);

private: /* Back to private.  */
  /* Detach from LWP.  */
  void detach_one_lwp (lwp_info *lwp);

  /* Detect zombie thread group leaders, and "exit" them.  We can't
     reap their exits until all other threads in the group have
     exited.  */
  void check_zombie_leaders ();

  /* Convenience function that is called when the kernel reports an exit
     event.  This decides whether to report the event to GDB as a
     process exit event, a thread exit event, or to suppress the
     event.  */
  ptid_t filter_exit_event (lwp_info *event_child,
			    target_waitstatus *ourstatus);

  /* Returns true if THREAD is stopped in a jump pad, and we can't
     move it out, because we need to report the stop event to GDB.  For
     example, if the user puts a breakpoint in the jump pad, it's
     because she wants to debug it.  */
  bool stuck_in_jump_pad (thread_info *thread);

  /* Convenience wrapper.  Returns information about LWP's fast tracepoint
     collection status.  */
  fast_tpoint_collect_result linux_fast_tracepoint_collecting
    (lwp_info *lwp, fast_tpoint_collect_status *status);

  /* This function should only be called if LWP got a SYSCALL_SIGTRAP.
     Fill *SYSNO with the syscall nr trapped.  */
  void get_syscall_trapinfo (lwp_info *lwp, int *sysno);

  /* Returns true if GDB is interested in the event_child syscall.
     Only to be called when the stopped reason is SYSCALL_SIGTRAP.  */
  bool gdb_catch_this_syscall (lwp_info *event_child);

protected:
  /* The architecture-specific "low" methods are listed below.  */

  /* Architecture-specific setup for the current thread.  */
  virtual void low_arch_setup () = 0;

  /* Return false if we can fetch/store the register, true if we cannot
     fetch/store the register.  */
  virtual bool low_cannot_fetch_register (int regno) = 0;

  virtual bool low_cannot_store_register (int regno) = 0;

  /* Hook to fetch a register in some non-standard way.  Used for
     example by backends that have read-only registers with hardcoded
     values (e.g., IA64's gr0/fr0/fr1).  Returns true if register
     REGNO was supplied, false if not, and we should fall back to the
     standard ptrace methods.  */
  virtual bool low_fetch_register (regcache *regcache, int regno);

  /* Return true if breakpoints are supported.  Such targets must
     implement the GET_PC and SET_PC methods.  */
  virtual bool low_supports_breakpoints ();

  virtual CORE_ADDR low_get_pc (regcache *regcache);

  virtual void low_set_pc (regcache *regcache, CORE_ADDR newpc);

  /* Find the next possible PCs after the current instruction executes.
     Targets that override this method should also override
     'supports_software_single_step' to return true.  */
  virtual std::vector<CORE_ADDR> low_get_next_pcs (regcache *regcache);

  /* Return true if there is a breakpoint at PC.  */
  virtual bool low_breakpoint_at (CORE_ADDR pc) = 0;

  /* Breakpoint and watchpoint related functions.  See target.h for
     comments.  */
  virtual int low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
				int size, raw_breakpoint *bp);

  virtual int low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
				int size, raw_breakpoint *bp);

  virtual bool low_stopped_by_watchpoint ();

  virtual CORE_ADDR low_stopped_data_address ();

  /* Hooks to reformat register data for PEEKUSR/POKEUSR (in particular
     for registers smaller than an xfer unit).  */
  virtual void low_collect_ptrace_register (regcache *regcache, int regno,
					    char *buf);

  virtual void low_supply_ptrace_register (regcache *regcache, int regno,
					   const char *buf);

  /* Hook to convert from target format to ptrace format and back.
     Returns true if any conversion was done; false otherwise.
     If DIRECTION is 1, then copy from INF to NATIVE.
     If DIRECTION is 0, copy from NATIVE to INF.  */
  virtual bool low_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
				  int direction);

  /* Hook to call when a new process is created or attached to.
     If extra per-process architecture-specific data is needed,
     allocate it here.  */
  virtual arch_process_info *low_new_process ();

  /* Hook to call when a process is being deleted.  If extra per-process
     architecture-specific data is needed, delete it here.  */
  virtual void low_delete_process (arch_process_info *info);

  /* Hook to call when a new thread is detected.
     If extra per-thread architecture-specific data is needed,
     allocate it here.  */
  virtual void low_new_thread (lwp_info *);

  /* Hook to call when a thread is being deleted.  If extra per-thread
     architecture-specific data is needed, delete it here.  */
  virtual void low_delete_thread (arch_lwp_info *);

  /* Hook to call, if any, when a new fork is attached.  */
  virtual void low_new_fork (process_info *parent, process_info *child);

  /* Hook to call prior to resuming a thread.  */
  virtual void low_prepare_to_resume (lwp_info *lwp);

  /* Fill ADDRP with the thread area address of LWPID.  Returns 0 on
     success, -1 on failure.  */
  virtual int low_get_thread_area (int lwpid, CORE_ADDR *addrp);

  /* Returns true if the low target supports range stepping.  */
  virtual bool low_supports_range_stepping ();

  /* Return true if the target supports catch syscall.  Such targets
     override the low_get_syscall_trapinfo method below.  */
  virtual bool low_supports_catch_syscall ();

  /* Fill *SYSNO with the syscall nr trapped.  Only to be called when
     the inferior is stopped due to SYSCALL_SIGTRAP.  */
  virtual void low_get_syscall_trapinfo (regcache *regcache, int *sysno);

  /* How many bytes the PC should be decremented after a break.  */
  virtual int low_decr_pc_after_break ();
};

extern linux_process_target *the_linux_target;
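
/* As a sketch of how an architecture backend plugs in (hypothetical
   names; the real backends live in the linux-*-low.cc files), each
   arch derives from linux_process_target, implements at least the
   pure virtual methods above, and points the_linux_target at a
   singleton instance:

     class arch_target : public linux_process_target
     {
     public:
       const regs_info *get_regs_info () override;
     protected:
       void low_arch_setup () override;
       bool low_cannot_fetch_register (int regno) override;
       bool low_cannot_store_register (int regno) override;
       bool low_breakpoint_at (CORE_ADDR pc) override;
     };

     static arch_target the_arch_target;

     linux_process_target *the_linux_target = &the_arch_target;
*/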

#define get_thread_lwp(thr) ((struct lwp_info *) (thread_target_data (thr)))
#define get_lwp_thread(lwp) ((lwp)->thread)

/* Information about a signal that is to be delivered to a thread.  */

struct pending_signal
{
  pending_signal (int signal)
    : signal {signal}
  {};

  int signal;
  siginfo_t info;
};

/* This struct is recorded in the target_data field of struct thread_info.

   On Linux ``all_threads'' is keyed by the LWP ID, which we use as the
   GDB protocol representation of the thread ID.  Threads also have
   a "process ID" (poorly named) which is (presently) the same as the
   LWP ID.

   There is also ``all_processes'', keyed by the "overall process ID",
   which GNU/Linux calls tgid, "thread group ID".  */

struct lwp_info
{
  /* If this LWP is a fork child that wasn't reported to GDB yet, return
     its parent, else nullptr.  */
  lwp_info *pending_parent () const
  {
    if (this->fork_relative == nullptr)
      return nullptr;

    gdb_assert (this->fork_relative->fork_relative == this);

    /* In a fork parent/child relationship, the parent has a status pending
       and the child does not, and a thread can be in at most one such
       relationship.  So we can recognize who is the parent based on which
       one has a pending status.  */
    gdb_assert (!!this->status_pending_p
		!= !!this->fork_relative->status_pending_p);

    if (!this->fork_relative->status_pending_p)
      return nullptr;

    const target_waitstatus &ws
      = this->fork_relative->waitstatus;
    gdb_assert (ws.kind () == TARGET_WAITKIND_FORKED
		|| ws.kind () == TARGET_WAITKIND_VFORKED);

    return this->fork_relative;
  }

  /* If this LWP is the parent of a fork child we haven't reported to GDB yet,
     return that child, else nullptr.  */
  lwp_info *pending_child () const
  {
    if (this->fork_relative == nullptr)
      return nullptr;

    gdb_assert (this->fork_relative->fork_relative == this);

    /* In a fork parent/child relationship, the parent has a status pending
       and the child does not, and a thread can be in at most one such
       relationship.  So we can recognize who is the parent based on which
       one has a pending status.  */
    gdb_assert (!!this->status_pending_p
		!= !!this->fork_relative->status_pending_p);

    if (!this->status_pending_p)
      return nullptr;

    const target_waitstatus &ws = this->waitstatus;
    gdb_assert (ws.kind () == TARGET_WAITKIND_FORKED
		|| ws.kind () == TARGET_WAITKIND_VFORKED);

    return this->fork_relative;
  }

  /* Backlink to the parent object.  */
  struct thread_info *thread = nullptr;

  /* If this flag is set, the next SIGSTOP will be ignored (the
     process will be immediately resumed).  This means that either we
     sent the SIGSTOP to it ourselves and got some other pending event
     (so the SIGSTOP is still pending), or that we stopped the
     inferior implicitly via PTRACE_ATTACH and have not waited for it
     yet.  */
  int stop_expected = 0;

  /* When this is true, we shall not try to resume this thread, even
     if last_resume_kind isn't resume_stop.  */
  int suspended = 0;

  /* If this flag is set, the lwp is known to be stopped right now (stop
     event already received in a wait()).  */
  int stopped = 0;

  /* Signals whether we are in a SYSCALL_ENTRY or
     in a SYSCALL_RETURN event.
     Values:
     - TARGET_WAITKIND_SYSCALL_ENTRY
     - TARGET_WAITKIND_SYSCALL_RETURN */
  enum target_waitkind syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;

  /* When stopped is set, the last wait status recorded for this lwp.  */
  int last_status = 0;

  /* If WAITSTATUS->KIND != TARGET_WAITKIND_IGNORE, the waitstatus for
     this LWP's last event, to pass to GDB without any further
     processing.  This is used to store extended ptrace event
     information or exit status until it can be reported to GDB.  */
  struct target_waitstatus waitstatus;

  /* A pointer to the fork child/parent relative.  Valid only while
     the parent fork event is not reported to higher layers.  Used to
     avoid wildcard vCont actions resuming a fork child before GDB is
     notified about the parent's fork event.  */
  struct lwp_info *fork_relative = nullptr;

  /* When stopped is set, this is where the lwp last stopped, with
     decr_pc_after_break already accounted for.
     If the LWP is running, this is the address at which the lwp was
     resumed.  */
  CORE_ADDR stop_pc = 0;

  /* If this flag is set, STATUS_PENDING is a waitstatus that has not yet
     been reported.  */
  int status_pending_p = 0;
  int status_pending = 0;

  /* The reason the LWP last stopped, if we need to track it
     (breakpoint, watchpoint, etc.)  */
  enum target_stop_reason stop_reason = TARGET_STOPPED_BY_NO_REASON;

  /* On architectures where it is possible to know the data address of
     a triggered watchpoint, STOPPED_DATA_ADDRESS is non-zero, and
     contains that data address.  Only valid if STOPPED_BY_WATCHPOINT
     is true.  */
  CORE_ADDR stopped_data_address = 0;

  /* If this is non-zero, it is a breakpoint to be reinserted at our next
     stop (SIGTRAP stops only).  */
  CORE_ADDR bp_reinsert = 0;

  /* If this flag is set, the last continue operation at the ptrace
     level on this process was a single-step.  */
  int stepping = 0;

  /* Range to single step within.  This is a copy of the step range
     passed along the last resume request.  See 'struct
     thread_resume'.  */
  CORE_ADDR step_range_start = 0;  /* Inclusive */
  CORE_ADDR step_range_end = 0;    /* Exclusive */

  /* If this flag is set, we need to set the event request flags the
     next time we see this LWP stop.  */
  int must_set_ptrace_flags = 0;

  /* A chain of signals that need to be delivered to this process.  */
  std::list<pending_signal> pending_signals;

  /* A link used when resuming.  It is initialized from the resume request,
     and then processed and cleared in linux_resume_one_lwp.  */
  struct thread_resume *resume = nullptr;

  /* Information about this lwp's fast tracepoint collection status (is it
     currently stopped in the jump pad, and if so, before or at/after the
     relocated instruction).  Normally, we won't care about this, but we will
     if a signal arrives to this lwp while it is collecting.  */
  fast_tpoint_collect_result collecting_fast_tracepoint
    = fast_tpoint_collect_result::not_collecting;

  /* A chain of signals that need to be reported to GDB.  These were
     deferred because the thread was doing a fast tracepoint collect
     when they arrived.  */
  std::list<pending_signal> pending_signals_to_report;

  /* When collecting_fast_tracepoint is first found to be 1, we insert
     an exit-jump-pad-quickly breakpoint.  This is it.  */
  struct breakpoint *exit_jump_pad_bkpt = nullptr;

#ifdef USE_THREAD_DB
  int thread_known = 0;
  /* The thread handle, used for e.g. TLS access.  Only valid if
     THREAD_KNOWN is set.  */
  td_thrhandle_t th {};

  /* The pthread_t handle.  */
  thread_t thread_handle {};
#endif

  /* Arch-specific additions.  */
  struct arch_lwp_info *arch_private = nullptr;
};

int linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine);

/* Attach to PTID.  Returns 0 on success, non-zero otherwise (an
   errno).  */
int linux_attach_lwp (ptid_t ptid);

struct lwp_info *find_lwp_pid (ptid_t ptid);
/* For linux_stop_lwp see nat/linux-nat.h.  */

#ifdef HAVE_LINUX_REGSETS
void initialize_regsets_info (struct regsets_info *regsets_info);
#endif

void initialize_low_arch (void);
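
/* For illustration (a hypothetical backend; real versions live in the
   linux-*-low.cc files), an architecture's initialize_low_arch
   typically registers its target descriptions and, when regsets are
   used, hands its regsets_info to initialize_regsets_info:

     void
     initialize_low_arch (void)
     {
       init_registers_arch ();

     #ifdef HAVE_LINUX_REGSETS
       initialize_regsets_info (&arch_regsets_info);
     #endif
     }
*/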

void linux_set_pc_32bit (struct regcache *regcache, CORE_ADDR pc);
CORE_ADDR linux_get_pc_32bit (struct regcache *regcache);

void linux_set_pc_64bit (struct regcache *regcache, CORE_ADDR pc);
CORE_ADDR linux_get_pc_64bit (struct regcache *regcache);

/* From thread-db.c  */
int thread_db_init (void);
void thread_db_detach (struct process_info *);
void thread_db_mourn (struct process_info *);
int thread_db_handle_monitor_command (char *);
int thread_db_get_tls_address (struct thread_info *thread, CORE_ADDR offset,
			       CORE_ADDR load_module, CORE_ADDR *address);
int thread_db_look_up_one_symbol (const char *name, CORE_ADDR *addrp);

/* Called from linux-low.c when a clone event is detected.  Upon entry,
   both the clone and the parent should be stopped.  This function does
   whatever is required to have the clone under thread_db's control.  */

void thread_db_notice_clone (struct thread_info *parent_thr, ptid_t child_ptid);

bool thread_db_thread_handle (ptid_t ptid, gdb_byte **handle, int *handle_len);

extern int have_ptrace_getregset;

/* Search for the value with type MATCH in the auxv vector with
   entries of length WORDSIZE bytes.  If found, store the value in
   *VALP and return 1.  If not found or if there is an error, return
   0.  */

int linux_get_auxv (int wordsize, CORE_ADDR match,
		    CORE_ADDR *valp);

/* Fetch the AT_HWCAP entry from the auxv vector, where entries are length
   WORDSIZE.  If no entry was found, return zero.  */

CORE_ADDR linux_get_hwcap (int wordsize);

/* Fetch the AT_HWCAP2 entry from the auxv vector, where entries are length
   WORDSIZE.  If no entry was found, return zero.  */

CORE_ADDR linux_get_hwcap2 (int wordsize);

#endif /* GDBSERVER_LINUX_LOW_H */