/* BSD user-level threads support.

   Copyright (C) 2005-2019 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "gdbcore.h"
#include "gdbthread.h"
#include "inferior.h"
#include "objfiles.h"
#include "observable.h"
#include "regcache.h"
#include "solib.h"
#include "solist.h"
#include "symfile.h"
#include "target.h"

#include "gdb_obstack.h"

#include "bsd-uthread.h"

static const target_info bsd_uthread_target_info = {
  "bsd-uthreads",
  N_("BSD user-level threads"),
  N_("BSD user-level threads")
};

struct bsd_uthread_target final : public target_ops
{
  const target_info &info () const override
  { return bsd_uthread_target_info; }

  strata stratum () const override { return thread_stratum; }

  void close () override;

  void mourn_inferior () override;

  void fetch_registers (struct regcache *, int) override;
  void store_registers (struct regcache *, int) override;

  ptid_t wait (ptid_t, struct target_waitstatus *, int) override;
  void resume (ptid_t, int, enum gdb_signal) override;

  bool thread_alive (ptid_t ptid) override;

  void update_thread_list () override;

  const char *extra_thread_info (struct thread_info *) override;

  const char *pid_to_str (ptid_t) override;
};

static bsd_uthread_target bsd_uthread_ops;


/* Architecture-specific operations.  */

/* Per-architecture data key.  */
static struct gdbarch_data *bsd_uthread_data;

struct bsd_uthread_ops
{
  /* Supply registers for an inactive thread to a register cache.  */
  void (*supply_uthread)(struct regcache *, int, CORE_ADDR);

  /* Collect registers for an inactive thread from a register cache.  */
  void (*collect_uthread)(const struct regcache *, int, CORE_ADDR);
};

static void *
bsd_uthread_init (struct obstack *obstack)
{
  struct bsd_uthread_ops *ops;

  ops = OBSTACK_ZALLOC (obstack, struct bsd_uthread_ops);
  return ops;
}

/* Set the function that supplies registers from an inactive thread
   for architecture GDBARCH to SUPPLY_UTHREAD.  */

void
bsd_uthread_set_supply_uthread (struct gdbarch *gdbarch,
				void (*supply_uthread) (struct regcache *,
							int, CORE_ADDR))
{
  struct bsd_uthread_ops *ops
    = (struct bsd_uthread_ops *) gdbarch_data (gdbarch, bsd_uthread_data);

  ops->supply_uthread = supply_uthread;
}

/* Set the function that collects registers for an inactive thread for
   architecture GDBARCH to COLLECT_UTHREAD.  */

void
bsd_uthread_set_collect_uthread (struct gdbarch *gdbarch,
				 void (*collect_uthread) (const struct regcache *,
							  int, CORE_ADDR))
{
  struct bsd_uthread_ops *ops
    = (struct bsd_uthread_ops *) gdbarch_data (gdbarch, bsd_uthread_data);

  ops->collect_uthread = collect_uthread;
}
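
/* An architecture that wants to use this module registers its
   register-conversion callbacks from its gdbarch initialization code.
   A minimal sketch, using a hypothetical "foo" architecture (the names
   and bodies below are illustrative only, not part of this file):

     static void
     foo_supply_uthread (struct regcache *regcache, int regnum,
                         CORE_ADDR ctx_addr)
     {
       // Read the saved thread context at CTX_ADDR from inferior
       // memory and supply the requested register(s) to REGCACHE.
     }

     static void
     foo_collect_uthread (const struct regcache *regcache, int regnum,
                          CORE_ADDR ctx_addr)
     {
       // Inverse operation: write register(s) from REGCACHE back into
       // the saved context at CTX_ADDR.
     }

     static void
     foo_init_abi (struct gdbarch_info info, struct gdbarch *gdbarch)
     {
       bsd_uthread_set_supply_uthread (gdbarch, foo_supply_uthread);
       bsd_uthread_set_collect_uthread (gdbarch, foo_collect_uthread);
     }  */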

/* Magic number to help recognize a valid thread structure.  */
#define BSD_UTHREAD_PTHREAD_MAGIC 0xd09ba115

/* Check whether the thread structure at ADDR is valid.  */

static void
bsd_uthread_check_magic (CORE_ADDR addr)
{
  enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch ());
  ULONGEST magic = read_memory_unsigned_integer (addr, 4, byte_order);

  if (magic != BSD_UTHREAD_PTHREAD_MAGIC)
    error (_("Bad magic"));
}

/* Thread states.  */
#define BSD_UTHREAD_PS_RUNNING 0
#define BSD_UTHREAD_PS_DEAD 18

/* Address of the pointer to the thread structure for the running
   thread.  */
static CORE_ADDR bsd_uthread_thread_run_addr;

/* Address of the list of all threads.  */
static CORE_ADDR bsd_uthread_thread_list_addr;

/* Offsets of various "interesting" bits in the thread structure.  */
static int bsd_uthread_thread_state_offset = -1;
static int bsd_uthread_thread_next_offset = -1;
static int bsd_uthread_thread_ctx_offset;

/* Name of shared threads library.  */
static const char *bsd_uthread_solib_name;

/* Non-zero if the thread stratum implemented by this module is active.  */
static int bsd_uthread_active;

static CORE_ADDR
bsd_uthread_lookup_address (const char *name, struct objfile *objfile)
{
  struct bound_minimal_symbol sym;

  sym = lookup_minimal_symbol (name, NULL, objfile);
  if (sym.minsym)
    return BMSYMBOL_VALUE_ADDRESS (sym);

  return 0;
}
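
/* The layout of the thread structure is private to the threads
   library.  Instead of hard-coding offsets, this module expects the
   library to export the offsets of the interesting members as 32-bit
   integer variables; bsd_uthread_lookup_offset below reads such a
   variable from inferior memory.  Conceptually the library provides
   something like the following (a sketch of the assumed mechanism,
   not code in GDB, and the member names are only illustrative):

     int _thread_state_offset = offsetof (struct pthread, state);
     int _thread_next_offset = offsetof (struct pthread, nxt);
     int _thread_ctx_offset = offsetof (struct pthread, ctx);

   The 32-bit magic number checked by bsd_uthread_check_magic is
   assumed to live at offset zero of each thread structure.  */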

static int
bsd_uthread_lookup_offset (const char *name, struct objfile *objfile)
{
  enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch ());
  CORE_ADDR addr;

  addr = bsd_uthread_lookup_address (name, objfile);
  if (addr == 0)
    return 0;

  return read_memory_unsigned_integer (addr, 4, byte_order);
}

static CORE_ADDR
bsd_uthread_read_memory_address (CORE_ADDR addr)
{
  struct type *ptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr;
  return read_memory_typed_address (addr, ptr_type);
}

/* If OBJFILE contains the symbols corresponding to one of the
   supported user-level threads libraries, activate the thread stratum
   implemented by this module.  */

static int
bsd_uthread_activate (struct objfile *objfile)
{
  struct gdbarch *gdbarch = target_gdbarch ();
  struct bsd_uthread_ops *ops
    = (struct bsd_uthread_ops *) gdbarch_data (gdbarch, bsd_uthread_data);

  /* Skip if the thread stratum has already been activated.  */
  if (bsd_uthread_active)
    return 0;

  /* There's no point in enabling this module if no
     architecture-specific operations are provided.  */
  if (!ops->supply_uthread)
    return 0;

  bsd_uthread_thread_run_addr =
    bsd_uthread_lookup_address ("_thread_run", objfile);
  if (bsd_uthread_thread_run_addr == 0)
    return 0;

  bsd_uthread_thread_list_addr =
    bsd_uthread_lookup_address ("_thread_list", objfile);
  if (bsd_uthread_thread_list_addr == 0)
    return 0;

  bsd_uthread_thread_state_offset =
    bsd_uthread_lookup_offset ("_thread_state_offset", objfile);
  if (bsd_uthread_thread_state_offset == 0)
    return 0;

  bsd_uthread_thread_next_offset =
    bsd_uthread_lookup_offset ("_thread_next_offset", objfile);
  if (bsd_uthread_thread_next_offset == 0)
    return 0;

  bsd_uthread_thread_ctx_offset =
    bsd_uthread_lookup_offset ("_thread_ctx_offset", objfile);

  push_target (&bsd_uthread_ops);
  bsd_uthread_active = 1;
  return 1;
}

/* Cleanup due to deactivation.  */

void
bsd_uthread_target::close ()
{
  bsd_uthread_active = 0;
  bsd_uthread_thread_run_addr = 0;
  bsd_uthread_thread_list_addr = 0;
  bsd_uthread_thread_state_offset = 0;
  bsd_uthread_thread_next_offset = 0;
  bsd_uthread_thread_ctx_offset = 0;
  bsd_uthread_solib_name = NULL;
}

/* Deactivate the thread stratum implemented by this module.  */

static void
bsd_uthread_deactivate (void)
{
  /* Skip if the thread stratum has already been deactivated.  */
  if (!bsd_uthread_active)
    return;

  unpush_target (&bsd_uthread_ops);
}

static void
bsd_uthread_inferior_created (struct target_ops *ops, int from_tty)
{
  bsd_uthread_activate (NULL);
}

/* Likely candidates for the threads library.  */
static const char *bsd_uthread_solib_names[] =
{
  "/usr/lib/libc_r.so",		/* FreeBSD */
  "/usr/lib/libpthread.so",	/* OpenBSD */
  NULL
};

static void
bsd_uthread_solib_loaded (struct so_list *so)
{
  const char **names = bsd_uthread_solib_names;

  for (names = bsd_uthread_solib_names; *names; names++)
    {
      if (startswith (so->so_original_name, *names))
	{
	  solib_read_symbols (so, 0);

	  if (bsd_uthread_activate (so->objfile))
	    {
	      bsd_uthread_solib_name = so->so_original_name;
	      return;
	    }
	}
    }
}

static void
bsd_uthread_solib_unloaded (struct so_list *so)
{
  if (!bsd_uthread_solib_name)
    return;

  if (strcmp (so->so_original_name, bsd_uthread_solib_name) == 0)
    bsd_uthread_deactivate ();
}

void
bsd_uthread_target::mourn_inferior ()
{
  beneath ()->mourn_inferior ();
  bsd_uthread_deactivate ();
}
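
/* In the ptids handed out by this module, the "tid" field holds the
   inferior address of that thread's thread structure (see
   bsd_uthread_target::wait and update_thread_list below); a tid of
   zero means the ptid has not been augmented with thread information
   yet.  The register access methods below rely on this encoding.  */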

void
bsd_uthread_target::fetch_registers (struct regcache *regcache, int regnum)
{
  struct gdbarch *gdbarch = regcache->arch ();
  struct bsd_uthread_ops *uthread_ops
    = (struct bsd_uthread_ops *) gdbarch_data (gdbarch, bsd_uthread_data);
  ptid_t ptid = regcache->ptid ();
  CORE_ADDR addr = ptid.tid ();
  CORE_ADDR active_addr;
  scoped_restore save_inferior_ptid = make_scoped_restore (&inferior_ptid);

  /* We are doing operations (e.g. reading memory) that rely on
     inferior_ptid.  */
  inferior_ptid = ptid;

  /* Always fetch the appropriate registers from the layer beneath.  */
  beneath ()->fetch_registers (regcache, regnum);

  /* FIXME: That might have gotten us more than we asked for.  Make
     sure we overwrite all relevant registers with values from the
     thread structure.  This can go once we fix the underlying target.  */
  regnum = -1;

  active_addr = bsd_uthread_read_memory_address (bsd_uthread_thread_run_addr);
  if (addr != 0 && addr != active_addr)
    {
      bsd_uthread_check_magic (addr);
      uthread_ops->supply_uthread (regcache, regnum,
				   addr + bsd_uthread_thread_ctx_offset);
    }
}

void
bsd_uthread_target::store_registers (struct regcache *regcache, int regnum)
{
  struct gdbarch *gdbarch = regcache->arch ();
  struct bsd_uthread_ops *uthread_ops
    = (struct bsd_uthread_ops *) gdbarch_data (gdbarch, bsd_uthread_data);
  ptid_t ptid = regcache->ptid ();
  CORE_ADDR addr = ptid.tid ();
  CORE_ADDR active_addr;
  scoped_restore save_inferior_ptid = make_scoped_restore (&inferior_ptid);

  /* We are doing operations (e.g. reading memory) that rely on
     inferior_ptid.  */
  inferior_ptid = ptid;

  active_addr = bsd_uthread_read_memory_address (bsd_uthread_thread_run_addr);
  if (addr != 0 && addr != active_addr)
    {
      bsd_uthread_check_magic (addr);
      uthread_ops->collect_uthread (regcache, regnum,
				    addr + bsd_uthread_thread_ctx_offset);
    }
  else
    {
      /* Updating the thread that is currently running; pass the
	 request to the layer beneath.  */
      beneath ()->store_registers (regcache, regnum);
    }
}

ptid_t
bsd_uthread_target::wait (ptid_t ptid, struct target_waitstatus *status,
			  int options)
{
  enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch ());
  CORE_ADDR addr;

  /* Pass the request to the layer beneath.  */
  ptid = beneath ()->wait (ptid, status, options);

  /* If the process is no longer alive, there's no point in figuring
     out the thread ID.  It will fail anyway.  */
  if (status->kind == TARGET_WAITKIND_SIGNALLED
      || status->kind == TARGET_WAITKIND_EXITED)
    return ptid;

  /* Fetch the corresponding thread ID, and augment the returned
     process ID with it.  */
  addr = bsd_uthread_read_memory_address (bsd_uthread_thread_run_addr);
  if (addr != 0)
    {
      gdb_byte buf[4];

      /* FIXME: For executables linked statically with the threads
	 library, we end up here before the program has actually been
	 executed.  In that case ADDR will be garbage since it has
	 been read from the wrong virtual memory image.  */
      if (target_read_memory (addr, buf, 4) == 0)
	{
	  ULONGEST magic = extract_unsigned_integer (buf, 4, byte_order);
	  if (magic == BSD_UTHREAD_PTHREAD_MAGIC)
	    ptid = ptid_t (ptid.pid (), 0, addr);
	}
    }

  /* If INFERIOR_PTID doesn't have a tid member yet, and we now have a
     ptid with tid set, then ptid is still the initial thread of
     the process.  Notify GDB core about it.  */
  if (inferior_ptid.tid () == 0
      && ptid.tid () != 0 && !in_thread_list (ptid))
    thread_change_ptid (inferior_ptid, ptid);

  /* Don't let the core see a ptid without a corresponding thread.  */
  thread_info *thread = find_thread_ptid (ptid);
  if (thread == NULL || thread->state == THREAD_EXITED)
    add_thread (ptid);

  return ptid;
}

void
bsd_uthread_target::resume (ptid_t ptid, int step, enum gdb_signal sig)
{
  /* Pass the request to the layer beneath.  */
  beneath ()->resume (ptid, step, sig);
}

bool
bsd_uthread_target::thread_alive (ptid_t ptid)
{
  enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch ());
  CORE_ADDR addr = ptid.tid ();

  if (addr != 0)
    {
      int offset = bsd_uthread_thread_state_offset;
      ULONGEST state;

      bsd_uthread_check_magic (addr);

      state = read_memory_unsigned_integer (addr + offset, 4, byte_order);
      if (state == BSD_UTHREAD_PS_DEAD)
	return false;
    }

  return beneath ()->thread_alive (ptid);
}

void
bsd_uthread_target::update_thread_list ()
{
  pid_t pid = inferior_ptid.pid ();
  int offset = bsd_uthread_thread_next_offset;
  CORE_ADDR addr;

  prune_threads ();

  addr = bsd_uthread_read_memory_address (bsd_uthread_thread_list_addr);
  while (addr != 0)
    {
      ptid_t ptid = ptid_t (pid, 0, addr);

      thread_info *thread = find_thread_ptid (ptid);
      if (thread == nullptr || thread->state == THREAD_EXITED)
	{
	  /* If INFERIOR_PTID doesn't have a tid member yet, then ptid
	     is still the initial thread of the process.  Notify GDB
	     core about it.  */
	  if (inferior_ptid.tid () == 0)
	    thread_change_ptid (inferior_ptid, ptid);
	  else
	    add_thread (ptid);
	}

      addr = bsd_uthread_read_memory_address (addr + offset);
    }
}

/* Possible states a thread can be in.  */
static const char *bsd_uthread_state[] =
{
  "RUNNING",
  "SIGTHREAD",
  "MUTEX_WAIT",
  "COND_WAIT",
  "FDLR_WAIT",
  "FDLW_WAIT",
  "FDR_WAIT",
  "FDW_WAIT",
  "FILE_WAIT",
  "POLL_WAIT",
  "SELECT_WAIT",
  "SLEEP_WAIT",
  "WAIT_WAIT",
  "SIGSUSPEND",
  "SIGWAIT",
  "SPINBLOCK",
  "JOIN",
  "SUSPENDED",
  "DEAD",
  "DEADLOCK"
};

/* Return a string describing the state of the thread specified by
   INFO.  */

const char *
bsd_uthread_target::extra_thread_info (thread_info *info)
{
  enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch ());
  CORE_ADDR addr = info->ptid.tid ();

  if (addr != 0)
    {
      int offset = bsd_uthread_thread_state_offset;
      ULONGEST state;

      state = read_memory_unsigned_integer (addr + offset, 4, byte_order);
      if (state < ARRAY_SIZE (bsd_uthread_state))
	return bsd_uthread_state[state];
    }

  return NULL;
}

const char *
bsd_uthread_target::pid_to_str (ptid_t ptid)
{
  if (ptid.tid () != 0)
    {
      static char buf[64];

      xsnprintf (buf, sizeof buf, "process %d, thread 0x%lx",
		 ptid.pid (), ptid.tid ());
      return buf;
    }

  return normal_pid_to_str (ptid);
}

void
_initialize_bsd_uthread (void)
{
  bsd_uthread_data = gdbarch_data_register_pre_init (bsd_uthread_init);

  gdb::observers::inferior_created.attach (bsd_uthread_inferior_created);
  gdb::observers::solib_loaded.attach (bsd_uthread_solib_loaded);
  gdb::observers::solib_unloaded.attach (bsd_uthread_solib_unloaded);
}