1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
24 * Use is subject to license terms.
25 */
26
27 #include <stdio.h>
28 #include <stdlib.h>
29 #include <stddef.h>
30 #include <unistd.h>
31 #include <thr_uberdata.h>
32 #include <thread_db.h>
33 #include <libc_int.h>
34
35 /*
36 * Private structures.
37 */
38
39 typedef union {
40 mutex_t lock;
41 rwlock_t rwlock;
42 sema_t semaphore;
43 cond_t condition;
44 } td_so_un_t;
45
46 struct td_thragent {
47 rwlock_t rwlock;
48 struct ps_prochandle *ph_p;
49 int initialized;
50 int sync_tracking;
51 int model;
52 int primary_map;
53 psaddr_t bootstrap_addr;
54 psaddr_t uberdata_addr;
55 psaddr_t tdb_eventmask_addr;
56 psaddr_t tdb_register_sync_addr;
57 psaddr_t tdb_events[TD_MAX_EVENT_NUM - TD_MIN_EVENT_NUM + 1];
58 psaddr_t hash_table_addr;
59 int hash_size;
60 lwpid_t single_lwpid;
61 psaddr_t single_ulwp_addr;
62 };
63
64 /*
65 * This is the name of the variable in libc that contains
66 * the uberdata address that we will need.
67 */
68 #define TD_BOOTSTRAP_NAME "_tdb_bootstrap"
69 /*
70 * This is the actual name of uberdata, used in the event
71 * that tdb_bootstrap has not yet been initialized.
72 */
73 #define TD_UBERDATA_NAME "_uberdata"
74 /*
75 * The library name should end with ".so.1", but older versions of
76 * dbx expect the unadorned name and malfunction if ".1" is specified.
77 * Unfortunately, if ".1" is not specified, mdb malfunctions when it
78 * is applied to another instance of itself (due to the presence of
79 * /usr/lib/mdb/proc/libc.so). So we try it both ways.
80 */
81 #define TD_LIBRARY_NAME "libc.so"
82 #define TD_LIBRARY_NAME_1 "libc.so.1"
83
84 td_err_e __td_thr_get_info(td_thrhandle_t *th_p, td_thrinfo_t *ti_p);
85
86 td_err_e __td_ta_thr_iter(td_thragent_t *ta_p, td_thr_iter_f *cb,
87 void *cbdata_p, td_thr_state_e state, int ti_pri,
88 sigset_t *ti_sigmask_p, unsigned ti_user_flags);
89
90 /*
91 * Initialize threads debugging interface.
92 */
93 #pragma weak td_init = __td_init
94 td_err_e
95 __td_init()
96 {
97 return (TD_OK);
98 }
99
100 /*
101 * This function does nothing, and never did.
102 * But the symbol is in the ABI, so we can't delete it.
103 */
104 #pragma weak td_log = __td_log
105 void
106 __td_log()
107 {
108 }
109
110 /*
111 * Short-cut to read just the hash table size from the process,
112 * to avoid repeatedly reading the full uberdata structure when
113 * dealing with a single-threaded process.
114 */
115 static uint_t
116 td_read_hash_size(td_thragent_t *ta_p)
117 {
118 psaddr_t addr;
119 uint_t hash_size;
120
121 switch (ta_p->initialized) {
122 default: /* uninitialized */
123 return (0);
124 case 1: /* partially initialized */
125 break;
126 case 2: /* fully initialized */
127 return (ta_p->hash_size);
128 }
129
130 if (ta_p->model == PR_MODEL_NATIVE) {
131 addr = ta_p->uberdata_addr + offsetof(uberdata_t, hash_size);
132 } else {
133 #if defined(_LP64) && defined(_SYSCALL32)
134 addr = ta_p->uberdata_addr + offsetof(uberdata32_t, hash_size);
135 #else
136 addr = 0;
137 #endif
138 }
139 if (ps_pdread(ta_p->ph_p, addr, &hash_size, sizeof (hash_size))
140 != PS_OK)
141 return (0);
142 return (hash_size);
143 }
144
145 static td_err_e
146 td_read_uberdata(td_thragent_t *ta_p)
147 {
148 struct ps_prochandle *ph_p = ta_p->ph_p;
149
150 if (ta_p->model == PR_MODEL_NATIVE) {
151 uberdata_t uberdata;
152
153 if (ps_pdread(ph_p, ta_p->uberdata_addr,
154 &uberdata, sizeof (uberdata)) != PS_OK)
155 return (TD_DBERR);
156 ta_p->primary_map = uberdata.primary_map;
157 ta_p->tdb_eventmask_addr = ta_p->uberdata_addr +
158 offsetof(uberdata_t, tdb.tdb_ev_global_mask);
159 ta_p->tdb_register_sync_addr = ta_p->uberdata_addr +
160 offsetof(uberdata_t, uberflags.uf_tdb_register_sync);
161 ta_p->hash_table_addr = (psaddr_t)uberdata.thr_hash_table;
162 ta_p->hash_size = uberdata.hash_size;
163 if (ps_pdread(ph_p, (psaddr_t)uberdata.tdb.tdb_events,
164 ta_p->tdb_events, sizeof (ta_p->tdb_events)) != PS_OK)
165 return (TD_DBERR);
166
167 } else {
168 #if defined(_LP64) && defined(_SYSCALL32)
169 uberdata32_t uberdata;
170 caddr32_t tdb_events[TD_MAX_EVENT_NUM - TD_MIN_EVENT_NUM + 1];
171 int i;
172
173 if (ps_pdread(ph_p, ta_p->uberdata_addr,
174 &uberdata, sizeof (uberdata)) != PS_OK)
175 return (TD_DBERR);
176 ta_p->primary_map = uberdata.primary_map;
177 ta_p->tdb_eventmask_addr = ta_p->uberdata_addr +
178 offsetof(uberdata32_t, tdb.tdb_ev_global_mask);
179 ta_p->tdb_register_sync_addr = ta_p->uberdata_addr +
180 offsetof(uberdata32_t, uberflags.uf_tdb_register_sync);
181 ta_p->hash_table_addr = (psaddr_t)uberdata.thr_hash_table;
182 ta_p->hash_size = uberdata.hash_size;
183 if (ps_pdread(ph_p, (psaddr_t)uberdata.tdb.tdb_events,
184 tdb_events, sizeof (tdb_events)) != PS_OK)
185 return (TD_DBERR);
186 for (i = 0; i < TD_MAX_EVENT_NUM - TD_MIN_EVENT_NUM + 1; i++)
187 ta_p->tdb_events[i] = tdb_events[i];
188 #else
189 return (TD_DBERR);
190 #endif
191 }
192 if (ta_p->hash_size != 1) { /* multi-threaded */
193 ta_p->initialized = 2;
194 ta_p->single_lwpid = 0;
195 ta_p->single_ulwp_addr = NULL;
196 } else { /* single-threaded */
197 ta_p->initialized = 1;
198 /*
199 * Get the address and lwpid of the single thread/LWP.
200 * It may not be ulwp_one if this is a child of fork1().
201 */
202 if (ta_p->model == PR_MODEL_NATIVE) {
203 thr_hash_table_t head;
204 lwpid_t lwpid = 0;
205
206 if (ps_pdread(ph_p, ta_p->hash_table_addr,
207 &head, sizeof (head)) != PS_OK)
208 return (TD_DBERR);
209 if ((psaddr_t)head.hash_bucket == NULL)
210 ta_p->initialized = 0;
211 else if (ps_pdread(ph_p, (psaddr_t)head.hash_bucket +
212 offsetof(ulwp_t, ul_lwpid),
213 &lwpid, sizeof (lwpid)) != PS_OK)
214 return (TD_DBERR);
215 ta_p->single_lwpid = lwpid;
216 ta_p->single_ulwp_addr = (psaddr_t)head.hash_bucket;
217 } else {
218 #if defined(_LP64) && defined(_SYSCALL32)
219 thr_hash_table32_t head;
220 lwpid_t lwpid = 0;
221
222 if (ps_pdread(ph_p, ta_p->hash_table_addr,
223 &head, sizeof (head)) != PS_OK)
224 return (TD_DBERR);
225 if ((psaddr_t)head.hash_bucket == NULL)
226 ta_p->initialized = 0;
227 else if (ps_pdread(ph_p, (psaddr_t)head.hash_bucket +
228 offsetof(ulwp32_t, ul_lwpid),
229 &lwpid, sizeof (lwpid)) != PS_OK)
230 return (TD_DBERR);
231 ta_p->single_lwpid = lwpid;
232 ta_p->single_ulwp_addr = (psaddr_t)head.hash_bucket;
233 #else
234 return (TD_DBERR);
235 #endif
236 }
237 }
238 if (!ta_p->primary_map)
239 ta_p->initialized = 0;
240 return (TD_OK);
241 }
242
243 static td_err_e
244 td_read_bootstrap_data(td_thragent_t *ta_p)
245 {
246 struct ps_prochandle *ph_p = ta_p->ph_p;
247 psaddr_t bootstrap_addr;
248 psaddr_t uberdata_addr;
249 ps_err_e db_return;
250 td_err_e return_val;
251 int do_1;
252
253 switch (ta_p->initialized) {
254 case 2: /* fully initialized */
255 return (TD_OK);
256 case 1: /* partially initialized */
257 if (td_read_hash_size(ta_p) == 1)
258 return (TD_OK);
259 return (td_read_uberdata(ta_p));
260 }
261
262 /*
263 * Uninitialized -- do the startup work.
264 * We set ta_p->initialized to -1 to cut off recursive calls
265 * into libc_db by code in the provider of ps_pglobal_lookup().
266 */
267 do_1 = 0;
268 ta_p->initialized = -1;
269 db_return = ps_pglobal_lookup(ph_p, TD_LIBRARY_NAME,
270 TD_BOOTSTRAP_NAME, &bootstrap_addr);
271 if (db_return == PS_NOSYM) {
272 do_1 = 1;
273 db_return = ps_pglobal_lookup(ph_p, TD_LIBRARY_NAME_1,
274 TD_BOOTSTRAP_NAME, &bootstrap_addr);
275 }
276 if (db_return == PS_NOSYM) /* libc is not linked yet */
277 return (TD_NOLIBTHREAD);
278 if (db_return != PS_OK)
279 return (TD_ERR);
280 db_return = ps_pglobal_lookup(ph_p,
281 do_1? TD_LIBRARY_NAME_1 : TD_LIBRARY_NAME,
282 TD_UBERDATA_NAME, &uberdata_addr);
283 if (db_return == PS_NOSYM) /* libc is not linked yet */
284 return (TD_NOLIBTHREAD);
285 if (db_return != PS_OK)
286 return (TD_ERR);
287
288 /*
289 * Read the uberdata address into the thread agent structure.
290 */
291 if (ta_p->model == PR_MODEL_NATIVE) {
292 psaddr_t psaddr;
293 if (ps_pdread(ph_p, bootstrap_addr,
294 &psaddr, sizeof (psaddr)) != PS_OK)
295 return (TD_DBERR);
296 if ((ta_p->bootstrap_addr = psaddr) == NULL)
297 psaddr = uberdata_addr;
298 else if (ps_pdread(ph_p, psaddr,
299 &psaddr, sizeof (psaddr)) != PS_OK)
300 return (TD_DBERR);
301 if (psaddr == NULL) {
302 /* primary linkmap in the tgt is not initialized */
303 ta_p->bootstrap_addr = NULL;
304 psaddr = uberdata_addr;
305 }
306 ta_p->uberdata_addr = psaddr;
307 } else {
308 #if defined(_LP64) && defined(_SYSCALL32)
309 caddr32_t psaddr;
310 if (ps_pdread(ph_p, bootstrap_addr,
311 &psaddr, sizeof (psaddr)) != PS_OK)
312 return (TD_DBERR);
313 if ((ta_p->bootstrap_addr = (psaddr_t)psaddr) == NULL)
314 psaddr = (caddr32_t)uberdata_addr;
315 else if (ps_pdread(ph_p, (psaddr_t)psaddr,
316 &psaddr, sizeof (psaddr)) != PS_OK)
317 return (TD_DBERR);
318 if (psaddr == NULL) {
319 /* primary linkmap in the tgt is not initialized */
320 ta_p->bootstrap_addr = NULL;
321 psaddr = (caddr32_t)uberdata_addr;
322 }
323 ta_p->uberdata_addr = (psaddr_t)psaddr;
324 #else
325 return (TD_DBERR);
326 #endif /* _SYSCALL32 */
327 }
328
329 if ((return_val = td_read_uberdata(ta_p)) != TD_OK)
330 return (return_val);
331 if (ta_p->bootstrap_addr == NULL)
332 ta_p->initialized = 0;
333 return (TD_OK);
334 }
335
336 #pragma weak ps_kill
337 #pragma weak ps_lrolltoaddr
338
339 /*
340 * Allocate a new agent process handle ("thread agent").
341 */
342 #pragma weak td_ta_new = __td_ta_new
343 td_err_e
344 __td_ta_new(struct ps_prochandle *ph_p, td_thragent_t **ta_pp)
345 {
346 td_thragent_t *ta_p;
347 int model;
348 td_err_e return_val = TD_OK;
349
350 if (ph_p == NULL)
351 return (TD_BADPH);
352 if (ta_pp == NULL)
353 return (TD_ERR);
354 *ta_pp = NULL;
355 if (ps_pstop(ph_p) != PS_OK)
356 return (TD_DBERR);
357 /*
358 * ps_pdmodel might not be defined if this is an older client.
359 * Make it a weak symbol and test if it exists before calling.
360 */
361 #pragma weak ps_pdmodel
362 if (ps_pdmodel == NULL) {
363 model = PR_MODEL_NATIVE;
364 } else if (ps_pdmodel(ph_p, &model) != PS_OK) {
365 (void) ps_pcontinue(ph_p);
366 return (TD_ERR);
367 }
368 if ((ta_p = malloc(sizeof (*ta_p))) == NULL) {
369 (void) ps_pcontinue(ph_p);
370 return (TD_MALLOC);
371 }
372
373 /*
374 * Initialize the agent process handle.
375 * Pick up the symbol value we need from the target process.
376 */
377 (void) memset(ta_p, 0, sizeof (*ta_p));
378 ta_p->ph_p = ph_p;
379 (void) rwlock_init(&ta_p->rwlock, USYNC_THREAD, NULL);
380 ta_p->model = model;
381 return_val = td_read_bootstrap_data(ta_p);
382
383 /*
384 * Because the old libthread_db enabled lock tracking by default,
385 * we must also do it. However, we do it only if the application
386 * provides the ps_kill() and ps_lrolltoaddr() interfaces.
387 * (dbx provides the ps_kill() and ps_lrolltoaddr() interfaces.)
388 */
389 if (return_val == TD_OK && ps_kill != NULL && ps_lrolltoaddr != NULL) {
390 register_sync_t oldenable;
391 register_sync_t enable = REGISTER_SYNC_ENABLE;
392 psaddr_t psaddr = ta_p->tdb_register_sync_addr;
393
394 if (ps_pdread(ph_p, psaddr,
395 &oldenable, sizeof (oldenable)) != PS_OK)
396 return_val = TD_DBERR;
397 else if (oldenable != REGISTER_SYNC_OFF ||
398 ps_pdwrite(ph_p, psaddr,
399 &enable, sizeof (enable)) != PS_OK) {
400 /*
401 * Lock tracking was already enabled or we
402 * failed to enable it, probably because we
403 * are examining a core file. In either case
404 * set the sync_tracking flag non-zero to
405 * indicate that we should not attempt to
406 * disable lock tracking when we delete the
407 * agent process handle in td_ta_delete().
408 */
409 ta_p->sync_tracking = 1;
410 }
411 }
412
413 if (return_val == TD_OK)
414 *ta_pp = ta_p;
415 else
416 free(ta_p);
417
418 (void) ps_pcontinue(ph_p);
419 return (return_val);
420 }
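
/*
 * Illustrative sketch (not compiled): how a debugger-side client might
 * typically obtain an agent handle.  It assumes the client implements the
 * proc_service callbacks (ps_pdread(), ps_pglobal_lookup(), ps_pstop(), ...)
 * and already owns a struct ps_prochandle for the target process.
 */
#if 0
static td_err_e
client_attach_agent(struct ps_prochandle *ph, td_thragent_t **ta_pp)
{
	td_err_e err;

	if ((err = td_init()) != TD_OK)
		return (err);
	/* Returns TD_NOLIBTHREAD until libc is mapped into the target. */
	if ((err = td_ta_new(ph, ta_pp)) != TD_OK)
		return (err);
	/* ... use the agent; eventually call td_ta_delete(*ta_pp) ... */
	return (TD_OK);
}
#endif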
421
422 /*
423 * Utility function to grab the readers lock and return the prochandle,
424 * given an agent process handle. Performs standard error checking.
425 * Returns non-NULL with the lock held, or NULL with the lock not held.
426 */
427 static struct ps_prochandle *
428 ph_lock_ta(td_thragent_t *ta_p, td_err_e *err)
429 {
430 struct ps_prochandle *ph_p = NULL;
431 td_err_e error;
432
433 if (ta_p == NULL || ta_p->initialized == -1) {
434 *err = TD_BADTA;
435 } else if (rw_rdlock(&ta_p->rwlock) != 0) { /* can't happen? */
436 *err = TD_BADTA;
437 } else if ((ph_p = ta_p->ph_p) == NULL) {
438 (void) rw_unlock(&ta_p->rwlock);
439 *err = TD_BADPH;
440 } else if (ta_p->initialized != 2 &&
441 (error = td_read_bootstrap_data(ta_p)) != TD_OK) {
442 (void) rw_unlock(&ta_p->rwlock);
443 ph_p = NULL;
444 *err = error;
445 } else {
446 *err = TD_OK;
447 }
448
449 return (ph_p);
450 }
451
452 /*
453 * Utility function to grab the readers lock and return the prochandle,
454 * given an agent thread handle. Performs standard error checking.
455 * Returns non-NULL with the lock held, or NULL with the lock not held.
456 */
457 static struct ps_prochandle *
458 ph_lock_th(const td_thrhandle_t *th_p, td_err_e *err)
459 {
460 if (th_p == NULL || th_p->th_unique == NULL) {
461 *err = TD_BADTH;
462 return (NULL);
463 }
464 return (ph_lock_ta(th_p->th_ta_p, err));
465 }
466
467 /*
468 * Utility function to grab the readers lock and return the prochandle,
469 * given a synchronization object handle. Performs standard error checking.
470 * Returns non-NULL with the lock held, or NULL with the lock not held.
471 */
472 static struct ps_prochandle *
473 ph_lock_sh(const td_synchandle_t *sh_p, td_err_e *err)
474 {
475 if (sh_p == NULL || sh_p->sh_unique == NULL) {
476 *err = TD_BADSH;
477 return (NULL);
478 }
479 return (ph_lock_ta(sh_p->sh_ta_p, err));
480 }
481
482 /*
483 * Unlock the agent process handle obtained from ph_lock_*().
484 */
485 static void
486 ph_unlock(td_thragent_t *ta_p)
487 {
488 (void) rw_unlock(&ta_p->rwlock);
489 }
490
491 /*
492 * De-allocate an agent process handle,
493 * releasing all related resources.
494 *
495 * XXX -- This is hopelessly broken ---
496 * Storage for thread agent is not deallocated. The prochandle
497 * in the thread agent is set to NULL so that future uses of
498 * the thread agent can be detected and an error value returned.
499 * All functions in the external user interface that make
500 * use of the thread agent are expected
501 * to check for a NULL prochandle in the thread agent.
502 * All such functions are also expected to obtain a
503 * reader lock on the thread agent while it is using it.
504 */
505 #pragma weak td_ta_delete = __td_ta_delete
506 td_err_e
507 __td_ta_delete(td_thragent_t *ta_p)
508 {
509 struct ps_prochandle *ph_p;
510
511 /*
512 * This is the only place we grab the writer lock.
513 * We are going to NULL out the prochandle.
514 */
515 if (ta_p == NULL || rw_wrlock(&ta_p->rwlock) != 0)
516 return (TD_BADTA);
517 if ((ph_p = ta_p->ph_p) == NULL) {
518 (void) rw_unlock(&ta_p->rwlock);
519 return (TD_BADPH);
520 }
521 /*
522 * If synch. tracking was disabled when td_ta_new() was called and
523 * if td_ta_sync_tracking_enable() was never called, then disable
524 * synch. tracking (it was enabled by default in td_ta_new()).
525 */
526 if (ta_p->sync_tracking == 0 &&
527 ps_kill != NULL && ps_lrolltoaddr != NULL) {
528 register_sync_t enable = REGISTER_SYNC_DISABLE;
529
530 (void) ps_pdwrite(ph_p, ta_p->tdb_register_sync_addr,
531 &enable, sizeof (enable));
532 }
533 ta_p->ph_p = NULL;
534 (void) rw_unlock(&ta_p->rwlock);
535 return (TD_OK);
536 }
537
538 /*
539 * Map an agent process handle to a client prochandle.
540 * Currently unused by dbx.
541 */
542 #pragma weak td_ta_get_ph = __td_ta_get_ph
543 td_err_e
544 __td_ta_get_ph(td_thragent_t *ta_p, struct ps_prochandle **ph_pp)
545 {
546 td_err_e return_val;
547
548 if (ph_pp != NULL) /* protect stupid callers */
549 *ph_pp = NULL;
550 if (ph_pp == NULL)
551 return (TD_ERR);
552 if ((*ph_pp = ph_lock_ta(ta_p, &return_val)) == NULL)
553 return (return_val);
554 ph_unlock(ta_p);
555 return (TD_OK);
556 }
557
558 /*
559 * Set the process's suggested concurrency level.
560 * This is a no-op in a one-level model.
561 * Currently unused by dbx.
562 */
563 #pragma weak td_ta_setconcurrency = __td_ta_setconcurrency
564 /* ARGSUSED1 */
565 td_err_e
566 __td_ta_setconcurrency(const td_thragent_t *ta_p, int level)
567 {
568 if (ta_p == NULL)
569 return (TD_BADTA);
570 if (ta_p->ph_p == NULL)
571 return (TD_BADPH);
572 return (TD_OK);
573 }
574
575 /*
576 * Get the number of threads in the process.
577 */
578 #pragma weak td_ta_get_nthreads = __td_ta_get_nthreads
579 td_err_e
580 __td_ta_get_nthreads(td_thragent_t *ta_p, int *nthread_p)
581 {
582 struct ps_prochandle *ph_p;
583 td_err_e return_val;
584 int nthreads;
585 int nzombies;
586 psaddr_t nthreads_addr;
587 psaddr_t nzombies_addr;
588
589 if (ta_p->model == PR_MODEL_NATIVE) {
590 nthreads_addr = ta_p->uberdata_addr +
591 offsetof(uberdata_t, nthreads);
592 nzombies_addr = ta_p->uberdata_addr +
593 offsetof(uberdata_t, nzombies);
594 } else {
595 #if defined(_LP64) && defined(_SYSCALL32)
596 nthreads_addr = ta_p->uberdata_addr +
597 offsetof(uberdata32_t, nthreads);
598 nzombies_addr = ta_p->uberdata_addr +
599 offsetof(uberdata32_t, nzombies);
600 #else
601 nthreads_addr = 0;
602 nzombies_addr = 0;
603 #endif /* _SYSCALL32 */
604 }
605
606 if (nthread_p == NULL)
607 return (TD_ERR);
608 if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
609 return (return_val);
610 if (ps_pdread(ph_p, nthreads_addr, &nthreads, sizeof (int)) != PS_OK)
611 return_val = TD_DBERR;
612 if (ps_pdread(ph_p, nzombies_addr, &nzombies, sizeof (int)) != PS_OK)
613 return_val = TD_DBERR;
614 ph_unlock(ta_p);
615 if (return_val == TD_OK)
616 *nthread_p = nthreads + nzombies;
617 return (return_val);
618 }
619
620 typedef struct {
621 thread_t tid;
622 int found;
623 td_thrhandle_t th;
624 } td_mapper_param_t;
625
626 /*
627 * Check the value in data against the thread id.
628 * If it matches, return 1 to terminate iterations.
629 * This function is used by td_ta_map_id2thr() to map a tid to a thread handle.
630 */
631 static int
632 td_mapper_id2thr(td_thrhandle_t *th_p, td_mapper_param_t *data)
633 {
634 td_thrinfo_t ti;
635
636 if (__td_thr_get_info(th_p, &ti) == TD_OK &&
637 data->tid == ti.ti_tid) {
638 data->found = 1;
639 data->th = *th_p;
640 return (1);
641 }
642 return (0);
643 }
644
645 /*
646 * Given a thread identifier, return the corresponding thread handle.
647 */
648 #pragma weak td_ta_map_id2thr = __td_ta_map_id2thr
649 td_err_e
650 __td_ta_map_id2thr(td_thragent_t *ta_p, thread_t tid,
651 td_thrhandle_t *th_p)
652 {
653 td_err_e return_val;
654 td_mapper_param_t data;
655
656 if (th_p != NULL && /* optimize for a single thread */
657 ta_p != NULL &&
658 ta_p->initialized == 1 &&
659 (td_read_hash_size(ta_p) == 1 ||
660 td_read_uberdata(ta_p) == TD_OK) &&
661 ta_p->initialized == 1 &&
662 ta_p->single_lwpid == tid) {
663 th_p->th_ta_p = ta_p;
664 if ((th_p->th_unique = ta_p->single_ulwp_addr) == 0)
665 return (TD_NOTHR);
666 return (TD_OK);
667 }
668
669 /*
670 * LOCKING EXCEPTION - Locking is not required here because
671 * the locking and checking will be done in __td_ta_thr_iter.
672 */
673
674 if (ta_p == NULL)
675 return (TD_BADTA);
676 if (th_p == NULL)
677 return (TD_BADTH);
678 if (tid == 0)
679 return (TD_NOTHR);
680
681 data.tid = tid;
682 data.found = 0;
683 return_val = __td_ta_thr_iter(ta_p,
684 (td_thr_iter_f *)td_mapper_id2thr, (void *)&data,
685 TD_THR_ANY_STATE, TD_THR_LOWEST_PRIORITY,
686 TD_SIGNO_MASK, TD_THR_ANY_USER_FLAGS);
687 if (return_val == TD_OK) {
688 if (data.found == 0)
689 return_val = TD_NOTHR;
690 else
691 *th_p = data.th;
692 }
693
694 return (return_val);
695 }
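
/*
 * Illustrative sketch (not compiled): mapping a thread id to a handle and
 * stopping just that LWP.  Error handling is minimal; the agent handle is
 * assumed to come from td_ta_new() as in the earlier sketch.
 */
#if 0
static td_err_e
client_stop_thread(td_thragent_t *ta_p, thread_t tid)
{
	td_thrhandle_t th;
	td_err_e err;

	if ((err = td_ta_map_id2thr(ta_p, tid, &th)) != TD_OK)
		return (err);
	return (td_thr_dbsuspend(&th));
}
#endif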
696
697 /*
698 * Map the address of a synchronization object to a sync. object handle.
699 */
700 #pragma weak td_ta_map_addr2sync = __td_ta_map_addr2sync
701 td_err_e
702 __td_ta_map_addr2sync(td_thragent_t *ta_p, psaddr_t addr, td_synchandle_t *sh_p)
703 {
704 struct ps_prochandle *ph_p;
705 td_err_e return_val;
706 uint16_t sync_magic;
707
708 if (sh_p == NULL)
709 return (TD_BADSH);
710 if (addr == NULL)
711 return (TD_ERR);
712 if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
713 return (return_val);
714 /*
715 * Check the magic number of the sync. object to make sure it's valid.
716 * The magic number is at the same offset for all sync. objects.
717 */
718 if (ps_pdread(ph_p, (psaddr_t)&((mutex_t *)addr)->mutex_magic,
719 &sync_magic, sizeof (sync_magic)) != PS_OK) {
720 ph_unlock(ta_p);
721 return (TD_BADSH);
722 }
723 ph_unlock(ta_p);
724 if (sync_magic != MUTEX_MAGIC && sync_magic != COND_MAGIC &&
725 sync_magic != SEMA_MAGIC && sync_magic != RWL_MAGIC)
726 return (TD_BADSH);
727 /*
728 * Just fill in the appropriate fields of the sync. handle.
729 */
730 sh_p->sh_ta_p = (td_thragent_t *)ta_p;
731 sh_p->sh_unique = addr;
732 return (TD_OK);
733 }
734
735 /*
736 * Iterate over the set of global TSD keys.
737 * The call back function is called with three arguments,
738 * a key, a pointer to the destructor function, and the cbdata pointer.
739 * Currently unused by dbx.
740 */
741 #pragma weak td_ta_tsd_iter = __td_ta_tsd_iter
742 td_err_e
743 __td_ta_tsd_iter(td_thragent_t *ta_p, td_key_iter_f *cb, void *cbdata_p)
744 {
745 struct ps_prochandle *ph_p;
746 td_err_e return_val;
747 int key;
748 int numkeys;
749 psaddr_t dest_addr;
750 psaddr_t *destructors = NULL;
751 PFrV destructor;
752
753 if (cb == NULL)
754 return (TD_ERR);
755 if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
756 return (return_val);
757 if (ps_pstop(ph_p) != PS_OK) {
758 ph_unlock(ta_p);
759 return (TD_DBERR);
760 }
761
762 if (ta_p->model == PR_MODEL_NATIVE) {
763 tsd_metadata_t tsdm;
764
765 if (ps_pdread(ph_p,
766 ta_p->uberdata_addr + offsetof(uberdata_t, tsd_metadata),
767 &tsdm, sizeof (tsdm)) != PS_OK)
768 return_val = TD_DBERR;
769 else {
770 numkeys = tsdm.tsdm_nused;
771 dest_addr = (psaddr_t)tsdm.tsdm_destro;
772 if (numkeys > 0)
773 destructors =
774 malloc(numkeys * sizeof (psaddr_t));
775 }
776 } else {
777 #if defined(_LP64) && defined(_SYSCALL32)
778 tsd_metadata32_t tsdm;
779
780 if (ps_pdread(ph_p,
781 ta_p->uberdata_addr + offsetof(uberdata32_t, tsd_metadata),
782 &tsdm, sizeof (tsdm)) != PS_OK)
783 return_val = TD_DBERR;
784 else {
785 numkeys = tsdm.tsdm_nused;
786 dest_addr = (psaddr_t)tsdm.tsdm_destro;
787 if (numkeys > 0)
788 destructors =
789 malloc(numkeys * sizeof (caddr32_t));
790 }
791 #else
792 return_val = TD_DBERR;
793 #endif /* _SYSCALL32 */
794 }
795
796 if (return_val != TD_OK || numkeys <= 0) {
797 (void) ps_pcontinue(ph_p);
798 ph_unlock(ta_p);
799 return (return_val);
800 }
801
802 if (destructors == NULL)
803 return_val = TD_MALLOC;
804 else if (ta_p->model == PR_MODEL_NATIVE) {
805 if (ps_pdread(ph_p, dest_addr,
806 destructors, numkeys * sizeof (psaddr_t)) != PS_OK)
807 return_val = TD_DBERR;
808 else {
809 for (key = 1; key < numkeys; key++) {
810 destructor = (PFrV)destructors[key];
811 if (destructor != TSD_UNALLOCATED &&
812 (*cb)(key, destructor, cbdata_p))
813 break;
814 }
815 }
816 #if defined(_LP64) && defined(_SYSCALL32)
817 } else {
818 caddr32_t *destructors32 = (caddr32_t *)destructors;
819 caddr32_t destruct32;
820
821 if (ps_pdread(ph_p, dest_addr,
822 destructors32, numkeys * sizeof (caddr32_t)) != PS_OK)
823 return_val = TD_DBERR;
824 else {
825 for (key = 1; key < numkeys; key++) {
826 destruct32 = destructors32[key];
827 if (destruct32 != (caddr32_t)TSD_UNALLOCATED &&
828 (*cb)(key, (PFrV)(uintptr_t)destruct32,
829 cbdata_p))
830 break;
831 }
832 }
833 #endif /* _SYSCALL32 */
834 }
835
836 if (destructors)
837 free(destructors);
838 (void) ps_pcontinue(ph_p);
839 ph_unlock(ta_p);
840 return (return_val);
841 }
842
843 int
844 sigequalset(const sigset_t *s1, const sigset_t *s2)
845 {
846 return (
847 s1->__sigbits[0] == s2->__sigbits[0] &&
848 s1->__sigbits[1] == s2->__sigbits[1] &&
849 s1->__sigbits[2] == s2->__sigbits[2] &&
850 s1->__sigbits[3] == s2->__sigbits[3]);
851 }
852
853 /*
854 * Description:
855 * Iterate over all threads. For each thread call
856 * the function pointed to by "cb" with a pointer
857 * to a thread handle, and a pointer to data which
858 * can be NULL. Only call td_thr_iter_f() on threads
859 * which match the properties of state, ti_pri,
860 * ti_sigmask_p, and ti_user_flags. If cb returns
861 * a non-zero value, terminate iterations.
862 *
863 * Input:
864 * *ta_p - thread agent
865 * *cb - call back function defined by user.
866 * td_thr_iter_f() takes a thread handle and
867 * cbdata_p as a parameter.
868 * cbdata_p - parameter for td_thr_iter_f().
869 *
870 * state - state of threads of interest. A value of
871 * TD_THR_ANY_STATE from enum td_thr_state_e
872 * does not restrict iterations by state.
873 * ti_pri - lower bound of priorities of threads of
874 * interest. A value of TD_THR_LOWEST_PRIORITY
875 * defined in thread_db.h does not restrict
876 * iterations by priority. A thread with priority
877 * less than ti_pri will NOT be passed to the callback
878 * function.
879 * ti_sigmask_p - signal mask of threads of interest.
880 * A value of TD_SIGNO_MASK defined in thread_db.h
881 * does not restrict iterations by signal mask.
882 * ti_user_flags - user flags of threads of interest. A
883 * value of TD_THR_ANY_USER_FLAGS defined in thread_db.h
884 * does not restrict iterations by user flags.
885 */
886 #pragma weak td_ta_thr_iter = __td_ta_thr_iter
887 td_err_e
888 __td_ta_thr_iter(td_thragent_t *ta_p, td_thr_iter_f *cb,
889 void *cbdata_p, td_thr_state_e state, int ti_pri,
890 sigset_t *ti_sigmask_p, unsigned ti_user_flags)
891 {
892 struct ps_prochandle *ph_p;
893 psaddr_t first_lwp_addr;
894 psaddr_t first_zombie_addr;
895 psaddr_t curr_lwp_addr;
896 psaddr_t next_lwp_addr;
897 td_thrhandle_t th;
898 ps_err_e db_return;
899 ps_err_e db_return2;
900 td_err_e return_val;
901
902 if (cb == NULL)
903 return (TD_ERR);
904 /*
905 * If state is not within bounds, short circuit.
906 */
907 if (state < TD_THR_ANY_STATE || state > TD_THR_STOPPED_ASLEEP)
908 return (TD_OK);
909
910 if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
911 return (return_val);
912 if (ps_pstop(ph_p) != PS_OK) {
913 ph_unlock(ta_p);
914 return (TD_DBERR);
915 }
916
917 /*
918 * For each ulwp_t in the circular linked lists pointed
919 * to by "all_lwps" and "all_zombies":
920 * (1) Filter each thread.
921 * (2) Create the thread_object for each thread that passes.
922 * (3) Call the call back function on each thread.
923 */
924
925 if (ta_p->model == PR_MODEL_NATIVE) {
926 db_return = ps_pdread(ph_p,
927 ta_p->uberdata_addr + offsetof(uberdata_t, all_lwps),
928 &first_lwp_addr, sizeof (first_lwp_addr));
929 db_return2 = ps_pdread(ph_p,
930 ta_p->uberdata_addr + offsetof(uberdata_t, all_zombies),
931 &first_zombie_addr, sizeof (first_zombie_addr));
932 } else {
933 #if defined(_LP64) && defined(_SYSCALL32)
934 caddr32_t addr32;
935
936 db_return = ps_pdread(ph_p,
937 ta_p->uberdata_addr + offsetof(uberdata32_t, all_lwps),
938 &addr32, sizeof (addr32));
939 first_lwp_addr = addr32;
940 db_return2 = ps_pdread(ph_p,
941 ta_p->uberdata_addr + offsetof(uberdata32_t, all_zombies),
942 &addr32, sizeof (addr32));
943 first_zombie_addr = addr32;
944 #else /* _SYSCALL32 */
945 db_return = PS_ERR;
946 db_return2 = PS_ERR;
947 #endif /* _SYSCALL32 */
948 }
949 if (db_return == PS_OK)
950 db_return = db_return2;
951
952 /*
953 * If first_lwp_addr and first_zombie_addr are both NULL,
954 * libc must not yet be initialized or all threads have
955 * exited. Return TD_NOTHR and all will be well.
956 */
957 if (db_return == PS_OK &&
958 first_lwp_addr == NULL && first_zombie_addr == NULL) {
959 (void) ps_pcontinue(ph_p);
960 ph_unlock(ta_p);
961 return (TD_NOTHR);
962 }
963 if (db_return != PS_OK) {
964 (void) ps_pcontinue(ph_p);
965 ph_unlock(ta_p);
966 return (TD_DBERR);
967 }
968
969 /*
970 * Run down the lists of all living and dead lwps.
971 */
972 if (first_lwp_addr == NULL)
973 first_lwp_addr = first_zombie_addr;
974 curr_lwp_addr = first_lwp_addr;
975 for (;;) {
976 td_thr_state_e ts_state;
977 int userpri;
978 unsigned userflags;
979 sigset_t mask;
980
981 /*
982 * Read the ulwp struct.
983 */
984 if (ta_p->model == PR_MODEL_NATIVE) {
985 ulwp_t ulwp;
986
987 if (ps_pdread(ph_p, curr_lwp_addr,
988 &ulwp, sizeof (ulwp)) != PS_OK &&
989 ((void) memset(&ulwp, 0, sizeof (ulwp)),
990 ps_pdread(ph_p, curr_lwp_addr,
991 &ulwp, REPLACEMENT_SIZE)) != PS_OK) {
992 return_val = TD_DBERR;
993 break;
994 }
995 next_lwp_addr = (psaddr_t)ulwp.ul_forw;
996
997 ts_state = ulwp.ul_dead? TD_THR_ZOMBIE :
998 ulwp.ul_stop? TD_THR_STOPPED :
999 ulwp.ul_wchan? TD_THR_SLEEP :
1000 TD_THR_ACTIVE;
1001 userpri = ulwp.ul_pri;
1002 userflags = ulwp.ul_usropts;
1003 if (ulwp.ul_dead)
1004 (void) sigemptyset(&mask);
1005 else
1006 mask = *(sigset_t *)&ulwp.ul_sigmask;
1007 } else {
1008 #if defined(_LP64) && defined(_SYSCALL32)
1009 ulwp32_t ulwp;
1010
1011 if (ps_pdread(ph_p, curr_lwp_addr,
1012 &ulwp, sizeof (ulwp)) != PS_OK &&
1013 ((void) memset(&ulwp, 0, sizeof (ulwp)),
1014 ps_pdread(ph_p, curr_lwp_addr,
1015 &ulwp, REPLACEMENT_SIZE32)) != PS_OK) {
1016 return_val = TD_DBERR;
1017 break;
1018 }
1019 next_lwp_addr = (psaddr_t)ulwp.ul_forw;
1020
1021 ts_state = ulwp.ul_dead? TD_THR_ZOMBIE :
1022 ulwp.ul_stop? TD_THR_STOPPED :
1023 ulwp.ul_wchan? TD_THR_SLEEP :
1024 TD_THR_ACTIVE;
1025 userpri = ulwp.ul_pri;
1026 userflags = ulwp.ul_usropts;
1027 if (ulwp.ul_dead)
1028 (void) sigemptyset(&mask);
1029 else
1030 mask = *(sigset_t *)&ulwp.ul_sigmask;
1031 #else /* _SYSCALL32 */
1032 return_val = TD_ERR;
1033 break;
1034 #endif /* _SYSCALL32 */
1035 }
1036
1037 /*
1038 * Filter on state, priority, sigmask, and user flags.
1039 */
1040
1041 if ((state != ts_state) &&
1042 (state != TD_THR_ANY_STATE))
1043 goto advance;
1044
1045 if (ti_pri > userpri)
1046 goto advance;
1047
1048 if (ti_sigmask_p != TD_SIGNO_MASK &&
1049 !sigequalset(ti_sigmask_p, &mask))
1050 goto advance;
1051
1052 if (ti_user_flags != userflags &&
1053 ti_user_flags != (unsigned)TD_THR_ANY_USER_FLAGS)
1054 goto advance;
1055
1056 /*
1057 * Call back - break if the return
1058 * from the call back is non-zero.
1059 */
1060 th.th_ta_p = (td_thragent_t *)ta_p;
1061 th.th_unique = curr_lwp_addr;
1062 if ((*cb)(&th, cbdata_p))
1063 break;
1064
1065 advance:
1066 if ((curr_lwp_addr = next_lwp_addr) == first_lwp_addr) {
1067 /*
1068 * Switch to the zombie list, unless it is NULL
1069 * or we have already been doing the zombie list,
1070 * in which case terminate the loop.
1071 */
1072 if (first_zombie_addr == NULL ||
1073 first_lwp_addr == first_zombie_addr)
1074 break;
1075 curr_lwp_addr = first_lwp_addr = first_zombie_addr;
1076 }
1077 }
1078
1079 (void) ps_pcontinue(ph_p);
1080 ph_unlock(ta_p);
1081 return (return_val);
1082 }
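
/*
 * Illustrative sketch (not compiled): a td_thr_iter_f callback that lists
 * every thread in the target.  The cast in the td_thr_get_info() call
 * mirrors the one used elsewhere in this file for callbacks that receive
 * a const handle.
 */
#if 0
static int
client_list_cb(const td_thrhandle_t *th_p, void *cbdata_p)
{
	td_thrinfo_t ti;
	int *count = cbdata_p;

	if (td_thr_get_info((td_thrhandle_t *)th_p, &ti) == TD_OK) {
		(void) printf("tid %d lwpid %d state %d\n",
		    (int)ti.ti_tid, (int)ti.ti_lid, (int)ti.ti_state);
		(*count)++;
	}
	return (0);	/* returning non-zero would stop the iteration */
}

/*
 * (void) td_ta_thr_iter(ta_p, client_list_cb, &count, TD_THR_ANY_STATE,
 *     TD_THR_LOWEST_PRIORITY, TD_SIGNO_MASK, TD_THR_ANY_USER_FLAGS);
 */
#endif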
1083
1084 /*
1085 * Enable or disable process synchronization object tracking.
1086 * Currently unused by dbx.
1087 */
1088 #pragma weak td_ta_sync_tracking_enable = __td_ta_sync_tracking_enable
1089 td_err_e
1090 __td_ta_sync_tracking_enable(td_thragent_t *ta_p, int onoff)
1091 {
1092 struct ps_prochandle *ph_p;
1093 td_err_e return_val;
1094 register_sync_t enable;
1095
1096 if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
1097 return (return_val);
1098 /*
1099 * Values of tdb_register_sync in the victim process:
1100 * REGISTER_SYNC_ENABLE enables registration of synch objects
1101 * REGISTER_SYNC_DISABLE disables registration of synch objects
1102 * These cause the table to be cleared and tdb_register_sync set to:
1103 * REGISTER_SYNC_ON registration in effect
1104 * REGISTER_SYNC_OFF registration not in effect
1105 */
1106 enable = onoff? REGISTER_SYNC_ENABLE : REGISTER_SYNC_DISABLE;
1107 if (ps_pdwrite(ph_p, ta_p->tdb_register_sync_addr,
1108 &enable, sizeof (enable)) != PS_OK)
1109 return_val = TD_DBERR;
1110 /*
1111 * Remember that this interface was called (see td_ta_delete()).
1112 */
1113 ta_p->sync_tracking = 1;
1114 ph_unlock(ta_p);
1115 return (return_val);
1116 }
1117
1118 /*
1119 * Iterate over all known synchronization variables.
1120 * It is very possible that the list generated is incomplete,
1121 * because the iterator can only find synchronization variables
1122 * that have been registered by the process since synchronization
1123 * object registration was enabled.
1124 * The call back function cb is called for each synchronization
1125 * variable with two arguments: a pointer to the synchronization
1126 * handle and the passed-in argument cbdata.
1127 * If cb returns a non-zero value, iterations are terminated.
1128 */
1129 #pragma weak td_ta_sync_iter = __td_ta_sync_iter
1130 td_err_e
1131 __td_ta_sync_iter(td_thragent_t *ta_p, td_sync_iter_f *cb, void *cbdata)
1132 {
1133 struct ps_prochandle *ph_p;
1134 td_err_e return_val;
1135 int i;
1136 register_sync_t enable;
1137 psaddr_t next_desc;
1138 tdb_sync_stats_t sync_stats;
1139 td_synchandle_t synchandle;
1140 psaddr_t psaddr;
1141 void *vaddr;
1142 uint64_t *sync_addr_hash = NULL;
1143
1144 if (cb == NULL)
1145 return (TD_ERR);
1146 if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
1147 return (return_val);
1148 if (ps_pstop(ph_p) != PS_OK) {
1149 ph_unlock(ta_p);
1150 return (TD_DBERR);
1151 }
1152 if (ps_pdread(ph_p, ta_p->tdb_register_sync_addr,
1153 &enable, sizeof (enable)) != PS_OK) {
1154 return_val = TD_DBERR;
1155 goto out;
1156 }
1157 if (enable != REGISTER_SYNC_ON)
1158 goto out;
1159
1160 /*
1161 * First read the hash table.
1162 * The hash table is large; allocate with mmap().
1163 */
1164 if ((vaddr = mmap(NULL, TDB_HASH_SIZE * sizeof (uint64_t),
1165 PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANON, -1, (off_t)0))
1166 == MAP_FAILED) {
1167 return_val = TD_MALLOC;
1168 goto out;
1169 }
1170 sync_addr_hash = vaddr;
1171
1172 if (ta_p->model == PR_MODEL_NATIVE) {
1173 if (ps_pdread(ph_p, ta_p->uberdata_addr +
1174 offsetof(uberdata_t, tdb.tdb_sync_addr_hash),
1175 &psaddr, sizeof (psaddr)) != PS_OK) {
1176 return_val = TD_DBERR;
1177 goto out;
1178 }
1179 } else {
1180 #ifdef _SYSCALL32
1181 caddr32_t addr;
1182
1183 if (ps_pdread(ph_p, ta_p->uberdata_addr +
1184 offsetof(uberdata32_t, tdb.tdb_sync_addr_hash),
1185 &addr, sizeof (addr)) != PS_OK) {
1186 return_val = TD_DBERR;
1187 goto out;
1188 }
1189 psaddr = addr;
1190 #else
1191 return_val = TD_ERR;
1192 goto out;
1193 #endif /* _SYSCALL32 */
1194 }
1195
1196 if (psaddr == NULL)
1197 goto out;
1198 if (ps_pdread(ph_p, psaddr, sync_addr_hash,
1199 TDB_HASH_SIZE * sizeof (uint64_t)) != PS_OK) {
1200 return_val = TD_DBERR;
1201 goto out;
1202 }
1203
1204 /*
1205 * Now scan the hash table.
1206 */
1207 for (i = 0; i < TDB_HASH_SIZE; i++) {
1208 for (next_desc = (psaddr_t)sync_addr_hash[i];
1209 next_desc != NULL;
1210 next_desc = (psaddr_t)sync_stats.next) {
1211 if (ps_pdread(ph_p, next_desc,
1212 &sync_stats, sizeof (sync_stats)) != PS_OK) {
1213 return_val = TD_DBERR;
1214 goto out;
1215 }
1216 if (sync_stats.un.type == TDB_NONE) {
1217 /* not registered since registration enabled */
1218 continue;
1219 }
1220 synchandle.sh_ta_p = ta_p;
1221 synchandle.sh_unique = (psaddr_t)sync_stats.sync_addr;
1222 if ((*cb)(&synchandle, cbdata) != 0)
1223 goto out;
1224 }
1225 }
1226
1227 out:
1228 if (sync_addr_hash != NULL)
1229 (void) munmap((void *)sync_addr_hash,
1230 TDB_HASH_SIZE * sizeof (uint64_t));
1231 (void) ps_pcontinue(ph_p);
1232 ph_unlock(ta_p);
1233 return (return_val);
1234 }
1235
1236 /*
1237 * Enable process statistics collection.
1238 */
1239 #pragma weak td_ta_enable_stats = __td_ta_enable_stats
1240 /* ARGSUSED */
1241 td_err_e
1242 __td_ta_enable_stats(const td_thragent_t *ta_p, int onoff)
1243 {
1244 return (TD_NOCAPAB);
1245 }
1246
1247 /*
1248 * Reset process statistics.
1249 */
1250 #pragma weak td_ta_reset_stats = __td_ta_reset_stats
1251 /* ARGSUSED */
1252 td_err_e
1253 __td_ta_reset_stats(const td_thragent_t *ta_p)
1254 {
1255 return (TD_NOCAPAB);
1256 }
1257
1258 /*
1259 * Read process statistics.
1260 */
1261 #pragma weak td_ta_get_stats = __td_ta_get_stats
1262 /* ARGSUSED */
1263 td_err_e
1264 __td_ta_get_stats(const td_thragent_t *ta_p, td_ta_stats_t *tstats)
1265 {
1266 return (TD_NOCAPAB);
1267 }
1268
1269 /*
1270 * Transfer information from lwp struct to thread information struct.
1271 * XXX -- lots of this needs cleaning up.
1272 */
1273 static void
1274 td_thr2to(td_thragent_t *ta_p, psaddr_t ts_addr,
1275 ulwp_t *ulwp, td_thrinfo_t *ti_p)
1276 {
1277 lwpid_t lwpid;
1278
1279 if ((lwpid = ulwp->ul_lwpid) == 0)
1280 lwpid = 1;
1281 (void) memset(ti_p, 0, sizeof (*ti_p));
1282 ti_p->ti_ta_p = ta_p;
1283 ti_p->ti_user_flags = ulwp->ul_usropts;
1284 ti_p->ti_tid = lwpid;
1285 ti_p->ti_exitval = ulwp->ul_rval;
1286 ti_p->ti_startfunc = (psaddr_t)ulwp->ul_startpc;
1287 if (!ulwp->ul_dead) {
1288 /*
1289 * The bloody fools got this backwards!
1290 */
1291 ti_p->ti_stkbase = (psaddr_t)ulwp->ul_stktop;
1292 ti_p->ti_stksize = ulwp->ul_stksiz;
1293 }
1294 ti_p->ti_ro_area = ts_addr;
1295 ti_p->ti_ro_size = ulwp->ul_replace?
1296 REPLACEMENT_SIZE : sizeof (ulwp_t);
1297 ti_p->ti_state = ulwp->ul_dead? TD_THR_ZOMBIE :
1298 ulwp->ul_stop? TD_THR_STOPPED :
1299 ulwp->ul_wchan? TD_THR_SLEEP :
1300 TD_THR_ACTIVE;
1301 ti_p->ti_db_suspended = 0;
1302 ti_p->ti_type = TD_THR_USER;
1303 ti_p->ti_sp = ulwp->ul_sp;
1304 ti_p->ti_flags = 0;
1305 ti_p->ti_pri = ulwp->ul_pri;
1306 ti_p->ti_lid = lwpid;
1307 if (!ulwp->ul_dead)
1308 ti_p->ti_sigmask = ulwp->ul_sigmask;
1309 ti_p->ti_traceme = 0;
1310 ti_p->ti_preemptflag = 0;
1311 ti_p->ti_pirecflag = 0;
1312 (void) sigemptyset(&ti_p->ti_pending);
1313 ti_p->ti_events = ulwp->ul_td_evbuf.eventmask;
1314 }
1315
1316 #if defined(_LP64) && defined(_SYSCALL32)
1317 static void
1318 td_thr2to32(td_thragent_t *ta_p, psaddr_t ts_addr,
1319 ulwp32_t *ulwp, td_thrinfo_t *ti_p)
1320 {
1321 lwpid_t lwpid;
1322
1323 if ((lwpid = ulwp->ul_lwpid) == 0)
1324 lwpid = 1;
1325 (void) memset(ti_p, 0, sizeof (*ti_p));
1326 ti_p->ti_ta_p = ta_p;
1327 ti_p->ti_user_flags = ulwp->ul_usropts;
1328 ti_p->ti_tid = lwpid;
1329 ti_p->ti_exitval = (void *)(uintptr_t)ulwp->ul_rval;
1330 ti_p->ti_startfunc = (psaddr_t)ulwp->ul_startpc;
1331 if (!ulwp->ul_dead) {
1332 /*
1333 * The bloody fools got this backwards!
1334 */
1335 ti_p->ti_stkbase = (psaddr_t)ulwp->ul_stktop;
1336 ti_p->ti_stksize = ulwp->ul_stksiz;
1337 }
1338 ti_p->ti_ro_area = ts_addr;
1339 ti_p->ti_ro_size = ulwp->ul_replace?
1340 REPLACEMENT_SIZE32 : sizeof (ulwp32_t);
1341 ti_p->ti_state = ulwp->ul_dead? TD_THR_ZOMBIE :
1342 ulwp->ul_stop? TD_THR_STOPPED :
1343 ulwp->ul_wchan? TD_THR_SLEEP :
1344 TD_THR_ACTIVE;
1345 ti_p->ti_db_suspended = 0;
1346 ti_p->ti_type = TD_THR_USER;
1347 ti_p->ti_sp = (uint32_t)ulwp->ul_sp;
1348 ti_p->ti_flags = 0;
1349 ti_p->ti_pri = ulwp->ul_pri;
1350 ti_p->ti_lid = lwpid;
1351 if (!ulwp->ul_dead)
1352 ti_p->ti_sigmask = *(sigset_t *)&ulwp->ul_sigmask;
1353 ti_p->ti_traceme = 0;
1354 ti_p->ti_preemptflag = 0;
1355 ti_p->ti_pirecflag = 0;
1356 (void) sigemptyset(&ti_p->ti_pending);
1357 ti_p->ti_events = ulwp->ul_td_evbuf.eventmask;
1358 }
1359 #endif /* _SYSCALL32 */
1360
1361 /*
1362 * Get thread information.
1363 */
1364 #pragma weak td_thr_get_info = __td_thr_get_info
1365 td_err_e
1366 __td_thr_get_info(td_thrhandle_t *th_p, td_thrinfo_t *ti_p)
1367 {
1368 struct ps_prochandle *ph_p;
1369 td_thragent_t *ta_p;
1370 td_err_e return_val;
1371 psaddr_t psaddr;
1372
1373 if (ti_p == NULL)
1374 return (TD_ERR);
1375 (void) memset(ti_p, 0, sizeof (*ti_p));
1376
1377 if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
1378 return (return_val);
1379 ta_p = th_p->th_ta_p;
1380 if (ps_pstop(ph_p) != PS_OK) {
1381 ph_unlock(ta_p);
1382 return (TD_DBERR);
1383 }
1384
1385 /*
1386 * Read the ulwp struct from the process.
1387 * Transfer the ulwp struct to the thread information struct.
1388 */
1389 psaddr = th_p->th_unique;
1390 if (ta_p->model == PR_MODEL_NATIVE) {
1391 ulwp_t ulwp;
1392
1393 if (ps_pdread(ph_p, psaddr, &ulwp, sizeof (ulwp)) != PS_OK &&
1394 ((void) memset(&ulwp, 0, sizeof (ulwp)),
1395 ps_pdread(ph_p, psaddr, &ulwp, REPLACEMENT_SIZE)) != PS_OK)
1396 return_val = TD_DBERR;
1397 else
1398 td_thr2to(ta_p, psaddr, &ulwp, ti_p);
1399 } else {
1400 #if defined(_LP64) && defined(_SYSCALL32)
1401 ulwp32_t ulwp;
1402
1403 if (ps_pdread(ph_p, psaddr, &ulwp, sizeof (ulwp)) != PS_OK &&
1404 ((void) memset(&ulwp, 0, sizeof (ulwp)),
1405 ps_pdread(ph_p, psaddr, &ulwp, REPLACEMENT_SIZE32)) !=
1406 PS_OK)
1407 return_val = TD_DBERR;
1408 else
1409 td_thr2to32(ta_p, psaddr, &ulwp, ti_p);
1410 #else
1411 return_val = TD_ERR;
1412 #endif /* _SYSCALL32 */
1413 }
1414
1415 (void) ps_pcontinue(ph_p);
1416 ph_unlock(ta_p);
1417 return (return_val);
1418 }
1419
1420 /*
1421 * Given a process and an event number, return information about
1422 * an address in the process or at which a breakpoint can be set
1423 * to monitor the event.
1424 */
1425 #pragma weak td_ta_event_addr = __td_ta_event_addr
1426 td_err_e
1427 __td_ta_event_addr(td_thragent_t *ta_p, td_event_e event, td_notify_t *notify_p)
1428 {
1429 if (ta_p == NULL)
1430 return (TD_BADTA);
1431 if (event < TD_MIN_EVENT_NUM || event > TD_MAX_EVENT_NUM)
1432 return (TD_NOEVENT);
1433 if (notify_p == NULL)
1434 return (TD_ERR);
1435
1436 notify_p->type = NOTIFY_BPT;
1437 notify_p->u.bptaddr = ta_p->tdb_events[event - TD_MIN_EVENT_NUM];
1438
1439 return (TD_OK);
1440 }
1441
1442 /*
1443 * Add the events in eventset 2 to eventset 1.
1444 */
1445 static void
1446 eventsetaddset(td_thr_events_t *event1_p, td_thr_events_t *event2_p)
1447 {
1448 int i;
1449
1450 for (i = 0; i < TD_EVENTSIZE; i++)
1451 event1_p->event_bits[i] |= event2_p->event_bits[i];
1452 }
1453
1454 /*
1455 * Delete the events in eventset 2 from eventset 1.
1456 */
1457 static void
1458 eventsetdelset(td_thr_events_t *event1_p, td_thr_events_t *event2_p)
1459 {
1460 int i;
1461
1462 for (i = 0; i < TD_EVENTSIZE; i++)
1463 event1_p->event_bits[i] &= ~event2_p->event_bits[i];
1464 }
1465
1466 /*
1467 * Either add or delete the given event set from a thread's event mask.
1468 */
1469 static td_err_e
1470 mod_eventset(td_thrhandle_t *th_p, td_thr_events_t *events, int onoff)
1471 {
1472 struct ps_prochandle *ph_p;
1473 td_err_e return_val = TD_OK;
1474 char enable;
1475 td_thr_events_t evset;
1476 psaddr_t psaddr_evset;
1477 psaddr_t psaddr_enab;
1478
1479 if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
1480 return (return_val);
1481 if (th_p->th_ta_p->model == PR_MODEL_NATIVE) {
1482 ulwp_t *ulwp = (ulwp_t *)th_p->th_unique;
1483 psaddr_evset = (psaddr_t)&ulwp->ul_td_evbuf.eventmask;
1484 psaddr_enab = (psaddr_t)&ulwp->ul_td_events_enable;
1485 } else {
1486 #if defined(_LP64) && defined(_SYSCALL32)
1487 ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique;
1488 psaddr_evset = (psaddr_t)&ulwp->ul_td_evbuf.eventmask;
1489 psaddr_enab = (psaddr_t)&ulwp->ul_td_events_enable;
1490 #else
1491 ph_unlock(th_p->th_ta_p);
1492 return (TD_ERR);
1493 #endif /* _SYSCALL32 */
1494 }
1495 if (ps_pstop(ph_p) != PS_OK) {
1496 ph_unlock(th_p->th_ta_p);
1497 return (TD_DBERR);
1498 }
1499
1500 if (ps_pdread(ph_p, psaddr_evset, &evset, sizeof (evset)) != PS_OK)
1501 return_val = TD_DBERR;
1502 else {
1503 if (onoff)
1504 eventsetaddset(&evset, events);
1505 else
1506 eventsetdelset(&evset, events);
1507 if (ps_pdwrite(ph_p, psaddr_evset, &evset, sizeof (evset))
1508 != PS_OK)
1509 return_val = TD_DBERR;
1510 else {
1511 enable = 0;
1512 if (td_eventismember(&evset, TD_EVENTS_ENABLE))
1513 enable = 1;
1514 if (ps_pdwrite(ph_p, psaddr_enab,
1515 &enable, sizeof (enable)) != PS_OK)
1516 return_val = TD_DBERR;
1517 }
1518 }
1519
1520 (void) ps_pcontinue(ph_p);
1521 ph_unlock(th_p->th_ta_p);
1522 return (return_val);
1523 }
1524
1525 /*
1526 * Enable or disable tracing for a given thread. Tracing
1527 * is filtered based on the event mask of each thread. Tracing
1528 * can be turned on/off for the thread without changing the
1529 * thread's event mask.
1530 * Currently unused by dbx.
1531 */
1532 #pragma weak td_thr_event_enable = __td_thr_event_enable
1533 td_err_e
1534 __td_thr_event_enable(td_thrhandle_t *th_p, int onoff)
1535 {
1536 td_thr_events_t evset;
1537
1538 td_event_emptyset(&evset);
1539 td_event_addset(&evset, TD_EVENTS_ENABLE);
1540 return (mod_eventset(th_p, &evset, onoff));
1541 }
1542
1543 /*
1544 * Set the event mask to enable the given events; they are turned on
1545 * in the thread's event mask. If a thread encounters an event
1546 * for which its event mask is on, notification will be sent
1547 * to the debugger.
1548 * Addresses for each event are provided to the
1549 * debugger. It is assumed that a breakpoint of some type will
1550 * be placed at that address. If the event mask for the thread
1551 * is on, the instruction at the address will be executed.
1552 * Otherwise, the instruction will be skipped.
1553 */
1554 #pragma weak td_thr_set_event = __td_thr_set_event
1555 td_err_e
1556 __td_thr_set_event(td_thrhandle_t *th_p, td_thr_events_t *events)
1557 {
1558 return (mod_eventset(th_p, events, 1));
1559 }
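
/*
 * Illustrative sketch (not compiled): the usual event set-up sequence.
 * The debugger asks for the notification addresses, plants breakpoints
 * there (plant_breakpoint() is a hypothetical client routine, not part
 * of this library), then enables the events globally and per thread.
 */
#if 0
static td_err_e
client_watch_lifecycle(td_thragent_t *ta_p, td_thrhandle_t *th_p)
{
	td_thr_events_t events;
	td_notify_t notify;
	td_err_e err;

	td_event_emptyset(&events);
	td_event_addset(&events, TD_CREATE);
	td_event_addset(&events, TD_DEATH);

	if ((err = td_ta_event_addr(ta_p, TD_CREATE, &notify)) != TD_OK)
		return (err);
	plant_breakpoint(notify.u.bptaddr);	/* hypothetical */
	if ((err = td_ta_event_addr(ta_p, TD_DEATH, &notify)) != TD_OK)
		return (err);
	plant_breakpoint(notify.u.bptaddr);	/* hypothetical */

	if ((err = td_ta_set_event(ta_p, &events)) != TD_OK)
		return (err);
	if ((err = td_thr_set_event(th_p, &events)) != TD_OK)
		return (err);
	return (td_thr_event_enable(th_p, 1));
}
#endif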
1560
1561 /*
1562 * Enable or disable a set of events in the process-global event mask,
1563 * depending on the value of onoff.
1564 */
1565 static td_err_e
1566 td_ta_mod_event(td_thragent_t *ta_p, td_thr_events_t *events, int onoff)
1567 {
1568 struct ps_prochandle *ph_p;
1569 td_thr_events_t targ_eventset;
1570 td_err_e return_val;
1571
1572 if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
1573 return (return_val);
1574 if (ps_pstop(ph_p) != PS_OK) {
1575 ph_unlock(ta_p);
1576 return (TD_DBERR);
1577 }
1578 if (ps_pdread(ph_p, ta_p->tdb_eventmask_addr,
1579 &targ_eventset, sizeof (targ_eventset)) != PS_OK)
1580 return_val = TD_DBERR;
1581 else {
1582 if (onoff)
1583 eventsetaddset(&targ_eventset, events);
1584 else
1585 eventsetdelset(&targ_eventset, events);
1586 if (ps_pdwrite(ph_p, ta_p->tdb_eventmask_addr,
1587 &targ_eventset, sizeof (targ_eventset)) != PS_OK)
1588 return_val = TD_DBERR;
1589 }
1590 (void) ps_pcontinue(ph_p);
1591 ph_unlock(ta_p);
1592 return (return_val);
1593 }
1594
1595 /*
1596 * Enable a set of events in the process-global event mask.
1597 */
1598 #pragma weak td_ta_set_event = __td_ta_set_event
1599 td_err_e
1600 __td_ta_set_event(td_thragent_t *ta_p, td_thr_events_t *events)
1601 {
1602 return (td_ta_mod_event(ta_p, events, 1));
1603 }
1604
1605 /*
1606 * Set event mask to disable the given event set; these events are cleared
1607 * from the event mask of the thread. Events that occur for a thread
1608 * with the event masked off will not cause notification to be
1609 * sent to the debugger (see td_thr_set_event for fuller description).
1610 */
1611 #pragma weak td_thr_clear_event = __td_thr_clear_event
1612 td_err_e
1613 __td_thr_clear_event(td_thrhandle_t *th_p, td_thr_events_t *events)
1614 {
1615 return (mod_eventset(th_p, events, 0));
1616 }
1617
1618 /*
1619 * Disable a set of events in the process-global event mask.
1620 */
1621 #pragma weak td_ta_clear_event = __td_ta_clear_event
1622 td_err_e
1623 __td_ta_clear_event(td_thragent_t *ta_p, td_thr_events_t *events)
1624 {
1625 return (td_ta_mod_event(ta_p, events, 0));
1626 }
1627
1628 /*
1629 * This function returns the most recent event message, if any,
1630 * associated with a thread. Given a thread handle, return the message
1631 * corresponding to the event encountered by the thread. Only one
1632 * message per thread is saved. Messages from earlier events are lost
1633 * when later events occur.
1634 */
1635 #pragma weak td_thr_event_getmsg = __td_thr_event_getmsg
1636 td_err_e
1637 __td_thr_event_getmsg(td_thrhandle_t *th_p, td_event_msg_t *msg)
1638 {
1639 struct ps_prochandle *ph_p;
1640 td_err_e return_val = TD_OK;
1641 psaddr_t psaddr;
1642
1643 if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
1644 return (return_val);
1645 if (ps_pstop(ph_p) != PS_OK) {
1646 ph_unlock(th_p->th_ta_p);
1647 return (TD_BADTA);
1648 }
1649 if (th_p->th_ta_p->model == PR_MODEL_NATIVE) {
1650 ulwp_t *ulwp = (ulwp_t *)th_p->th_unique;
1651 td_evbuf_t evbuf;
1652
1653 psaddr = (psaddr_t)&ulwp->ul_td_evbuf;
1654 if (ps_pdread(ph_p, psaddr, &evbuf, sizeof (evbuf)) != PS_OK) {
1655 return_val = TD_DBERR;
1656 } else if (evbuf.eventnum == TD_EVENT_NONE) {
1657 return_val = TD_NOEVENT;
1658 } else {
1659 msg->event = evbuf.eventnum;
1660 msg->th_p = (td_thrhandle_t *)th_p;
1661 msg->msg.data = (uintptr_t)evbuf.eventdata;
1662 /* "Consume" the message */
1663 evbuf.eventnum = TD_EVENT_NONE;
1664 evbuf.eventdata = NULL;
1665 if (ps_pdwrite(ph_p, psaddr, &evbuf, sizeof (evbuf))
1666 != PS_OK)
1667 return_val = TD_DBERR;
1668 }
1669 } else {
1670 #if defined(_LP64) && defined(_SYSCALL32)
1671 ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique;
1672 td_evbuf32_t evbuf;
1673
1674 psaddr = (psaddr_t)&ulwp->ul_td_evbuf;
1675 if (ps_pdread(ph_p, psaddr, &evbuf, sizeof (evbuf)) != PS_OK) {
1676 return_val = TD_DBERR;
1677 } else if (evbuf.eventnum == TD_EVENT_NONE) {
1678 return_val = TD_NOEVENT;
1679 } else {
1680 msg->event = evbuf.eventnum;
1681 msg->th_p = (td_thrhandle_t *)th_p;
1682 msg->msg.data = (uintptr_t)evbuf.eventdata;
1683 /* "Consume" the message */
1684 evbuf.eventnum = TD_EVENT_NONE;
1685 evbuf.eventdata = NULL;
1686 if (ps_pdwrite(ph_p, psaddr, &evbuf, sizeof (evbuf))
1687 != PS_OK)
1688 return_val = TD_DBERR;
1689 }
1690 #else
1691 return_val = TD_ERR;
1692 #endif /* _SYSCALL32 */
1693 }
1694
1695 (void) ps_pcontinue(ph_p);
1696 ph_unlock(th_p->th_ta_p);
1697 return (return_val);
1698 }
1699
1700 /*
1701 * The callback function td_ta_event_getmsg uses when looking for
1702 * a thread with an event. A thin wrapper around td_thr_event_getmsg.
1703 */
1704 static int
1705 event_msg_cb(const td_thrhandle_t *th_p, void *arg)
1706 {
1707 static td_thrhandle_t th;
1708 td_event_msg_t *msg = arg;
1709
1710 if (__td_thr_event_getmsg((td_thrhandle_t *)th_p, msg) == TD_OK) {
1711 /*
1712 * Got an event, stop iterating.
1713 *
1714 * Because of past mistakes in interface definition,
1715 * we are forced to pass back a static local variable
1716 * for the thread handle because th_p is a pointer
1717 * to a local variable in __td_ta_thr_iter().
1718 * Grr...
1719 */
1720 th = *th_p;
1721 msg->th_p = &th;
1722 return (1);
1723 }
1724 return (0);
1725 }
1726
1727 /*
1728 * This function is just like td_thr_event_getmsg, except that it is
1729 * passed a process handle rather than a thread handle, and returns
1730 * an event message for some thread in the process that has an event
1731 * message pending. If no thread has an event message pending, this
1732 * routine returns TD_NOEVENT. Thus, all pending event messages may
1733 * be collected from a process by repeatedly calling this routine
1734 * until it returns TD_NOEVENT.
1735 */
1736 #pragma weak td_ta_event_getmsg = __td_ta_event_getmsg
1737 td_err_e
1738 __td_ta_event_getmsg(td_thragent_t *ta_p, td_event_msg_t *msg)
1739 {
1740 td_err_e return_val;
1741
1742 if (ta_p == NULL)
1743 return (TD_BADTA);
1744 if (ta_p->ph_p == NULL)
1745 return (TD_BADPH);
1746 if (msg == NULL)
1747 return (TD_ERR);
1748 msg->event = TD_EVENT_NONE;
1749 if ((return_val = __td_ta_thr_iter(ta_p, event_msg_cb, msg,
1750 TD_THR_ANY_STATE, TD_THR_LOWEST_PRIORITY, TD_SIGNO_MASK,
1751 TD_THR_ANY_USER_FLAGS)) != TD_OK)
1752 return (return_val);
1753 if (msg->event == TD_EVENT_NONE)
1754 return (TD_NOEVENT);
1755 return (TD_OK);
1756 }
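
/*
 * Illustrative sketch (not compiled): draining all pending event messages
 * after the target stops on one of the planted event breakpoints, as the
 * comment above describes.
 */
#if 0
static void
client_drain_events(td_thragent_t *ta_p)
{
	td_event_msg_t msg;

	while (td_ta_event_getmsg(ta_p, &msg) == TD_OK) {
		/*
		 * msg.th_p may point at a static handle inside libc_db;
		 * copy anything needed before the next call.
		 */
		(void) printf("event %d data %lx\n",
		    (int)msg.event, (ulong_t)msg.msg.data);
	}
}
#endif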
1757
1758 static lwpid_t
1759 thr_to_lwpid(const td_thrhandle_t *th_p)
1760 {
1761 struct ps_prochandle *ph_p = th_p->th_ta_p->ph_p;
1762 lwpid_t lwpid;
1763
1764 /*
1765 * The caller holds the prochandle lock
1766 * and has already verified everything.
1767 */
1768 if (th_p->th_ta_p->model == PR_MODEL_NATIVE) {
1769 ulwp_t *ulwp = (ulwp_t *)th_p->th_unique;
1770
1771 if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_lwpid,
1772 &lwpid, sizeof (lwpid)) != PS_OK)
1773 lwpid = 0;
1774 else if (lwpid == 0)
1775 lwpid = 1;
1776 } else {
1777 #if defined(_LP64) && defined(_SYSCALL32)
1778 ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique;
1779
1780 if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_lwpid,
1781 &lwpid, sizeof (lwpid)) != PS_OK)
1782 lwpid = 0;
1783 else if (lwpid == 0)
1784 lwpid = 1;
1785 #else
1786 lwpid = 0;
1787 #endif /* _SYSCALL32 */
1788 }
1789
1790 return (lwpid);
1791 }
1792
1793 /*
1794 * Suspend a thread.
1795 * XXX: What does this mean in a one-level model?
1796 */
1797 #pragma weak td_thr_dbsuspend = __td_thr_dbsuspend
1798 td_err_e
1799 __td_thr_dbsuspend(const td_thrhandle_t *th_p)
1800 {
1801 struct ps_prochandle *ph_p;
1802 td_err_e return_val;
1803
1804 if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
1805 return (return_val);
1806 if (ps_lstop(ph_p, thr_to_lwpid(th_p)) != PS_OK)
1807 return_val = TD_DBERR;
1808 ph_unlock(th_p->th_ta_p);
1809 return (return_val);
1810 }
1811
1812 /*
1813 * Resume a suspended thread.
1814 * XXX: What does this mean in a one-level model?
1815 */
1816 #pragma weak td_thr_dbresume = __td_thr_dbresume
1817 td_err_e
1818 __td_thr_dbresume(const td_thrhandle_t *th_p)
1819 {
1820 struct ps_prochandle *ph_p;
1821 td_err_e return_val;
1822
1823 if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
1824 return (return_val);
1825 if (ps_lcontinue(ph_p, thr_to_lwpid(th_p)) != PS_OK)
1826 return_val = TD_DBERR;
1827 ph_unlock(th_p->th_ta_p);
1828 return (return_val);
1829 }
1830
1831 /*
1832 * Set a thread's signal mask.
1833 * Currently unused by dbx.
1834 */
1835 #pragma weak td_thr_sigsetmask = __td_thr_sigsetmask
1836 /* ARGSUSED */
1837 td_err_e
1838 __td_thr_sigsetmask(const td_thrhandle_t *th_p, const sigset_t ti_sigmask)
1839 {
1840 return (TD_NOCAPAB);
1841 }
1842
1843 /*
1844 * Set a thread's "signals-pending" set.
1845 * Currently unused by dbx.
1846 */
1847 #pragma weak td_thr_setsigpending = __td_thr_setsigpending
1848 /* ARGSUSED */
1849 td_err_e
1850 __td_thr_setsigpending(const td_thrhandle_t *th_p,
1851 uchar_t ti_pending_flag, const sigset_t ti_pending)
1852 {
1853 return (TD_NOCAPAB);
1854 }
1855
1856 /*
1857 * Get a thread's general register set.
1858 */
1859 #pragma weak td_thr_getgregs = __td_thr_getgregs
1860 td_err_e
1861 __td_thr_getgregs(td_thrhandle_t *th_p, prgregset_t regset)
1862 {
1863 struct ps_prochandle *ph_p;
1864 td_err_e return_val;
1865
1866 if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
1867 return (return_val);
1868 if (ps_pstop(ph_p) != PS_OK) {
1869 ph_unlock(th_p->th_ta_p);
1870 return (TD_DBERR);
1871 }
1872
1873 if (ps_lgetregs(ph_p, thr_to_lwpid(th_p), regset) != PS_OK)
1874 return_val = TD_DBERR;
1875
1876 (void) ps_pcontinue(ph_p);
1877 ph_unlock(th_p->th_ta_p);
1878 return (return_val);
1879 }
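/*
 * Illustrative sketch (not part of libc_db): fetching the general
 * registers of the thread running on a given LWP, by first mapping the
 * lwpid to a thread handle with td_ta_map_lwp2thr() (defined later in
 * this file).  Interpreting individual slots of prgregset_t is
 * architecture-specific and is not shown.  The helper name is
 * hypothetical.
 */
#if 0	/* example only, never compiled */
static td_err_e
read_thread_gregs(td_thragent_t *ta_p, lwpid_t lwpid, prgregset_t regs)
{
	td_thrhandle_t th;
	td_err_e err;

	if ((err = td_ta_map_lwp2thr(ta_p, lwpid, &th)) != TD_OK)
		return (err);
	/* The target process is stopped internally while the read occurs. */
	return (td_thr_getgregs(&th, regs));
}
#endif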
1880
1881 /*
1882 * Set a thread's general register set.
1883 */
1884 #pragma weak td_thr_setgregs = __td_thr_setgregs
1885 td_err_e
1886 __td_thr_setgregs(td_thrhandle_t *th_p, const prgregset_t regset)
1887 {
1888 struct ps_prochandle *ph_p;
1889 td_err_e return_val;
1890
1891 if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
1892 return (return_val);
1893 if (ps_pstop(ph_p) != PS_OK) {
1894 ph_unlock(th_p->th_ta_p);
1895 return (TD_DBERR);
1896 }
1897
1898 if (ps_lsetregs(ph_p, thr_to_lwpid(th_p), regset) != PS_OK)
1899 return_val = TD_DBERR;
1900
1901 (void) ps_pcontinue(ph_p);
1902 ph_unlock(th_p->th_ta_p);
1903 return (return_val);
1904 }
1905
1906 /*
1907 * Get a thread's floating-point register set.
1908 */
1909 #pragma weak td_thr_getfpregs = __td_thr_getfpregs
1910 td_err_e
1911 __td_thr_getfpregs(td_thrhandle_t *th_p, prfpregset_t *fpregset)
1912 {
1913 struct ps_prochandle *ph_p;
1914 td_err_e return_val;
1915
1916 if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
1917 return (return_val);
1918 if (ps_pstop(ph_p) != PS_OK) {
1919 ph_unlock(th_p->th_ta_p);
1920 return (TD_DBERR);
1921 }
1922
1923 if (ps_lgetfpregs(ph_p, thr_to_lwpid(th_p), fpregset) != PS_OK)
1924 return_val = TD_DBERR;
1925
1926 (void) ps_pcontinue(ph_p);
1927 ph_unlock(th_p->th_ta_p);
1928 return (return_val);
1929 }
1930
1931 /*
1932 * Set a thread's floating-point register set.
1933 */
1934 #pragma weak td_thr_setfpregs = __td_thr_setfpregs
1935 td_err_e
1936 __td_thr_setfpregs(td_thrhandle_t *th_p, const prfpregset_t *fpregset)
1937 {
1938 struct ps_prochandle *ph_p;
1939 td_err_e return_val;
1940
1941 if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
1942 return (return_val);
1943 if (ps_pstop(ph_p) != PS_OK) {
1944 ph_unlock(th_p->th_ta_p);
1945 return (TD_DBERR);
1946 }
1947
1948 if (ps_lsetfpregs(ph_p, thr_to_lwpid(th_p), fpregset) != PS_OK)
1949 return_val = TD_DBERR;
1950
1951 (void) ps_pcontinue(ph_p);
1952 ph_unlock(th_p->th_ta_p);
1953 return (return_val);
1954 }
1955
1956 /*
1957 * Get the size of the extra state register set for this architecture.
1958 * Currently unused by dbx.
1959 */
1960 #pragma weak td_thr_getxregsize = __td_thr_getxregsize
1961 /* ARGSUSED */
1962 td_err_e
1963 __td_thr_getxregsize(td_thrhandle_t *th_p, int *xregsize)
1964 {
1965 #if defined(__sparc)
1966 struct ps_prochandle *ph_p;
1967 td_err_e return_val;
1968
1969 if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
1970 return (return_val);
1971 if (ps_pstop(ph_p) != PS_OK) {
1972 ph_unlock(th_p->th_ta_p);
1973 return (TD_DBERR);
1974 }
1975
1976 if (ps_lgetxregsize(ph_p, thr_to_lwpid(th_p), xregsize) != PS_OK)
1977 return_val = TD_DBERR;
1978
1979 (void) ps_pcontinue(ph_p);
1980 ph_unlock(th_p->th_ta_p);
1981 return (return_val);
1982 #else /* __sparc */
1983 return (TD_NOXREGS);
1984 #endif /* __sparc */
1985 }
1986
1987 /*
1988 * Get a thread's extra state register set.
1989 */
1990 #pragma weak td_thr_getxregs = __td_thr_getxregs
1991 /* ARGSUSED */
1992 td_err_e
1993 __td_thr_getxregs(td_thrhandle_t *th_p, void *xregset)
1994 {
1995 #if defined(__sparc)
1996 struct ps_prochandle *ph_p;
1997 td_err_e return_val;
1998
1999 if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
2000 return (return_val);
2001 if (ps_pstop(ph_p) != PS_OK) {
2002 ph_unlock(th_p->th_ta_p);
2003 return (TD_DBERR);
2004 }
2005
2006 if (ps_lgetxregs(ph_p, thr_to_lwpid(th_p), (caddr_t)xregset) != PS_OK)
2007 return_val = TD_DBERR;
2008
2009 (void) ps_pcontinue(ph_p);
2010 ph_unlock(th_p->th_ta_p);
2011 return (return_val);
2012 #else /* __sparc */
2013 return (TD_NOXREGS);
2014 #endif /* __sparc */
2015 }
2016
2017 /*
2018 * Set a thread's extra state register set.
2019 */
2020 #pragma weak td_thr_setxregs = __td_thr_setxregs
2021 /* ARGSUSED */
2022 td_err_e
2023 __td_thr_setxregs(td_thrhandle_t *th_p, const void *xregset)
2024 {
2025 #if defined(__sparc)
2026 struct ps_prochandle *ph_p;
2027 td_err_e return_val;
2028
2029 if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
2030 return (return_val);
2031 if (ps_pstop(ph_p) != PS_OK) {
2032 ph_unlock(th_p->th_ta_p);
2033 return (TD_DBERR);
2034 }
2035
2036 if (ps_lsetxregs(ph_p, thr_to_lwpid(th_p), (caddr_t)xregset) != PS_OK)
2037 return_val = TD_DBERR;
2038
2039 (void) ps_pcontinue(ph_p);
2040 ph_unlock(th_p->th_ta_p);
2041 return (return_val);
2042 #else /* __sparc */
2043 return (TD_NOXREGS);
2044 #endif /* __sparc */
2045 }
2046
2047 struct searcher {
2048 psaddr_t addr;
2049 int status;
2050 };
2051
2052 /*
2053  * Check the struct thread address in *th_p against the first
2054  * value in "data".  If that value is found, set the second value
2055  * in "data" to 1 and return 1 to terminate iterations.
2056 * This function is used by td_thr_validate() to verify that
2057 * a thread handle is valid.
2058 */
2059 static int
2060 td_searcher(const td_thrhandle_t *th_p, void *data)
2061 {
2062 struct searcher *searcher_data = (struct searcher *)data;
2063
2064 if (searcher_data->addr == th_p->th_unique) {
2065 searcher_data->status = 1;
2066 return (1);
2067 }
2068 return (0);
2069 }
2070
2071 /*
2072 * Validate the thread handle. Check that
2073 * a thread exists in the thread agent/process that
2074  * corresponds to the thread with handle *th_p.
2075 * Currently unused by dbx.
2076 */
2077 #pragma weak td_thr_validate = __td_thr_validate
2078 td_err_e
2079 __td_thr_validate(const td_thrhandle_t *th_p)
2080 {
2081 td_err_e return_val;
2082 struct searcher searcher_data = {0, 0};
2083
2084 if (th_p == NULL)
2085 return (TD_BADTH);
2086 if (th_p->th_unique == NULL || th_p->th_ta_p == NULL)
2087 return (TD_BADTH);
2088
2089 /*
2090 * LOCKING EXCEPTION - Locking is not required
2091 * here because no use of the thread agent is made (other
2092 * than the sanity check) and checking of the thread
2093 * agent will be done in __td_ta_thr_iter.
2094 */
2095
2096 searcher_data.addr = th_p->th_unique;
2097 return_val = __td_ta_thr_iter(th_p->th_ta_p,
2098 td_searcher, &searcher_data,
2099 TD_THR_ANY_STATE, TD_THR_LOWEST_PRIORITY,
2100 TD_SIGNO_MASK, TD_THR_ANY_USER_FLAGS);
2101
2102 if (return_val == TD_OK && searcher_data.status == 0)
2103 return_val = TD_NOTHR;
2104
2105 return (return_val);
2106 }
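/*
 * Illustrative sketch (not part of libc_db): a debugger that caches
 * thread handles can use td_thr_validate() to discard handles for
 * threads that have since exited.  The helper name is hypothetical.
 */
#if 0	/* example only, never compiled */
static void
prune_stale_handle(td_thrhandle_t *cached)
{
	if (td_thr_validate(cached) != TD_OK) {
		/* The thread is gone (TD_NOTHR) or the handle is bad. */
		cached->th_unique = NULL;	/* mark the cache slot free */
	}
}
#endif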
2107
2108 /*
2109  * Get a thread's private binding to a given thread-specific
2110  * data (TSD) key (see thr_getspecific(3T)).  If the thread doesn't
2111 * have a binding for a particular key, then NULL is returned.
2112 */
2113 #pragma weak td_thr_tsd = __td_thr_tsd
2114 td_err_e
2115 __td_thr_tsd(td_thrhandle_t *th_p, thread_key_t key, void **data_pp)
2116 {
2117 struct ps_prochandle *ph_p;
2118 td_thragent_t *ta_p;
2119 td_err_e return_val;
2120 int maxkey;
2121 int nkey;
2122 psaddr_t tsd_paddr;
2123
2124 if (data_pp == NULL)
2125 return (TD_ERR);
2126 *data_pp = NULL;
2127 if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
2128 return (return_val);
2129 ta_p = th_p->th_ta_p;
2130 if (ps_pstop(ph_p) != PS_OK) {
2131 ph_unlock(ta_p);
2132 return (TD_DBERR);
2133 }
2134
2135 if (ta_p->model == PR_MODEL_NATIVE) {
2136 ulwp_t *ulwp = (ulwp_t *)th_p->th_unique;
2137 tsd_metadata_t tsdm;
2138 tsd_t stsd;
2139
2140 if (ps_pdread(ph_p,
2141 ta_p->uberdata_addr + offsetof(uberdata_t, tsd_metadata),
2142 &tsdm, sizeof (tsdm)) != PS_OK)
2143 return_val = TD_DBERR;
2144 else if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_stsd,
2145 &tsd_paddr, sizeof (tsd_paddr)) != PS_OK)
2146 return_val = TD_DBERR;
2147 else if (tsd_paddr != NULL &&
2148 ps_pdread(ph_p, tsd_paddr, &stsd, sizeof (stsd)) != PS_OK)
2149 return_val = TD_DBERR;
2150 else {
2151 maxkey = tsdm.tsdm_nused;
2152 nkey = tsd_paddr == NULL ? TSD_NFAST : stsd.tsd_nalloc;
2153
2154 if (key < TSD_NFAST)
2155 tsd_paddr = (psaddr_t)&ulwp->ul_ftsd[0];
2156 }
2157 } else {
2158 #if defined(_LP64) && defined(_SYSCALL32)
2159 ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique;
2160 tsd_metadata32_t tsdm;
2161 tsd32_t stsd;
2162 caddr32_t addr;
2163
2164 if (ps_pdread(ph_p,
2165 ta_p->uberdata_addr + offsetof(uberdata32_t, tsd_metadata),
2166 &tsdm, sizeof (tsdm)) != PS_OK)
2167 return_val = TD_DBERR;
2168 else if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_stsd,
2169 &addr, sizeof (addr)) != PS_OK)
2170 return_val = TD_DBERR;
2171 else if (addr != NULL &&
2172 ps_pdread(ph_p, addr, &stsd, sizeof (stsd)) != PS_OK)
2173 return_val = TD_DBERR;
2174 else {
2175 maxkey = tsdm.tsdm_nused;
2176 nkey = addr == NULL ? TSD_NFAST : stsd.tsd_nalloc;
2177
2178 if (key < TSD_NFAST) {
2179 tsd_paddr = (psaddr_t)&ulwp->ul_ftsd[0];
2180 } else {
2181 tsd_paddr = addr;
2182 }
2183 }
2184 #else
2185 return_val = TD_ERR;
2186 #endif /* _SYSCALL32 */
2187 }
2188
2189 if (return_val == TD_OK && (key < 1 || key >= maxkey))
2190 return_val = TD_NOTSD;
2191 if (return_val != TD_OK || key >= nkey) {
2192 /* NULL has already been stored in data_pp */
2193 (void) ps_pcontinue(ph_p);
2194 ph_unlock(ta_p);
2195 return (return_val);
2196 }
2197
2198 /*
2199 * Read the value from the thread's tsd array.
2200 */
2201 if (ta_p->model == PR_MODEL_NATIVE) {
2202 void *value;
2203
2204 if (ps_pdread(ph_p, tsd_paddr + key * sizeof (void *),
2205 &value, sizeof (value)) != PS_OK)
2206 return_val = TD_DBERR;
2207 else
2208 *data_pp = value;
2209 #if defined(_LP64) && defined(_SYSCALL32)
2210 } else {
2211 caddr32_t value32;
2212
2213 if (ps_pdread(ph_p, tsd_paddr + key * sizeof (caddr32_t),
2214 &value32, sizeof (value32)) != PS_OK)
2215 return_val = TD_DBERR;
2216 else
2217 *data_pp = (void *)(uintptr_t)value32;
2218 #endif /* _SYSCALL32 */
2219 }
2220
2221 (void) ps_pcontinue(ph_p);
2222 ph_unlock(ta_p);
2223 return (return_val);
2224 }
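/*
 * Illustrative sketch (not part of libc_db): checking whether a thread
 * in the target has a binding for a TSD key.  The key would normally be
 * discovered by iterating over the process's keys (td_ta_tsd_iter());
 * here it is simply assumed.  The helper name is hypothetical.
 */
#if 0	/* example only, never compiled */
static int
has_tsd_binding(td_thrhandle_t *th_p, thread_key_t key)
{
	void *value = NULL;

	/* NULL means the thread has no binding for this key. */
	if (td_thr_tsd(th_p, key, &value) != TD_OK)
		return (0);
	/* "value" is an address in the target; use ps_pdread() to follow it. */
	return (value != NULL);
}
#endif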
2225
2226 /*
2227 * Get the base address of a thread's thread local storage (TLS) block
2228 * for the module (executable or shared object) identified by 'moduleid'.
2229 */
2230 #pragma weak td_thr_tlsbase = __td_thr_tlsbase
2231 td_err_e
2232 __td_thr_tlsbase(td_thrhandle_t *th_p, ulong_t moduleid, psaddr_t *base)
2233 {
2234 struct ps_prochandle *ph_p;
2235 td_thragent_t *ta_p;
2236 td_err_e return_val;
2237
2238 if (base == NULL)
2239 return (TD_ERR);
2240 *base = NULL;
2241 if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
2242 return (return_val);
2243 ta_p = th_p->th_ta_p;
2244 if (ps_pstop(ph_p) != PS_OK) {
2245 ph_unlock(ta_p);
2246 return (TD_DBERR);
2247 }
2248
2249 if (ta_p->model == PR_MODEL_NATIVE) {
2250 ulwp_t *ulwp = (ulwp_t *)th_p->th_unique;
2251 tls_metadata_t tls_metadata;
2252 TLS_modinfo tlsmod;
2253 tls_t tls;
2254
2255 if (ps_pdread(ph_p,
2256 ta_p->uberdata_addr + offsetof(uberdata_t, tls_metadata),
2257 &tls_metadata, sizeof (tls_metadata)) != PS_OK)
2258 return_val = TD_DBERR;
2259 else if (moduleid >= tls_metadata.tls_modinfo.tls_size)
2260 return_val = TD_NOTLS;
2261 else if (ps_pdread(ph_p,
2262 (psaddr_t)((TLS_modinfo *)
2263 tls_metadata.tls_modinfo.tls_data + moduleid),
2264 &tlsmod, sizeof (tlsmod)) != PS_OK)
2265 return_val = TD_DBERR;
2266 else if (tlsmod.tm_memsz == 0)
2267 return_val = TD_NOTLS;
2268 else if (tlsmod.tm_flags & TM_FLG_STATICTLS)
2269 *base = (psaddr_t)ulwp - tlsmod.tm_stattlsoffset;
2270 else if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_tls,
2271 &tls, sizeof (tls)) != PS_OK)
2272 return_val = TD_DBERR;
2273 else if (moduleid >= tls.tls_size)
2274 return_val = TD_TLSDEFER;
2275 else if (ps_pdread(ph_p,
2276 (psaddr_t)((tls_t *)tls.tls_data + moduleid),
2277 &tls, sizeof (tls)) != PS_OK)
2278 return_val = TD_DBERR;
2279 else if (tls.tls_size == 0)
2280 return_val = TD_TLSDEFER;
2281 else
2282 *base = (psaddr_t)tls.tls_data;
2283 } else {
2284 #if defined(_LP64) && defined(_SYSCALL32)
2285 ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique;
2286 tls_metadata32_t tls_metadata;
2287 TLS_modinfo32 tlsmod;
2288 tls32_t tls;
2289
2290 if (ps_pdread(ph_p,
2291 ta_p->uberdata_addr + offsetof(uberdata32_t, tls_metadata),
2292 &tls_metadata, sizeof (tls_metadata)) != PS_OK)
2293 return_val = TD_DBERR;
2294 else if (moduleid >= tls_metadata.tls_modinfo.tls_size)
2295 return_val = TD_NOTLS;
2296 else if (ps_pdread(ph_p,
2297 (psaddr_t)((TLS_modinfo32 *)
2298 (uintptr_t)tls_metadata.tls_modinfo.tls_data + moduleid),
2299 &tlsmod, sizeof (tlsmod)) != PS_OK)
2300 return_val = TD_DBERR;
2301 else if (tlsmod.tm_memsz == 0)
2302 return_val = TD_NOTLS;
2303 else if (tlsmod.tm_flags & TM_FLG_STATICTLS)
2304 *base = (psaddr_t)ulwp - tlsmod.tm_stattlsoffset;
2305 else if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_tls,
2306 &tls, sizeof (tls)) != PS_OK)
2307 return_val = TD_DBERR;
2308 else if (moduleid >= tls.tls_size)
2309 return_val = TD_TLSDEFER;
2310 else if (ps_pdread(ph_p,
2311 (psaddr_t)((tls32_t *)(uintptr_t)tls.tls_data + moduleid),
2312 &tls, sizeof (tls)) != PS_OK)
2313 return_val = TD_DBERR;
2314 else if (tls.tls_size == 0)
2315 return_val = TD_TLSDEFER;
2316 else
2317 *base = (psaddr_t)tls.tls_data;
2318 #else
2319 return_val = TD_ERR;
2320 #endif /* _SYSCALL32 */
2321 }
2322
2323 (void) ps_pcontinue(ph_p);
2324 ph_unlock(ta_p);
2325 return (return_val);
2326 }
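/*
 * Illustrative sketch (not part of libc_db): locating a TLS variable in
 * the target.  Given the module id and the variable's offset within that
 * module's TLS block (both obtained elsewhere, e.g. from the run-time
 * linker's debugging information; not shown), the variable's address is
 * the per-thread base returned by td_thr_tlsbase() plus that offset.
 * The helper name is hypothetical.
 */
#if 0	/* example only, never compiled */
static td_err_e
tls_var_addr(td_thrhandle_t *th_p, ulong_t moduleid, size_t tls_offset,
	psaddr_t *addrp)
{
	psaddr_t base;
	td_err_e err;

	if ((err = td_thr_tlsbase(th_p, moduleid, &base)) != TD_OK)
		return (err);	/* TD_TLSDEFER: block not yet allocated */
	*addrp = base + tls_offset;
	return (TD_OK);
}
#endif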
2327
2328 /*
2329 * Change a thread's priority to the value specified by ti_pri.
2330 * Currently unused by dbx.
2331 */
2332 #pragma weak td_thr_setprio = __td_thr_setprio
2333 /* ARGSUSED */
2334 td_err_e
2335 __td_thr_setprio(td_thrhandle_t *th_p, int ti_pri)
2336 {
2337 return (TD_NOCAPAB);
2338 }
2339
2340 /*
2341 * This structure links td_thr_lockowner and the lowner_cb callback function.
2342 */
2343 typedef struct {
2344 td_sync_iter_f *owner_cb;
2345 void *owner_cb_arg;
2346 td_thrhandle_t *th_p;
2347 } lowner_cb_ctl_t;
2348
2349 static int
2350 lowner_cb(const td_synchandle_t *sh_p, void *arg)
2351 {
2352 lowner_cb_ctl_t *ocb = arg;
2353 int trunc = 0;
2354 union {
2355 rwlock_t rwl;
2356 mutex_t mx;
2357 } rw_m;
2358
2359 if (ps_pdread(sh_p->sh_ta_p->ph_p, sh_p->sh_unique,
2360 &rw_m, sizeof (rw_m)) != PS_OK) {
2361 trunc = 1;
2362 if (ps_pdread(sh_p->sh_ta_p->ph_p, sh_p->sh_unique,
2363 &rw_m.mx, sizeof (rw_m.mx)) != PS_OK)
2364 return (0);
2365 }
2366 if (rw_m.mx.mutex_magic == MUTEX_MAGIC &&
2367 rw_m.mx.mutex_owner == ocb->th_p->th_unique)
2368 return ((ocb->owner_cb)(sh_p, ocb->owner_cb_arg));
2369 if (!trunc && rw_m.rwl.magic == RWL_MAGIC) {
2370 mutex_t *rwlock = &rw_m.rwl.mutex;
2371 if (rwlock->mutex_owner == ocb->th_p->th_unique)
2372 return ((ocb->owner_cb)(sh_p, ocb->owner_cb_arg));
2373 }
2374 return (0);
2375 }
2376
2377 /*
2378 * Iterate over the set of locks owned by a specified thread.
2379 * If cb returns a non-zero value, terminate iterations.
2380 */
2381 #pragma weak td_thr_lockowner = __td_thr_lockowner
2382 td_err_e
2383 __td_thr_lockowner(const td_thrhandle_t *th_p, td_sync_iter_f *cb,
2384 void *cb_data)
2385 {
2386 td_thragent_t *ta_p;
2387 td_err_e return_val;
2388 lowner_cb_ctl_t lcb;
2389
2390 /*
2391 * Just sanity checks.
2392 */
2393 if (ph_lock_th((td_thrhandle_t *)th_p, &return_val) == NULL)
2394 return (return_val);
2395 ta_p = th_p->th_ta_p;
2396 ph_unlock(ta_p);
2397
2398 lcb.owner_cb = cb;
2399 lcb.owner_cb_arg = cb_data;
2400 lcb.th_p = (td_thrhandle_t *)th_p;
2401 return (__td_ta_sync_iter(ta_p, lowner_cb, &lcb));
2402 }
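/*
 * Illustrative sketch (not part of libc_db): counting the locks held by
 * a thread with td_thr_lockowner().  The callback matches td_sync_iter_f
 * and simply increments a counter passed through cb_data.  The helper
 * names are hypothetical.
 */
#if 0	/* example only, never compiled */
static int
count_lock_cb(const td_synchandle_t *sh_p, void *cb_data)
{
	(*(int *)cb_data)++;
	return (0);		/* keep iterating */
}

static int
locks_held(const td_thrhandle_t *th_p)
{
	int nlocks = 0;

	(void) td_thr_lockowner(th_p, count_lock_cb, &nlocks);
	return (nlocks);
}
#endif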
2403
2404 /*
2405 * If a thread is asleep on a synchronization variable,
2406 * then get the synchronization handle.
2407 */
2408 #pragma weak td_thr_sleepinfo = __td_thr_sleepinfo
2409 td_err_e
2410 __td_thr_sleepinfo(const td_thrhandle_t *th_p, td_synchandle_t *sh_p)
2411 {
2412 struct ps_prochandle *ph_p;
2413 td_err_e return_val = TD_OK;
2414 uintptr_t wchan;
2415
2416 if (sh_p == NULL)
2417 return (TD_ERR);
2418 if ((ph_p = ph_lock_th((td_thrhandle_t *)th_p, &return_val)) == NULL)
2419 return (return_val);
2420
2421 /*
2422 * No need to stop the process for a simple read.
2423 */
2424 if (th_p->th_ta_p->model == PR_MODEL_NATIVE) {
2425 ulwp_t *ulwp = (ulwp_t *)th_p->th_unique;
2426
2427 if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_wchan,
2428 &wchan, sizeof (wchan)) != PS_OK)
2429 return_val = TD_DBERR;
2430 } else {
2431 #if defined(_LP64) && defined(_SYSCALL32)
2432 ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique;
2433 caddr32_t wchan32;
2434
2435 if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_wchan,
2436 &wchan32, sizeof (wchan32)) != PS_OK)
2437 return_val = TD_DBERR;
2438 wchan = wchan32;
2439 #else
2440 return_val = TD_ERR;
2441 #endif /* _SYSCALL32 */
2442 }
2443
2444 if (return_val != TD_OK || wchan == NULL) {
2445 sh_p->sh_ta_p = NULL;
2446 sh_p->sh_unique = NULL;
2447 if (return_val == TD_OK)
2448 return_val = TD_ERR;
2449 } else {
2450 sh_p->sh_ta_p = th_p->th_ta_p;
2451 sh_p->sh_unique = (psaddr_t)wchan;
2452 }
2453
2454 ph_unlock(th_p->th_ta_p);
2455 return (return_val);
2456 }
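/*
 * Illustrative sketch (not part of libc_db): discovering what a sleeping
 * thread is blocked on by combining td_thr_sleepinfo() with
 * td_sync_get_info() (defined below).  The helper name is hypothetical.
 */
#if 0	/* example only, never compiled */
static td_err_e
what_is_it_waiting_on(const td_thrhandle_t *th_p, td_syncinfo_t *si)
{
	td_synchandle_t sh;
	td_err_e err;

	if ((err = td_thr_sleepinfo(th_p, &sh)) != TD_OK)
		return (err);
	/* si->si_type says whether it is a mutex, condvar, rwlock, etc. */
	return (td_sync_get_info(&sh, si));
}
#endif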
2457
2458 /*
2459 * Which thread is running on an lwp?
2460 */
2461 #pragma weak td_ta_map_lwp2thr = __td_ta_map_lwp2thr
2462 td_err_e
2463 __td_ta_map_lwp2thr(td_thragent_t *ta_p, lwpid_t lwpid,
2464 td_thrhandle_t *th_p)
2465 {
2466 return (__td_ta_map_id2thr(ta_p, lwpid, th_p));
2467 }
2468
2469 /*
2470 * Common code for td_sync_get_info() and td_sync_get_stats()
2471 */
2472 static td_err_e
2473 sync_get_info_common(const td_synchandle_t *sh_p, struct ps_prochandle *ph_p,
2474 td_syncinfo_t *si_p)
2475 {
2476 int trunc = 0;
2477 td_so_un_t generic_so;
2478
2479 /*
2480 * Determine the sync. object type; a little type fudgery here.
2481 * First attempt to read the whole union. If that fails, attempt
2482 * to read just the condvar. A condvar is the smallest sync. object.
2483 */
2484 if (ps_pdread(ph_p, sh_p->sh_unique,
2485 &generic_so, sizeof (generic_so)) != PS_OK) {
2486 trunc = 1;
2487 if (ps_pdread(ph_p, sh_p->sh_unique, &generic_so.condition,
2488 sizeof (generic_so.condition)) != PS_OK)
2489 return (TD_DBERR);
2490 }
2491
2492 switch (generic_so.condition.cond_magic) {
2493 case MUTEX_MAGIC:
2494 if (trunc && ps_pdread(ph_p, sh_p->sh_unique,
2495 &generic_so.lock, sizeof (generic_so.lock)) != PS_OK)
2496 return (TD_DBERR);
2497 si_p->si_type = TD_SYNC_MUTEX;
2498 si_p->si_shared_type =
2499 (generic_so.lock.mutex_type & USYNC_PROCESS);
2500 (void) memcpy(si_p->si_flags, &generic_so.lock.mutex_flag,
2501 sizeof (generic_so.lock.mutex_flag));
2502 si_p->si_state.mutex_locked =
2503 (generic_so.lock.mutex_lockw != 0);
2504 si_p->si_size = sizeof (generic_so.lock);
2505 si_p->si_has_waiters = generic_so.lock.mutex_waiters;
2506 si_p->si_rcount = generic_so.lock.mutex_rcount;
2507 si_p->si_prioceiling = generic_so.lock.mutex_ceiling;
2508 if (si_p->si_state.mutex_locked) {
2509 if (si_p->si_shared_type & USYNC_PROCESS)
2510 si_p->si_ownerpid =
2511 generic_so.lock.mutex_ownerpid;
2512 si_p->si_owner.th_ta_p = sh_p->sh_ta_p;
2513 si_p->si_owner.th_unique = generic_so.lock.mutex_owner;
2514 }
2515 break;
2516 case COND_MAGIC:
2517 si_p->si_type = TD_SYNC_COND;
2518 si_p->si_shared_type =
2519 (generic_so.condition.cond_type & USYNC_PROCESS);
2520 (void) memcpy(si_p->si_flags, generic_so.condition.flags.flag,
2521 sizeof (generic_so.condition.flags.flag));
2522 si_p->si_size = sizeof (generic_so.condition);
2523 si_p->si_has_waiters =
2524 (generic_so.condition.cond_waiters_user |
2525 generic_so.condition.cond_waiters_kernel)? 1 : 0;
2526 break;
2527 case SEMA_MAGIC:
2528 if (trunc && ps_pdread(ph_p, sh_p->sh_unique,
2529 &generic_so.semaphore, sizeof (generic_so.semaphore))
2530 != PS_OK)
2531 return (TD_DBERR);
2532 si_p->si_type = TD_SYNC_SEMA;
2533 si_p->si_shared_type =
2534 (generic_so.semaphore.type & USYNC_PROCESS);
2535 si_p->si_state.sem_count = generic_so.semaphore.count;
2536 si_p->si_size = sizeof (generic_so.semaphore);
2537 si_p->si_has_waiters =
2538 ((lwp_sema_t *)&generic_so.semaphore)->flags[7];
2539 /* this is useless but the old interface provided it */
2540 si_p->si_data = (psaddr_t)generic_so.semaphore.count;
2541 break;
2542 case RWL_MAGIC:
2543 {
2544 uint32_t rwstate;
2545
2546 if (trunc && ps_pdread(ph_p, sh_p->sh_unique,
2547 &generic_so.rwlock, sizeof (generic_so.rwlock)) != PS_OK)
2548 return (TD_DBERR);
2549 si_p->si_type = TD_SYNC_RWLOCK;
2550 si_p->si_shared_type =
2551 (generic_so.rwlock.rwlock_type & USYNC_PROCESS);
2552 si_p->si_size = sizeof (generic_so.rwlock);
2553
2554 rwstate = (uint32_t)generic_so.rwlock.rwlock_readers;
2555 if (rwstate & URW_WRITE_LOCKED) {
2556 si_p->si_state.nreaders = -1;
2557 si_p->si_is_wlock = 1;
2558 si_p->si_owner.th_ta_p = sh_p->sh_ta_p;
2559 si_p->si_owner.th_unique =
2560 generic_so.rwlock.rwlock_owner;
2561 if (si_p->si_shared_type & USYNC_PROCESS)
2562 si_p->si_ownerpid =
2563 generic_so.rwlock.rwlock_ownerpid;
2564 } else {
2565 si_p->si_state.nreaders = (rwstate & URW_READERS_MASK);
2566 }
2567 si_p->si_has_waiters = ((rwstate & URW_HAS_WAITERS) != 0);
2568
2569 /* this is useless but the old interface provided it */
2570 si_p->si_data = (psaddr_t)generic_so.rwlock.readers;
2571 break;
2572 }
2573 default:
2574 return (TD_BADSH);
2575 }
2576
2577 si_p->si_ta_p = sh_p->sh_ta_p;
2578 si_p->si_sv_addr = sh_p->sh_unique;
2579 return (TD_OK);
2580 }
2581
2582 /*
2583 * Given a synchronization handle, fill in the
2584 * information for the synchronization variable into *si_p.
2585 */
2586 #pragma weak td_sync_get_info = __td_sync_get_info
2587 td_err_e
2588 __td_sync_get_info(const td_synchandle_t *sh_p, td_syncinfo_t *si_p)
2589 {
2590 struct ps_prochandle *ph_p;
2591 td_err_e return_val;
2592
2593 if (si_p == NULL)
2594 return (TD_ERR);
2595 (void) memset(si_p, 0, sizeof (*si_p));
2596 if ((ph_p = ph_lock_sh(sh_p, &return_val)) == NULL)
2597 return (return_val);
2598 if (ps_pstop(ph_p) != PS_OK) {
2599 ph_unlock(sh_p->sh_ta_p);
2600 return (TD_DBERR);
2601 }
2602
2603 return_val = sync_get_info_common(sh_p, ph_p, si_p);
2604
2605 (void) ps_pcontinue(ph_p);
2606 ph_unlock(sh_p->sh_ta_p);
2607 return (return_val);
2608 }
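/*
 * Illustrative sketch (not part of libc_db): inspecting a mutex in the
 * target process.  The fields used (si_type, si_state.mutex_locked,
 * si_owner, si_has_waiters) are the ones filled in by
 * sync_get_info_common() above.  The helper name is hypothetical.
 */
#if 0	/* example only, never compiled */
static int
mutex_is_held(const td_synchandle_t *sh_p, td_thrhandle_t *owner)
{
	td_syncinfo_t si;

	if (td_sync_get_info(sh_p, &si) != TD_OK ||
	    si.si_type != TD_SYNC_MUTEX || !si.si_state.mutex_locked)
		return (0);
	*owner = si.si_owner;	/* si_has_waiters tells if anyone is blocked */
	return (1);
}
#endif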
2609
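/*
 * Both hash functions below fold a synchronization-object address down
 * to a 15-bit bucket index (0 through 0x7fff): the low-order alignment
 * bits are discarded and the remaining bits are XOR-folded in halves.
 * read_sync_stats() uses the result to index the target's
 * tdb_sync_addr_hash table of list heads.
 */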
2610 static uint_t
2611 tdb_addr_hash64(uint64_t addr)
2612 {
2613 uint64_t value60 = (addr >> 4);
2614 uint32_t value30 = (value60 >> 30) ^ (value60 & 0x3fffffff);
2615 return ((value30 >> 15) ^ (value30 & 0x7fff));
2616 }
2617
2618 static uint_t
2619 tdb_addr_hash32(uint64_t addr)
2620 {
2621 uint32_t value30 = (addr >> 2); /* 30 bits */
2622 return ((value30 >> 15) ^ (value30 & 0x7fff));
2623 }
2624
2625 static td_err_e
2626 read_sync_stats(td_thragent_t *ta_p, psaddr_t hash_table,
2627 psaddr_t sync_obj_addr, tdb_sync_stats_t *sync_stats)
2628 {
2629 psaddr_t next_desc;
2630 uint64_t first;
2631 uint_t ix;
2632
2633 /*
2634 * Compute the hash table index from the synch object's address.
2635 */
2636 if (ta_p->model == PR_MODEL_LP64)
2637 ix = tdb_addr_hash64(sync_obj_addr);
2638 else
2639 ix = tdb_addr_hash32(sync_obj_addr);
2640
2641 /*
2642 * Get the address of the first element in the linked list.
2643 */
2644 if (ps_pdread(ta_p->ph_p, hash_table + ix * sizeof (uint64_t),
2645 &first, sizeof (first)) != PS_OK)
2646 return (TD_DBERR);
2647
2648 /*
2649 	 * Search the linked list for an entry for the synch. object.
2650 */
2651 for (next_desc = (psaddr_t)first; next_desc != NULL;
2652 next_desc = (psaddr_t)sync_stats->next) {
2653 if (ps_pdread(ta_p->ph_p, next_desc,
2654 sync_stats, sizeof (*sync_stats)) != PS_OK)
2655 return (TD_DBERR);
2656 if (sync_stats->sync_addr == sync_obj_addr)
2657 return (TD_OK);
2658 }
2659
2660 (void) memset(sync_stats, 0, sizeof (*sync_stats));
2661 return (TD_OK);
2662 }
2663
2664 /*
2665 * Given a synchronization handle, fill in the
2666 * statistics for the synchronization variable into *ss_p.
2667 */
2668 #pragma weak td_sync_get_stats = __td_sync_get_stats
2669 td_err_e
2670 __td_sync_get_stats(const td_synchandle_t *sh_p, td_syncstats_t *ss_p)
2671 {
2672 struct ps_prochandle *ph_p;
2673 td_thragent_t *ta_p;
2674 td_err_e return_val;
2675 register_sync_t enable;
2676 psaddr_t hashaddr;
2677 tdb_sync_stats_t sync_stats;
2678 size_t ix;
2679
2680 if (ss_p == NULL)
2681 return (TD_ERR);
2682 (void) memset(ss_p, 0, sizeof (*ss_p));
2683 if ((ph_p = ph_lock_sh(sh_p, &return_val)) == NULL)
2684 return (return_val);
2685 ta_p = sh_p->sh_ta_p;
2686 if (ps_pstop(ph_p) != PS_OK) {
2687 ph_unlock(ta_p);
2688 return (TD_DBERR);
2689 }
2690
2691 if ((return_val = sync_get_info_common(sh_p, ph_p, &ss_p->ss_info))
2692 != TD_OK) {
2693 if (return_val != TD_BADSH)
2694 goto out;
2695 /* we can correct TD_BADSH */
2696 (void) memset(&ss_p->ss_info, 0, sizeof (ss_p->ss_info));
2697 ss_p->ss_info.si_ta_p = sh_p->sh_ta_p;
2698 ss_p->ss_info.si_sv_addr = sh_p->sh_unique;
2699 /* we correct si_type and si_size below */
2700 return_val = TD_OK;
2701 }
2702 if (ps_pdread(ph_p, ta_p->tdb_register_sync_addr,
2703 &enable, sizeof (enable)) != PS_OK) {
2704 return_val = TD_DBERR;
2705 goto out;
2706 }
2707 if (enable != REGISTER_SYNC_ON)
2708 goto out;
2709
2710 /*
2711 * Get the address of the hash table in the target process.
2712 */
2713 if (ta_p->model == PR_MODEL_NATIVE) {
2714 if (ps_pdread(ph_p, ta_p->uberdata_addr +
2715 offsetof(uberdata_t, tdb.tdb_sync_addr_hash),
2716 		    &hashaddr, sizeof (hashaddr)) != PS_OK) {
2717 return_val = TD_DBERR;
2718 goto out;
2719 }
2720 } else {
2721 #if defined(_LP64) && defined(_SYSCALL32)
2722 caddr32_t addr;
2723
2724 if (ps_pdread(ph_p, ta_p->uberdata_addr +
2725 offsetof(uberdata32_t, tdb.tdb_sync_addr_hash),
2726 &addr, sizeof (addr)) != PS_OK) {
2727 return_val = TD_DBERR;
2728 goto out;
2729 }
2730 hashaddr = addr;
2731 #else
2732 return_val = TD_ERR;
2733 goto out;
2734 #endif /* _SYSCALL32 */
2735 }
2736
2737 if (hashaddr == 0)
2738 return_val = TD_BADSH;
2739 else
2740 return_val = read_sync_stats(ta_p, hashaddr,
2741 sh_p->sh_unique, &sync_stats);
2742 if (return_val != TD_OK)
2743 goto out;
2744
2745 /*
2746 * We have the hash table entry. Transfer the data to
2747 * the td_syncstats_t structure provided by the caller.
2748 */
2749 switch (sync_stats.un.type) {
2750 case TDB_MUTEX:
2751 {
2752 td_mutex_stats_t *msp = &ss_p->ss_un.mutex;
2753
2754 ss_p->ss_info.si_type = TD_SYNC_MUTEX;
2755 ss_p->ss_info.si_size = sizeof (mutex_t);
2756 msp->mutex_lock =
2757 sync_stats.un.mutex.mutex_lock;
2758 msp->mutex_sleep =
2759 sync_stats.un.mutex.mutex_sleep;
2760 msp->mutex_sleep_time =
2761 sync_stats.un.mutex.mutex_sleep_time;
2762 msp->mutex_hold_time =
2763 sync_stats.un.mutex.mutex_hold_time;
2764 msp->mutex_try =
2765 sync_stats.un.mutex.mutex_try;
2766 msp->mutex_try_fail =
2767 sync_stats.un.mutex.mutex_try_fail;
2768 if (sync_stats.sync_addr >= ta_p->hash_table_addr &&
2769 (ix = sync_stats.sync_addr - ta_p->hash_table_addr)
2770 < ta_p->hash_size * sizeof (thr_hash_table_t))
2771 msp->mutex_internal =
2772 ix / sizeof (thr_hash_table_t) + 1;
2773 break;
2774 }
2775 case TDB_COND:
2776 {
2777 td_cond_stats_t *csp = &ss_p->ss_un.cond;
2778
2779 ss_p->ss_info.si_type = TD_SYNC_COND;
2780 ss_p->ss_info.si_size = sizeof (cond_t);
2781 csp->cond_wait =
2782 sync_stats.un.cond.cond_wait;
2783 csp->cond_timedwait =
2784 sync_stats.un.cond.cond_timedwait;
2785 csp->cond_wait_sleep_time =
2786 sync_stats.un.cond.cond_wait_sleep_time;
2787 csp->cond_timedwait_sleep_time =
2788 sync_stats.un.cond.cond_timedwait_sleep_time;
2789 csp->cond_timedwait_timeout =
2790 sync_stats.un.cond.cond_timedwait_timeout;
2791 csp->cond_signal =
2792 sync_stats.un.cond.cond_signal;
2793 csp->cond_broadcast =
2794 sync_stats.un.cond.cond_broadcast;
2795 if (sync_stats.sync_addr >= ta_p->hash_table_addr &&
2796 (ix = sync_stats.sync_addr - ta_p->hash_table_addr)
2797 < ta_p->hash_size * sizeof (thr_hash_table_t))
2798 csp->cond_internal =
2799 ix / sizeof (thr_hash_table_t) + 1;
2800 break;
2801 }
2802 case TDB_RWLOCK:
2803 {
2804 td_rwlock_stats_t *rwsp = &ss_p->ss_un.rwlock;
2805
2806 ss_p->ss_info.si_type = TD_SYNC_RWLOCK;
2807 ss_p->ss_info.si_size = sizeof (rwlock_t);
2808 rwsp->rw_rdlock =
2809 sync_stats.un.rwlock.rw_rdlock;
2810 rwsp->rw_rdlock_try =
2811 sync_stats.un.rwlock.rw_rdlock_try;
2812 rwsp->rw_rdlock_try_fail =
2813 sync_stats.un.rwlock.rw_rdlock_try_fail;
2814 rwsp->rw_wrlock =
2815 sync_stats.un.rwlock.rw_wrlock;
2816 rwsp->rw_wrlock_hold_time =
2817 sync_stats.un.rwlock.rw_wrlock_hold_time;
2818 rwsp->rw_wrlock_try =
2819 sync_stats.un.rwlock.rw_wrlock_try;
2820 rwsp->rw_wrlock_try_fail =
2821 sync_stats.un.rwlock.rw_wrlock_try_fail;
2822 break;
2823 }
2824 case TDB_SEMA:
2825 {
2826 td_sema_stats_t *ssp = &ss_p->ss_un.sema;
2827
2828 ss_p->ss_info.si_type = TD_SYNC_SEMA;
2829 ss_p->ss_info.si_size = sizeof (sema_t);
2830 ssp->sema_wait =
2831 sync_stats.un.sema.sema_wait;
2832 ssp->sema_wait_sleep =
2833 sync_stats.un.sema.sema_wait_sleep;
2834 ssp->sema_wait_sleep_time =
2835 sync_stats.un.sema.sema_wait_sleep_time;
2836 ssp->sema_trywait =
2837 sync_stats.un.sema.sema_trywait;
2838 ssp->sema_trywait_fail =
2839 sync_stats.un.sema.sema_trywait_fail;
2840 ssp->sema_post =
2841 sync_stats.un.sema.sema_post;
2842 ssp->sema_max_count =
2843 sync_stats.un.sema.sema_max_count;
2844 ssp->sema_min_count =
2845 sync_stats.un.sema.sema_min_count;
2846 break;
2847 }
2848 default:
2849 return_val = TD_BADSH;
2850 break;
2851 }
2852
2853 out:
2854 (void) ps_pcontinue(ph_p);
2855 ph_unlock(ta_p);
2856 return (return_val);
2857 }
2858
2859 /*
2860 * Change the state of a synchronization variable.
2861  * 1) mutex lock state set to value
2862  * 2) semaphore's count set to value
2863  * 3) rwlock set write-locked by a value < 0
2864  * 4) rwlock's number of readers set by a value >= 0
2865 * Currently unused by dbx.
2866 */
2867 #pragma weak td_sync_setstate = __td_sync_setstate
2868 td_err_e
2869 __td_sync_setstate(const td_synchandle_t *sh_p, long lvalue)
2870 {
2871 struct ps_prochandle *ph_p;
2872 int trunc = 0;
2873 td_err_e return_val;
2874 td_so_un_t generic_so;
2875 uint32_t *rwstate;
2876 int value = (int)lvalue;
2877
2878 if ((ph_p = ph_lock_sh(sh_p, &return_val)) == NULL)
2879 return (return_val);
2880 if (ps_pstop(ph_p) != PS_OK) {
2881 ph_unlock(sh_p->sh_ta_p);
2882 return (TD_DBERR);
2883 }
2884
2885 /*
2886 * Read the synch. variable information.
2887 * First attempt to read the whole union and if that fails
2888 * fall back to reading only the smallest member, the condvar.
2889 */
2890 if (ps_pdread(ph_p, sh_p->sh_unique, &generic_so,
2891 sizeof (generic_so)) != PS_OK) {
2892 trunc = 1;
2893 if (ps_pdread(ph_p, sh_p->sh_unique, &generic_so.condition,
2894 sizeof (generic_so.condition)) != PS_OK) {
2895 (void) ps_pcontinue(ph_p);
2896 ph_unlock(sh_p->sh_ta_p);
2897 return (TD_DBERR);
2898 }
2899 }
2900
2901 /*
2902 	 * Set the new value in the sync. variable: read the synch. variable
2903 	 * information from the process, reset its value, and write it back.
2904 */
2905 	switch (generic_so.condition.cond_magic) {
2906 case MUTEX_MAGIC:
2907 if (trunc && ps_pdread(ph_p, sh_p->sh_unique,
2908 &generic_so.lock, sizeof (generic_so.lock)) != PS_OK) {
2909 return_val = TD_DBERR;
2910 break;
2911 }
2912 generic_so.lock.mutex_lockw = (uint8_t)value;
2913 if (ps_pdwrite(ph_p, sh_p->sh_unique, &generic_so.lock,
2914 sizeof (generic_so.lock)) != PS_OK)
2915 return_val = TD_DBERR;
2916 break;
2917 case SEMA_MAGIC:
2918 if (trunc && ps_pdread(ph_p, sh_p->sh_unique,
2919 &generic_so.semaphore, sizeof (generic_so.semaphore))
2920 != PS_OK) {
2921 return_val = TD_DBERR;
2922 break;
2923 }
2924 generic_so.semaphore.count = value;
2925 if (ps_pdwrite(ph_p, sh_p->sh_unique, &generic_so.semaphore,
2926 sizeof (generic_so.semaphore)) != PS_OK)
2927 return_val = TD_DBERR;
2928 break;
2929 case COND_MAGIC:
2930 /* Operation not supported on a condition variable */
2931 return_val = TD_ERR;
2932 break;
2933 case RWL_MAGIC:
2934 if (trunc && ps_pdread(ph_p, sh_p->sh_unique,
2935 &generic_so.rwlock, sizeof (generic_so.rwlock)) != PS_OK) {
2936 return_val = TD_DBERR;
2937 break;
2938 }
2939 rwstate = (uint32_t *)&generic_so.rwlock.readers;
2940 *rwstate &= URW_HAS_WAITERS;
2941 if (value < 0)
2942 *rwstate |= URW_WRITE_LOCKED;
2943 else
2944 *rwstate |= (value & URW_READERS_MASK);
2945 if (ps_pdwrite(ph_p, sh_p->sh_unique, &generic_so.rwlock,
2946 sizeof (generic_so.rwlock)) != PS_OK)
2947 return_val = TD_DBERR;
2948 break;
2949 default:
2950 /* Bad sync. object type */
2951 return_val = TD_BADSH;
2952 break;
2953 }
2954
2955 (void) ps_pcontinue(ph_p);
2956 ph_unlock(sh_p->sh_ta_p);
2957 return (return_val);
2958 }
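/*
 * Illustrative sketch (not part of libc_db): forcing a wedged mutex in
 * the target to the unlocked state with td_sync_setstate() above.  Use
 * with care; the target is not told that the state changed.  The helper
 * name is hypothetical.
 */
#if 0	/* example only, never compiled */
static td_err_e
force_mutex_unlocked(const td_synchandle_t *sh_p)
{
	/* A value of 0 clears the mutex lock state. */
	return (td_sync_setstate(sh_p, 0));
}
#endif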
2959
2960 typedef struct {
2961 td_thr_iter_f *waiter_cb;
2962 psaddr_t sync_obj_addr;
2963 uint16_t sync_magic;
2964 void *waiter_cb_arg;
2965 td_err_e errcode;
2966 } waiter_cb_ctl_t;
2967
2968 static int
2969 waiters_cb(const td_thrhandle_t *th_p, void *arg)
2970 {
2971 td_thragent_t *ta_p = th_p->th_ta_p;
2972 struct ps_prochandle *ph_p = ta_p->ph_p;
2973 waiter_cb_ctl_t *wcb = arg;
2974 caddr_t wchan;
2975
2976 if (ta_p->model == PR_MODEL_NATIVE) {
2977 ulwp_t *ulwp = (ulwp_t *)th_p->th_unique;
2978
2979 if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_wchan,
2980 &wchan, sizeof (wchan)) != PS_OK) {
2981 wcb->errcode = TD_DBERR;
2982 return (1);
2983 }
2984 } else {
2985 #if defined(_LP64) && defined(_SYSCALL32)
2986 ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique;
2987 caddr32_t wchan32;
2988
2989 if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_wchan,
2990 &wchan32, sizeof (wchan32)) != PS_OK) {
2991 wcb->errcode = TD_DBERR;
2992 return (1);
2993 }
2994 wchan = (caddr_t)(uintptr_t)wchan32;
2995 #else
2996 wcb->errcode = TD_ERR;
2997 return (1);
2998 #endif /* _SYSCALL32 */
2999 }
3000
3001 if (wchan == NULL)
3002 return (0);
3003
3004 if (wchan == (caddr_t)wcb->sync_obj_addr)
3005 return ((*wcb->waiter_cb)(th_p, wcb->waiter_cb_arg));
3006
3007 return (0);
3008 }
3009
3010 /*
3011 * For a given synchronization variable, iterate over the
3012  * set of waiting threads.  The callback function is passed
3013  * two parameters: a pointer to a thread handle and a pointer
3014  * to extra callback data.
3015 */
3016 #pragma weak td_sync_waiters = __td_sync_waiters
3017 td_err_e
3018 __td_sync_waiters(const td_synchandle_t *sh_p, td_thr_iter_f *cb, void *cb_data)
3019 {
3020 struct ps_prochandle *ph_p;
3021 waiter_cb_ctl_t wcb;
3022 td_err_e return_val;
3023
3024 if ((ph_p = ph_lock_sh(sh_p, &return_val)) == NULL)
3025 return (return_val);
3026 if (ps_pdread(ph_p,
3027 (psaddr_t)&((mutex_t *)sh_p->sh_unique)->mutex_magic,
3028 (caddr_t)&wcb.sync_magic, sizeof (wcb.sync_magic)) != PS_OK) {
3029 ph_unlock(sh_p->sh_ta_p);
3030 return (TD_DBERR);
3031 }
3032 ph_unlock(sh_p->sh_ta_p);
3033
3034 switch (wcb.sync_magic) {
3035 case MUTEX_MAGIC:
3036 case COND_MAGIC:
3037 case SEMA_MAGIC:
3038 case RWL_MAGIC:
3039 break;
3040 default:
3041 return (TD_BADSH);
3042 }
3043
3044 wcb.waiter_cb = cb;
3045 wcb.sync_obj_addr = sh_p->sh_unique;
3046 wcb.waiter_cb_arg = cb_data;
3047 wcb.errcode = TD_OK;
3048 return_val = __td_ta_thr_iter(sh_p->sh_ta_p, waiters_cb, &wcb,
3049 TD_THR_SLEEP, TD_THR_LOWEST_PRIORITY,
3050 TD_SIGNO_MASK, TD_THR_ANY_USER_FLAGS);
3051
3052 if (return_val != TD_OK)
3053 return (return_val);
3054
3055 return (wcb.errcode);
3056 }
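/*
 * Illustrative sketch (not part of libc_db): counting the threads blocked
 * on a synchronization object with td_sync_waiters().  The callback
 * matches td_thr_iter_f and increments a counter passed through cb_data.
 * The helper names are hypothetical.
 */
#if 0	/* example only, never compiled */
static int
count_waiter_cb(const td_thrhandle_t *th_p, void *cb_data)
{
	(*(int *)cb_data)++;
	return (0);		/* keep iterating */
}

static int
waiter_count(const td_synchandle_t *sh_p)
{
	int nwaiters = 0;

	(void) td_sync_waiters(sh_p, count_waiter_cb, &nwaiters);
	return (nwaiters);
}
#endif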
3057