/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>
#include <unistd.h>
#include <thr_uberdata.h>
#include <thread_db.h>
#include <libc_int.h>

/*
 * Private structures.
 */

typedef union {
	mutex_t		lock;
	rwlock_t	rwlock;
	sema_t		semaphore;
	cond_t		condition;
} td_so_un_t;

struct td_thragent {
	rwlock_t	rwlock;
	struct ps_prochandle *ph_p;
	int		initialized;
	int		sync_tracking;
	int		model;
	int		primary_map;
	psaddr_t	bootstrap_addr;
	psaddr_t	uberdata_addr;
	psaddr_t	tdb_eventmask_addr;
	psaddr_t	tdb_register_sync_addr;
	psaddr_t	tdb_events[TD_MAX_EVENT_NUM - TD_MIN_EVENT_NUM + 1];
	psaddr_t	hash_table_addr;
	int		hash_size;
	lwpid_t		single_lwpid;
	psaddr_t	single_ulwp_addr;
};
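
/*
 * Values taken by the td_thragent initialized field:
 *	-1	bootstrap is in progress (guards against recursive calls
 *		into libc_db from the provider of ps_pglobal_lookup())
 *	 0	not yet initialized
 *	 1	partially initialized (the target is single-threaded)
 *	 2	fully initialized
 * See td_read_hash_size(), td_read_uberdata() and td_read_bootstrap_data().
 */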

/*
 * This is the name of the variable in libc that contains
 * the uberdata address that we will need.
 */
#define	TD_BOOTSTRAP_NAME	"_tdb_bootstrap"
/*
 * This is the actual name of uberdata, used in the event
 * that tdb_bootstrap has not yet been initialized.
 */
#define	TD_UBERDATA_NAME	"_uberdata"
/*
 * The library name should end with ".so.1", but older versions of
 * dbx expect the unadorned name and malfunction if ".1" is specified.
 * Unfortunately, if ".1" is not specified, mdb malfunctions when it
 * is applied to another instance of itself (due to the presence of
 * /usr/lib/mdb/proc/libc.so).  So we try it both ways.
 */
#define	TD_LIBRARY_NAME		"libc.so"
#define	TD_LIBRARY_NAME_1	"libc.so.1"

td_err_e __td_thr_get_info(td_thrhandle_t *th_p, td_thrinfo_t *ti_p);

td_err_e __td_ta_thr_iter(td_thragent_t *ta_p, td_thr_iter_f *cb,
	void *cbdata_p, td_thr_state_e state, int ti_pri,
	sigset_t *ti_sigmask_p, unsigned ti_user_flags);

/*
 * Initialize threads debugging interface.
 */
#pragma weak td_init = __td_init
td_err_e
__td_init()
{
	return (TD_OK);
}

/*
 * This function does nothing, and never did.
 * But the symbol is in the ABI, so we can't delete it.
 */
#pragma weak td_log = __td_log
void
__td_log()
{
}

/*
 * Short-cut to read just the hash table size from the process,
 * to avoid repeatedly reading the full uberdata structure when
 * dealing with a single-threaded process.
 */
static uint_t
td_read_hash_size(td_thragent_t *ta_p)
{
	psaddr_t addr;
	uint_t hash_size;

	switch (ta_p->initialized) {
	default:	/* uninitialized */
		return (0);
	case 1:		/* partially initialized */
		break;
	case 2:		/* fully initialized */
		return (ta_p->hash_size);
	}

	if (ta_p->model == PR_MODEL_NATIVE) {
		addr = ta_p->uberdata_addr + offsetof(uberdata_t, hash_size);
	} else {
#if defined(_LP64) && defined(_SYSCALL32)
		addr = ta_p->uberdata_addr + offsetof(uberdata32_t, hash_size);
#else
		addr = 0;
#endif
	}
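	/*
	 * When the library is built without 32-bit target support,
	 * addr is left as 0 above; the read below should then fail
	 * and we return 0, which makes callers fall back to a full
	 * td_read_uberdata() read.
	 */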
	if (ps_pdread(ta_p->ph_p, addr, &hash_size, sizeof (hash_size))
	    != PS_OK)
		return (0);
	return (hash_size);
}

static td_err_e
td_read_uberdata(td_thragent_t *ta_p)
{
	struct ps_prochandle *ph_p = ta_p->ph_p;

	if (ta_p->model == PR_MODEL_NATIVE) {
		uberdata_t uberdata;

		if (ps_pdread(ph_p, ta_p->uberdata_addr,
		    &uberdata, sizeof (uberdata)) != PS_OK)
			return (TD_DBERR);
		ta_p->primary_map = uberdata.primary_map;
		ta_p->tdb_eventmask_addr = ta_p->uberdata_addr +
			offsetof(uberdata_t, tdb.tdb_ev_global_mask);
		ta_p->tdb_register_sync_addr = ta_p->uberdata_addr +
			offsetof(uberdata_t, uberflags.uf_tdb_register_sync);
		ta_p->hash_table_addr = (psaddr_t)uberdata.thr_hash_table;
		ta_p->hash_size = uberdata.hash_size;
		if (ps_pdread(ph_p, (psaddr_t)uberdata.tdb.tdb_events,
		    ta_p->tdb_events, sizeof (ta_p->tdb_events)) != PS_OK)
			return (TD_DBERR);

	} else {
#if defined(_LP64) && defined(_SYSCALL32)
		uberdata32_t uberdata;
		caddr32_t tdb_events[TD_MAX_EVENT_NUM - TD_MIN_EVENT_NUM + 1];
		int i;

		if (ps_pdread(ph_p, ta_p->uberdata_addr,
		    &uberdata, sizeof (uberdata)) != PS_OK)
			return (TD_DBERR);
		ta_p->primary_map = uberdata.primary_map;
		ta_p->tdb_eventmask_addr = ta_p->uberdata_addr +
			offsetof(uberdata32_t, tdb.tdb_ev_global_mask);
		ta_p->tdb_register_sync_addr = ta_p->uberdata_addr +
			offsetof(uberdata32_t, uberflags.uf_tdb_register_sync);
		ta_p->hash_table_addr = (psaddr_t)uberdata.thr_hash_table;
		ta_p->hash_size = uberdata.hash_size;
		if (ps_pdread(ph_p, (psaddr_t)uberdata.tdb.tdb_events,
		    tdb_events, sizeof (tdb_events)) != PS_OK)
			return (TD_DBERR);
		for (i = 0; i < TD_MAX_EVENT_NUM - TD_MIN_EVENT_NUM + 1; i++)
			ta_p->tdb_events[i] = tdb_events[i];
#else
		return (TD_DBERR);
#endif
	}
	if (ta_p->hash_size != 1) {	/* multi-threaded */
		ta_p->initialized = 2;
		ta_p->single_lwpid = 0;
		ta_p->single_ulwp_addr = NULL;
	} else {			/* single-threaded */
		ta_p->initialized = 1;
		/*
		 * Get the address and lwpid of the single thread/LWP.
		 * It may not be ulwp_one if this is a child of fork1().
		 */
		if (ta_p->model == PR_MODEL_NATIVE) {
			thr_hash_table_t head;
			lwpid_t lwpid = 0;

			if (ps_pdread(ph_p, ta_p->hash_table_addr,
			    &head, sizeof (head)) != PS_OK)
				return (TD_DBERR);
			if ((psaddr_t)head.hash_bucket == NULL)
				ta_p->initialized = 0;
			else if (ps_pdread(ph_p, (psaddr_t)head.hash_bucket +
			    offsetof(ulwp_t, ul_lwpid),
			    &lwpid, sizeof (lwpid)) != PS_OK)
				return (TD_DBERR);
			ta_p->single_lwpid = lwpid;
			ta_p->single_ulwp_addr = (psaddr_t)head.hash_bucket;
		} else {
#if defined(_LP64) && defined(_SYSCALL32)
			thr_hash_table32_t head;
			lwpid_t lwpid = 0;

			if (ps_pdread(ph_p, ta_p->hash_table_addr,
			    &head, sizeof (head)) != PS_OK)
				return (TD_DBERR);
			if ((psaddr_t)head.hash_bucket == NULL)
				ta_p->initialized = 0;
			else if (ps_pdread(ph_p, (psaddr_t)head.hash_bucket +
			    offsetof(ulwp32_t, ul_lwpid),
			    &lwpid, sizeof (lwpid)) != PS_OK)
				return (TD_DBERR);
			ta_p->single_lwpid = lwpid;
			ta_p->single_ulwp_addr = (psaddr_t)head.hash_bucket;
#else
			return (TD_DBERR);
#endif
		}
	}
	if (!ta_p->primary_map)
		ta_p->initialized = 0;
	return (TD_OK);
}

static td_err_e
td_read_bootstrap_data(td_thragent_t *ta_p)
{
	struct ps_prochandle *ph_p = ta_p->ph_p;
	psaddr_t bootstrap_addr;
	psaddr_t uberdata_addr;
	ps_err_e db_return;
	td_err_e return_val;
	int do_1;

	switch (ta_p->initialized) {
	case 2:			/* fully initialized */
		return (TD_OK);
	case 1:			/* partially initialized */
		if (td_read_hash_size(ta_p) == 1)
			return (TD_OK);
		return (td_read_uberdata(ta_p));
	}

	/*
	 * Uninitialized -- do the startup work.
	 * We set ta_p->initialized to -1 to cut off recursive calls
	 * into libc_db by code in the provider of ps_pglobal_lookup().
	 */
	do_1 = 0;
	ta_p->initialized = -1;
	db_return = ps_pglobal_lookup(ph_p, TD_LIBRARY_NAME,
	    TD_BOOTSTRAP_NAME, &bootstrap_addr);
	if (db_return == PS_NOSYM) {
		do_1 = 1;
		db_return = ps_pglobal_lookup(ph_p, TD_LIBRARY_NAME_1,
		    TD_BOOTSTRAP_NAME, &bootstrap_addr);
	}
	if (db_return == PS_NOSYM)	/* libc is not linked yet */
		return (TD_NOLIBTHREAD);
	if (db_return != PS_OK)
		return (TD_ERR);
	db_return = ps_pglobal_lookup(ph_p,
	    do_1? TD_LIBRARY_NAME_1 : TD_LIBRARY_NAME,
	    TD_UBERDATA_NAME, &uberdata_addr);
	if (db_return == PS_NOSYM)	/* libc is not linked yet */
		return (TD_NOLIBTHREAD);
	if (db_return != PS_OK)
		return (TD_ERR);

	/*
	 * Read the uberdata address into the thread agent structure.
	 */
	if (ta_p->model == PR_MODEL_NATIVE) {
		psaddr_t psaddr;
		if (ps_pdread(ph_p, bootstrap_addr,
		    &psaddr, sizeof (psaddr)) != PS_OK)
			return (TD_DBERR);
		if ((ta_p->bootstrap_addr = psaddr) == NULL)
			psaddr = uberdata_addr;
		else if (ps_pdread(ph_p, psaddr,
		    &psaddr, sizeof (psaddr)) != PS_OK)
			return (TD_DBERR);
		ta_p->uberdata_addr = psaddr;
	} else {
#if defined(_LP64) && defined(_SYSCALL32)
		caddr32_t psaddr;
		if (ps_pdread(ph_p, bootstrap_addr,
		    &psaddr, sizeof (psaddr)) != PS_OK)
			return (TD_DBERR);
		if ((ta_p->bootstrap_addr = (psaddr_t)psaddr) == NULL)
			psaddr = (caddr32_t)uberdata_addr;
		else if (ps_pdread(ph_p, (psaddr_t)psaddr,
		    &psaddr, sizeof (psaddr)) != PS_OK)
			return (TD_DBERR);
		ta_p->uberdata_addr = (psaddr_t)psaddr;
#else
		return (TD_DBERR);
#endif	/* _SYSCALL32 */
	}

	if ((return_val = td_read_uberdata(ta_p)) != TD_OK)
		return (return_val);
	if (ta_p->bootstrap_addr == NULL)
		ta_p->initialized = 0;
	return (TD_OK);
}

#pragma weak ps_kill
#pragma weak ps_lrolltoaddr

/*
 * Allocate a new agent process handle ("thread agent").
 */
#pragma weak td_ta_new = __td_ta_new
td_err_e
__td_ta_new(struct ps_prochandle *ph_p, td_thragent_t **ta_pp)
{
	td_thragent_t *ta_p;
	int model;
	td_err_e return_val = TD_OK;

	if (ph_p == NULL)
		return (TD_BADPH);
	if (ta_pp == NULL)
		return (TD_ERR);
	*ta_pp = NULL;
	if (ps_pstop(ph_p) != PS_OK)
		return (TD_DBERR);
	/*
	 * ps_pdmodel might not be defined if this is an older client.
	 * Make it a weak symbol and test if it exists before calling.
	 */
#pragma weak ps_pdmodel
	if (ps_pdmodel == NULL) {
		model = PR_MODEL_NATIVE;
	} else if (ps_pdmodel(ph_p, &model) != PS_OK) {
		(void) ps_pcontinue(ph_p);
		return (TD_ERR);
	}
	if ((ta_p = malloc(sizeof (*ta_p))) == NULL) {
		(void) ps_pcontinue(ph_p);
		return (TD_MALLOC);
	}

	/*
	 * Initialize the agent process handle.
	 * Pick up the symbol value we need from the target process.
	 */
	(void) memset(ta_p, 0, sizeof (*ta_p));
	ta_p->ph_p = ph_p;
	(void) rwlock_init(&ta_p->rwlock, USYNC_THREAD, NULL);
	ta_p->model = model;
	return_val = td_read_bootstrap_data(ta_p);

	/*
	 * Because the old libthread_db enabled lock tracking by default,
	 * we must also do it.  However, we do it only if the application
	 * provides the ps_kill() and ps_lrolltoaddr() interfaces.
	 * (dbx provides the ps_kill() and ps_lrolltoaddr() interfaces.)
	 */
	if (return_val == TD_OK && ps_kill != NULL && ps_lrolltoaddr != NULL) {
		register_sync_t oldenable;
		register_sync_t enable = REGISTER_SYNC_ENABLE;
		psaddr_t psaddr = ta_p->tdb_register_sync_addr;

		if (ps_pdread(ph_p, psaddr,
		    &oldenable, sizeof (oldenable)) != PS_OK)
			return_val = TD_DBERR;
		else if (oldenable != REGISTER_SYNC_OFF ||
		    ps_pdwrite(ph_p, psaddr,
		    &enable, sizeof (enable)) != PS_OK) {
			/*
			 * Lock tracking was already enabled or we
			 * failed to enable it, probably because we
			 * are examining a core file.  In either case
			 * set the sync_tracking flag non-zero to
			 * indicate that we should not attempt to
			 * disable lock tracking when we delete the
			 * agent process handle in td_ta_delete().
			 */
			ta_p->sync_tracking = 1;
		}
	}

	if (return_val == TD_OK)
		*ta_pp = ta_p;
	else
		free(ta_p);

	(void) ps_pcontinue(ph_p);
	return (return_val);
}
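
/*
 * For illustration only (not part of this library): a typical libc_db
 * client, having built a struct ps_prochandle and supplied the proc
 * service routines (ps_pdread(), ps_pglobal_lookup(), etc.), creates
 * and destroys a thread agent roughly like this:
 *
 *	td_thragent_t *ta;
 *
 *	(void) td_init();
 *	if (td_ta_new(ph, &ta) == TD_OK) {
 *		... use td_ta_thr_iter(), td_ta_map_id2thr(), etc. ...
 *		(void) td_ta_delete(ta);
 *	}
 *
 * td_ta_new() returns TD_NOLIBTHREAD until libc has been initialized
 * in the target, so clients typically retry it as the target runs.
 */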

/*
 * Utility function to grab the readers lock and return the prochandle,
 * given an agent process handle.  Performs standard error checking.
 * Returns non-NULL with the lock held, or NULL with the lock not held.
 */
static struct ps_prochandle *
ph_lock_ta(td_thragent_t *ta_p, td_err_e *err)
{
	struct ps_prochandle *ph_p = NULL;
	td_err_e error;

	if (ta_p == NULL || ta_p->initialized == -1) {
		*err = TD_BADTA;
	} else if (rw_rdlock(&ta_p->rwlock) != 0) {	/* can't happen? */
		*err = TD_BADTA;
	} else if ((ph_p = ta_p->ph_p) == NULL) {
		(void) rw_unlock(&ta_p->rwlock);
		*err = TD_BADPH;
	} else if (ta_p->initialized != 2 &&
	    (error = td_read_bootstrap_data(ta_p)) != TD_OK) {
		(void) rw_unlock(&ta_p->rwlock);
		ph_p = NULL;
		*err = error;
	} else {
		*err = TD_OK;
	}

	return (ph_p);
}

/*
 * Utility function to grab the readers lock and return the prochandle,
 * given an agent thread handle.  Performs standard error checking.
 * Returns non-NULL with the lock held, or NULL with the lock not held.
 */
static struct ps_prochandle *
ph_lock_th(const td_thrhandle_t *th_p, td_err_e *err)
{
	if (th_p == NULL || th_p->th_unique == NULL) {
		*err = TD_BADTH;
		return (NULL);
	}
	return (ph_lock_ta(th_p->th_ta_p, err));
}

/*
 * Utility function to grab the readers lock and return the prochandle,
 * given a synchronization object handle.  Performs standard error checking.
 * Returns non-NULL with the lock held, or NULL with the lock not held.
 */
static struct ps_prochandle *
ph_lock_sh(const td_synchandle_t *sh_p, td_err_e *err)
{
	if (sh_p == NULL || sh_p->sh_unique == NULL) {
		*err = TD_BADSH;
		return (NULL);
	}
	return (ph_lock_ta(sh_p->sh_ta_p, err));
}

/*
 * Unlock the agent process handle obtained from ph_lock_*().
 */
static void
ph_unlock(td_thragent_t *ta_p)
{
	(void) rw_unlock(&ta_p->rwlock);
}

/*
 * De-allocate an agent process handle,
 * releasing all related resources.
 *
 * XXX -- This is hopelessly broken ---
 * Storage for thread agent is not deallocated.  The prochandle
 * in the thread agent is set to NULL so that future uses of
 * the thread agent can be detected and an error value returned.
 * All functions in the external user interface that make
 * use of the thread agent are expected
 * to check for a NULL prochandle in the thread agent.
 * All such functions are also expected to obtain a
 * reader lock on the thread agent while it is using it.
 */
#pragma weak td_ta_delete = __td_ta_delete
td_err_e
__td_ta_delete(td_thragent_t *ta_p)
{
	struct ps_prochandle *ph_p;

	/*
	 * This is the only place we grab the writer lock.
	 * We are going to NULL out the prochandle.
	 */
	if (ta_p == NULL || rw_wrlock(&ta_p->rwlock) != 0)
		return (TD_BADTA);
	if ((ph_p = ta_p->ph_p) == NULL) {
		(void) rw_unlock(&ta_p->rwlock);
		return (TD_BADPH);
	}
	/*
	 * If synch. tracking was disabled when td_ta_new() was called and
	 * if td_ta_sync_tracking_enable() was never called, then disable
	 * synch. tracking (it was enabled by default in td_ta_new()).
	 */
	if (ta_p->sync_tracking == 0 &&
	    ps_kill != NULL && ps_lrolltoaddr != NULL) {
		register_sync_t enable = REGISTER_SYNC_DISABLE;

		(void) ps_pdwrite(ph_p, ta_p->tdb_register_sync_addr,
		    &enable, sizeof (enable));
	}
	ta_p->ph_p = NULL;
	(void) rw_unlock(&ta_p->rwlock);
	return (TD_OK);
}

/*
 * Map an agent process handle to a client prochandle.
 * Currently unused by dbx.
 */
#pragma weak td_ta_get_ph = __td_ta_get_ph
td_err_e
__td_ta_get_ph(td_thragent_t *ta_p, struct ps_prochandle **ph_pp)
{
	td_err_e return_val;

	if (ph_pp != NULL)	/* protect stupid callers */
		*ph_pp = NULL;
	if (ph_pp == NULL)
		return (TD_ERR);
	if ((*ph_pp = ph_lock_ta(ta_p, &return_val)) == NULL)
		return (return_val);
	ph_unlock(ta_p);
	return (TD_OK);
}

/*
 * Set the process's suggested concurrency level.
 * This is a no-op in a one-level model.
 * Currently unused by dbx.
 */
#pragma weak td_ta_setconcurrency = __td_ta_setconcurrency
/* ARGSUSED1 */
td_err_e
__td_ta_setconcurrency(const td_thragent_t *ta_p, int level)
{
	if (ta_p == NULL)
		return (TD_BADTA);
	if (ta_p->ph_p == NULL)
		return (TD_BADPH);
	return (TD_OK);
}

/*
 * Get the number of threads in the process.
 */
#pragma weak td_ta_get_nthreads = __td_ta_get_nthreads
td_err_e
__td_ta_get_nthreads(td_thragent_t *ta_p, int *nthread_p)
{
	struct ps_prochandle *ph_p;
	td_err_e return_val;
	int nthreads;
	int nzombies;
	psaddr_t nthreads_addr;
	psaddr_t nzombies_addr;

	if (ta_p->model == PR_MODEL_NATIVE) {
		nthreads_addr = ta_p->uberdata_addr +
			offsetof(uberdata_t, nthreads);
		nzombies_addr = ta_p->uberdata_addr +
			offsetof(uberdata_t, nzombies);
	} else {
#if defined(_LP64) && defined(_SYSCALL32)
		nthreads_addr = ta_p->uberdata_addr +
			offsetof(uberdata32_t, nthreads);
		nzombies_addr = ta_p->uberdata_addr +
			offsetof(uberdata32_t, nzombies);
#else
		nthreads_addr = 0;
		nzombies_addr = 0;
#endif	/* _SYSCALL32 */
	}

	if (nthread_p == NULL)
		return (TD_ERR);
	if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
		return (return_val);
	if (ps_pdread(ph_p, nthreads_addr, &nthreads, sizeof (int)) != PS_OK)
		return_val = TD_DBERR;
	if (ps_pdread(ph_p, nzombies_addr, &nzombies, sizeof (int)) != PS_OK)
		return_val = TD_DBERR;
	ph_unlock(ta_p);
	if (return_val == TD_OK)
		*nthread_p = nthreads + nzombies;
	return (return_val);
}

typedef struct {
	thread_t	tid;
	int		found;
	td_thrhandle_t	th;
} td_mapper_param_t;

/*
 * Check the value in data against the thread id.
 * If it matches, return 1 to terminate iterations.
 * This function is used by td_ta_map_id2thr() to map a tid to a thread handle.
 */
static int
td_mapper_id2thr(td_thrhandle_t *th_p, td_mapper_param_t *data)
{
	td_thrinfo_t ti;

	if (__td_thr_get_info(th_p, &ti) == TD_OK &&
	    data->tid == ti.ti_tid) {
		data->found = 1;
		data->th = *th_p;
		return (1);
	}
	return (0);
}

/*
 * Given a thread identifier, return the corresponding thread handle.
 */
#pragma weak td_ta_map_id2thr = __td_ta_map_id2thr
td_err_e
__td_ta_map_id2thr(td_thragent_t *ta_p, thread_t tid,
	td_thrhandle_t *th_p)
{
	td_err_e		return_val;
	td_mapper_param_t	data;

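	/*
	 * Fast path for a single-threaded target process: if the cached
	 * state (confirmed by re-reading the hash size or, failing that,
	 * the full uberdata) still says "one thread" and the tid matches
	 * the cached single_lwpid, build the handle directly instead of
	 * iterating.  td_read_uberdata() can change ta_p->initialized,
	 * which is why it is tested again after the read.
	 */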
	if (th_p != NULL &&	/* optimize for a single thread */
	    ta_p != NULL &&
	    ta_p->initialized == 1 &&
	    (td_read_hash_size(ta_p) == 1 ||
	    td_read_uberdata(ta_p) == TD_OK) &&
	    ta_p->initialized == 1 &&
	    ta_p->single_lwpid == tid) {
		th_p->th_ta_p = ta_p;
		if ((th_p->th_unique = ta_p->single_ulwp_addr) == 0)
			return (TD_NOTHR);
		return (TD_OK);
	}

	/*
	 * LOCKING EXCEPTION - Locking is not required here because
	 * the locking and checking will be done in __td_ta_thr_iter.
	 */

	if (ta_p == NULL)
		return (TD_BADTA);
	if (th_p == NULL)
		return (TD_BADTH);
	if (tid == 0)
		return (TD_NOTHR);

	data.tid = tid;
	data.found = 0;
	return_val = __td_ta_thr_iter(ta_p,
		(td_thr_iter_f *)td_mapper_id2thr, (void *)&data,
		TD_THR_ANY_STATE, TD_THR_LOWEST_PRIORITY,
		TD_SIGNO_MASK, TD_THR_ANY_USER_FLAGS);
	if (return_val == TD_OK) {
		if (data.found == 0)
			return_val = TD_NOTHR;
		else
			*th_p = data.th;
	}

	return (return_val);
}

/*
 * Map the address of a synchronization object to a sync. object handle.
 */
#pragma weak td_ta_map_addr2sync = __td_ta_map_addr2sync
td_err_e
__td_ta_map_addr2sync(td_thragent_t *ta_p, psaddr_t addr, td_synchandle_t *sh_p)
{
	struct ps_prochandle *ph_p;
	td_err_e return_val;
	uint16_t sync_magic;

	if (sh_p == NULL)
		return (TD_BADSH);
	if (addr == NULL)
		return (TD_ERR);
	if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
		return (return_val);
	/*
	 * Check the magic number of the sync. object to make sure it's valid.
	 * The magic number is at the same offset for all sync. objects.
	 */
	if (ps_pdread(ph_p, (psaddr_t)&((mutex_t *)addr)->mutex_magic,
	    &sync_magic, sizeof (sync_magic)) != PS_OK) {
		ph_unlock(ta_p);
		return (TD_BADSH);
	}
	ph_unlock(ta_p);
	if (sync_magic != MUTEX_MAGIC && sync_magic != COND_MAGIC &&
	    sync_magic != SEMA_MAGIC && sync_magic != RWL_MAGIC)
		return (TD_BADSH);
	/*
	 * Just fill in the appropriate fields of the sync. handle.
	 */
	sh_p->sh_ta_p = (td_thragent_t *)ta_p;
	sh_p->sh_unique = addr;
	return (TD_OK);
}

/*
 * Iterate over the set of global TSD keys.
 * The call back function is called with three arguments,
 * a key, a pointer to the destructor function, and the cbdata pointer.
 * Currently unused by dbx.
 */
#pragma weak td_ta_tsd_iter = __td_ta_tsd_iter
td_err_e
__td_ta_tsd_iter(td_thragent_t *ta_p, td_key_iter_f *cb, void *cbdata_p)
{
	struct ps_prochandle *ph_p;
	td_err_e	return_val;
	int		key;
	int		numkeys;
	psaddr_t	dest_addr;
	psaddr_t	*destructors = NULL;
	PFrV		destructor;

	if (cb == NULL)
		return (TD_ERR);
	if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
		return (return_val);
	if (ps_pstop(ph_p) != PS_OK) {
		ph_unlock(ta_p);
		return (TD_DBERR);
	}

	if (ta_p->model == PR_MODEL_NATIVE) {
		tsd_metadata_t tsdm;

		if (ps_pdread(ph_p,
		    ta_p->uberdata_addr + offsetof(uberdata_t, tsd_metadata),
		    &tsdm, sizeof (tsdm)) != PS_OK)
			return_val = TD_DBERR;
		else {
			numkeys = tsdm.tsdm_nused;
			dest_addr = (psaddr_t)tsdm.tsdm_destro;
			if (numkeys > 0)
				destructors =
				    malloc(numkeys * sizeof (psaddr_t));
		}
	} else {
#if defined(_LP64) && defined(_SYSCALL32)
		tsd_metadata32_t tsdm;

		if (ps_pdread(ph_p,
		    ta_p->uberdata_addr + offsetof(uberdata32_t, tsd_metadata),
		    &tsdm, sizeof (tsdm)) != PS_OK)
			return_val = TD_DBERR;
		else {
			numkeys = tsdm.tsdm_nused;
			dest_addr = (psaddr_t)tsdm.tsdm_destro;
			if (numkeys > 0)
				destructors =
				    malloc(numkeys * sizeof (caddr32_t));
		}
#else
		return_val = TD_DBERR;
#endif	/* _SYSCALL32 */
	}

	if (return_val != TD_OK || numkeys <= 0) {
		(void) ps_pcontinue(ph_p);
		ph_unlock(ta_p);
		return (return_val);
	}

	if (destructors == NULL)
		return_val = TD_MALLOC;
	else if (ta_p->model == PR_MODEL_NATIVE) {
		if (ps_pdread(ph_p, dest_addr,
		    destructors, numkeys * sizeof (psaddr_t)) != PS_OK)
			return_val = TD_DBERR;
		else {
			for (key = 1; key < numkeys; key++) {
				destructor = (PFrV)destructors[key];
				if (destructor != TSD_UNALLOCATED &&
				    (*cb)(key, destructor, cbdata_p))
					break;
			}
		}
#if defined(_LP64) && defined(_SYSCALL32)
	} else {
		caddr32_t *destructors32 = (caddr32_t *)destructors;
		caddr32_t destruct32;

		if (ps_pdread(ph_p, dest_addr,
		    destructors32, numkeys * sizeof (caddr32_t)) != PS_OK)
			return_val = TD_DBERR;
		else {
			for (key = 1; key < numkeys; key++) {
				destruct32 = destructors32[key];
				if (destruct32 != (caddr32_t)TSD_UNALLOCATED &&
				    (*cb)(key, (PFrV)(uintptr_t)destruct32,
				    cbdata_p))
					break;
			}
		}
#endif	/* _SYSCALL32 */
	}

	if (destructors)
		free(destructors);
	(void) ps_pcontinue(ph_p);
	ph_unlock(ta_p);
	return (return_val);
}

int
sigequalset(const sigset_t *s1, const sigset_t *s2)
{
	return (s1->__sigbits[0] == s2->__sigbits[0] &&
		s1->__sigbits[1] == s2->__sigbits[1] &&
		s1->__sigbits[2] == s2->__sigbits[2] &&
		s1->__sigbits[3] == s2->__sigbits[3]);
}

/*
 * Description:
 *   Iterate over all threads. For each thread call
 * the function pointed to by "cb" with a pointer
 * to a thread handle, and a pointer to data which
 * can be NULL. Only call td_thr_iter_f() on threads
 * which match the properties of state, ti_pri,
 * ti_sigmask_p, and ti_user_flags.  If cb returns
 * a non-zero value, terminate iterations.
 *
 * Input:
 *   *ta_p - thread agent
 *   *cb - call back function defined by user.
 * td_thr_iter_f() takes a thread handle and
 * cbdata_p as a parameter.
 *   cbdata_p - parameter for td_thr_iter_f().
 *
 *   state - state of threads of interest.  A value of
 * TD_THR_ANY_STATE from enum td_thr_state_e
 * does not restrict iterations by state.
 *   ti_pri - lower bound of priorities of threads of
 * interest.  A value of TD_THR_LOWEST_PRIORITY
 * defined in thread_db.h does not restrict
 * iterations by priority.  A thread with priority
 * less than ti_pri will NOT be passed to the callback
 * function.
 *   ti_sigmask_p - signal mask of threads of interest.
 * A value of TD_SIGNO_MASK defined in thread_db.h
 * does not restrict iterations by signal mask.
 *   ti_user_flags - user flags of threads of interest.  A
 * value of TD_THR_ANY_USER_FLAGS defined in thread_db.h
 * does not restrict iterations by user flags.
 */
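
/*
 * Example (illustration only, not part of this file): a caller might
 * count the threads in the target with a trivial callback:
 *
 *	static int
 *	count_thr(const td_thrhandle_t *th, void *arg)
 *	{
 *		(*(int *)arg)++;
 *		return (0);		(zero means: keep iterating)
 *	}
 *	...
 *	int count = 0;
 *	(void) td_ta_thr_iter(ta, count_thr, &count, TD_THR_ANY_STATE,
 *	    TD_THR_LOWEST_PRIORITY, TD_SIGNO_MASK, TD_THR_ANY_USER_FLAGS);
 */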
#pragma weak td_ta_thr_iter = __td_ta_thr_iter
td_err_e
__td_ta_thr_iter(td_thragent_t *ta_p, td_thr_iter_f *cb,
	void *cbdata_p, td_thr_state_e state, int ti_pri,
	sigset_t *ti_sigmask_p, unsigned ti_user_flags)
{
	struct ps_prochandle *ph_p;
	psaddr_t	first_lwp_addr;
	psaddr_t	first_zombie_addr;
	psaddr_t	curr_lwp_addr;
	psaddr_t	next_lwp_addr;
	td_thrhandle_t	th;
	ps_err_e	db_return;
	ps_err_e	db_return2;
	td_err_e	return_val;

	if (cb == NULL)
		return (TD_ERR);
	/*
	 * If state is not within bound, short circuit.
	 */
	if (state < TD_THR_ANY_STATE || state > TD_THR_STOPPED_ASLEEP)
		return (TD_OK);

	if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
		return (return_val);
	if (ps_pstop(ph_p) != PS_OK) {
		ph_unlock(ta_p);
		return (TD_DBERR);
	}

	/*
	 * For each ulwp_t in the circular linked lists pointed
	 * to by "all_lwps" and "all_zombies":
	 * (1) Filter each thread.
	 * (2) Create the thread_object for each thread that passes.
	 * (3) Call the call back function on each thread.
	 */

	if (ta_p->model == PR_MODEL_NATIVE) {
		db_return = ps_pdread(ph_p,
		    ta_p->uberdata_addr + offsetof(uberdata_t, all_lwps),
		    &first_lwp_addr, sizeof (first_lwp_addr));
		db_return2 = ps_pdread(ph_p,
		    ta_p->uberdata_addr + offsetof(uberdata_t, all_zombies),
		    &first_zombie_addr, sizeof (first_zombie_addr));
	} else {
#if defined(_LP64) && defined(_SYSCALL32)
		caddr32_t addr32;

		db_return = ps_pdread(ph_p,
		    ta_p->uberdata_addr + offsetof(uberdata32_t, all_lwps),
		    &addr32, sizeof (addr32));
		first_lwp_addr = addr32;
		db_return2 = ps_pdread(ph_p,
		    ta_p->uberdata_addr + offsetof(uberdata32_t, all_zombies),
		    &addr32, sizeof (addr32));
		first_zombie_addr = addr32;
#else	/* _SYSCALL32 */
		db_return = PS_ERR;
		db_return2 = PS_ERR;
#endif	/* _SYSCALL32 */
	}
	if (db_return == PS_OK)
		db_return = db_return2;

	/*
	 * If first_lwp_addr and first_zombie_addr are both NULL,
	 * libc must not yet be initialized or all threads have
	 * exited.  Return TD_NOTHR and all will be well.
	 */
	if (db_return == PS_OK &&
	    first_lwp_addr == NULL && first_zombie_addr == NULL) {
		(void) ps_pcontinue(ph_p);
		ph_unlock(ta_p);
		return (TD_NOTHR);
	}
	if (db_return != PS_OK) {
		(void) ps_pcontinue(ph_p);
		ph_unlock(ta_p);
		return (TD_DBERR);
	}

	/*
	 * Run down the lists of all living and dead lwps.
	 */
	if (first_lwp_addr == NULL)
		first_lwp_addr = first_zombie_addr;
	curr_lwp_addr = first_lwp_addr;
	for (;;) {
		td_thr_state_e ts_state;
		int userpri;
		unsigned userflags;
		sigset_t mask;

		/*
		 * Read the ulwp struct.
		 */
		if (ta_p->model == PR_MODEL_NATIVE) {
			ulwp_t ulwp;

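			/*
			 * If reading the full ulwp_t fails, zero the
			 * buffer and retry with just the first
			 * REPLACEMENT_SIZE bytes (presumably the portion
			 * libc preserves for defunct threads); the rest
			 * of the struct stays zeroed from the memset().
			 */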
			if (ps_pdread(ph_p, curr_lwp_addr,
			    &ulwp, sizeof (ulwp)) != PS_OK &&
			    ((void) memset(&ulwp, 0, sizeof (ulwp)),
			    ps_pdread(ph_p, curr_lwp_addr,
			    &ulwp, REPLACEMENT_SIZE)) != PS_OK) {
				return_val = TD_DBERR;
				break;
			}
			next_lwp_addr = (psaddr_t)ulwp.ul_forw;

			ts_state = ulwp.ul_dead? TD_THR_ZOMBIE :
				ulwp.ul_stop? TD_THR_STOPPED :
				ulwp.ul_wchan? TD_THR_SLEEP :
				TD_THR_ACTIVE;
			userpri = ulwp.ul_pri;
			userflags = ulwp.ul_usropts;
			if (ulwp.ul_dead)
				(void) sigemptyset(&mask);
			else
				mask = *(sigset_t *)&ulwp.ul_sigmask;
		} else {
#if defined(_LP64) && defined(_SYSCALL32)
			ulwp32_t ulwp;

			if (ps_pdread(ph_p, curr_lwp_addr,
			    &ulwp, sizeof (ulwp)) != PS_OK &&
			    ((void) memset(&ulwp, 0, sizeof (ulwp)),
			    ps_pdread(ph_p, curr_lwp_addr,
			    &ulwp, REPLACEMENT_SIZE32)) != PS_OK) {
				return_val = TD_DBERR;
				break;
			}
			next_lwp_addr = (psaddr_t)ulwp.ul_forw;

			ts_state = ulwp.ul_dead? TD_THR_ZOMBIE :
				ulwp.ul_stop? TD_THR_STOPPED :
				ulwp.ul_wchan? TD_THR_SLEEP :
				TD_THR_ACTIVE;
			userpri = ulwp.ul_pri;
			userflags = ulwp.ul_usropts;
			if (ulwp.ul_dead)
				(void) sigemptyset(&mask);
			else
				mask = *(sigset_t *)&ulwp.ul_sigmask;
#else	/* _SYSCALL32 */
			return_val = TD_ERR;
			break;
#endif	/* _SYSCALL32 */
		}

		/*
		 * Filter on state, priority, sigmask, and user flags.
		 */

		if ((state != ts_state) &&
		    (state != TD_THR_ANY_STATE))
			goto advance;

		if (ti_pri > userpri)
			goto advance;

		if (ti_sigmask_p != TD_SIGNO_MASK &&
		    !sigequalset(ti_sigmask_p, &mask))
			goto advance;

		if (ti_user_flags != userflags &&
		    ti_user_flags != (unsigned)TD_THR_ANY_USER_FLAGS)
			goto advance;

		/*
		 * Call back - break if the return
		 * from the call back is non-zero.
		 */
		th.th_ta_p = (td_thragent_t *)ta_p;
		th.th_unique = curr_lwp_addr;
		if ((*cb)(&th, cbdata_p))
			break;

advance:
		if ((curr_lwp_addr = next_lwp_addr) == first_lwp_addr) {
			/*
			 * Switch to the zombie list, unless it is NULL
			 * or we have already been doing the zombie list,
			 * in which case terminate the loop.
			 */
			if (first_zombie_addr == NULL ||
			    first_lwp_addr == first_zombie_addr)
				break;
			curr_lwp_addr = first_lwp_addr = first_zombie_addr;
		}
	}

	(void) ps_pcontinue(ph_p);
	ph_unlock(ta_p);
	return (return_val);
}

/*
 * Enable or disable process synchronization object tracking.
 * Currently unused by dbx.
 */
#pragma weak td_ta_sync_tracking_enable = __td_ta_sync_tracking_enable
td_err_e
__td_ta_sync_tracking_enable(td_thragent_t *ta_p, int onoff)
{
	struct ps_prochandle *ph_p;
	td_err_e return_val;
	register_sync_t enable;

	if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
		return (return_val);
	/*
	 * Values of tdb_register_sync in the victim process:
	 *	REGISTER_SYNC_ENABLE	enables registration of synch objects
	 *	REGISTER_SYNC_DISABLE	disables registration of synch objects
	 * These cause the table to be cleared and tdb_register_sync set to:
	 *	REGISTER_SYNC_ON	registration in effect
	 *	REGISTER_SYNC_OFF	registration not in effect
	 */
	enable = onoff? REGISTER_SYNC_ENABLE : REGISTER_SYNC_DISABLE;
	if (ps_pdwrite(ph_p, ta_p->tdb_register_sync_addr,
	    &enable, sizeof (enable)) != PS_OK)
		return_val = TD_DBERR;
	/*
	 * Remember that this interface was called (see td_ta_delete()).
	 */
	ta_p->sync_tracking = 1;
	ph_unlock(ta_p);
	return (return_val);
}

/*
 * Iterate over all known synchronization variables.
 * It is very possible that the list generated is incomplete,
 * because the iterator can only find synchronization variables
 * that have been registered by the process since synchronization
 * object registration was enabled.
 * The call back function cb is called for each synchronization
 * variable with two arguments: a pointer to the synchronization
 * handle and the passed-in argument cbdata.
 * If cb returns a non-zero value, iterations are terminated.
 */
#pragma weak td_ta_sync_iter = __td_ta_sync_iter
td_err_e
__td_ta_sync_iter(td_thragent_t *ta_p, td_sync_iter_f *cb, void *cbdata)
{
	struct ps_prochandle *ph_p;
	td_err_e	return_val;
	int		i;
	register_sync_t	enable;
	psaddr_t	next_desc;
	tdb_sync_stats_t sync_stats;
	td_synchandle_t	synchandle;
	psaddr_t	psaddr;
	void		*vaddr;
	uint64_t	*sync_addr_hash = NULL;

	if (cb == NULL)
		return (TD_ERR);
	if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
		return (return_val);
	if (ps_pstop(ph_p) != PS_OK) {
		ph_unlock(ta_p);
		return (TD_DBERR);
	}
	if (ps_pdread(ph_p, ta_p->tdb_register_sync_addr,
	    &enable, sizeof (enable)) != PS_OK) {
		return_val = TD_DBERR;
		goto out;
	}
	if (enable != REGISTER_SYNC_ON)
		goto out;

	/*
	 * First read the hash table.
	 * The hash table is large; allocate with mmap().
	 */
	if ((vaddr = mmap(NULL, TDB_HASH_SIZE * sizeof (uint64_t),
	    PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANON, -1, (off_t)0))
	    == MAP_FAILED) {
		return_val = TD_MALLOC;
		goto out;
	}
	sync_addr_hash = vaddr;

	if (ta_p->model == PR_MODEL_NATIVE) {
		if (ps_pdread(ph_p, ta_p->uberdata_addr +
		    offsetof(uberdata_t, tdb.tdb_sync_addr_hash),
11660Sstevel@tonic-gate 		    &psaddr, sizeof (psaddr)) != PS_OK) {
11670Sstevel@tonic-gate 			return_val = TD_DBERR;
11680Sstevel@tonic-gate 			goto out;
11690Sstevel@tonic-gate 		}
11700Sstevel@tonic-gate 	} else {
11710Sstevel@tonic-gate #ifdef  _SYSCALL32
11720Sstevel@tonic-gate 		caddr32_t addr;
11730Sstevel@tonic-gate 
11740Sstevel@tonic-gate 		if (ps_pdread(ph_p, ta_p->uberdata_addr +
11750Sstevel@tonic-gate 		    offsetof(uberdata32_t, tdb.tdb_sync_addr_hash),
11760Sstevel@tonic-gate 		    &addr, sizeof (addr)) != PS_OK) {
11770Sstevel@tonic-gate 			return_val = TD_DBERR;
11780Sstevel@tonic-gate 			goto out;
11790Sstevel@tonic-gate 		}
11800Sstevel@tonic-gate 		psaddr = addr;
11810Sstevel@tonic-gate #else
11820Sstevel@tonic-gate 		return_val = TD_ERR;
11830Sstevel@tonic-gate 		goto out;
11840Sstevel@tonic-gate #endif /* _SYSCALL32 */
11850Sstevel@tonic-gate 	}
11860Sstevel@tonic-gate 
11870Sstevel@tonic-gate 	if (psaddr == NULL)
11880Sstevel@tonic-gate 		goto out;
11890Sstevel@tonic-gate 	if (ps_pdread(ph_p, psaddr, sync_addr_hash,
11900Sstevel@tonic-gate 	    TDB_HASH_SIZE * sizeof (uint64_t)) != PS_OK) {
11910Sstevel@tonic-gate 		return_val = TD_DBERR;
11920Sstevel@tonic-gate 		goto out;
11930Sstevel@tonic-gate 	}
11940Sstevel@tonic-gate 
11950Sstevel@tonic-gate 	/*
11960Sstevel@tonic-gate 	 * Now scan the hash table.
11970Sstevel@tonic-gate 	 */
11980Sstevel@tonic-gate 	for (i = 0; i < TDB_HASH_SIZE; i++) {
11990Sstevel@tonic-gate 		for (next_desc = (psaddr_t)sync_addr_hash[i];
12000Sstevel@tonic-gate 		    next_desc != NULL;
12010Sstevel@tonic-gate 		    next_desc = (psaddr_t)sync_stats.next) {
12020Sstevel@tonic-gate 			if (ps_pdread(ph_p, next_desc,
12030Sstevel@tonic-gate 			    &sync_stats, sizeof (sync_stats)) != PS_OK) {
12040Sstevel@tonic-gate 				return_val = TD_DBERR;
12050Sstevel@tonic-gate 				goto out;
12060Sstevel@tonic-gate 			}
12070Sstevel@tonic-gate 			if (sync_stats.un.type == TDB_NONE) {
12080Sstevel@tonic-gate 				/* not registered since registration enabled */
12090Sstevel@tonic-gate 				continue;
12100Sstevel@tonic-gate 			}
12110Sstevel@tonic-gate 			synchandle.sh_ta_p = ta_p;
12120Sstevel@tonic-gate 			synchandle.sh_unique = (psaddr_t)sync_stats.sync_addr;
12130Sstevel@tonic-gate 			if ((*cb)(&synchandle, cbdata) != 0)
12140Sstevel@tonic-gate 				goto out;
12150Sstevel@tonic-gate 		}
12160Sstevel@tonic-gate 	}
12170Sstevel@tonic-gate 
12180Sstevel@tonic-gate out:
12190Sstevel@tonic-gate 	if (sync_addr_hash != NULL)
12200Sstevel@tonic-gate 		(void) munmap((void *)sync_addr_hash,
12210Sstevel@tonic-gate 		    TDB_HASH_SIZE * sizeof (uint64_t));
12220Sstevel@tonic-gate 	(void) ps_pcontinue(ph_p);
12230Sstevel@tonic-gate 	ph_unlock(ta_p);
12240Sstevel@tonic-gate 	return (return_val);
12250Sstevel@tonic-gate }
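
/*
 * Usage sketch (hedged illustration, not part of this file): walking the
 * registered synchronization objects with a callback.  The callback
 * signature follows td_sync_iter_f as it is invoked above; returning
 * non-zero from the callback stops the walk.  'count_sync' and 'nsync'
 * are hypothetical names.
 *
 *	static int
 *	count_sync(const td_synchandle_t *sh_p, void *arg)
 *	{
 *		(*(int *)arg)++;
 *		return (0);
 *	}
 *
 *	int nsync = 0;
 *	(void) td_ta_sync_iter(ta, count_sync, &nsync);
 */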
12260Sstevel@tonic-gate 
12270Sstevel@tonic-gate /*
12280Sstevel@tonic-gate  * Enable process statistics collection.
12290Sstevel@tonic-gate  */
12300Sstevel@tonic-gate #pragma weak td_ta_enable_stats = __td_ta_enable_stats
12310Sstevel@tonic-gate /* ARGSUSED */
12320Sstevel@tonic-gate td_err_e
12330Sstevel@tonic-gate __td_ta_enable_stats(const td_thragent_t *ta_p, int onoff)
12340Sstevel@tonic-gate {
12350Sstevel@tonic-gate 	return (TD_NOCAPAB);
12360Sstevel@tonic-gate }
12370Sstevel@tonic-gate 
12380Sstevel@tonic-gate /*
12390Sstevel@tonic-gate  * Reset process statistics.
12400Sstevel@tonic-gate  */
12410Sstevel@tonic-gate #pragma weak td_ta_reset_stats = __td_ta_reset_stats
12420Sstevel@tonic-gate /* ARGSUSED */
12430Sstevel@tonic-gate td_err_e
12440Sstevel@tonic-gate __td_ta_reset_stats(const td_thragent_t *ta_p)
12450Sstevel@tonic-gate {
12460Sstevel@tonic-gate 	return (TD_NOCAPAB);
12470Sstevel@tonic-gate }
12480Sstevel@tonic-gate 
12490Sstevel@tonic-gate /*
12500Sstevel@tonic-gate  * Read process statistics.
12510Sstevel@tonic-gate  */
12520Sstevel@tonic-gate #pragma weak td_ta_get_stats = __td_ta_get_stats
12530Sstevel@tonic-gate /* ARGSUSED */
12540Sstevel@tonic-gate td_err_e
12550Sstevel@tonic-gate __td_ta_get_stats(const td_thragent_t *ta_p, td_ta_stats_t *tstats)
12560Sstevel@tonic-gate {
12570Sstevel@tonic-gate 	return (TD_NOCAPAB);
12580Sstevel@tonic-gate }
12590Sstevel@tonic-gate 
12600Sstevel@tonic-gate /*
12610Sstevel@tonic-gate  * Transfer information from lwp struct to thread information struct.
12620Sstevel@tonic-gate  * XXX -- lots of this needs cleaning up.
12630Sstevel@tonic-gate  */
12640Sstevel@tonic-gate static void
12650Sstevel@tonic-gate td_thr2to(td_thragent_t *ta_p, psaddr_t ts_addr,
12660Sstevel@tonic-gate 	ulwp_t *ulwp, td_thrinfo_t *ti_p)
12670Sstevel@tonic-gate {
12680Sstevel@tonic-gate 	lwpid_t lwpid;
12690Sstevel@tonic-gate 
12700Sstevel@tonic-gate 	if ((lwpid = ulwp->ul_lwpid) == 0)
12710Sstevel@tonic-gate 		lwpid = 1;
12720Sstevel@tonic-gate 	(void) memset(ti_p, 0, sizeof (*ti_p));
12730Sstevel@tonic-gate 	ti_p->ti_ta_p = ta_p;
12740Sstevel@tonic-gate 	ti_p->ti_user_flags = ulwp->ul_usropts;
12750Sstevel@tonic-gate 	ti_p->ti_tid = lwpid;
12760Sstevel@tonic-gate 	ti_p->ti_exitval = ulwp->ul_rval;
12770Sstevel@tonic-gate 	ti_p->ti_startfunc = (psaddr_t)ulwp->ul_startpc;
12780Sstevel@tonic-gate 	if (!ulwp->ul_dead) {
12790Sstevel@tonic-gate 		/*
12800Sstevel@tonic-gate 		 * The bloody fools got this backwards!
12810Sstevel@tonic-gate 		 */
12820Sstevel@tonic-gate 		ti_p->ti_stkbase = (psaddr_t)ulwp->ul_stktop;
12830Sstevel@tonic-gate 		ti_p->ti_stksize = ulwp->ul_stksiz;
12840Sstevel@tonic-gate 	}
12850Sstevel@tonic-gate 	ti_p->ti_ro_area = ts_addr;
12860Sstevel@tonic-gate 	ti_p->ti_ro_size = ulwp->ul_replace?
12870Sstevel@tonic-gate 		REPLACEMENT_SIZE : sizeof (ulwp_t);
12880Sstevel@tonic-gate 	ti_p->ti_state = ulwp->ul_dead? TD_THR_ZOMBIE :
12890Sstevel@tonic-gate 		ulwp->ul_stop? TD_THR_STOPPED :
12900Sstevel@tonic-gate 		ulwp->ul_wchan? TD_THR_SLEEP :
12910Sstevel@tonic-gate 		TD_THR_ACTIVE;
12920Sstevel@tonic-gate 	ti_p->ti_db_suspended = 0;
12930Sstevel@tonic-gate 	ti_p->ti_type = TD_THR_USER;
12940Sstevel@tonic-gate 	ti_p->ti_sp = ulwp->ul_sp;
12950Sstevel@tonic-gate 	ti_p->ti_flags = 0;
12960Sstevel@tonic-gate 	ti_p->ti_pri = ulwp->ul_pri;
12970Sstevel@tonic-gate 	ti_p->ti_lid = lwpid;
12980Sstevel@tonic-gate 	if (!ulwp->ul_dead)
12990Sstevel@tonic-gate 		ti_p->ti_sigmask = ulwp->ul_sigmask;
13000Sstevel@tonic-gate 	ti_p->ti_traceme = 0;
13010Sstevel@tonic-gate 	ti_p->ti_preemptflag = 0;
13020Sstevel@tonic-gate 	ti_p->ti_pirecflag = 0;
13030Sstevel@tonic-gate 	(void) sigemptyset(&ti_p->ti_pending);
13040Sstevel@tonic-gate 	ti_p->ti_events = ulwp->ul_td_evbuf.eventmask;
13050Sstevel@tonic-gate }
13060Sstevel@tonic-gate 
13070Sstevel@tonic-gate #if defined(_LP64) && defined(_SYSCALL32)
13080Sstevel@tonic-gate static void
13090Sstevel@tonic-gate td_thr2to32(td_thragent_t *ta_p, psaddr_t ts_addr,
13100Sstevel@tonic-gate 	ulwp32_t *ulwp, td_thrinfo_t *ti_p)
13110Sstevel@tonic-gate {
13120Sstevel@tonic-gate 	lwpid_t lwpid;
13130Sstevel@tonic-gate 
13140Sstevel@tonic-gate 	if ((lwpid = ulwp->ul_lwpid) == 0)
13150Sstevel@tonic-gate 		lwpid = 1;
13160Sstevel@tonic-gate 	(void) memset(ti_p, 0, sizeof (*ti_p));
13170Sstevel@tonic-gate 	ti_p->ti_ta_p = ta_p;
13180Sstevel@tonic-gate 	ti_p->ti_user_flags = ulwp->ul_usropts;
13190Sstevel@tonic-gate 	ti_p->ti_tid = lwpid;
13200Sstevel@tonic-gate 	ti_p->ti_exitval = (void *)(uintptr_t)ulwp->ul_rval;
13210Sstevel@tonic-gate 	ti_p->ti_startfunc = (psaddr_t)ulwp->ul_startpc;
13220Sstevel@tonic-gate 	if (!ulwp->ul_dead) {
13230Sstevel@tonic-gate 		/*
13240Sstevel@tonic-gate 		 * The bloody fools got this backwards!
13250Sstevel@tonic-gate 		 */
13260Sstevel@tonic-gate 		ti_p->ti_stkbase = (psaddr_t)ulwp->ul_stktop;
13270Sstevel@tonic-gate 		ti_p->ti_stksize = ulwp->ul_stksiz;
13280Sstevel@tonic-gate 	}
13290Sstevel@tonic-gate 	ti_p->ti_ro_area = ts_addr;
13300Sstevel@tonic-gate 	ti_p->ti_ro_size = ulwp->ul_replace?
13310Sstevel@tonic-gate 		REPLACEMENT_SIZE32 : sizeof (ulwp32_t);
13320Sstevel@tonic-gate 	ti_p->ti_state = ulwp->ul_dead? TD_THR_ZOMBIE :
13330Sstevel@tonic-gate 		ulwp->ul_stop? TD_THR_STOPPED :
13340Sstevel@tonic-gate 		ulwp->ul_wchan? TD_THR_SLEEP :
13350Sstevel@tonic-gate 		TD_THR_ACTIVE;
13360Sstevel@tonic-gate 	ti_p->ti_db_suspended = 0;
13370Sstevel@tonic-gate 	ti_p->ti_type = TD_THR_USER;
13380Sstevel@tonic-gate 	ti_p->ti_sp = (uint32_t)ulwp->ul_sp;
13390Sstevel@tonic-gate 	ti_p->ti_flags = 0;
13400Sstevel@tonic-gate 	ti_p->ti_pri = ulwp->ul_pri;
13410Sstevel@tonic-gate 	ti_p->ti_lid = lwpid;
13420Sstevel@tonic-gate 	if (!ulwp->ul_dead)
13430Sstevel@tonic-gate 		ti_p->ti_sigmask = *(sigset_t *)&ulwp->ul_sigmask;
13440Sstevel@tonic-gate 	ti_p->ti_traceme = 0;
13450Sstevel@tonic-gate 	ti_p->ti_preemptflag = 0;
13460Sstevel@tonic-gate 	ti_p->ti_pirecflag = 0;
13470Sstevel@tonic-gate 	(void) sigemptyset(&ti_p->ti_pending);
13480Sstevel@tonic-gate 	ti_p->ti_events = ulwp->ul_td_evbuf.eventmask;
13490Sstevel@tonic-gate }
13500Sstevel@tonic-gate #endif	/* _SYSCALL32 */
13510Sstevel@tonic-gate 
13520Sstevel@tonic-gate /*
13530Sstevel@tonic-gate  * Get thread information.
13540Sstevel@tonic-gate  */
13550Sstevel@tonic-gate #pragma weak td_thr_get_info = __td_thr_get_info
13560Sstevel@tonic-gate td_err_e
13570Sstevel@tonic-gate __td_thr_get_info(td_thrhandle_t *th_p, td_thrinfo_t *ti_p)
13580Sstevel@tonic-gate {
13590Sstevel@tonic-gate 	struct ps_prochandle *ph_p;
13600Sstevel@tonic-gate 	td_thragent_t	*ta_p;
13610Sstevel@tonic-gate 	td_err_e	return_val;
13620Sstevel@tonic-gate 	psaddr_t	psaddr;
13630Sstevel@tonic-gate 
13640Sstevel@tonic-gate 	if (ti_p == NULL)
13650Sstevel@tonic-gate 		return (TD_ERR);
13660Sstevel@tonic-gate 	(void) memset(ti_p, 0, sizeof (*ti_p));
13670Sstevel@tonic-gate 
13680Sstevel@tonic-gate 	if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
13690Sstevel@tonic-gate 		return (return_val);
13700Sstevel@tonic-gate 	ta_p = th_p->th_ta_p;
13710Sstevel@tonic-gate 	if (ps_pstop(ph_p) != PS_OK) {
13720Sstevel@tonic-gate 		ph_unlock(ta_p);
13730Sstevel@tonic-gate 		return (TD_DBERR);
13740Sstevel@tonic-gate 	}
13750Sstevel@tonic-gate 
13760Sstevel@tonic-gate 	/*
13770Sstevel@tonic-gate 	 * Read the ulwp struct from the process.
13780Sstevel@tonic-gate 	 * Transfer the ulwp struct to the thread information struct.
13790Sstevel@tonic-gate 	 */
13800Sstevel@tonic-gate 	psaddr = th_p->th_unique;
13810Sstevel@tonic-gate 	if (ta_p->model == PR_MODEL_NATIVE) {
13820Sstevel@tonic-gate 		ulwp_t ulwp;
13830Sstevel@tonic-gate 
13840Sstevel@tonic-gate 		if (ps_pdread(ph_p, psaddr, &ulwp, sizeof (ulwp)) != PS_OK &&
13850Sstevel@tonic-gate 		    ((void) memset(&ulwp, 0, sizeof (ulwp)),
13860Sstevel@tonic-gate 		    ps_pdread(ph_p, psaddr, &ulwp, REPLACEMENT_SIZE)) != PS_OK)
13870Sstevel@tonic-gate 			return_val = TD_DBERR;
13880Sstevel@tonic-gate 		else
13890Sstevel@tonic-gate 			td_thr2to(ta_p, psaddr, &ulwp, ti_p);
13900Sstevel@tonic-gate 	} else {
13910Sstevel@tonic-gate #if defined(_LP64) && defined(_SYSCALL32)
13920Sstevel@tonic-gate 		ulwp32_t ulwp;
13930Sstevel@tonic-gate 
13940Sstevel@tonic-gate 		if (ps_pdread(ph_p, psaddr, &ulwp, sizeof (ulwp)) != PS_OK &&
13950Sstevel@tonic-gate 		    ((void) memset(&ulwp, 0, sizeof (ulwp)),
13960Sstevel@tonic-gate 		    ps_pdread(ph_p, psaddr, &ulwp, REPLACEMENT_SIZE32)) !=
13970Sstevel@tonic-gate 				PS_OK)
13980Sstevel@tonic-gate 			return_val = TD_DBERR;
13990Sstevel@tonic-gate 		else
14000Sstevel@tonic-gate 			td_thr2to32(ta_p, psaddr, &ulwp, ti_p);
14010Sstevel@tonic-gate #else
14020Sstevel@tonic-gate 		return_val = TD_ERR;
14030Sstevel@tonic-gate #endif	/* _SYSCALL32 */
14040Sstevel@tonic-gate 	}
14050Sstevel@tonic-gate 
14060Sstevel@tonic-gate 	(void) ps_pcontinue(ph_p);
14070Sstevel@tonic-gate 	ph_unlock(ta_p);
14080Sstevel@tonic-gate 	return (return_val);
14090Sstevel@tonic-gate }
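
/*
 * Usage sketch (hedged illustration): a debugger typically combines
 * td_ta_thr_iter() with td_thr_get_info() to snapshot every thread.
 * 'print_thread' is a hypothetical callback name; the iterator arguments
 * mirror those used elsewhere in this file.
 *
 *	static int
 *	print_thread(const td_thrhandle_t *th_p, void *arg)
 *	{
 *		td_thrinfo_t ti;
 *
 *		if (td_thr_get_info((td_thrhandle_t *)th_p, &ti) == TD_OK)
 *			(void) printf("tid %d state %d\n",
 *			    (int)ti.ti_tid, (int)ti.ti_state);
 *		return (0);
 *	}
 *
 *	(void) td_ta_thr_iter(ta, print_thread, NULL, TD_THR_ANY_STATE,
 *	    TD_THR_LOWEST_PRIORITY, TD_SIGNO_MASK, TD_THR_ANY_USER_FLAGS);
 */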
14100Sstevel@tonic-gate 
14110Sstevel@tonic-gate /*
14120Sstevel@tonic-gate  * Given a process and an event number, return information about
14130Sstevel@tonic-gate  * an address in the process at which a breakpoint can be set
14140Sstevel@tonic-gate  * to monitor the event.
14150Sstevel@tonic-gate  */
14160Sstevel@tonic-gate #pragma weak td_ta_event_addr = __td_ta_event_addr
14170Sstevel@tonic-gate td_err_e
14180Sstevel@tonic-gate __td_ta_event_addr(td_thragent_t *ta_p, td_event_e event, td_notify_t *notify_p)
14190Sstevel@tonic-gate {
14200Sstevel@tonic-gate 	if (ta_p == NULL)
14210Sstevel@tonic-gate 		return (TD_BADTA);
14220Sstevel@tonic-gate 	if (event < TD_MIN_EVENT_NUM || event > TD_MAX_EVENT_NUM)
14230Sstevel@tonic-gate 		return (TD_NOEVENT);
14240Sstevel@tonic-gate 	if (notify_p == NULL)
14250Sstevel@tonic-gate 		return (TD_ERR);
14260Sstevel@tonic-gate 
14270Sstevel@tonic-gate 	notify_p->type = NOTIFY_BPT;
14280Sstevel@tonic-gate 	notify_p->u.bptaddr = ta_p->tdb_events[event - TD_MIN_EVENT_NUM];
14290Sstevel@tonic-gate 
14300Sstevel@tonic-gate 	return (TD_OK);
14310Sstevel@tonic-gate }
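
/*
 * Usage sketch (hedged illustration): after enabling an event, the
 * debugger asks for its notification address and plants a breakpoint
 * there; 'plant_breakpoint' is a hypothetical debugger routine.
 *
 *	td_notify_t notify;
 *
 *	if (td_ta_event_addr(ta, TD_CREATE, &notify) == TD_OK &&
 *	    notify.type == NOTIFY_BPT)
 *		plant_breakpoint(notify.u.bptaddr);
 */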
14320Sstevel@tonic-gate 
14330Sstevel@tonic-gate /*
14340Sstevel@tonic-gate  * Add the events in eventset 2 to eventset 1.
14350Sstevel@tonic-gate  */
14360Sstevel@tonic-gate static void
14370Sstevel@tonic-gate eventsetaddset(td_thr_events_t *event1_p, td_thr_events_t *event2_p)
14380Sstevel@tonic-gate {
14390Sstevel@tonic-gate 	int	i;
14400Sstevel@tonic-gate 
14410Sstevel@tonic-gate 	for (i = 0; i < TD_EVENTSIZE; i++)
14420Sstevel@tonic-gate 		event1_p->event_bits[i] |= event2_p->event_bits[i];
14430Sstevel@tonic-gate }
14440Sstevel@tonic-gate 
14450Sstevel@tonic-gate /*
14460Sstevel@tonic-gate  * Delete the events in eventset 2 from eventset 1.
14470Sstevel@tonic-gate  */
14480Sstevel@tonic-gate static void
14490Sstevel@tonic-gate eventsetdelset(td_thr_events_t *event1_p, td_thr_events_t *event2_p)
14500Sstevel@tonic-gate {
14510Sstevel@tonic-gate 	int	i;
14520Sstevel@tonic-gate 
14530Sstevel@tonic-gate 	for (i = 0; i < TD_EVENTSIZE; i++)
14540Sstevel@tonic-gate 		event1_p->event_bits[i] &= ~event2_p->event_bits[i];
14550Sstevel@tonic-gate }
14560Sstevel@tonic-gate 
14570Sstevel@tonic-gate /*
14580Sstevel@tonic-gate  * Either add or delete the given event set from a thread's event mask.
14590Sstevel@tonic-gate  */
14600Sstevel@tonic-gate static td_err_e
14610Sstevel@tonic-gate mod_eventset(td_thrhandle_t *th_p, td_thr_events_t *events, int onoff)
14620Sstevel@tonic-gate {
14630Sstevel@tonic-gate 	struct ps_prochandle *ph_p;
14640Sstevel@tonic-gate 	td_err_e	return_val = TD_OK;
14650Sstevel@tonic-gate 	char		enable;
14660Sstevel@tonic-gate 	td_thr_events_t	evset;
14670Sstevel@tonic-gate 	psaddr_t	psaddr_evset;
14680Sstevel@tonic-gate 	psaddr_t	psaddr_enab;
14690Sstevel@tonic-gate 
14700Sstevel@tonic-gate 	if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
14710Sstevel@tonic-gate 		return (return_val);
14720Sstevel@tonic-gate 	if (th_p->th_ta_p->model == PR_MODEL_NATIVE) {
14730Sstevel@tonic-gate 		ulwp_t *ulwp = (ulwp_t *)th_p->th_unique;
14740Sstevel@tonic-gate 		psaddr_evset = (psaddr_t)&ulwp->ul_td_evbuf.eventmask;
14750Sstevel@tonic-gate 		psaddr_enab = (psaddr_t)&ulwp->ul_td_events_enable;
14760Sstevel@tonic-gate 	} else {
14770Sstevel@tonic-gate #if defined(_LP64) && defined(_SYSCALL32)
14780Sstevel@tonic-gate 		ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique;
14790Sstevel@tonic-gate 		psaddr_evset = (psaddr_t)&ulwp->ul_td_evbuf.eventmask;
14800Sstevel@tonic-gate 		psaddr_enab = (psaddr_t)&ulwp->ul_td_events_enable;
14810Sstevel@tonic-gate #else
14820Sstevel@tonic-gate 		ph_unlock(th_p->th_ta_p);
14830Sstevel@tonic-gate 		return (TD_ERR);
14840Sstevel@tonic-gate #endif	/* _SYSCALL32 */
14850Sstevel@tonic-gate 	}
14860Sstevel@tonic-gate 	if (ps_pstop(ph_p) != PS_OK) {
14870Sstevel@tonic-gate 		ph_unlock(th_p->th_ta_p);
14880Sstevel@tonic-gate 		return (TD_DBERR);
14890Sstevel@tonic-gate 	}
14900Sstevel@tonic-gate 
14910Sstevel@tonic-gate 	if (ps_pdread(ph_p, psaddr_evset, &evset, sizeof (evset)) != PS_OK)
14920Sstevel@tonic-gate 		return_val = TD_DBERR;
14930Sstevel@tonic-gate 	else {
14940Sstevel@tonic-gate 		if (onoff)
14950Sstevel@tonic-gate 			eventsetaddset(&evset, events);
14960Sstevel@tonic-gate 		else
14970Sstevel@tonic-gate 			eventsetdelset(&evset, events);
14980Sstevel@tonic-gate 		if (ps_pdwrite(ph_p, psaddr_evset, &evset, sizeof (evset))
14990Sstevel@tonic-gate 		    != PS_OK)
15000Sstevel@tonic-gate 			return_val = TD_DBERR;
15010Sstevel@tonic-gate 		else {
15020Sstevel@tonic-gate 			enable = 0;
15030Sstevel@tonic-gate 			if (td_eventismember(&evset, TD_EVENTS_ENABLE))
15040Sstevel@tonic-gate 				enable = 1;
15050Sstevel@tonic-gate 			if (ps_pdwrite(ph_p, psaddr_enab,
15060Sstevel@tonic-gate 			    &enable, sizeof (enable)) != PS_OK)
15070Sstevel@tonic-gate 				return_val = TD_DBERR;
15080Sstevel@tonic-gate 		}
15090Sstevel@tonic-gate 	}
15100Sstevel@tonic-gate 
15110Sstevel@tonic-gate 	(void) ps_pcontinue(ph_p);
15120Sstevel@tonic-gate 	ph_unlock(th_p->th_ta_p);
15130Sstevel@tonic-gate 	return (return_val);
15140Sstevel@tonic-gate }
15150Sstevel@tonic-gate 
15160Sstevel@tonic-gate /*
15170Sstevel@tonic-gate  * Enable or disable tracing for a given thread.  Tracing
15180Sstevel@tonic-gate  * is filtered based on the event mask of each thread.  Tracing
15190Sstevel@tonic-gate  * can be turned on/off for the thread without changing thread
15200Sstevel@tonic-gate  * event mask.
15210Sstevel@tonic-gate  * Currently unused by dbx.
15220Sstevel@tonic-gate  */
15230Sstevel@tonic-gate #pragma weak td_thr_event_enable = __td_thr_event_enable
15240Sstevel@tonic-gate td_err_e
15250Sstevel@tonic-gate __td_thr_event_enable(td_thrhandle_t *th_p, int onoff)
15260Sstevel@tonic-gate {
15270Sstevel@tonic-gate 	td_thr_events_t	evset;
15280Sstevel@tonic-gate 
15290Sstevel@tonic-gate 	td_event_emptyset(&evset);
15300Sstevel@tonic-gate 	td_event_addset(&evset, TD_EVENTS_ENABLE);
15310Sstevel@tonic-gate 	return (mod_eventset(th_p, &evset, onoff));
15320Sstevel@tonic-gate }
15330Sstevel@tonic-gate 
15340Sstevel@tonic-gate /*
15350Sstevel@tonic-gate  * Set the event mask to enable the given events; they are turned on
15360Sstevel@tonic-gate  * in the thread's event mask.  If a thread encounters an event
15370Sstevel@tonic-gate  * for which its event mask is on, notification will be sent
15380Sstevel@tonic-gate  * to the debugger.
15390Sstevel@tonic-gate  * Addresses for each event are provided to the
15400Sstevel@tonic-gate  * debugger.  It is assumed that a breakpoint of some type will
15410Sstevel@tonic-gate  * be placed at that address.  If the event mask for the thread
15420Sstevel@tonic-gate  * is on, the instruction at the address will be executed.
15430Sstevel@tonic-gate  * Otherwise, the instruction will be skipped.
15440Sstevel@tonic-gate  */
15450Sstevel@tonic-gate #pragma weak td_thr_set_event = __td_thr_set_event
15460Sstevel@tonic-gate td_err_e
15470Sstevel@tonic-gate __td_thr_set_event(td_thrhandle_t *th_p, td_thr_events_t *events)
15480Sstevel@tonic-gate {
15490Sstevel@tonic-gate 	return (mod_eventset(th_p, events, 1));
15500Sstevel@tonic-gate }
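
/*
 * Usage sketch (hedged illustration): reporting thread creation and death
 * usually means setting both the process-global mask and each thread's
 * mask, then turning on the per-thread enable flag.  'th' is assumed to
 * be a thread handle obtained from iteration.
 *
 *	td_thr_events_t evset;
 *
 *	td_event_emptyset(&evset);
 *	td_event_addset(&evset, TD_CREATE);
 *	td_event_addset(&evset, TD_DEATH);
 *	(void) td_ta_set_event(ta, &evset);
 *	(void) td_thr_set_event(th, &evset);
 *	(void) td_thr_event_enable(th, 1);
 */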
15510Sstevel@tonic-gate 
15520Sstevel@tonic-gate /*
15530Sstevel@tonic-gate  * Enable or disable a set of events in the process-global event mask,
15540Sstevel@tonic-gate  * depending on the value of onoff.
15550Sstevel@tonic-gate  */
15560Sstevel@tonic-gate static td_err_e
15570Sstevel@tonic-gate td_ta_mod_event(td_thragent_t *ta_p, td_thr_events_t *events, int onoff)
15580Sstevel@tonic-gate {
15590Sstevel@tonic-gate 	struct ps_prochandle *ph_p;
15600Sstevel@tonic-gate 	td_thr_events_t targ_eventset;
15610Sstevel@tonic-gate 	td_err_e	return_val;
15620Sstevel@tonic-gate 
15630Sstevel@tonic-gate 	if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
15640Sstevel@tonic-gate 		return (return_val);
15650Sstevel@tonic-gate 	if (ps_pstop(ph_p) != PS_OK) {
15660Sstevel@tonic-gate 		ph_unlock(ta_p);
15670Sstevel@tonic-gate 		return (TD_DBERR);
15680Sstevel@tonic-gate 	}
15690Sstevel@tonic-gate 	if (ps_pdread(ph_p, ta_p->tdb_eventmask_addr,
15700Sstevel@tonic-gate 	    &targ_eventset, sizeof (targ_eventset)) != PS_OK)
15710Sstevel@tonic-gate 		return_val = TD_DBERR;
15720Sstevel@tonic-gate 	else {
15730Sstevel@tonic-gate 		if (onoff)
15740Sstevel@tonic-gate 			eventsetaddset(&targ_eventset, events);
15750Sstevel@tonic-gate 		else
15760Sstevel@tonic-gate 			eventsetdelset(&targ_eventset, events);
15770Sstevel@tonic-gate 		if (ps_pdwrite(ph_p, ta_p->tdb_eventmask_addr,
15780Sstevel@tonic-gate 		    &targ_eventset, sizeof (targ_eventset)) != PS_OK)
15790Sstevel@tonic-gate 			return_val = TD_DBERR;
15800Sstevel@tonic-gate 	}
15810Sstevel@tonic-gate 	(void) ps_pcontinue(ph_p);
15820Sstevel@tonic-gate 	ph_unlock(ta_p);
15830Sstevel@tonic-gate 	return (return_val);
15840Sstevel@tonic-gate }
15850Sstevel@tonic-gate 
15860Sstevel@tonic-gate /*
15870Sstevel@tonic-gate  * Enable a set of events in the process-global event mask.
15880Sstevel@tonic-gate  */
15890Sstevel@tonic-gate #pragma weak td_ta_set_event = __td_ta_set_event
15900Sstevel@tonic-gate td_err_e
15910Sstevel@tonic-gate __td_ta_set_event(td_thragent_t *ta_p, td_thr_events_t *events)
15920Sstevel@tonic-gate {
15930Sstevel@tonic-gate 	return (td_ta_mod_event(ta_p, events, 1));
15940Sstevel@tonic-gate }
15950Sstevel@tonic-gate 
15960Sstevel@tonic-gate /*
15970Sstevel@tonic-gate  * Set event mask to disable the given event set; these events are cleared
15980Sstevel@tonic-gate  * from the event mask of the thread.  Events that occur for a thread
15990Sstevel@tonic-gate  * with the event masked off will not cause notification to be
16000Sstevel@tonic-gate  * sent to the debugger (see td_thr_set_event for fuller description).
16010Sstevel@tonic-gate  */
16020Sstevel@tonic-gate #pragma weak td_thr_clear_event = __td_thr_clear_event
16030Sstevel@tonic-gate td_err_e
16040Sstevel@tonic-gate __td_thr_clear_event(td_thrhandle_t *th_p, td_thr_events_t *events)
16050Sstevel@tonic-gate {
16060Sstevel@tonic-gate 	return (mod_eventset(th_p, events, 0));
16070Sstevel@tonic-gate }
16080Sstevel@tonic-gate 
16090Sstevel@tonic-gate /*
16100Sstevel@tonic-gate  * Disable a set of events in the process-global event mask.
16110Sstevel@tonic-gate  */
16120Sstevel@tonic-gate #pragma weak td_ta_clear_event = __td_ta_clear_event
16130Sstevel@tonic-gate td_err_e
16140Sstevel@tonic-gate __td_ta_clear_event(td_thragent_t *ta_p, td_thr_events_t *events)
16150Sstevel@tonic-gate {
16160Sstevel@tonic-gate 	return (td_ta_mod_event(ta_p, events, 0));
16170Sstevel@tonic-gate }
16180Sstevel@tonic-gate 
16190Sstevel@tonic-gate /*
16200Sstevel@tonic-gate  * This function returns the most recent event message, if any,
16210Sstevel@tonic-gate  * associated with a thread.  Given a thread handle, return the message
16220Sstevel@tonic-gate  * corresponding to the event encountered by the thread.  Only one
16230Sstevel@tonic-gate  * message per thread is saved.  Messages from earlier events are lost
16240Sstevel@tonic-gate  * when later events occur.
16250Sstevel@tonic-gate  */
16260Sstevel@tonic-gate #pragma weak td_thr_event_getmsg = __td_thr_event_getmsg
16270Sstevel@tonic-gate td_err_e
16280Sstevel@tonic-gate __td_thr_event_getmsg(td_thrhandle_t *th_p, td_event_msg_t *msg)
16290Sstevel@tonic-gate {
16300Sstevel@tonic-gate 	struct ps_prochandle *ph_p;
16310Sstevel@tonic-gate 	td_err_e	return_val = TD_OK;
16320Sstevel@tonic-gate 	psaddr_t	psaddr;
16330Sstevel@tonic-gate 
16340Sstevel@tonic-gate 	if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
16350Sstevel@tonic-gate 		return (return_val);
16360Sstevel@tonic-gate 	if (ps_pstop(ph_p) != PS_OK) {
16370Sstevel@tonic-gate 		ph_unlock(th_p->th_ta_p);
16380Sstevel@tonic-gate 		return (TD_BADTA);
16390Sstevel@tonic-gate 	}
16400Sstevel@tonic-gate 	if (th_p->th_ta_p->model == PR_MODEL_NATIVE) {
16410Sstevel@tonic-gate 		ulwp_t *ulwp = (ulwp_t *)th_p->th_unique;
16420Sstevel@tonic-gate 		td_evbuf_t evbuf;
16430Sstevel@tonic-gate 
16440Sstevel@tonic-gate 		psaddr = (psaddr_t)&ulwp->ul_td_evbuf;
16450Sstevel@tonic-gate 		if (ps_pdread(ph_p, psaddr, &evbuf, sizeof (evbuf)) != PS_OK) {
16460Sstevel@tonic-gate 			return_val = TD_DBERR;
16470Sstevel@tonic-gate 		} else if (evbuf.eventnum == TD_EVENT_NONE) {
16480Sstevel@tonic-gate 			return_val = TD_NOEVENT;
16490Sstevel@tonic-gate 		} else {
16500Sstevel@tonic-gate 			msg->event = evbuf.eventnum;
16510Sstevel@tonic-gate 			msg->th_p = (td_thrhandle_t *)th_p;
16520Sstevel@tonic-gate 			msg->msg.data = (uintptr_t)evbuf.eventdata;
16530Sstevel@tonic-gate 			/* "Consume" the message */
16540Sstevel@tonic-gate 			evbuf.eventnum = TD_EVENT_NONE;
16550Sstevel@tonic-gate 			evbuf.eventdata = NULL;
16560Sstevel@tonic-gate 			if (ps_pdwrite(ph_p, psaddr, &evbuf, sizeof (evbuf))
16570Sstevel@tonic-gate 			    != PS_OK)
16580Sstevel@tonic-gate 				return_val = TD_DBERR;
16590Sstevel@tonic-gate 		}
16600Sstevel@tonic-gate 	} else {
16610Sstevel@tonic-gate #if defined(_LP64) && defined(_SYSCALL32)
16620Sstevel@tonic-gate 		ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique;
16630Sstevel@tonic-gate 		td_evbuf32_t evbuf;
16640Sstevel@tonic-gate 
16650Sstevel@tonic-gate 		psaddr = (psaddr_t)&ulwp->ul_td_evbuf;
16660Sstevel@tonic-gate 		if (ps_pdread(ph_p, psaddr, &evbuf, sizeof (evbuf)) != PS_OK) {
16670Sstevel@tonic-gate 			return_val = TD_DBERR;
16680Sstevel@tonic-gate 		} else if (evbuf.eventnum == TD_EVENT_NONE) {
16690Sstevel@tonic-gate 			return_val = TD_NOEVENT;
16700Sstevel@tonic-gate 		} else {
16710Sstevel@tonic-gate 			msg->event = evbuf.eventnum;
16720Sstevel@tonic-gate 			msg->th_p = (td_thrhandle_t *)th_p;
16730Sstevel@tonic-gate 			msg->msg.data = (uintptr_t)evbuf.eventdata;
16740Sstevel@tonic-gate 			/* "Consume" the message */
16750Sstevel@tonic-gate 			evbuf.eventnum = TD_EVENT_NONE;
16760Sstevel@tonic-gate 			evbuf.eventdata = NULL;
16770Sstevel@tonic-gate 			if (ps_pdwrite(ph_p, psaddr, &evbuf, sizeof (evbuf))
16780Sstevel@tonic-gate 			    != PS_OK)
16790Sstevel@tonic-gate 				return_val = TD_DBERR;
16800Sstevel@tonic-gate 		}
16810Sstevel@tonic-gate #else
16820Sstevel@tonic-gate 		return_val = TD_ERR;
16830Sstevel@tonic-gate #endif	/* _SYSCALL32 */
16840Sstevel@tonic-gate 	}
16850Sstevel@tonic-gate 
16860Sstevel@tonic-gate 	(void) ps_pcontinue(ph_p);
16870Sstevel@tonic-gate 	ph_unlock(th_p->th_ta_p);
16880Sstevel@tonic-gate 	return (return_val);
16890Sstevel@tonic-gate }
16900Sstevel@tonic-gate 
16910Sstevel@tonic-gate /*
16920Sstevel@tonic-gate  * The callback function that td_ta_event_getmsg uses when looking for
16930Sstevel@tonic-gate  * a thread with an event.  A thin wrapper around td_thr_event_getmsg.
16940Sstevel@tonic-gate  */
16950Sstevel@tonic-gate static int
16960Sstevel@tonic-gate event_msg_cb(const td_thrhandle_t *th_p, void *arg)
16970Sstevel@tonic-gate {
16980Sstevel@tonic-gate 	static td_thrhandle_t th;
16990Sstevel@tonic-gate 	td_event_msg_t *msg = arg;
17000Sstevel@tonic-gate 
17010Sstevel@tonic-gate 	if (__td_thr_event_getmsg((td_thrhandle_t *)th_p, msg) == TD_OK) {
17020Sstevel@tonic-gate 		/*
17030Sstevel@tonic-gate 		 * Got an event, stop iterating.
17040Sstevel@tonic-gate 		 *
17050Sstevel@tonic-gate 		 * Because of past mistakes in interface definition,
17060Sstevel@tonic-gate 		 * we are forced to pass back a static local variable
17070Sstevel@tonic-gate 		 * for the thread handle because th_p is a pointer
17080Sstevel@tonic-gate 		 * to a local variable in __td_ta_thr_iter().
17090Sstevel@tonic-gate 		 * Grr...
17100Sstevel@tonic-gate 		 */
17110Sstevel@tonic-gate 		th = *th_p;
17120Sstevel@tonic-gate 		msg->th_p = &th;
17130Sstevel@tonic-gate 		return (1);
17140Sstevel@tonic-gate 	}
17150Sstevel@tonic-gate 	return (0);
17160Sstevel@tonic-gate }
17170Sstevel@tonic-gate 
17180Sstevel@tonic-gate /*
17190Sstevel@tonic-gate  * This function is just like td_thr_event_getmsg, except that it is
17200Sstevel@tonic-gate  * passed a process handle rather than a thread handle, and returns
17210Sstevel@tonic-gate  * an event message for some thread in the process that has an event
17220Sstevel@tonic-gate  * message pending.  If no thread has an event message pending, this
17230Sstevel@tonic-gate  * routine returns TD_NOEVENT.  Thus, all pending event messages may
17240Sstevel@tonic-gate  * be collected from a process by repeatedly calling this routine
17250Sstevel@tonic-gate  * until it returns TD_NOEVENT.
17260Sstevel@tonic-gate  */
17270Sstevel@tonic-gate #pragma weak td_ta_event_getmsg = __td_ta_event_getmsg
17280Sstevel@tonic-gate td_err_e
17290Sstevel@tonic-gate __td_ta_event_getmsg(td_thragent_t *ta_p, td_event_msg_t *msg)
17300Sstevel@tonic-gate {
17310Sstevel@tonic-gate 	td_err_e return_val;
17320Sstevel@tonic-gate 
17330Sstevel@tonic-gate 	if (ta_p == NULL)
17340Sstevel@tonic-gate 		return (TD_BADTA);
17350Sstevel@tonic-gate 	if (ta_p->ph_p == NULL)
17360Sstevel@tonic-gate 		return (TD_BADPH);
17370Sstevel@tonic-gate 	if (msg == NULL)
17380Sstevel@tonic-gate 		return (TD_ERR);
17390Sstevel@tonic-gate 	msg->event = TD_EVENT_NONE;
17400Sstevel@tonic-gate 	if ((return_val = __td_ta_thr_iter(ta_p, event_msg_cb, msg,
17410Sstevel@tonic-gate 	    TD_THR_ANY_STATE, TD_THR_LOWEST_PRIORITY, TD_SIGNO_MASK,
17420Sstevel@tonic-gate 	    TD_THR_ANY_USER_FLAGS)) != TD_OK)
17430Sstevel@tonic-gate 		return (return_val);
17440Sstevel@tonic-gate 	if (msg->event == TD_EVENT_NONE)
17450Sstevel@tonic-gate 		return (TD_NOEVENT);
17460Sstevel@tonic-gate 	return (TD_OK);
17470Sstevel@tonic-gate }
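
/*
 * Usage sketch (hedged illustration): since only one message is saved per
 * thread, a debugger drains everything pending by looping until
 * TD_NOEVENT, exactly as described above.  'handle_new_thread' is a
 * hypothetical debugger routine.
 *
 *	td_event_msg_t msg;
 *
 *	while (td_ta_event_getmsg(ta, &msg) == TD_OK) {
 *		if (msg.event == TD_CREATE)
 *			handle_new_thread(msg.th_p);
 *	}
 */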
17480Sstevel@tonic-gate 
17490Sstevel@tonic-gate static lwpid_t
17500Sstevel@tonic-gate thr_to_lwpid(const td_thrhandle_t *th_p)
17510Sstevel@tonic-gate {
17520Sstevel@tonic-gate 	struct ps_prochandle *ph_p = th_p->th_ta_p->ph_p;
17530Sstevel@tonic-gate 	lwpid_t lwpid;
17540Sstevel@tonic-gate 
17550Sstevel@tonic-gate 	/*
17560Sstevel@tonic-gate 	 * The caller holds the prochandle lock
17570Sstevel@tonic-gate 	 * and has already verified everything.
17580Sstevel@tonic-gate 	 */
17590Sstevel@tonic-gate 	if (th_p->th_ta_p->model == PR_MODEL_NATIVE) {
17600Sstevel@tonic-gate 		ulwp_t *ulwp = (ulwp_t *)th_p->th_unique;
17610Sstevel@tonic-gate 
17620Sstevel@tonic-gate 		if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_lwpid,
17630Sstevel@tonic-gate 		    &lwpid, sizeof (lwpid)) != PS_OK)
17640Sstevel@tonic-gate 			lwpid = 0;
17650Sstevel@tonic-gate 		else if (lwpid == 0)
17660Sstevel@tonic-gate 			lwpid = 1;
17670Sstevel@tonic-gate 	} else {
17680Sstevel@tonic-gate #if defined(_LP64) && defined(_SYSCALL32)
17690Sstevel@tonic-gate 		ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique;
17700Sstevel@tonic-gate 
17710Sstevel@tonic-gate 		if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_lwpid,
17720Sstevel@tonic-gate 		    &lwpid, sizeof (lwpid)) != PS_OK)
17730Sstevel@tonic-gate 			lwpid = 0;
17740Sstevel@tonic-gate 		else if (lwpid == 0)
17750Sstevel@tonic-gate 			lwpid = 1;
17760Sstevel@tonic-gate #else
17770Sstevel@tonic-gate 		lwpid = 0;
17780Sstevel@tonic-gate #endif	/* _SYSCALL32 */
17790Sstevel@tonic-gate 	}
17800Sstevel@tonic-gate 
17810Sstevel@tonic-gate 	return (lwpid);
17820Sstevel@tonic-gate }
17830Sstevel@tonic-gate 
17840Sstevel@tonic-gate /*
17850Sstevel@tonic-gate  * Suspend a thread.
17860Sstevel@tonic-gate  * XXX: What does this mean in a one-level model?
17870Sstevel@tonic-gate  */
17880Sstevel@tonic-gate #pragma weak td_thr_dbsuspend = __td_thr_dbsuspend
17890Sstevel@tonic-gate td_err_e
17900Sstevel@tonic-gate __td_thr_dbsuspend(const td_thrhandle_t *th_p)
17910Sstevel@tonic-gate {
17920Sstevel@tonic-gate 	struct ps_prochandle *ph_p;
17930Sstevel@tonic-gate 	td_err_e return_val;
17940Sstevel@tonic-gate 
17950Sstevel@tonic-gate 	if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
17960Sstevel@tonic-gate 		return (return_val);
17970Sstevel@tonic-gate 	if (ps_lstop(ph_p, thr_to_lwpid(th_p)) != PS_OK)
17980Sstevel@tonic-gate 		return_val = TD_DBERR;
17990Sstevel@tonic-gate 	ph_unlock(th_p->th_ta_p);
18000Sstevel@tonic-gate 	return (return_val);
18010Sstevel@tonic-gate }
18020Sstevel@tonic-gate 
18030Sstevel@tonic-gate /*
18040Sstevel@tonic-gate  * Resume a suspended thread.
18050Sstevel@tonic-gate  * XXX: What does this mean in a one-level model?
18060Sstevel@tonic-gate  */
18070Sstevel@tonic-gate #pragma weak td_thr_dbresume = __td_thr_dbresume
18080Sstevel@tonic-gate td_err_e
18090Sstevel@tonic-gate __td_thr_dbresume(const td_thrhandle_t *th_p)
18100Sstevel@tonic-gate {
18110Sstevel@tonic-gate 	struct ps_prochandle *ph_p;
18120Sstevel@tonic-gate 	td_err_e return_val;
18130Sstevel@tonic-gate 
18140Sstevel@tonic-gate 	if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
18150Sstevel@tonic-gate 		return (return_val);
18160Sstevel@tonic-gate 	if (ps_lcontinue(ph_p, thr_to_lwpid(th_p)) != PS_OK)
18170Sstevel@tonic-gate 		return_val = TD_DBERR;
18180Sstevel@tonic-gate 	ph_unlock(th_p->th_ta_p);
18190Sstevel@tonic-gate 	return (return_val);
18200Sstevel@tonic-gate }
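
/*
 * Usage sketch (hedged illustration): suspending a single thread while
 * the rest of the process keeps running, then resuming it.
 *
 *	if (td_thr_dbsuspend(th) == TD_OK) {
 *		... examine the stopped thread ...
 *		(void) td_thr_dbresume(th);
 *	}
 */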
18210Sstevel@tonic-gate 
18220Sstevel@tonic-gate /*
18230Sstevel@tonic-gate  * Set a thread's signal mask.
18240Sstevel@tonic-gate  * Currently unused by dbx.
18250Sstevel@tonic-gate  */
18260Sstevel@tonic-gate #pragma weak td_thr_sigsetmask = __td_thr_sigsetmask
18270Sstevel@tonic-gate /* ARGSUSED */
18280Sstevel@tonic-gate td_err_e
18290Sstevel@tonic-gate __td_thr_sigsetmask(const td_thrhandle_t *th_p, const sigset_t ti_sigmask)
18300Sstevel@tonic-gate {
18310Sstevel@tonic-gate 	return (TD_NOCAPAB);
18320Sstevel@tonic-gate }
18330Sstevel@tonic-gate 
18340Sstevel@tonic-gate /*
18350Sstevel@tonic-gate  * Set a thread's "signals-pending" set.
18360Sstevel@tonic-gate  * Currently unused by dbx.
18370Sstevel@tonic-gate  */
18380Sstevel@tonic-gate #pragma weak td_thr_setsigpending = __td_thr_setsigpending
18390Sstevel@tonic-gate /* ARGSUSED */
18400Sstevel@tonic-gate td_err_e
18410Sstevel@tonic-gate __td_thr_setsigpending(const td_thrhandle_t *th_p,
18420Sstevel@tonic-gate 	uchar_t ti_pending_flag, const sigset_t ti_pending)
18430Sstevel@tonic-gate {
18440Sstevel@tonic-gate 	return (TD_NOCAPAB);
18450Sstevel@tonic-gate }
18460Sstevel@tonic-gate 
18470Sstevel@tonic-gate /*
18480Sstevel@tonic-gate  * Get a thread's general register set.
18490Sstevel@tonic-gate  */
18500Sstevel@tonic-gate #pragma weak td_thr_getgregs = __td_thr_getgregs
18510Sstevel@tonic-gate td_err_e
18520Sstevel@tonic-gate __td_thr_getgregs(td_thrhandle_t *th_p, prgregset_t regset)
18530Sstevel@tonic-gate {
18540Sstevel@tonic-gate 	struct ps_prochandle *ph_p;
18550Sstevel@tonic-gate 	td_err_e return_val;
18560Sstevel@tonic-gate 
18570Sstevel@tonic-gate 	if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
18580Sstevel@tonic-gate 		return (return_val);
18590Sstevel@tonic-gate 	if (ps_pstop(ph_p) != PS_OK) {
18600Sstevel@tonic-gate 		ph_unlock(th_p->th_ta_p);
18610Sstevel@tonic-gate 		return (TD_DBERR);
18620Sstevel@tonic-gate 	}
18630Sstevel@tonic-gate 
18640Sstevel@tonic-gate 	if (ps_lgetregs(ph_p, thr_to_lwpid(th_p), regset) != PS_OK)
18650Sstevel@tonic-gate 		return_val = TD_DBERR;
18660Sstevel@tonic-gate 
18670Sstevel@tonic-gate 	(void) ps_pcontinue(ph_p);
18680Sstevel@tonic-gate 	ph_unlock(th_p->th_ta_p);
18690Sstevel@tonic-gate 	return (return_val);
18700Sstevel@tonic-gate }
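
/*
 * Usage sketch (hedged illustration): reading a thread's general
 * registers; prgregset_t is an array type whose layout is
 * architecture-specific (see <sys/regset.h>).
 *
 *	prgregset_t regs;
 *
 *	if (td_thr_getgregs(th, regs) == TD_OK)
 *		... regs now holds the thread's general registers ...
 */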
18710Sstevel@tonic-gate 
18720Sstevel@tonic-gate /*
18730Sstevel@tonic-gate  * Set a thread's general register set.
18740Sstevel@tonic-gate  */
18750Sstevel@tonic-gate #pragma weak td_thr_setgregs = __td_thr_setgregs
18760Sstevel@tonic-gate td_err_e
18770Sstevel@tonic-gate __td_thr_setgregs(td_thrhandle_t *th_p, const prgregset_t regset)
18780Sstevel@tonic-gate {
18790Sstevel@tonic-gate 	struct ps_prochandle *ph_p;
18800Sstevel@tonic-gate 	td_err_e return_val;
18810Sstevel@tonic-gate 
18820Sstevel@tonic-gate 	if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
18830Sstevel@tonic-gate 		return (return_val);
18840Sstevel@tonic-gate 	if (ps_pstop(ph_p) != PS_OK) {
18850Sstevel@tonic-gate 		ph_unlock(th_p->th_ta_p);
18860Sstevel@tonic-gate 		return (TD_DBERR);
18870Sstevel@tonic-gate 	}
18880Sstevel@tonic-gate 
18890Sstevel@tonic-gate 	if (ps_lsetregs(ph_p, thr_to_lwpid(th_p), regset) != PS_OK)
18900Sstevel@tonic-gate 		return_val = TD_DBERR;
18910Sstevel@tonic-gate 
18920Sstevel@tonic-gate 	(void) ps_pcontinue(ph_p);
18930Sstevel@tonic-gate 	ph_unlock(th_p->th_ta_p);
18940Sstevel@tonic-gate 	return (return_val);
18950Sstevel@tonic-gate }
18960Sstevel@tonic-gate 
18970Sstevel@tonic-gate /*
18980Sstevel@tonic-gate  * Get a thread's floating-point register set.
18990Sstevel@tonic-gate  */
19000Sstevel@tonic-gate #pragma weak td_thr_getfpregs = __td_thr_getfpregs
19010Sstevel@tonic-gate td_err_e
19020Sstevel@tonic-gate __td_thr_getfpregs(td_thrhandle_t *th_p, prfpregset_t *fpregset)
19030Sstevel@tonic-gate {
19040Sstevel@tonic-gate 	struct ps_prochandle *ph_p;
19050Sstevel@tonic-gate 	td_err_e return_val;
19060Sstevel@tonic-gate 
19070Sstevel@tonic-gate 	if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
19080Sstevel@tonic-gate 		return (return_val);
19090Sstevel@tonic-gate 	if (ps_pstop(ph_p) != PS_OK) {
19100Sstevel@tonic-gate 		ph_unlock(th_p->th_ta_p);
19110Sstevel@tonic-gate 		return (TD_DBERR);
19120Sstevel@tonic-gate 	}
19130Sstevel@tonic-gate 
19140Sstevel@tonic-gate 	if (ps_lgetfpregs(ph_p, thr_to_lwpid(th_p), fpregset) != PS_OK)
19150Sstevel@tonic-gate 		return_val = TD_DBERR;
19160Sstevel@tonic-gate 
19170Sstevel@tonic-gate 	(void) ps_pcontinue(ph_p);
19180Sstevel@tonic-gate 	ph_unlock(th_p->th_ta_p);
19190Sstevel@tonic-gate 	return (return_val);
19200Sstevel@tonic-gate }
19210Sstevel@tonic-gate 
19220Sstevel@tonic-gate /*
19230Sstevel@tonic-gate  * Set a thread's floating-point register set.
19240Sstevel@tonic-gate  */
19250Sstevel@tonic-gate #pragma weak td_thr_setfpregs = __td_thr_setfpregs
19260Sstevel@tonic-gate td_err_e
19270Sstevel@tonic-gate __td_thr_setfpregs(td_thrhandle_t *th_p, const prfpregset_t *fpregset)
19280Sstevel@tonic-gate {
19290Sstevel@tonic-gate 	struct ps_prochandle *ph_p;
19300Sstevel@tonic-gate 	td_err_e return_val;
19310Sstevel@tonic-gate 
19320Sstevel@tonic-gate 	if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
19330Sstevel@tonic-gate 		return (return_val);
19340Sstevel@tonic-gate 	if (ps_pstop(ph_p) != PS_OK) {
19350Sstevel@tonic-gate 		ph_unlock(th_p->th_ta_p);
19360Sstevel@tonic-gate 		return (TD_DBERR);
19370Sstevel@tonic-gate 	}
19380Sstevel@tonic-gate 
19390Sstevel@tonic-gate 	if (ps_lsetfpregs(ph_p, thr_to_lwpid(th_p), fpregset) != PS_OK)
19400Sstevel@tonic-gate 		return_val = TD_DBERR;
19410Sstevel@tonic-gate 
19420Sstevel@tonic-gate 	(void) ps_pcontinue(ph_p);
19430Sstevel@tonic-gate 	ph_unlock(th_p->th_ta_p);
19440Sstevel@tonic-gate 	return (return_val);
19450Sstevel@tonic-gate }
19460Sstevel@tonic-gate 
19470Sstevel@tonic-gate /*
19480Sstevel@tonic-gate  * Get the size of the extra state register set for this architecture.
19490Sstevel@tonic-gate  * Currently unused by dbx.
19500Sstevel@tonic-gate  */
19510Sstevel@tonic-gate #pragma weak td_thr_getxregsize = __td_thr_getxregsize
19520Sstevel@tonic-gate /* ARGSUSED */
19530Sstevel@tonic-gate td_err_e
19540Sstevel@tonic-gate __td_thr_getxregsize(td_thrhandle_t *th_p, int *xregsize)
19550Sstevel@tonic-gate {
19560Sstevel@tonic-gate #if defined(__sparc)
19570Sstevel@tonic-gate 	struct ps_prochandle *ph_p;
19580Sstevel@tonic-gate 	td_err_e return_val;
19590Sstevel@tonic-gate 
19600Sstevel@tonic-gate 	if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
19610Sstevel@tonic-gate 		return (return_val);
19620Sstevel@tonic-gate 	if (ps_pstop(ph_p) != PS_OK) {
19630Sstevel@tonic-gate 		ph_unlock(th_p->th_ta_p);
19640Sstevel@tonic-gate 		return (TD_DBERR);
19650Sstevel@tonic-gate 	}
19660Sstevel@tonic-gate 
19670Sstevel@tonic-gate 	if (ps_lgetxregsize(ph_p, thr_to_lwpid(th_p), xregsize) != PS_OK)
19680Sstevel@tonic-gate 		return_val = TD_DBERR;
19690Sstevel@tonic-gate 
19700Sstevel@tonic-gate 	(void) ps_pcontinue(ph_p);
19710Sstevel@tonic-gate 	ph_unlock(th_p->th_ta_p);
19720Sstevel@tonic-gate 	return (return_val);
19730Sstevel@tonic-gate #else	/* __sparc */
19740Sstevel@tonic-gate 	return (TD_NOXREGS);
19750Sstevel@tonic-gate #endif	/* __sparc */
19760Sstevel@tonic-gate }
19770Sstevel@tonic-gate 
19780Sstevel@tonic-gate /*
19790Sstevel@tonic-gate  * Get a thread's extra state register set.
19800Sstevel@tonic-gate  */
19810Sstevel@tonic-gate #pragma weak td_thr_getxregs = __td_thr_getxregs
19820Sstevel@tonic-gate /* ARGSUSED */
19830Sstevel@tonic-gate td_err_e
19840Sstevel@tonic-gate __td_thr_getxregs(td_thrhandle_t *th_p, void *xregset)
19850Sstevel@tonic-gate {
19860Sstevel@tonic-gate #if defined(__sparc)
19870Sstevel@tonic-gate 	struct ps_prochandle *ph_p;
19880Sstevel@tonic-gate 	td_err_e return_val;
19890Sstevel@tonic-gate 
19900Sstevel@tonic-gate 	if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
19910Sstevel@tonic-gate 		return (return_val);
19920Sstevel@tonic-gate 	if (ps_pstop(ph_p) != PS_OK) {
19930Sstevel@tonic-gate 		ph_unlock(th_p->th_ta_p);
19940Sstevel@tonic-gate 		return (TD_DBERR);
19950Sstevel@tonic-gate 	}
19960Sstevel@tonic-gate 
19970Sstevel@tonic-gate 	if (ps_lgetxregs(ph_p, thr_to_lwpid(th_p), (caddr_t)xregset) != PS_OK)
19980Sstevel@tonic-gate 		return_val = TD_DBERR;
19990Sstevel@tonic-gate 
20000Sstevel@tonic-gate 	(void) ps_pcontinue(ph_p);
20010Sstevel@tonic-gate 	ph_unlock(th_p->th_ta_p);
20020Sstevel@tonic-gate 	return (return_val);
20030Sstevel@tonic-gate #else	/* __sparc */
20040Sstevel@tonic-gate 	return (TD_NOXREGS);
20050Sstevel@tonic-gate #endif	/* __sparc */
20060Sstevel@tonic-gate }
20070Sstevel@tonic-gate 
20080Sstevel@tonic-gate /*
20090Sstevel@tonic-gate  * Set a thread's extra state register set.
20100Sstevel@tonic-gate  */
20110Sstevel@tonic-gate #pragma weak td_thr_setxregs = __td_thr_setxregs
20120Sstevel@tonic-gate /* ARGSUSED */
20130Sstevel@tonic-gate td_err_e
20140Sstevel@tonic-gate __td_thr_setxregs(td_thrhandle_t *th_p, const void *xregset)
20150Sstevel@tonic-gate {
20160Sstevel@tonic-gate #if defined(__sparc)
20170Sstevel@tonic-gate 	struct ps_prochandle *ph_p;
20180Sstevel@tonic-gate 	td_err_e return_val;
20190Sstevel@tonic-gate 
20200Sstevel@tonic-gate 	if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
20210Sstevel@tonic-gate 		return (return_val);
20220Sstevel@tonic-gate 	if (ps_pstop(ph_p) != PS_OK) {
20230Sstevel@tonic-gate 		ph_unlock(th_p->th_ta_p);
20240Sstevel@tonic-gate 		return (TD_DBERR);
20250Sstevel@tonic-gate 	}
20260Sstevel@tonic-gate 
20270Sstevel@tonic-gate 	if (ps_lsetxregs(ph_p, thr_to_lwpid(th_p), (caddr_t)xregset) != PS_OK)
20280Sstevel@tonic-gate 		return_val = TD_DBERR;
20290Sstevel@tonic-gate 
20300Sstevel@tonic-gate 	(void) ps_pcontinue(ph_p);
20310Sstevel@tonic-gate 	ph_unlock(th_p->th_ta_p);
20320Sstevel@tonic-gate 	return (return_val);
20330Sstevel@tonic-gate #else	/* __sparc */
20340Sstevel@tonic-gate 	return (TD_NOXREGS);
20350Sstevel@tonic-gate #endif	/* __sparc */
20360Sstevel@tonic-gate }
20370Sstevel@tonic-gate 
20380Sstevel@tonic-gate struct searcher {
20390Sstevel@tonic-gate 	psaddr_t	addr;
20400Sstevel@tonic-gate 	int		status;
20410Sstevel@tonic-gate };
20420Sstevel@tonic-gate 
20430Sstevel@tonic-gate /*
20440Sstevel@tonic-gate  * Check the struct thread address in *th_p against the first
20450Sstevel@tonic-gate  * value in "data".  If the value in data is found, set the second value
20460Sstevel@tonic-gate  * in "data" to 1 and return 1 to terminate iterations.
20470Sstevel@tonic-gate  * This function is used by td_thr_validate() to verify that
20480Sstevel@tonic-gate  * a thread handle is valid.
20490Sstevel@tonic-gate  */
20500Sstevel@tonic-gate static int
20510Sstevel@tonic-gate td_searcher(const td_thrhandle_t *th_p, void *data)
20520Sstevel@tonic-gate {
20530Sstevel@tonic-gate 	struct searcher *searcher_data = (struct searcher *)data;
20540Sstevel@tonic-gate 
20550Sstevel@tonic-gate 	if (searcher_data->addr == th_p->th_unique) {
20560Sstevel@tonic-gate 		searcher_data->status = 1;
20570Sstevel@tonic-gate 		return (1);
20580Sstevel@tonic-gate 	}
20590Sstevel@tonic-gate 	return (0);
20600Sstevel@tonic-gate }
20610Sstevel@tonic-gate 
20620Sstevel@tonic-gate /*
20630Sstevel@tonic-gate  * Validate the thread handle.  Check that
20640Sstevel@tonic-gate  * a thread exists in the thread agent/process that
20650Sstevel@tonic-gate  * corresponds to thread with handle *th_p.
20660Sstevel@tonic-gate  * corresponds to the thread with handle *th_p.
20670Sstevel@tonic-gate  */
20680Sstevel@tonic-gate #pragma weak td_thr_validate = __td_thr_validate
20690Sstevel@tonic-gate td_err_e
20700Sstevel@tonic-gate __td_thr_validate(const td_thrhandle_t *th_p)
20710Sstevel@tonic-gate {
20720Sstevel@tonic-gate 	td_err_e return_val;
20730Sstevel@tonic-gate 	struct searcher searcher_data = {0, 0};
20740Sstevel@tonic-gate 
20750Sstevel@tonic-gate 	if (th_p == NULL)
20760Sstevel@tonic-gate 		return (TD_BADTH);
20770Sstevel@tonic-gate 	if (th_p->th_unique == NULL || th_p->th_ta_p == NULL)
20780Sstevel@tonic-gate 		return (TD_BADTH);
20790Sstevel@tonic-gate 
20800Sstevel@tonic-gate 	/*
20810Sstevel@tonic-gate 	 * LOCKING EXCEPTION - Locking is not required
20820Sstevel@tonic-gate 	 * here because no use of the thread agent is made (other
20830Sstevel@tonic-gate 	 * than the sanity check) and checking of the thread
20840Sstevel@tonic-gate 	 * agent will be done in __td_ta_thr_iter.
20850Sstevel@tonic-gate 	 */
20860Sstevel@tonic-gate 
20870Sstevel@tonic-gate 	searcher_data.addr = th_p->th_unique;
20880Sstevel@tonic-gate 	return_val = __td_ta_thr_iter(th_p->th_ta_p,
20890Sstevel@tonic-gate 		td_searcher, &searcher_data,
20900Sstevel@tonic-gate 		TD_THR_ANY_STATE, TD_THR_LOWEST_PRIORITY,
20910Sstevel@tonic-gate 		TD_SIGNO_MASK, TD_THR_ANY_USER_FLAGS);
20920Sstevel@tonic-gate 
20930Sstevel@tonic-gate 	if (return_val == TD_OK && searcher_data.status == 0)
20940Sstevel@tonic-gate 		return_val = TD_NOTHR;
20950Sstevel@tonic-gate 
20960Sstevel@tonic-gate 	return (return_val);
20970Sstevel@tonic-gate }
20980Sstevel@tonic-gate 
20990Sstevel@tonic-gate /*
21000Sstevel@tonic-gate  * Get a thread's private binding to a given thread specific
21010Sstevel@tonic-gate  * data (TSD) key (see thr_getspecific(3T)).  If the thread doesn't
21020Sstevel@tonic-gate  * have a binding for a particular key, then NULL is returned.
21030Sstevel@tonic-gate  */
21040Sstevel@tonic-gate #pragma weak td_thr_tsd = __td_thr_tsd
21050Sstevel@tonic-gate td_err_e
21060Sstevel@tonic-gate __td_thr_tsd(td_thrhandle_t *th_p, thread_key_t key, void **data_pp)
21070Sstevel@tonic-gate {
21080Sstevel@tonic-gate 	struct ps_prochandle *ph_p;
21090Sstevel@tonic-gate 	td_thragent_t	*ta_p;
21100Sstevel@tonic-gate 	td_err_e	return_val;
21110Sstevel@tonic-gate 	int		maxkey;
21120Sstevel@tonic-gate 	int		nkey;
21130Sstevel@tonic-gate 	psaddr_t	tsd_paddr;
21140Sstevel@tonic-gate 
21150Sstevel@tonic-gate 	if (data_pp == NULL)
21160Sstevel@tonic-gate 		return (TD_ERR);
21170Sstevel@tonic-gate 	*data_pp = NULL;
21180Sstevel@tonic-gate 	if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
21190Sstevel@tonic-gate 		return (return_val);
21200Sstevel@tonic-gate 	ta_p = th_p->th_ta_p;
21210Sstevel@tonic-gate 	if (ps_pstop(ph_p) != PS_OK) {
21220Sstevel@tonic-gate 		ph_unlock(ta_p);
21230Sstevel@tonic-gate 		return (TD_DBERR);
21240Sstevel@tonic-gate 	}
21250Sstevel@tonic-gate 
21260Sstevel@tonic-gate 	if (ta_p->model == PR_MODEL_NATIVE) {
21270Sstevel@tonic-gate 		ulwp_t *ulwp = (ulwp_t *)th_p->th_unique;
21280Sstevel@tonic-gate 		tsd_metadata_t tsdm;
21290Sstevel@tonic-gate 		tsd_t stsd;
21300Sstevel@tonic-gate 
21310Sstevel@tonic-gate 		if (ps_pdread(ph_p,
21320Sstevel@tonic-gate 		    ta_p->uberdata_addr + offsetof(uberdata_t, tsd_metadata),
21330Sstevel@tonic-gate 		    &tsdm, sizeof (tsdm)) != PS_OK)
21340Sstevel@tonic-gate 			return_val = TD_DBERR;
21350Sstevel@tonic-gate 		else if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_stsd,
21360Sstevel@tonic-gate 		    &tsd_paddr, sizeof (tsd_paddr)) != PS_OK)
21370Sstevel@tonic-gate 			return_val = TD_DBERR;
21380Sstevel@tonic-gate 		else if (tsd_paddr != NULL &&
21390Sstevel@tonic-gate 		    ps_pdread(ph_p, tsd_paddr, &stsd, sizeof (stsd)) != PS_OK)
21400Sstevel@tonic-gate 			return_val = TD_DBERR;
21410Sstevel@tonic-gate 		else {
21420Sstevel@tonic-gate 			maxkey = tsdm.tsdm_nused;
21430Sstevel@tonic-gate 			nkey = tsd_paddr == NULL ? TSD_NFAST : stsd.tsd_nalloc;
21440Sstevel@tonic-gate 
21450Sstevel@tonic-gate 			if (key < TSD_NFAST)
21460Sstevel@tonic-gate 				tsd_paddr = (psaddr_t)&ulwp->ul_ftsd[0];
21470Sstevel@tonic-gate 		}
21480Sstevel@tonic-gate 	} else {
21490Sstevel@tonic-gate #if defined(_LP64) && defined(_SYSCALL32)
21500Sstevel@tonic-gate 		ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique;
21510Sstevel@tonic-gate 		tsd_metadata32_t tsdm;
21520Sstevel@tonic-gate 		tsd32_t stsd;
21530Sstevel@tonic-gate 		caddr32_t addr;
21540Sstevel@tonic-gate 
21550Sstevel@tonic-gate 		if (ps_pdread(ph_p,
21560Sstevel@tonic-gate 		    ta_p->uberdata_addr + offsetof(uberdata32_t, tsd_metadata),
21570Sstevel@tonic-gate 		    &tsdm, sizeof (tsdm)) != PS_OK)
21580Sstevel@tonic-gate 			return_val = TD_DBERR;
21590Sstevel@tonic-gate 		else if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_stsd,
21600Sstevel@tonic-gate 		    &addr, sizeof (addr)) != PS_OK)
21610Sstevel@tonic-gate 			return_val = TD_DBERR;
21620Sstevel@tonic-gate 		else if (addr != NULL &&
21630Sstevel@tonic-gate 		    ps_pdread(ph_p, addr, &stsd, sizeof (stsd)) != PS_OK)
21640Sstevel@tonic-gate 			return_val = TD_DBERR;
21650Sstevel@tonic-gate 		else {
21660Sstevel@tonic-gate 			maxkey = tsdm.tsdm_nused;
21670Sstevel@tonic-gate 			nkey = addr == NULL ? TSD_NFAST : stsd.tsd_nalloc;
21680Sstevel@tonic-gate 
21690Sstevel@tonic-gate 			if (key < TSD_NFAST) {
21700Sstevel@tonic-gate 				tsd_paddr = (psaddr_t)&ulwp->ul_ftsd[0];
21710Sstevel@tonic-gate 			} else {
21720Sstevel@tonic-gate 				tsd_paddr = addr;
21730Sstevel@tonic-gate 			}
21740Sstevel@tonic-gate 		}
21750Sstevel@tonic-gate #else
21760Sstevel@tonic-gate 		return_val = TD_ERR;
21770Sstevel@tonic-gate #endif	/* _SYSCALL32 */
21780Sstevel@tonic-gate 	}
21790Sstevel@tonic-gate 
21800Sstevel@tonic-gate 	if (return_val == TD_OK && (key < 1 || key >= maxkey))
21810Sstevel@tonic-gate 		return_val = TD_NOTSD;
21820Sstevel@tonic-gate 	if (return_val != TD_OK || key >= nkey) {
21830Sstevel@tonic-gate 		/* NULL has already been stored in data_pp */
21840Sstevel@tonic-gate 		(void) ps_pcontinue(ph_p);
21850Sstevel@tonic-gate 		ph_unlock(ta_p);
21860Sstevel@tonic-gate 		return (return_val);
21870Sstevel@tonic-gate 	}
21880Sstevel@tonic-gate 
21890Sstevel@tonic-gate 	/*
21900Sstevel@tonic-gate 	 * Read the value from the thread's tsd array.
21910Sstevel@tonic-gate 	 */
21920Sstevel@tonic-gate 	if (ta_p->model == PR_MODEL_NATIVE) {
21930Sstevel@tonic-gate 		void *value;
21940Sstevel@tonic-gate 
21950Sstevel@tonic-gate 		if (ps_pdread(ph_p, tsd_paddr + key * sizeof (void *),
21960Sstevel@tonic-gate 		    &value, sizeof (value)) != PS_OK)
21970Sstevel@tonic-gate 			return_val = TD_DBERR;
21980Sstevel@tonic-gate 		else
21990Sstevel@tonic-gate 			*data_pp = value;
22000Sstevel@tonic-gate #if defined(_LP64) && defined(_SYSCALL32)
22010Sstevel@tonic-gate 	} else {
22020Sstevel@tonic-gate 		caddr32_t value32;
22030Sstevel@tonic-gate 
22040Sstevel@tonic-gate 		if (ps_pdread(ph_p, tsd_paddr + key * sizeof (caddr32_t),
22050Sstevel@tonic-gate 		    &value32, sizeof (value32)) != PS_OK)
22060Sstevel@tonic-gate 			return_val = TD_DBERR;
22070Sstevel@tonic-gate 		else
22080Sstevel@tonic-gate 			*data_pp = (void *)(uintptr_t)value32;
22090Sstevel@tonic-gate #endif	/* _SYSCALL32 */
22100Sstevel@tonic-gate 	}
22110Sstevel@tonic-gate 
22120Sstevel@tonic-gate 	(void) ps_pcontinue(ph_p);
22130Sstevel@tonic-gate 	ph_unlock(ta_p);
22140Sstevel@tonic-gate 	return (return_val);
22150Sstevel@tonic-gate }
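
/*
 * Usage sketch (hedged illustration): fetching a thread's binding for a
 * TSD key the debugger learned about by other means (for example, by
 * watching the victim's thr_keycreate() calls); 'key' is assumed valid.
 *
 *	void *value;
 *
 *	if (td_thr_tsd(th, key, &value) == TD_OK && value != NULL)
 *		... value is the thread's binding for key, meaningful
 *		... only in the victim process's address space
 */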
22160Sstevel@tonic-gate 
22170Sstevel@tonic-gate /*
22180Sstevel@tonic-gate  * Get the base address of a thread's thread local storage (TLS) block
22190Sstevel@tonic-gate  * for the module (executable or shared object) identified by 'moduleid'.
22200Sstevel@tonic-gate  */
22210Sstevel@tonic-gate #pragma weak td_thr_tlsbase = __td_thr_tlsbase
22220Sstevel@tonic-gate td_err_e
22230Sstevel@tonic-gate __td_thr_tlsbase(td_thrhandle_t *th_p, ulong_t moduleid, psaddr_t *base)
22240Sstevel@tonic-gate {
22250Sstevel@tonic-gate 	struct ps_prochandle *ph_p;
22260Sstevel@tonic-gate 	td_thragent_t	*ta_p;
22270Sstevel@tonic-gate 	td_err_e	return_val;
22280Sstevel@tonic-gate 
22290Sstevel@tonic-gate 	if (base == NULL)
22300Sstevel@tonic-gate 		return (TD_ERR);
22310Sstevel@tonic-gate 	*base = NULL;
22320Sstevel@tonic-gate 	if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
22330Sstevel@tonic-gate 		return (return_val);
22340Sstevel@tonic-gate 	ta_p = th_p->th_ta_p;
22350Sstevel@tonic-gate 	if (ps_pstop(ph_p) != PS_OK) {
22360Sstevel@tonic-gate 		ph_unlock(ta_p);
22370Sstevel@tonic-gate 		return (TD_DBERR);
22380Sstevel@tonic-gate 	}
22390Sstevel@tonic-gate 
22400Sstevel@tonic-gate 	if (ta_p->model == PR_MODEL_NATIVE) {
22410Sstevel@tonic-gate 		ulwp_t *ulwp = (ulwp_t *)th_p->th_unique;
22420Sstevel@tonic-gate 		tls_metadata_t tls_metadata;
22430Sstevel@tonic-gate 		TLS_modinfo tlsmod;
22440Sstevel@tonic-gate 		tls_t tls;
22450Sstevel@tonic-gate 
22460Sstevel@tonic-gate 		if (ps_pdread(ph_p,
22470Sstevel@tonic-gate 		    ta_p->uberdata_addr + offsetof(uberdata_t, tls_metadata),
22480Sstevel@tonic-gate 		    &tls_metadata, sizeof (tls_metadata)) != PS_OK)
22490Sstevel@tonic-gate 			return_val = TD_DBERR;
22500Sstevel@tonic-gate 		else if (moduleid >= tls_metadata.tls_modinfo.tls_size)
22510Sstevel@tonic-gate 			return_val = TD_NOTLS;
22520Sstevel@tonic-gate 		else if (ps_pdread(ph_p,
22530Sstevel@tonic-gate 		    (psaddr_t)((TLS_modinfo *)
22540Sstevel@tonic-gate 		    tls_metadata.tls_modinfo.tls_data + moduleid),
22550Sstevel@tonic-gate 		    &tlsmod, sizeof (tlsmod)) != PS_OK)
22560Sstevel@tonic-gate 			return_val = TD_DBERR;
22570Sstevel@tonic-gate 		else if (tlsmod.tm_memsz == 0)
22580Sstevel@tonic-gate 			return_val = TD_NOTLS;
22590Sstevel@tonic-gate 		else if (tlsmod.tm_flags & TM_FLG_STATICTLS)
22600Sstevel@tonic-gate 			*base = (psaddr_t)ulwp - tlsmod.tm_stattlsoffset;
22610Sstevel@tonic-gate 		else if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_tls,
22620Sstevel@tonic-gate 		    &tls, sizeof (tls)) != PS_OK)
22630Sstevel@tonic-gate 			return_val = TD_DBERR;
22640Sstevel@tonic-gate 		else if (moduleid >= tls.tls_size)
22650Sstevel@tonic-gate 			return_val = TD_TLSDEFER;
22660Sstevel@tonic-gate 		else if (ps_pdread(ph_p,
22670Sstevel@tonic-gate 		    (psaddr_t)((tls_t *)tls.tls_data + moduleid),
22680Sstevel@tonic-gate 		    &tls, sizeof (tls)) != PS_OK)
22690Sstevel@tonic-gate 			return_val = TD_DBERR;
22700Sstevel@tonic-gate 		else if (tls.tls_size == 0)
22710Sstevel@tonic-gate 			return_val = TD_TLSDEFER;
22720Sstevel@tonic-gate 		else
22730Sstevel@tonic-gate 			*base = (psaddr_t)tls.tls_data;
22740Sstevel@tonic-gate 	} else {
22750Sstevel@tonic-gate #if defined(_LP64) && defined(_SYSCALL32)
22760Sstevel@tonic-gate 		ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique;
22770Sstevel@tonic-gate 		tls_metadata32_t tls_metadata;
22780Sstevel@tonic-gate 		TLS_modinfo32 tlsmod;
22790Sstevel@tonic-gate 		tls32_t tls;
22800Sstevel@tonic-gate 
22810Sstevel@tonic-gate 		if (ps_pdread(ph_p,
22820Sstevel@tonic-gate 		    ta_p->uberdata_addr + offsetof(uberdata32_t, tls_metadata),
22830Sstevel@tonic-gate 		    &tls_metadata, sizeof (tls_metadata)) != PS_OK)
22840Sstevel@tonic-gate 			return_val = TD_DBERR;
22850Sstevel@tonic-gate 		else if (moduleid >= tls_metadata.tls_modinfo.tls_size)
22860Sstevel@tonic-gate 			return_val = TD_NOTLS;
22870Sstevel@tonic-gate 		else if (ps_pdread(ph_p,
22880Sstevel@tonic-gate 		    (psaddr_t)((TLS_modinfo32 *)
22890Sstevel@tonic-gate 		    (uintptr_t)tls_metadata.tls_modinfo.tls_data + moduleid),
22900Sstevel@tonic-gate 		    &tlsmod, sizeof (tlsmod)) != PS_OK)
22910Sstevel@tonic-gate 			return_val = TD_DBERR;
22920Sstevel@tonic-gate 		else if (tlsmod.tm_memsz == 0)
22930Sstevel@tonic-gate 			return_val = TD_NOTLS;
22940Sstevel@tonic-gate 		else if (tlsmod.tm_flags & TM_FLG_STATICTLS)
22950Sstevel@tonic-gate 			*base = (psaddr_t)ulwp - tlsmod.tm_stattlsoffset;
22960Sstevel@tonic-gate 		else if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_tls,
22970Sstevel@tonic-gate 		    &tls, sizeof (tls)) != PS_OK)
22980Sstevel@tonic-gate 			return_val = TD_DBERR;
22990Sstevel@tonic-gate 		else if (moduleid >= tls.tls_size)
23000Sstevel@tonic-gate 			return_val = TD_TLSDEFER;
23010Sstevel@tonic-gate 		else if (ps_pdread(ph_p,
23020Sstevel@tonic-gate 		    (psaddr_t)((tls32_t *)(uintptr_t)tls.tls_data + moduleid),
23030Sstevel@tonic-gate 		    &tls, sizeof (tls)) != PS_OK)
23040Sstevel@tonic-gate 			return_val = TD_DBERR;
23050Sstevel@tonic-gate 		else if (tls.tls_size == 0)
23060Sstevel@tonic-gate 			return_val = TD_TLSDEFER;
23070Sstevel@tonic-gate 		else
23080Sstevel@tonic-gate 			*base = (psaddr_t)tls.tls_data;
23090Sstevel@tonic-gate #else
23100Sstevel@tonic-gate 		return_val = TD_ERR;
23110Sstevel@tonic-gate #endif	/* _SYSCALL32 */
23120Sstevel@tonic-gate 	}
23130Sstevel@tonic-gate 
23140Sstevel@tonic-gate 	(void) ps_pcontinue(ph_p);
23150Sstevel@tonic-gate 	ph_unlock(ta_p);
23160Sstevel@tonic-gate 	return (return_val);
23170Sstevel@tonic-gate }
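
/*
 * Editorial sketch (not part of the original interface): a debugger
 * client normally turns the base address returned above into the
 * address of a particular thread-local variable by adding the
 * variable's offset within its module's TLS block.  How the client
 * learns 'moduleid' and 'offset' (typically from the run-time linker
 * and the object's symbol table) is an assumption of this example.
 */
static td_err_e
example_tls_var_addr(td_thrhandle_t *th_p, ulong_t moduleid,
	ulong_t offset, psaddr_t *addrp)
{
	psaddr_t base;
	td_err_e err;

	/* TD_TLSDEFER means the TLS block has not been allocated yet */
	if ((err = td_thr_tlsbase(th_p, moduleid, &base)) != TD_OK)
		return (err);
	*addrp = base + offset;
	return (TD_OK);
}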
23180Sstevel@tonic-gate 
23190Sstevel@tonic-gate /*
23200Sstevel@tonic-gate  * Change a thread's priority to the value specified by ti_pri.
23210Sstevel@tonic-gate  * Currently unused by dbx.
23220Sstevel@tonic-gate  */
23230Sstevel@tonic-gate #pragma weak td_thr_setprio = __td_thr_setprio
23240Sstevel@tonic-gate td_err_e
23250Sstevel@tonic-gate __td_thr_setprio(td_thrhandle_t *th_p, int ti_pri)
23260Sstevel@tonic-gate {
23270Sstevel@tonic-gate 	struct ps_prochandle *ph_p;
23280Sstevel@tonic-gate 	pri_t		priority = ti_pri;
23290Sstevel@tonic-gate 	td_err_e	return_val = TD_OK;
23300Sstevel@tonic-gate 
23310Sstevel@tonic-gate 	if (ti_pri < THREAD_MIN_PRIORITY || ti_pri > THREAD_MAX_PRIORITY)
23320Sstevel@tonic-gate 		return (TD_ERR);
23330Sstevel@tonic-gate 	if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
23340Sstevel@tonic-gate 		return (return_val);
23350Sstevel@tonic-gate 
23360Sstevel@tonic-gate 	if (th_p->th_ta_p->model == PR_MODEL_NATIVE) {
23370Sstevel@tonic-gate 		ulwp_t *ulwp = (ulwp_t *)th_p->th_unique;
23380Sstevel@tonic-gate 
23390Sstevel@tonic-gate 		if (ps_pdwrite(ph_p, (psaddr_t)&ulwp->ul_pri,
23400Sstevel@tonic-gate 		    &priority, sizeof (priority)) != PS_OK)
23410Sstevel@tonic-gate 			return_val = TD_DBERR;
23420Sstevel@tonic-gate 	} else {
23430Sstevel@tonic-gate #if defined(_LP64) && defined(_SYSCALL32)
23440Sstevel@tonic-gate 		ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique;
23450Sstevel@tonic-gate 
23460Sstevel@tonic-gate 		if (ps_pdwrite(ph_p, (psaddr_t)&ulwp->ul_pri,
23470Sstevel@tonic-gate 		    &priority, sizeof (priority)) != PS_OK)
23480Sstevel@tonic-gate 			return_val = TD_DBERR;
23490Sstevel@tonic-gate #else
23500Sstevel@tonic-gate 		return_val = TD_ERR;
23510Sstevel@tonic-gate #endif	/* _SYSCALL32 */
23520Sstevel@tonic-gate 	}
23530Sstevel@tonic-gate 
23540Sstevel@tonic-gate 	ph_unlock(th_p->th_ta_p);
23550Sstevel@tonic-gate 	return (return_val);
23560Sstevel@tonic-gate }
23570Sstevel@tonic-gate 
23580Sstevel@tonic-gate /*
23590Sstevel@tonic-gate  * This structure links td_thr_lockowner and the lowner_cb callback function.
23600Sstevel@tonic-gate  */
23610Sstevel@tonic-gate typedef struct {
23620Sstevel@tonic-gate 	td_sync_iter_f	*owner_cb;
23630Sstevel@tonic-gate 	void		*owner_cb_arg;
23640Sstevel@tonic-gate 	td_thrhandle_t	*th_p;
23650Sstevel@tonic-gate } lowner_cb_ctl_t;
23660Sstevel@tonic-gate 
23670Sstevel@tonic-gate static int
23680Sstevel@tonic-gate lowner_cb(const td_synchandle_t *sh_p, void *arg)
23690Sstevel@tonic-gate {
23700Sstevel@tonic-gate 	lowner_cb_ctl_t *ocb = arg;
23710Sstevel@tonic-gate 	int trunc = 0;
23720Sstevel@tonic-gate 	union {
23730Sstevel@tonic-gate 		rwlock_t rwl;
23740Sstevel@tonic-gate 		mutex_t mx;
23750Sstevel@tonic-gate 	} rw_m;
23760Sstevel@tonic-gate 
23770Sstevel@tonic-gate 	if (ps_pdread(sh_p->sh_ta_p->ph_p, sh_p->sh_unique,
23780Sstevel@tonic-gate 	    &rw_m, sizeof (rw_m)) != PS_OK) {
23790Sstevel@tonic-gate 		trunc = 1;
23800Sstevel@tonic-gate 		if (ps_pdread(sh_p->sh_ta_p->ph_p, sh_p->sh_unique,
23810Sstevel@tonic-gate 		    &rw_m.mx, sizeof (rw_m.mx)) != PS_OK)
23820Sstevel@tonic-gate 			return (0);
23830Sstevel@tonic-gate 	}
23840Sstevel@tonic-gate 	if (rw_m.mx.mutex_magic == MUTEX_MAGIC &&
23850Sstevel@tonic-gate 	    rw_m.mx.mutex_owner == ocb->th_p->th_unique)
23860Sstevel@tonic-gate 		return ((ocb->owner_cb)(sh_p, ocb->owner_cb_arg));
23870Sstevel@tonic-gate 	if (!trunc && rw_m.rwl.magic == RWL_MAGIC) {
23880Sstevel@tonic-gate 		mutex_t *rwlock = &rw_m.rwl.mutex;
23890Sstevel@tonic-gate 		if (rwlock->mutex_owner == ocb->th_p->th_unique)
23900Sstevel@tonic-gate 			return ((ocb->owner_cb)(sh_p, ocb->owner_cb_arg));
23910Sstevel@tonic-gate 	}
23920Sstevel@tonic-gate 	return (0);
23930Sstevel@tonic-gate }
23940Sstevel@tonic-gate 
23950Sstevel@tonic-gate /*
23960Sstevel@tonic-gate  * Iterate over the set of locks owned by a specified thread.
23970Sstevel@tonic-gate  * If cb returns a non-zero value, terminate iterations.
23980Sstevel@tonic-gate  */
23990Sstevel@tonic-gate #pragma weak td_thr_lockowner = __td_thr_lockowner
24000Sstevel@tonic-gate td_err_e
24010Sstevel@tonic-gate __td_thr_lockowner(const td_thrhandle_t *th_p, td_sync_iter_f *cb,
24020Sstevel@tonic-gate 	void *cb_data)
24030Sstevel@tonic-gate {
24040Sstevel@tonic-gate 	td_thragent_t	*ta_p;
24050Sstevel@tonic-gate 	td_err_e	return_val;
24060Sstevel@tonic-gate 	lowner_cb_ctl_t	lcb;
24070Sstevel@tonic-gate 
24080Sstevel@tonic-gate 	/*
24090Sstevel@tonic-gate 	 * Just sanity checks.
24100Sstevel@tonic-gate 	 */
24110Sstevel@tonic-gate 	if (ph_lock_th((td_thrhandle_t *)th_p, &return_val) == NULL)
24120Sstevel@tonic-gate 		return (return_val);
24130Sstevel@tonic-gate 	ta_p = th_p->th_ta_p;
24140Sstevel@tonic-gate 	ph_unlock(ta_p);
24150Sstevel@tonic-gate 
24160Sstevel@tonic-gate 	lcb.owner_cb = cb;
24170Sstevel@tonic-gate 	lcb.owner_cb_arg = cb_data;
24180Sstevel@tonic-gate 	lcb.th_p = (td_thrhandle_t *)th_p;
24190Sstevel@tonic-gate 	return (__td_ta_sync_iter(ta_p, lowner_cb, &lcb));
24200Sstevel@tonic-gate }
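
/*
 * Editorial sketch: a minimal td_sync_iter_f callback for
 * td_thr_lockowner() that just counts the locks owned by the thread.
 * Returning zero keeps the iteration going; any non-zero return from
 * the callback stops it, as described above.
 */
static int
example_count_owned(const td_synchandle_t *sh_p, void *cb_data)
{
	int *countp = cb_data;

	(void) sh_p;		/* the handle itself is not needed here */
	(*countp)++;
	return (0);		/* continue the iteration */
}
/*
 * Typical use:
 *	int nlocks = 0;
 *	(void) td_thr_lockowner(th_p, example_count_owned, &nlocks);
 */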
24210Sstevel@tonic-gate 
24220Sstevel@tonic-gate /*
24230Sstevel@tonic-gate  * If a thread is asleep on a synchronization variable,
24240Sstevel@tonic-gate  * then get the synchronization handle.
24250Sstevel@tonic-gate  */
24260Sstevel@tonic-gate #pragma weak td_thr_sleepinfo = __td_thr_sleepinfo
24270Sstevel@tonic-gate td_err_e
24280Sstevel@tonic-gate __td_thr_sleepinfo(const td_thrhandle_t *th_p, td_synchandle_t *sh_p)
24290Sstevel@tonic-gate {
24300Sstevel@tonic-gate 	struct ps_prochandle *ph_p;
24310Sstevel@tonic-gate 	td_err_e	return_val = TD_OK;
24320Sstevel@tonic-gate 	uintptr_t	wchan;
24330Sstevel@tonic-gate 
24340Sstevel@tonic-gate 	if (sh_p == NULL)
24350Sstevel@tonic-gate 		return (TD_ERR);
24360Sstevel@tonic-gate 	if ((ph_p = ph_lock_th((td_thrhandle_t *)th_p, &return_val)) == NULL)
24370Sstevel@tonic-gate 		return (return_val);
24380Sstevel@tonic-gate 
24390Sstevel@tonic-gate 	/*
24400Sstevel@tonic-gate 	 * No need to stop the process for a simple read.
24410Sstevel@tonic-gate 	 */
24420Sstevel@tonic-gate 	if (th_p->th_ta_p->model == PR_MODEL_NATIVE) {
24430Sstevel@tonic-gate 		ulwp_t *ulwp = (ulwp_t *)th_p->th_unique;
24440Sstevel@tonic-gate 
24450Sstevel@tonic-gate 		if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_wchan,
24460Sstevel@tonic-gate 		    &wchan, sizeof (wchan)) != PS_OK)
24470Sstevel@tonic-gate 			return_val = TD_DBERR;
24480Sstevel@tonic-gate 	} else {
24490Sstevel@tonic-gate #if defined(_LP64) && defined(_SYSCALL32)
24500Sstevel@tonic-gate 		ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique;
24510Sstevel@tonic-gate 		caddr32_t wchan32;
24520Sstevel@tonic-gate 
24530Sstevel@tonic-gate 		if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_wchan,
24540Sstevel@tonic-gate 		    &wchan32, sizeof (wchan32)) != PS_OK)
24550Sstevel@tonic-gate 			return_val = TD_DBERR;
24560Sstevel@tonic-gate 		wchan = wchan32;
24570Sstevel@tonic-gate #else
24580Sstevel@tonic-gate 		return_val = TD_ERR;
24590Sstevel@tonic-gate #endif	/* _SYSCALL32 */
24600Sstevel@tonic-gate 	}
24610Sstevel@tonic-gate 
24620Sstevel@tonic-gate 	if (return_val != TD_OK || wchan == NULL) {
24630Sstevel@tonic-gate 		sh_p->sh_ta_p = NULL;
24640Sstevel@tonic-gate 		sh_p->sh_unique = NULL;
24650Sstevel@tonic-gate 		if (return_val == TD_OK)
24660Sstevel@tonic-gate 			return_val = TD_ERR;
24670Sstevel@tonic-gate 	} else {
24680Sstevel@tonic-gate 		sh_p->sh_ta_p = th_p->th_ta_p;
24690Sstevel@tonic-gate 		sh_p->sh_unique = (psaddr_t)wchan;
24700Sstevel@tonic-gate 	}
24710Sstevel@tonic-gate 
24720Sstevel@tonic-gate 	ph_unlock(th_p->th_ta_p);
24730Sstevel@tonic-gate 	return (return_val);
24740Sstevel@tonic-gate }
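
/*
 * Editorial note: the synchronization handle filled in above is the
 * same kind of handle accepted by td_sync_get_info() and
 * td_sync_waiters(), so a client that finds a sleeping thread can
 * immediately ask what it is blocked on, e.g. (sketch):
 *
 *	td_synchandle_t sh;
 *	td_syncinfo_t si;
 *
 *	if (td_thr_sleepinfo(th_p, &sh) == TD_OK &&
 *	    td_sync_get_info(&sh, &si) == TD_OK)
 *		examine si.si_type, si.si_owner, etc.
 */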
24750Sstevel@tonic-gate 
24760Sstevel@tonic-gate /*
24770Sstevel@tonic-gate  * Which thread is running on an lwp?
24780Sstevel@tonic-gate  */
24790Sstevel@tonic-gate #pragma weak td_ta_map_lwp2thr = __td_ta_map_lwp2thr
24800Sstevel@tonic-gate td_err_e
24810Sstevel@tonic-gate __td_ta_map_lwp2thr(td_thragent_t *ta_p, lwpid_t lwpid,
24820Sstevel@tonic-gate 	td_thrhandle_t *th_p)
24830Sstevel@tonic-gate {
24840Sstevel@tonic-gate 	return (__td_ta_map_id2thr(ta_p, lwpid, th_p));
24850Sstevel@tonic-gate }
24860Sstevel@tonic-gate 
24870Sstevel@tonic-gate /*
24880Sstevel@tonic-gate  * Common code for td_sync_get_info() and td_sync_get_stats()
24890Sstevel@tonic-gate  */
24900Sstevel@tonic-gate static td_err_e
24910Sstevel@tonic-gate sync_get_info_common(const td_synchandle_t *sh_p, struct ps_prochandle *ph_p,
24920Sstevel@tonic-gate 	td_syncinfo_t *si_p)
24930Sstevel@tonic-gate {
24940Sstevel@tonic-gate 	int trunc = 0;
24950Sstevel@tonic-gate 	td_so_un_t generic_so;
24960Sstevel@tonic-gate 
24970Sstevel@tonic-gate 	/*
24980Sstevel@tonic-gate 	 * Determine the sync. object type; a little type fudgery here.
24990Sstevel@tonic-gate 	 * First attempt to read the whole union.  If that fails, attempt
25000Sstevel@tonic-gate 	 * to read just the condvar.  A condvar is the smallest sync. object.
25010Sstevel@tonic-gate 	 */
25020Sstevel@tonic-gate 	if (ps_pdread(ph_p, sh_p->sh_unique,
25030Sstevel@tonic-gate 	    &generic_so, sizeof (generic_so)) != PS_OK) {
25040Sstevel@tonic-gate 		trunc = 1;
25050Sstevel@tonic-gate 		if (ps_pdread(ph_p, sh_p->sh_unique, &generic_so.condition,
25060Sstevel@tonic-gate 		    sizeof (generic_so.condition)) != PS_OK)
25070Sstevel@tonic-gate 			return (TD_DBERR);
25080Sstevel@tonic-gate 	}
25090Sstevel@tonic-gate 
25100Sstevel@tonic-gate 	switch (generic_so.condition.cond_magic) {
25110Sstevel@tonic-gate 	case MUTEX_MAGIC:
25120Sstevel@tonic-gate 		if (trunc && ps_pdread(ph_p, sh_p->sh_unique,
25130Sstevel@tonic-gate 		    &generic_so.lock, sizeof (generic_so.lock)) != PS_OK)
25140Sstevel@tonic-gate 			return (TD_DBERR);
25150Sstevel@tonic-gate 		si_p->si_type = TD_SYNC_MUTEX;
25160Sstevel@tonic-gate 		si_p->si_shared_type = generic_so.lock.mutex_type;
25170Sstevel@tonic-gate 		(void) memcpy(si_p->si_flags, &generic_so.lock.mutex_flag,
25180Sstevel@tonic-gate 		    sizeof (generic_so.lock.mutex_flag));
25190Sstevel@tonic-gate 		si_p->si_state.mutex_locked =
25200Sstevel@tonic-gate 		    (generic_so.lock.mutex_lockw != 0);
25210Sstevel@tonic-gate 		si_p->si_size = sizeof (generic_so.lock);
25220Sstevel@tonic-gate 		si_p->si_has_waiters = generic_so.lock.mutex_waiters;
25230Sstevel@tonic-gate 		si_p->si_rcount = generic_so.lock.mutex_rcount;
25240Sstevel@tonic-gate 		si_p->si_prioceiling = generic_so.lock.mutex_ceiling;
25250Sstevel@tonic-gate 		if (si_p->si_state.mutex_locked) {
25260Sstevel@tonic-gate 			if (si_p->si_shared_type &
25270Sstevel@tonic-gate 			    (USYNC_PROCESS | USYNC_PROCESS_ROBUST))
25280Sstevel@tonic-gate 				si_p->si_ownerpid =
25290Sstevel@tonic-gate 					generic_so.lock.mutex_ownerpid;
25300Sstevel@tonic-gate 			si_p->si_owner.th_ta_p = sh_p->sh_ta_p;
25310Sstevel@tonic-gate 			si_p->si_owner.th_unique = generic_so.lock.mutex_owner;
25320Sstevel@tonic-gate 		}
25330Sstevel@tonic-gate 		break;
25340Sstevel@tonic-gate 	case COND_MAGIC:
25350Sstevel@tonic-gate 		si_p->si_type = TD_SYNC_COND;
25360Sstevel@tonic-gate 		si_p->si_shared_type = generic_so.condition.cond_type;
25370Sstevel@tonic-gate 		(void) memcpy(si_p->si_flags, generic_so.condition.flags.flag,
25380Sstevel@tonic-gate 		    sizeof (generic_so.condition.flags.flag));
25390Sstevel@tonic-gate 		si_p->si_size = sizeof (generic_so.condition);
25400Sstevel@tonic-gate 		si_p->si_has_waiters =
25410Sstevel@tonic-gate 			(generic_so.condition.cond_waiters_user |
25420Sstevel@tonic-gate 			generic_so.condition.cond_waiters_kernel)? 1 : 0;
25430Sstevel@tonic-gate 		break;
25440Sstevel@tonic-gate 	case SEMA_MAGIC:
25450Sstevel@tonic-gate 		if (trunc && ps_pdread(ph_p, sh_p->sh_unique,
25460Sstevel@tonic-gate 		    &generic_so.semaphore, sizeof (generic_so.semaphore))
25470Sstevel@tonic-gate 		    != PS_OK)
25480Sstevel@tonic-gate 			return (TD_DBERR);
25490Sstevel@tonic-gate 		si_p->si_type = TD_SYNC_SEMA;
25500Sstevel@tonic-gate 		si_p->si_shared_type = generic_so.semaphore.type;
25510Sstevel@tonic-gate 		si_p->si_state.sem_count = generic_so.semaphore.count;
25520Sstevel@tonic-gate 		si_p->si_size = sizeof (generic_so.semaphore);
25530Sstevel@tonic-gate 		si_p->si_has_waiters =
25540Sstevel@tonic-gate 		    ((lwp_sema_t *)&generic_so.semaphore)->flags[7];
25550Sstevel@tonic-gate 		/* this is useless but the old interface provided it */
25560Sstevel@tonic-gate 		si_p->si_data = (psaddr_t)generic_so.semaphore.count;
25570Sstevel@tonic-gate 		break;
25580Sstevel@tonic-gate 	case RWL_MAGIC:
2559*4570Sraf 	    {
2560*4570Sraf 		uint32_t rwstate;
2561*4570Sraf 
25620Sstevel@tonic-gate 		if (trunc && ps_pdread(ph_p, sh_p->sh_unique,
25630Sstevel@tonic-gate 		    &generic_so.rwlock, sizeof (generic_so.rwlock)) != PS_OK)
25640Sstevel@tonic-gate 			return (TD_DBERR);
25650Sstevel@tonic-gate 		si_p->si_type = TD_SYNC_RWLOCK;
25660Sstevel@tonic-gate 		si_p->si_shared_type = generic_so.rwlock.rwlock_type;
25670Sstevel@tonic-gate 		si_p->si_size = sizeof (generic_so.rwlock);
25680Sstevel@tonic-gate 
2569*4570Sraf 		rwstate = (uint32_t)generic_so.rwlock.rwlock_readers;
2570*4570Sraf 		if (rwstate & URW_WRITE_LOCKED) {
2571*4570Sraf 			si_p->si_state.nreaders = -1;
2572*4570Sraf 			si_p->si_is_wlock = 1;
2573*4570Sraf 			si_p->si_owner.th_ta_p = sh_p->sh_ta_p;
2574*4570Sraf 			si_p->si_owner.th_unique =
2575*4570Sraf 				generic_so.rwlock.rwlock_owner;
2576*4570Sraf 			if (si_p->si_shared_type & USYNC_PROCESS)
2577*4570Sraf 				si_p->si_ownerpid =
2578*4570Sraf 					generic_so.rwlock.rwlock_ownerpid;
25790Sstevel@tonic-gate 		} else {
2580*4570Sraf 			si_p->si_state.nreaders = (rwstate & URW_READERS_MASK);
25810Sstevel@tonic-gate 		}
2582*4570Sraf 		si_p->si_has_waiters = ((rwstate & URW_HAS_WAITERS) != 0);
2583*4570Sraf 
25840Sstevel@tonic-gate 		/* this is useless but the old interface provided it */
25850Sstevel@tonic-gate 		si_p->si_data = (psaddr_t)generic_so.rwlock.readers;
25860Sstevel@tonic-gate 		break;
2587*4570Sraf 	    }
25880Sstevel@tonic-gate 	default:
25890Sstevel@tonic-gate 		return (TD_BADSH);
25900Sstevel@tonic-gate 	}
25910Sstevel@tonic-gate 
25920Sstevel@tonic-gate 	si_p->si_ta_p = sh_p->sh_ta_p;
25930Sstevel@tonic-gate 	si_p->si_sv_addr = sh_p->sh_unique;
25940Sstevel@tonic-gate 	return (TD_OK);
25950Sstevel@tonic-gate }
25960Sstevel@tonic-gate 
25970Sstevel@tonic-gate /*
25980Sstevel@tonic-gate  * Given a synchronization handle, fill in the
25990Sstevel@tonic-gate  * information for the synchronization variable into *si_p.
26000Sstevel@tonic-gate  */
26010Sstevel@tonic-gate #pragma weak td_sync_get_info = __td_sync_get_info
26020Sstevel@tonic-gate td_err_e
26030Sstevel@tonic-gate __td_sync_get_info(const td_synchandle_t *sh_p, td_syncinfo_t *si_p)
26040Sstevel@tonic-gate {
26050Sstevel@tonic-gate 	struct ps_prochandle *ph_p;
26060Sstevel@tonic-gate 	td_err_e return_val;
26070Sstevel@tonic-gate 
26080Sstevel@tonic-gate 	if (si_p == NULL)
26090Sstevel@tonic-gate 		return (TD_ERR);
26100Sstevel@tonic-gate 	(void) memset(si_p, 0, sizeof (*si_p));
26110Sstevel@tonic-gate 	if ((ph_p = ph_lock_sh(sh_p, &return_val)) == NULL)
26120Sstevel@tonic-gate 		return (return_val);
26130Sstevel@tonic-gate 	if (ps_pstop(ph_p) != PS_OK) {
26140Sstevel@tonic-gate 		ph_unlock(sh_p->sh_ta_p);
26150Sstevel@tonic-gate 		return (TD_DBERR);
26160Sstevel@tonic-gate 	}
26170Sstevel@tonic-gate 
26180Sstevel@tonic-gate 	return_val = sync_get_info_common(sh_p, ph_p, si_p);
26190Sstevel@tonic-gate 
26200Sstevel@tonic-gate 	(void) ps_pcontinue(ph_p);
26210Sstevel@tonic-gate 	ph_unlock(sh_p->sh_ta_p);
26220Sstevel@tonic-gate 	return (return_val);
26230Sstevel@tonic-gate }
26240Sstevel@tonic-gate 
26250Sstevel@tonic-gate static uint_t
26260Sstevel@tonic-gate tdb_addr_hash64(uint64_t addr)
26270Sstevel@tonic-gate {
26280Sstevel@tonic-gate 	uint64_t value60 = (addr >> 4);
26290Sstevel@tonic-gate 	uint32_t value30 = (value60 >> 30) ^ (value60 & 0x3fffffff);
26300Sstevel@tonic-gate 	return ((value30 >> 15) ^ (value30 & 0x7fff));
26310Sstevel@tonic-gate }
26320Sstevel@tonic-gate 
26330Sstevel@tonic-gate static uint_t
26340Sstevel@tonic-gate tdb_addr_hash32(uint64_t addr)
26350Sstevel@tonic-gate {
26360Sstevel@tonic-gate 	uint32_t value30 = (addr >> 2);		/* 30 bits */
26370Sstevel@tonic-gate 	return ((value30 >> 15) ^ (value30 & 0x7fff));
26380Sstevel@tonic-gate }
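
/*
 * Editorial note: both hash functions above fold a synchronization
 * object's address down to a 15-bit bucket index (0 .. 0x7fff) after
 * discarding the low alignment bits (two bits for 32-bit targets,
 * four for 64-bit targets).  For example, for a 32-bit object at
 * address 0x08046a5c:
 *
 *	value30 = 0x08046a5c >> 2 = 0x02011a97
 *	index   = (value30 >> 15) ^ (value30 & 0x7fff)
 *	        = 0x0402 ^ 0x1a97 = 0x1e95
 *
 * read_sync_stats() below uses this index to pick the hash chain to
 * search in the target's tdb_sync_addr_hash table.
 */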
26390Sstevel@tonic-gate 
26400Sstevel@tonic-gate static td_err_e
26410Sstevel@tonic-gate read_sync_stats(td_thragent_t *ta_p, psaddr_t hash_table,
26420Sstevel@tonic-gate 	psaddr_t sync_obj_addr, tdb_sync_stats_t *sync_stats)
26430Sstevel@tonic-gate {
26440Sstevel@tonic-gate 	psaddr_t next_desc;
26450Sstevel@tonic-gate 	uint64_t first;
26460Sstevel@tonic-gate 	uint_t ix;
26470Sstevel@tonic-gate 
26480Sstevel@tonic-gate 	/*
26490Sstevel@tonic-gate 	 * Compute the hash table index from the synch object's address.
26500Sstevel@tonic-gate 	 */
26510Sstevel@tonic-gate 	if (ta_p->model == PR_MODEL_LP64)
26520Sstevel@tonic-gate 		ix = tdb_addr_hash64(sync_obj_addr);
26530Sstevel@tonic-gate 	else
26540Sstevel@tonic-gate 		ix = tdb_addr_hash32(sync_obj_addr);
26550Sstevel@tonic-gate 
26560Sstevel@tonic-gate 	/*
26570Sstevel@tonic-gate 	 * Get the address of the first element in the linked list.
26580Sstevel@tonic-gate 	 */
26590Sstevel@tonic-gate 	if (ps_pdread(ta_p->ph_p, hash_table + ix * sizeof (uint64_t),
26600Sstevel@tonic-gate 	    &first, sizeof (first)) != PS_OK)
26610Sstevel@tonic-gate 		return (TD_DBERR);
26620Sstevel@tonic-gate 
26630Sstevel@tonic-gate 	/*
26640Sstevel@tonic-gate 	 * Search the linked list for an entry for the synch object..
26650Sstevel@tonic-gate 	 * Search the linked list for an entry for the synch object.
26660Sstevel@tonic-gate 	for (next_desc = (psaddr_t)first; next_desc != NULL;
26670Sstevel@tonic-gate 	    next_desc = (psaddr_t)sync_stats->next) {
26680Sstevel@tonic-gate 		if (ps_pdread(ta_p->ph_p, next_desc,
26690Sstevel@tonic-gate 		    sync_stats, sizeof (*sync_stats)) != PS_OK)
26700Sstevel@tonic-gate 			return (TD_DBERR);
26710Sstevel@tonic-gate 		if (sync_stats->sync_addr == sync_obj_addr)
26720Sstevel@tonic-gate 			return (TD_OK);
26730Sstevel@tonic-gate 	}
26740Sstevel@tonic-gate 
26750Sstevel@tonic-gate 	(void) memset(sync_stats, 0, sizeof (*sync_stats));
26760Sstevel@tonic-gate 	return (TD_OK);
26770Sstevel@tonic-gate }
26780Sstevel@tonic-gate 
26790Sstevel@tonic-gate /*
26800Sstevel@tonic-gate  * Given a synchronization handle, fill in the
26810Sstevel@tonic-gate  * statistics for the synchronization variable into *ss_p.
26820Sstevel@tonic-gate  */
26830Sstevel@tonic-gate #pragma weak td_sync_get_stats = __td_sync_get_stats
26840Sstevel@tonic-gate td_err_e
26850Sstevel@tonic-gate __td_sync_get_stats(const td_synchandle_t *sh_p, td_syncstats_t *ss_p)
26860Sstevel@tonic-gate {
26870Sstevel@tonic-gate 	struct ps_prochandle *ph_p;
26880Sstevel@tonic-gate 	td_thragent_t *ta_p;
26890Sstevel@tonic-gate 	td_err_e return_val;
26900Sstevel@tonic-gate 	register_sync_t enable;
26910Sstevel@tonic-gate 	psaddr_t hashaddr;
26920Sstevel@tonic-gate 	tdb_sync_stats_t sync_stats;
26930Sstevel@tonic-gate 	size_t ix;
26940Sstevel@tonic-gate 
26950Sstevel@tonic-gate 	if (ss_p == NULL)
26960Sstevel@tonic-gate 		return (TD_ERR);
26970Sstevel@tonic-gate 	(void) memset(ss_p, 0, sizeof (*ss_p));
26980Sstevel@tonic-gate 	if ((ph_p = ph_lock_sh(sh_p, &return_val)) == NULL)
26990Sstevel@tonic-gate 		return (return_val);
27000Sstevel@tonic-gate 	ta_p = sh_p->sh_ta_p;
27010Sstevel@tonic-gate 	if (ps_pstop(ph_p) != PS_OK) {
27020Sstevel@tonic-gate 		ph_unlock(ta_p);
27030Sstevel@tonic-gate 		return (TD_DBERR);
27040Sstevel@tonic-gate 	}
27050Sstevel@tonic-gate 
27060Sstevel@tonic-gate 	if ((return_val = sync_get_info_common(sh_p, ph_p, &ss_p->ss_info))
27070Sstevel@tonic-gate 	    != TD_OK) {
27080Sstevel@tonic-gate 		if (return_val != TD_BADSH)
27090Sstevel@tonic-gate 			goto out;
27100Sstevel@tonic-gate 		/* we can correct TD_BADSH */
27110Sstevel@tonic-gate 		(void) memset(&ss_p->ss_info, 0, sizeof (ss_p->ss_info));
27120Sstevel@tonic-gate 		ss_p->ss_info.si_ta_p = sh_p->sh_ta_p;
27130Sstevel@tonic-gate 		ss_p->ss_info.si_sv_addr = sh_p->sh_unique;
27140Sstevel@tonic-gate 		/* we correct si_type and si_size below */
27150Sstevel@tonic-gate 		return_val = TD_OK;
27160Sstevel@tonic-gate 	}
27170Sstevel@tonic-gate 	if (ps_pdread(ph_p, ta_p->tdb_register_sync_addr,
27180Sstevel@tonic-gate 	    &enable, sizeof (enable)) != PS_OK) {
27190Sstevel@tonic-gate 		return_val = TD_DBERR;
27200Sstevel@tonic-gate 		goto out;
27210Sstevel@tonic-gate 	}
27220Sstevel@tonic-gate 	if (enable != REGISTER_SYNC_ON)
27230Sstevel@tonic-gate 		goto out;
27240Sstevel@tonic-gate 
27250Sstevel@tonic-gate 	/*
27260Sstevel@tonic-gate 	 * Get the address of the hash table in the target process.
27270Sstevel@tonic-gate 	 */
27280Sstevel@tonic-gate 	if (ta_p->model == PR_MODEL_NATIVE) {
27290Sstevel@tonic-gate 		if (ps_pdread(ph_p, ta_p->uberdata_addr +
27300Sstevel@tonic-gate 		    offsetof(uberdata_t, tdb.tdb_sync_addr_hash),
27310Sstevel@tonic-gate 		    &hashaddr, sizeof (hashaddr)) != PS_OK) {
27320Sstevel@tonic-gate 			return_val = TD_DBERR;
27330Sstevel@tonic-gate 			goto out;
27340Sstevel@tonic-gate 		}
27350Sstevel@tonic-gate 	} else {
27360Sstevel@tonic-gate #if defined(_LP64) && defined(_SYSCALL32)
27370Sstevel@tonic-gate 		caddr32_t addr;
27380Sstevel@tonic-gate 
27390Sstevel@tonic-gate 		if (ps_pdread(ph_p, ta_p->uberdata_addr +
27400Sstevel@tonic-gate 		    offsetof(uberdata32_t, tdb.tdb_sync_addr_hash),
27410Sstevel@tonic-gate 		    &addr, sizeof (addr)) != PS_OK) {
27420Sstevel@tonic-gate 			return_val = TD_DBERR;
27430Sstevel@tonic-gate 			goto out;
27440Sstevel@tonic-gate 		}
27450Sstevel@tonic-gate 		hashaddr = addr;
27460Sstevel@tonic-gate #else
27470Sstevel@tonic-gate 		return_val = TD_ERR;
27480Sstevel@tonic-gate 		goto out;
27490Sstevel@tonic-gate #endif	/* _SYSCALL32 */
27500Sstevel@tonic-gate 	}
27510Sstevel@tonic-gate 
27520Sstevel@tonic-gate 	if (hashaddr == 0)
27530Sstevel@tonic-gate 		return_val = TD_BADSH;
27540Sstevel@tonic-gate 	else
27550Sstevel@tonic-gate 		return_val = read_sync_stats(ta_p, hashaddr,
27560Sstevel@tonic-gate 			sh_p->sh_unique, &sync_stats);
27570Sstevel@tonic-gate 	if (return_val != TD_OK)
27580Sstevel@tonic-gate 		goto out;
27590Sstevel@tonic-gate 
27600Sstevel@tonic-gate 	/*
27610Sstevel@tonic-gate 	 * We have the hash table entry.  Transfer the data to
27620Sstevel@tonic-gate 	 * the td_syncstats_t structure provided by the caller.
27630Sstevel@tonic-gate 	 */
27640Sstevel@tonic-gate 	switch (sync_stats.un.type) {
27650Sstevel@tonic-gate 	case TDB_MUTEX:
27660Sstevel@tonic-gate 	    {
27670Sstevel@tonic-gate 		td_mutex_stats_t *msp = &ss_p->ss_un.mutex;
27680Sstevel@tonic-gate 
27690Sstevel@tonic-gate 		ss_p->ss_info.si_type = TD_SYNC_MUTEX;
27700Sstevel@tonic-gate 		ss_p->ss_info.si_size = sizeof (mutex_t);
27710Sstevel@tonic-gate 		msp->mutex_lock =
27720Sstevel@tonic-gate 			sync_stats.un.mutex.mutex_lock;
27730Sstevel@tonic-gate 		msp->mutex_sleep =
27740Sstevel@tonic-gate 			sync_stats.un.mutex.mutex_sleep;
27750Sstevel@tonic-gate 		msp->mutex_sleep_time =
27760Sstevel@tonic-gate 			sync_stats.un.mutex.mutex_sleep_time;
27770Sstevel@tonic-gate 		msp->mutex_hold_time =
27780Sstevel@tonic-gate 			sync_stats.un.mutex.mutex_hold_time;
27790Sstevel@tonic-gate 		msp->mutex_try =
27800Sstevel@tonic-gate 			sync_stats.un.mutex.mutex_try;
27810Sstevel@tonic-gate 		msp->mutex_try_fail =
27820Sstevel@tonic-gate 			sync_stats.un.mutex.mutex_try_fail;
27830Sstevel@tonic-gate 		if (sync_stats.sync_addr >= ta_p->hash_table_addr &&
27840Sstevel@tonic-gate 		    (ix = sync_stats.sync_addr - ta_p->hash_table_addr)
27850Sstevel@tonic-gate 		    < ta_p->hash_size * sizeof (thr_hash_table_t))
27860Sstevel@tonic-gate 			msp->mutex_internal =
27870Sstevel@tonic-gate 				ix / sizeof (thr_hash_table_t) + 1;
27880Sstevel@tonic-gate 		break;
27890Sstevel@tonic-gate 	    }
27900Sstevel@tonic-gate 	case TDB_COND:
27910Sstevel@tonic-gate 	    {
27920Sstevel@tonic-gate 		td_cond_stats_t *csp = &ss_p->ss_un.cond;
27930Sstevel@tonic-gate 
27940Sstevel@tonic-gate 		ss_p->ss_info.si_type = TD_SYNC_COND;
27950Sstevel@tonic-gate 		ss_p->ss_info.si_size = sizeof (cond_t);
27960Sstevel@tonic-gate 		csp->cond_wait =
27970Sstevel@tonic-gate 			sync_stats.un.cond.cond_wait;
27980Sstevel@tonic-gate 		csp->cond_timedwait =
27990Sstevel@tonic-gate 			sync_stats.un.cond.cond_timedwait;
28000Sstevel@tonic-gate 		csp->cond_wait_sleep_time =
28010Sstevel@tonic-gate 			sync_stats.un.cond.cond_wait_sleep_time;
28020Sstevel@tonic-gate 		csp->cond_timedwait_sleep_time =
28030Sstevel@tonic-gate 			sync_stats.un.cond.cond_timedwait_sleep_time;
28040Sstevel@tonic-gate 		csp->cond_timedwait_timeout =
28050Sstevel@tonic-gate 			sync_stats.un.cond.cond_timedwait_timeout;
28060Sstevel@tonic-gate 		csp->cond_signal =
28070Sstevel@tonic-gate 			sync_stats.un.cond.cond_signal;
28080Sstevel@tonic-gate 		csp->cond_broadcast =
28090Sstevel@tonic-gate 			sync_stats.un.cond.cond_broadcast;
28100Sstevel@tonic-gate 		if (sync_stats.sync_addr >= ta_p->hash_table_addr &&
28110Sstevel@tonic-gate 		    (ix = sync_stats.sync_addr - ta_p->hash_table_addr)
28120Sstevel@tonic-gate 		    < ta_p->hash_size * sizeof (thr_hash_table_t))
28130Sstevel@tonic-gate 			csp->cond_internal =
28140Sstevel@tonic-gate 				ix / sizeof (thr_hash_table_t) + 1;
28150Sstevel@tonic-gate 		break;
28160Sstevel@tonic-gate 	    }
28170Sstevel@tonic-gate 	case TDB_RWLOCK:
28180Sstevel@tonic-gate 	    {
28190Sstevel@tonic-gate 		td_rwlock_stats_t *rwsp = &ss_p->ss_un.rwlock;
28200Sstevel@tonic-gate 
28210Sstevel@tonic-gate 		ss_p->ss_info.si_type = TD_SYNC_RWLOCK;
28220Sstevel@tonic-gate 		ss_p->ss_info.si_size = sizeof (rwlock_t);
28230Sstevel@tonic-gate 		rwsp->rw_rdlock =
28240Sstevel@tonic-gate 			sync_stats.un.rwlock.rw_rdlock;
28250Sstevel@tonic-gate 		rwsp->rw_rdlock_try =
28260Sstevel@tonic-gate 			sync_stats.un.rwlock.rw_rdlock_try;
28270Sstevel@tonic-gate 		rwsp->rw_rdlock_try_fail =
28280Sstevel@tonic-gate 			sync_stats.un.rwlock.rw_rdlock_try_fail;
28290Sstevel@tonic-gate 		rwsp->rw_wrlock =
28300Sstevel@tonic-gate 			sync_stats.un.rwlock.rw_wrlock;
28310Sstevel@tonic-gate 		rwsp->rw_wrlock_hold_time =
28320Sstevel@tonic-gate 			sync_stats.un.rwlock.rw_wrlock_hold_time;
28330Sstevel@tonic-gate 		rwsp->rw_wrlock_try =
28340Sstevel@tonic-gate 			sync_stats.un.rwlock.rw_wrlock_try;
28350Sstevel@tonic-gate 		rwsp->rw_wrlock_try_fail =
28360Sstevel@tonic-gate 			sync_stats.un.rwlock.rw_wrlock_try_fail;
28370Sstevel@tonic-gate 		break;
28380Sstevel@tonic-gate 	    }
28390Sstevel@tonic-gate 	case TDB_SEMA:
28400Sstevel@tonic-gate 	    {
28410Sstevel@tonic-gate 		td_sema_stats_t *ssp = &ss_p->ss_un.sema;
28420Sstevel@tonic-gate 
28430Sstevel@tonic-gate 		ss_p->ss_info.si_type = TD_SYNC_SEMA;
28440Sstevel@tonic-gate 		ss_p->ss_info.si_size = sizeof (sema_t);
28450Sstevel@tonic-gate 		ssp->sema_wait =
28460Sstevel@tonic-gate 			sync_stats.un.sema.sema_wait;
28470Sstevel@tonic-gate 		ssp->sema_wait_sleep =
28480Sstevel@tonic-gate 			sync_stats.un.sema.sema_wait_sleep;
28490Sstevel@tonic-gate 		ssp->sema_wait_sleep_time =
28500Sstevel@tonic-gate 			sync_stats.un.sema.sema_wait_sleep_time;
28510Sstevel@tonic-gate 		ssp->sema_trywait =
28520Sstevel@tonic-gate 			sync_stats.un.sema.sema_trywait;
28530Sstevel@tonic-gate 		ssp->sema_trywait_fail =
28540Sstevel@tonic-gate 			sync_stats.un.sema.sema_trywait_fail;
28550Sstevel@tonic-gate 		ssp->sema_post =
28560Sstevel@tonic-gate 			sync_stats.un.sema.sema_post;
28570Sstevel@tonic-gate 		ssp->sema_max_count =
28580Sstevel@tonic-gate 			sync_stats.un.sema.sema_max_count;
28590Sstevel@tonic-gate 		ssp->sema_min_count =
28600Sstevel@tonic-gate 			sync_stats.un.sema.sema_min_count;
28610Sstevel@tonic-gate 		break;
28620Sstevel@tonic-gate 	    }
28630Sstevel@tonic-gate 	default:
28640Sstevel@tonic-gate 		return_val = TD_BADSH;
28650Sstevel@tonic-gate 		break;
28660Sstevel@tonic-gate 	}
28670Sstevel@tonic-gate 
28680Sstevel@tonic-gate out:
28690Sstevel@tonic-gate 	(void) ps_pcontinue(ph_p);
28700Sstevel@tonic-gate 	ph_unlock(ta_p);
28710Sstevel@tonic-gate 	return (return_val);
28720Sstevel@tonic-gate }
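
/*
 * Editorial note: the counters transferred above are only accumulated
 * while synchronization-object tracking is enabled in the target
 * (tdb_register_sync set to REGISTER_SYNC_ON, checked near the top of
 * this function); until tracking has been enabled, presumably through
 * the agent-level td_ta_sync_tracking_enable() interface, the ss_un
 * statistics read back as zero.  A typical client therefore enables
 * tracking, lets the target run for a while, and only then calls
 * td_sync_get_stats().
 */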
28730Sstevel@tonic-gate 
28740Sstevel@tonic-gate /*
28750Sstevel@tonic-gate  * Change the state of a synchronization variable.
28760Sstevel@tonic-gate  *	1) mutex lock state set to value
28770Sstevel@tonic-gate  *	2) semaphore's count set to value
2878*4570Sraf  *	3) writer's lock set by value < 0
2879*4570Sraf  *	4) reader's lock number of readers set to value >= 0
28800Sstevel@tonic-gate  * Currently unused by dbx.
28810Sstevel@tonic-gate  */
28820Sstevel@tonic-gate #pragma weak td_sync_setstate = __td_sync_setstate
28830Sstevel@tonic-gate td_err_e
28840Sstevel@tonic-gate __td_sync_setstate(const td_synchandle_t *sh_p, long lvalue)
28850Sstevel@tonic-gate {
28860Sstevel@tonic-gate 	struct ps_prochandle *ph_p;
28870Sstevel@tonic-gate 	int		trunc = 0;
28880Sstevel@tonic-gate 	td_err_e	return_val;
28890Sstevel@tonic-gate 	td_so_un_t	generic_so;
2890*4570Sraf 	uint32_t	*rwstate;
28910Sstevel@tonic-gate 	int		value = (int)lvalue;
28920Sstevel@tonic-gate 
28930Sstevel@tonic-gate 	if ((ph_p = ph_lock_sh(sh_p, &return_val)) == NULL)
28940Sstevel@tonic-gate 		return (return_val);
28950Sstevel@tonic-gate 	if (ps_pstop(ph_p) != PS_OK) {
28960Sstevel@tonic-gate 		ph_unlock(sh_p->sh_ta_p);
28970Sstevel@tonic-gate 		return (TD_DBERR);
28980Sstevel@tonic-gate 	}
28990Sstevel@tonic-gate 
29000Sstevel@tonic-gate 	/*
29010Sstevel@tonic-gate 	 * Read the synch. variable information.
29020Sstevel@tonic-gate 	 * First attempt to read the whole union and if that fails
29030Sstevel@tonic-gate 	 * fall back to reading only the smallest member, the condvar.
29040Sstevel@tonic-gate 	 */
29050Sstevel@tonic-gate 	if (ps_pdread(ph_p, sh_p->sh_unique, &generic_so,
29060Sstevel@tonic-gate 	    sizeof (generic_so)) != PS_OK) {
29070Sstevel@tonic-gate 		trunc = 1;
29080Sstevel@tonic-gate 		if (ps_pdread(ph_p, sh_p->sh_unique, &generic_so.condition,
29090Sstevel@tonic-gate 		    sizeof (generic_so.condition)) != PS_OK) {
29100Sstevel@tonic-gate 			(void) ps_pcontinue(ph_p);
29110Sstevel@tonic-gate 			ph_unlock(sh_p->sh_ta_p);
29120Sstevel@tonic-gate 			return (TD_DBERR);
29130Sstevel@tonic-gate 		}
29140Sstevel@tonic-gate 	}
29150Sstevel@tonic-gate 
29160Sstevel@tonic-gate 	/*
29170Sstevel@tonic-gate 	 * Set the new value in the synch. variable information that was
29180Sstevel@tonic-gate 	 * just read from the process, then write it back.
29190Sstevel@tonic-gate 	 */
29200Sstevel@tonic-gate 	switch (generic_so.condition.cond_magic) {
29210Sstevel@tonic-gate 	case MUTEX_MAGIC:
29220Sstevel@tonic-gate 		if (trunc && ps_pdread(ph_p, sh_p->sh_unique,
29230Sstevel@tonic-gate 		    &generic_so.lock, sizeof (generic_so.lock)) != PS_OK) {
29240Sstevel@tonic-gate 			return_val = TD_DBERR;
29250Sstevel@tonic-gate 			break;
29260Sstevel@tonic-gate 		}
29270Sstevel@tonic-gate 		generic_so.lock.mutex_lockw = (uint8_t)value;
29280Sstevel@tonic-gate 		if (ps_pdwrite(ph_p, sh_p->sh_unique, &generic_so.lock,
29290Sstevel@tonic-gate 		    sizeof (generic_so.lock)) != PS_OK)
29300Sstevel@tonic-gate 			return_val = TD_DBERR;
29310Sstevel@tonic-gate 		break;
29320Sstevel@tonic-gate 	case SEMA_MAGIC:
29330Sstevel@tonic-gate 		if (trunc && ps_pdread(ph_p, sh_p->sh_unique,
29340Sstevel@tonic-gate 		    &generic_so.semaphore, sizeof (generic_so.semaphore))
29350Sstevel@tonic-gate 		    != PS_OK) {
29360Sstevel@tonic-gate 			return_val = TD_DBERR;
29370Sstevel@tonic-gate 			break;
29380Sstevel@tonic-gate 		}
29390Sstevel@tonic-gate 		generic_so.semaphore.count = value;
29400Sstevel@tonic-gate 		if (ps_pdwrite(ph_p, sh_p->sh_unique, &generic_so.semaphore,
29410Sstevel@tonic-gate 		    sizeof (generic_so.semaphore)) != PS_OK)
29420Sstevel@tonic-gate 			return_val = TD_DBERR;
29430Sstevel@tonic-gate 		break;
29440Sstevel@tonic-gate 	case COND_MAGIC:
29450Sstevel@tonic-gate 		/* Operation not supported on a condition variable */
29460Sstevel@tonic-gate 		return_val = TD_ERR;
29470Sstevel@tonic-gate 		break;
29480Sstevel@tonic-gate 	case RWL_MAGIC:
29490Sstevel@tonic-gate 		if (trunc && ps_pdread(ph_p, sh_p->sh_unique,
29500Sstevel@tonic-gate 		    &generic_so.rwlock, sizeof (generic_so.rwlock)) != PS_OK) {
29510Sstevel@tonic-gate 			return_val = TD_DBERR;
29520Sstevel@tonic-gate 			break;
29530Sstevel@tonic-gate 		}
2954*4570Sraf 		rwstate = (uint32_t *)&generic_so.rwlock.readers;
2955*4570Sraf 		*rwstate &= URW_HAS_WAITERS;
2956*4570Sraf 		if (value < 0)
2957*4570Sraf 			*rwstate |= URW_WRITE_LOCKED;
2958*4570Sraf 		else
2959*4570Sraf 			*rwstate |= (value & URW_READERS_MASK);
29600Sstevel@tonic-gate 		if (ps_pdwrite(ph_p, sh_p->sh_unique, &generic_so.rwlock,
29610Sstevel@tonic-gate 		    sizeof (generic_so.rwlock)) != PS_OK)
29620Sstevel@tonic-gate 			return_val = TD_DBERR;
29630Sstevel@tonic-gate 		break;
29640Sstevel@tonic-gate 	default:
29650Sstevel@tonic-gate 		/* Bad sync. object type */
29660Sstevel@tonic-gate 		return_val = TD_BADSH;
29670Sstevel@tonic-gate 		break;
29680Sstevel@tonic-gate 	}
29690Sstevel@tonic-gate 
29700Sstevel@tonic-gate 	(void) ps_pcontinue(ph_p);
29710Sstevel@tonic-gate 	ph_unlock(sh_p->sh_ta_p);
29720Sstevel@tonic-gate 	return (return_val);
29730Sstevel@tonic-gate }
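
/*
 * Editorial note on the rwlock case above: the readers word encodes
 * the lock state as a bit field.  URW_WRITE_LOCKED marks the lock as
 * held by a writer, the low URW_READERS_MASK bits hold the count of
 * active readers, and URW_HAS_WAITERS (which this function preserves
 * across the update) indicates that threads are queued on the lock.
 * So, for example, td_sync_setstate(sh_p, -1) makes the lock appear
 * write-locked, while td_sync_setstate(sh_p, 3) makes it appear held
 * by three readers.
 */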
29740Sstevel@tonic-gate 
29750Sstevel@tonic-gate typedef struct {
29760Sstevel@tonic-gate 	td_thr_iter_f	*waiter_cb;
29770Sstevel@tonic-gate 	psaddr_t	sync_obj_addr;
29780Sstevel@tonic-gate 	uint16_t	sync_magic;
29790Sstevel@tonic-gate 	void		*waiter_cb_arg;
29800Sstevel@tonic-gate 	td_err_e	errcode;
29810Sstevel@tonic-gate } waiter_cb_ctl_t;
29820Sstevel@tonic-gate 
29830Sstevel@tonic-gate static int
29840Sstevel@tonic-gate waiters_cb(const td_thrhandle_t *th_p, void *arg)
29850Sstevel@tonic-gate {
29860Sstevel@tonic-gate 	td_thragent_t	*ta_p = th_p->th_ta_p;
29870Sstevel@tonic-gate 	struct ps_prochandle *ph_p = ta_p->ph_p;
29880Sstevel@tonic-gate 	waiter_cb_ctl_t	*wcb = arg;
29890Sstevel@tonic-gate 	caddr_t		wchan;
29900Sstevel@tonic-gate 
29910Sstevel@tonic-gate 	if (ta_p->model == PR_MODEL_NATIVE) {
29920Sstevel@tonic-gate 		ulwp_t *ulwp = (ulwp_t *)th_p->th_unique;
29930Sstevel@tonic-gate 
29940Sstevel@tonic-gate 		if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_wchan,
29950Sstevel@tonic-gate 		    &wchan, sizeof (wchan)) != PS_OK) {
29960Sstevel@tonic-gate 			wcb->errcode = TD_DBERR;
29970Sstevel@tonic-gate 			return (1);
29980Sstevel@tonic-gate 		}
29990Sstevel@tonic-gate 	} else {
30000Sstevel@tonic-gate #if defined(_LP64) && defined(_SYSCALL32)
30010Sstevel@tonic-gate 		ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique;
30020Sstevel@tonic-gate 		caddr32_t wchan32;
30030Sstevel@tonic-gate 
30040Sstevel@tonic-gate 		if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_wchan,
30050Sstevel@tonic-gate 		    &wchan32, sizeof (wchan32)) != PS_OK) {
30060Sstevel@tonic-gate 			wcb->errcode = TD_DBERR;
30070Sstevel@tonic-gate 			return (1);
30080Sstevel@tonic-gate 		}
30090Sstevel@tonic-gate 		wchan = (caddr_t)(uintptr_t)wchan32;
30100Sstevel@tonic-gate #else
30110Sstevel@tonic-gate 		wcb->errcode = TD_ERR;
30120Sstevel@tonic-gate 		return (1);
30130Sstevel@tonic-gate #endif	/* _SYSCALL32 */
30140Sstevel@tonic-gate 	}
30150Sstevel@tonic-gate 
30160Sstevel@tonic-gate 	if (wchan == NULL)
30170Sstevel@tonic-gate 		return (0);
30180Sstevel@tonic-gate 
30190Sstevel@tonic-gate 	if (wchan == (caddr_t)wcb->sync_obj_addr)
30200Sstevel@tonic-gate 		return ((*wcb->waiter_cb)(th_p, wcb->waiter_cb_arg));
30210Sstevel@tonic-gate 
30220Sstevel@tonic-gate 	return (0);
30230Sstevel@tonic-gate }
30240Sstevel@tonic-gate 
30250Sstevel@tonic-gate /*
30260Sstevel@tonic-gate  * For a given synchronization variable, iterate over the
30270Sstevel@tonic-gate  * set of waiting threads.  The callback function is passed
30280Sstevel@tonic-gate  * two parameters, a pointer to a thread handle and a pointer
30290Sstevel@tonic-gate  * to extra callback data.
30300Sstevel@tonic-gate  */
30310Sstevel@tonic-gate #pragma weak td_sync_waiters = __td_sync_waiters
30320Sstevel@tonic-gate td_err_e
30330Sstevel@tonic-gate __td_sync_waiters(const td_synchandle_t *sh_p, td_thr_iter_f *cb, void *cb_data)
30340Sstevel@tonic-gate {
30350Sstevel@tonic-gate 	struct ps_prochandle *ph_p;
30360Sstevel@tonic-gate 	waiter_cb_ctl_t	wcb;
30370Sstevel@tonic-gate 	td_err_e	return_val;
30380Sstevel@tonic-gate 
30390Sstevel@tonic-gate 	if ((ph_p = ph_lock_sh(sh_p, &return_val)) == NULL)
30400Sstevel@tonic-gate 		return (return_val);
30410Sstevel@tonic-gate 	if (ps_pdread(ph_p,
30420Sstevel@tonic-gate 	    (psaddr_t)&((mutex_t *)sh_p->sh_unique)->mutex_magic,
30430Sstevel@tonic-gate 	    (caddr_t)&wcb.sync_magic, sizeof (wcb.sync_magic)) != PS_OK) {
30440Sstevel@tonic-gate 		ph_unlock(sh_p->sh_ta_p);
30450Sstevel@tonic-gate 		return (TD_DBERR);
30460Sstevel@tonic-gate 	}
30470Sstevel@tonic-gate 	ph_unlock(sh_p->sh_ta_p);
30480Sstevel@tonic-gate 
30490Sstevel@tonic-gate 	switch (wcb.sync_magic) {
30500Sstevel@tonic-gate 	case MUTEX_MAGIC:
30510Sstevel@tonic-gate 	case COND_MAGIC:
30520Sstevel@tonic-gate 	case SEMA_MAGIC:
30530Sstevel@tonic-gate 	case RWL_MAGIC:
30540Sstevel@tonic-gate 		break;
30550Sstevel@tonic-gate 	default:
30560Sstevel@tonic-gate 		return (TD_BADSH);
30570Sstevel@tonic-gate 	}
30580Sstevel@tonic-gate 
30590Sstevel@tonic-gate 	wcb.waiter_cb = cb;
30600Sstevel@tonic-gate 	wcb.sync_obj_addr = sh_p->sh_unique;
30610Sstevel@tonic-gate 	wcb.waiter_cb_arg = cb_data;
30620Sstevel@tonic-gate 	wcb.errcode = TD_OK;
30630Sstevel@tonic-gate 	return_val = __td_ta_thr_iter(sh_p->sh_ta_p, waiters_cb, &wcb,
30640Sstevel@tonic-gate 		TD_THR_SLEEP, TD_THR_LOWEST_PRIORITY,
30650Sstevel@tonic-gate 		TD_SIGNO_MASK, TD_THR_ANY_USER_FLAGS);
30660Sstevel@tonic-gate 
30670Sstevel@tonic-gate 	if (return_val != TD_OK)
30680Sstevel@tonic-gate 		return (return_val);
30690Sstevel@tonic-gate 
30700Sstevel@tonic-gate 	return (wcb.errcode);
30710Sstevel@tonic-gate }
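
/*
 * Editorial sketch: a td_thr_iter_f callback suitable for
 * td_sync_waiters() that prints the id of each waiting thread using
 * the standard td_thr_get_info() interface.
 */
static int
example_print_waiter(const td_thrhandle_t *th_p, void *cb_data)
{
	td_thrinfo_t ti;

	(void) cb_data;		/* no extra callback data is needed */
	if (td_thr_get_info(th_p, &ti) == TD_OK)
		(void) printf("thread %d is waiting\n", (int)ti.ti_tid);
	return (0);		/* continue the iteration */
}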
3072