10Sstevel@tonic-gate /*
20Sstevel@tonic-gate * CDDL HEADER START
30Sstevel@tonic-gate *
40Sstevel@tonic-gate * The contents of this file are subject to the terms of the
54570Sraf * Common Development and Distribution License (the "License").
64570Sraf * You may not use this file except in compliance with the License.
70Sstevel@tonic-gate *
80Sstevel@tonic-gate * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
90Sstevel@tonic-gate * or http://www.opensolaris.org/os/licensing.
100Sstevel@tonic-gate * See the License for the specific language governing permissions
110Sstevel@tonic-gate * and limitations under the License.
120Sstevel@tonic-gate *
130Sstevel@tonic-gate * When distributing Covered Code, include this CDDL HEADER in each
140Sstevel@tonic-gate * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
150Sstevel@tonic-gate * If applicable, add the following below this CDDL HEADER, with the
160Sstevel@tonic-gate * fields enclosed by brackets "[]" replaced with your own identifying
170Sstevel@tonic-gate * information: Portions Copyright [yyyy] [name of copyright owner]
180Sstevel@tonic-gate *
190Sstevel@tonic-gate * CDDL HEADER END
200Sstevel@tonic-gate */
214570Sraf
220Sstevel@tonic-gate /*
236247Sraf * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
240Sstevel@tonic-gate * Use is subject to license terms.
250Sstevel@tonic-gate */
260Sstevel@tonic-gate
270Sstevel@tonic-gate #include <stdio.h>
280Sstevel@tonic-gate #include <stdlib.h>
290Sstevel@tonic-gate #include <stddef.h>
300Sstevel@tonic-gate #include <unistd.h>
310Sstevel@tonic-gate #include <thr_uberdata.h>
320Sstevel@tonic-gate #include <thread_db.h>
330Sstevel@tonic-gate #include <libc_int.h>
340Sstevel@tonic-gate
350Sstevel@tonic-gate /*
360Sstevel@tonic-gate * Private structures.
370Sstevel@tonic-gate */
380Sstevel@tonic-gate
/*
 * Overlay of all the synchronization-object types that this library
 * reads from the target process, so that one buffer is big enough
 * to hold any of them.
 */
typedef union {
	mutex_t lock;		/* mutex */
	rwlock_t rwlock;	/* readers/writer lock */
	sema_t semaphore;	/* semaphore */
	cond_t condition;	/* condition variable */
} td_so_un_t;
450Sstevel@tonic-gate
/*
 * The agent process handle.  One is allocated by td_ta_new() for each
 * target process and logically released by td_ta_delete(), which only
 * NULLs out ph_p (see the comment above __td_ta_delete()).
 */
struct td_thragent {
	rwlock_t rwlock;	/* protects this structure; see ph_lock_ta() */
	struct ps_prochandle *ph_p;	/* client's handle; NULL after delete */
	int initialized;	/* 0 = no, 1 = partial (single-threaded), */
				/* 2 = full, -1 = initialization in progress */
	int sync_tracking;	/* non-zero: leave sync tracking alone on delete */
	int model;		/* target data model (PR_MODEL_NATIVE or 32-bit) */
	int primary_map;	/* copied from uberdata.primary_map */
	psaddr_t bootstrap_addr;	/* target address of _tdb_bootstrap */
	psaddr_t uberdata_addr;		/* target address of the uberdata */
	psaddr_t tdb_eventmask_addr;	/* addr of tdb.tdb_ev_global_mask */
	psaddr_t tdb_register_sync_addr;	/* addr of uf_tdb_register_sync */
	psaddr_t tdb_events[TD_MAX_EVENT_NUM - TD_MIN_EVENT_NUM + 1];
				/* target addresses of the event functions */
	psaddr_t hash_table_addr;	/* addr of the target's thread hash table */
	int hash_size;		/* number of hash buckets; 1 => single-threaded */
	lwpid_t single_lwpid;	/* if single-threaded: id of the lone LWP */
	psaddr_t single_ulwp_addr;	/* if single-threaded: its ulwp_t address */
};
630Sstevel@tonic-gate
640Sstevel@tonic-gate /*
650Sstevel@tonic-gate * This is the name of the variable in libc that contains
660Sstevel@tonic-gate * the uberdata address that we will need.
670Sstevel@tonic-gate */
680Sstevel@tonic-gate #define TD_BOOTSTRAP_NAME "_tdb_bootstrap"
690Sstevel@tonic-gate /*
700Sstevel@tonic-gate * This is the actual name of uberdata, used in the event
710Sstevel@tonic-gate * that tdb_bootstrap has not yet been initialized.
720Sstevel@tonic-gate */
730Sstevel@tonic-gate #define TD_UBERDATA_NAME "_uberdata"
740Sstevel@tonic-gate /*
750Sstevel@tonic-gate * The library name should end with ".so.1", but older versions of
760Sstevel@tonic-gate * dbx expect the unadorned name and malfunction if ".1" is specified.
770Sstevel@tonic-gate * Unfortunately, if ".1" is not specified, mdb malfunctions when it
780Sstevel@tonic-gate * is applied to another instance of itself (due to the presence of
790Sstevel@tonic-gate * /usr/lib/mdb/proc/libc.so). So we try it both ways.
800Sstevel@tonic-gate */
810Sstevel@tonic-gate #define TD_LIBRARY_NAME "libc.so"
820Sstevel@tonic-gate #define TD_LIBRARY_NAME_1 "libc.so.1"
830Sstevel@tonic-gate
840Sstevel@tonic-gate td_err_e __td_thr_get_info(td_thrhandle_t *th_p, td_thrinfo_t *ti_p);
850Sstevel@tonic-gate
860Sstevel@tonic-gate td_err_e __td_ta_thr_iter(td_thragent_t *ta_p, td_thr_iter_f *cb,
870Sstevel@tonic-gate void *cbdata_p, td_thr_state_e state, int ti_pri,
880Sstevel@tonic-gate sigset_t *ti_sigmask_p, unsigned ti_user_flags);
890Sstevel@tonic-gate
900Sstevel@tonic-gate /*
910Sstevel@tonic-gate * Initialize threads debugging interface.
920Sstevel@tonic-gate */
930Sstevel@tonic-gate #pragma weak td_init = __td_init
940Sstevel@tonic-gate td_err_e
__td_init()950Sstevel@tonic-gate __td_init()
960Sstevel@tonic-gate {
970Sstevel@tonic-gate return (TD_OK);
980Sstevel@tonic-gate }
990Sstevel@tonic-gate
1000Sstevel@tonic-gate /*
1010Sstevel@tonic-gate * This function does nothing, and never did.
1020Sstevel@tonic-gate * But the symbol is in the ABI, so we can't delete it.
1030Sstevel@tonic-gate */
1040Sstevel@tonic-gate #pragma weak td_log = __td_log
/*
 * Deliberately empty: this interface never did anything, but the
 * symbol is part of the ABI and must remain callable.
 * Declared with a proper (void) prototype instead of the
 * obsolescent empty parameter list.
 */
void
__td_log(void)
{
}
1090Sstevel@tonic-gate
1100Sstevel@tonic-gate /*
1110Sstevel@tonic-gate * Short-cut to read just the hash table size from the process,
1120Sstevel@tonic-gate * to avoid repeatedly reading the full uberdata structure when
1130Sstevel@tonic-gate * dealing with a single-threaded process.
1140Sstevel@tonic-gate */
1150Sstevel@tonic-gate static uint_t
td_read_hash_size(td_thragent_t * ta_p)1160Sstevel@tonic-gate td_read_hash_size(td_thragent_t *ta_p)
1170Sstevel@tonic-gate {
1180Sstevel@tonic-gate psaddr_t addr;
1190Sstevel@tonic-gate uint_t hash_size;
1200Sstevel@tonic-gate
1210Sstevel@tonic-gate switch (ta_p->initialized) {
1220Sstevel@tonic-gate default: /* uninitialized */
1230Sstevel@tonic-gate return (0);
1240Sstevel@tonic-gate case 1: /* partially initialized */
1250Sstevel@tonic-gate break;
1260Sstevel@tonic-gate case 2: /* fully initialized */
1270Sstevel@tonic-gate return (ta_p->hash_size);
1280Sstevel@tonic-gate }
1290Sstevel@tonic-gate
1300Sstevel@tonic-gate if (ta_p->model == PR_MODEL_NATIVE) {
1310Sstevel@tonic-gate addr = ta_p->uberdata_addr + offsetof(uberdata_t, hash_size);
1320Sstevel@tonic-gate } else {
1330Sstevel@tonic-gate #if defined(_LP64) && defined(_SYSCALL32)
1340Sstevel@tonic-gate addr = ta_p->uberdata_addr + offsetof(uberdata32_t, hash_size);
1350Sstevel@tonic-gate #else
1360Sstevel@tonic-gate addr = 0;
1370Sstevel@tonic-gate #endif
1380Sstevel@tonic-gate }
1390Sstevel@tonic-gate if (ps_pdread(ta_p->ph_p, addr, &hash_size, sizeof (hash_size))
1400Sstevel@tonic-gate != PS_OK)
1410Sstevel@tonic-gate return (0);
1420Sstevel@tonic-gate return (hash_size);
1430Sstevel@tonic-gate }
1440Sstevel@tonic-gate
/*
 * Read the target's uberdata and cache the pieces the agent needs:
 * the global event mask address, the sync-registration flag address,
 * the thread hash table address/size and the event-stub addresses.
 * Sets ta_p->initialized to 2 (multi-threaded), 1 (single-threaded)
 * or 0 (not usable yet).
 */
static td_err_e
td_read_uberdata(td_thragent_t *ta_p)
{
	struct ps_prochandle *ph_p = ta_p->ph_p;

	if (ta_p->model == PR_MODEL_NATIVE) {
		uberdata_t uberdata;

		if (ps_pdread(ph_p, ta_p->uberdata_addr,
		    &uberdata, sizeof (uberdata)) != PS_OK)
			return (TD_DBERR);
		ta_p->primary_map = uberdata.primary_map;
		ta_p->tdb_eventmask_addr = ta_p->uberdata_addr +
		    offsetof(uberdata_t, tdb.tdb_ev_global_mask);
		ta_p->tdb_register_sync_addr = ta_p->uberdata_addr +
		    offsetof(uberdata_t, uberflags.uf_tdb_register_sync);
		ta_p->hash_table_addr = (psaddr_t)uberdata.thr_hash_table;
		ta_p->hash_size = uberdata.hash_size;
		if (ps_pdread(ph_p, (psaddr_t)uberdata.tdb.tdb_events,
		    ta_p->tdb_events, sizeof (ta_p->tdb_events)) != PS_OK)
			return (TD_DBERR);

	} else {
#if defined(_LP64) && defined(_SYSCALL32)
		/* 32-bit target examined by a 64-bit debugger */
		uberdata32_t uberdata;
		caddr32_t tdb_events[TD_MAX_EVENT_NUM - TD_MIN_EVENT_NUM + 1];
		int i;

		if (ps_pdread(ph_p, ta_p->uberdata_addr,
		    &uberdata, sizeof (uberdata)) != PS_OK)
			return (TD_DBERR);
		ta_p->primary_map = uberdata.primary_map;
		ta_p->tdb_eventmask_addr = ta_p->uberdata_addr +
		    offsetof(uberdata32_t, tdb.tdb_ev_global_mask);
		ta_p->tdb_register_sync_addr = ta_p->uberdata_addr +
		    offsetof(uberdata32_t, uberflags.uf_tdb_register_sync);
		ta_p->hash_table_addr = (psaddr_t)uberdata.thr_hash_table;
		ta_p->hash_size = uberdata.hash_size;
		if (ps_pdread(ph_p, (psaddr_t)uberdata.tdb.tdb_events,
		    tdb_events, sizeof (tdb_events)) != PS_OK)
			return (TD_DBERR);
		/* widen the 32-bit event addresses to psaddr_t */
		for (i = 0; i < TD_MAX_EVENT_NUM - TD_MIN_EVENT_NUM + 1; i++)
			ta_p->tdb_events[i] = tdb_events[i];
#else
		return (TD_DBERR);
#endif
	}

	/* a hash table of size 1 means the process is single-threaded */
	if (ta_p->hash_size != 1) {		/* multi-threaded */
		ta_p->initialized = 2;
		ta_p->single_lwpid = 0;
		ta_p->single_ulwp_addr = NULL;
	} else {				/* single-threaded */
		ta_p->initialized = 1;
		/*
		 * Get the address and lwpid of the single thread/LWP.
		 * It may not be ulwp_one if this is a child of fork1().
		 */
		if (ta_p->model == PR_MODEL_NATIVE) {
			thr_hash_table_t head;
			lwpid_t lwpid = 0;

			if (ps_pdread(ph_p, ta_p->hash_table_addr,
			    &head, sizeof (head)) != PS_OK)
				return (TD_DBERR);
			if ((psaddr_t)head.hash_bucket == NULL)
				ta_p->initialized = 0;	/* no threads yet */
			else if (ps_pdread(ph_p, (psaddr_t)head.hash_bucket +
			    offsetof(ulwp_t, ul_lwpid),
			    &lwpid, sizeof (lwpid)) != PS_OK)
				return (TD_DBERR);
			ta_p->single_lwpid = lwpid;
			ta_p->single_ulwp_addr = (psaddr_t)head.hash_bucket;
		} else {
#if defined(_LP64) && defined(_SYSCALL32)
			thr_hash_table32_t head;
			lwpid_t lwpid = 0;

			if (ps_pdread(ph_p, ta_p->hash_table_addr,
			    &head, sizeof (head)) != PS_OK)
				return (TD_DBERR);
			if ((psaddr_t)head.hash_bucket == NULL)
				ta_p->initialized = 0;	/* no threads yet */
			else if (ps_pdread(ph_p, (psaddr_t)head.hash_bucket +
			    offsetof(ulwp32_t, ul_lwpid),
			    &lwpid, sizeof (lwpid)) != PS_OK)
				return (TD_DBERR);
			ta_p->single_lwpid = lwpid;
			ta_p->single_ulwp_addr = (psaddr_t)head.hash_bucket;
#else
			return (TD_DBERR);
#endif
		}
	}
	/*
	 * If primary_map is clear the agent is not usable yet;
	 * force the startup work to be redone on the next call.
	 */
	if (!ta_p->primary_map)
		ta_p->initialized = 0;
	return (TD_OK);
}
2420Sstevel@tonic-gate
/*
 * Find the target's uberdata and read it into the thread agent.
 * The uberdata address comes from the _tdb_bootstrap pointer chain
 * when that has been initialized; otherwise we fall back to looking
 * up the _uberdata symbol directly.
 */
static td_err_e
td_read_bootstrap_data(td_thragent_t *ta_p)
{
	struct ps_prochandle *ph_p = ta_p->ph_p;
	psaddr_t bootstrap_addr;
	psaddr_t uberdata_addr;
	ps_err_e db_return;
	td_err_e return_val;
	int do_1;		/* non-zero: "libc.so.1" worked, not "libc.so" */

	switch (ta_p->initialized) {
	case 2:			/* fully initialized */
		return (TD_OK);
	case 1:			/* partially initialized */
		if (td_read_hash_size(ta_p) == 1)
			return (TD_OK);		/* still single-threaded */
		return (td_read_uberdata(ta_p));
	}

	/*
	 * Uninitialized -- do the startup work.
	 * We set ta_p->initialized to -1 to cut off recursive calls
	 * into libc_db by code in the provider of ps_pglobal_lookup().
	 */
	do_1 = 0;
	ta_p->initialized = -1;
	db_return = ps_pglobal_lookup(ph_p, TD_LIBRARY_NAME,
	    TD_BOOTSTRAP_NAME, &bootstrap_addr);
	if (db_return == PS_NOSYM) {
		/* retry with the versioned library name (see comment above) */
		do_1 = 1;
		db_return = ps_pglobal_lookup(ph_p, TD_LIBRARY_NAME_1,
		    TD_BOOTSTRAP_NAME, &bootstrap_addr);
	}
	if (db_return == PS_NOSYM)	/* libc is not linked yet */
		return (TD_NOLIBTHREAD);
	if (db_return != PS_OK)
		return (TD_ERR);
	/* look up _uberdata under whichever library name worked above */
	db_return = ps_pglobal_lookup(ph_p,
	    do_1? TD_LIBRARY_NAME_1 : TD_LIBRARY_NAME,
	    TD_UBERDATA_NAME, &uberdata_addr);
	if (db_return == PS_NOSYM)	/* libc is not linked yet */
		return (TD_NOLIBTHREAD);
	if (db_return != PS_OK)
		return (TD_ERR);

	/*
	 * Read the uberdata address into the thread agent structure.
	 */
	if (ta_p->model == PR_MODEL_NATIVE) {
		psaddr_t psaddr;
		if (ps_pdread(ph_p, bootstrap_addr,
		    &psaddr, sizeof (psaddr)) != PS_OK)
			return (TD_DBERR);
		if ((ta_p->bootstrap_addr = psaddr) == NULL)
			psaddr = uberdata_addr;	/* fall back to _uberdata */
		else if (ps_pdread(ph_p, psaddr,
		    &psaddr, sizeof (psaddr)) != PS_OK)
			return (TD_DBERR);
		if (psaddr == NULL) {
			/* primary linkmap in the tgt is not initialized */
			ta_p->bootstrap_addr = NULL;
			psaddr = uberdata_addr;
		}
		ta_p->uberdata_addr = psaddr;
	} else {
#if defined(_LP64) && defined(_SYSCALL32)
		/* pointers in a 32-bit target are 32 bits wide */
		caddr32_t psaddr;
		if (ps_pdread(ph_p, bootstrap_addr,
		    &psaddr, sizeof (psaddr)) != PS_OK)
			return (TD_DBERR);
		if ((ta_p->bootstrap_addr = (psaddr_t)psaddr) == NULL)
			psaddr = (caddr32_t)uberdata_addr;
		else if (ps_pdread(ph_p, (psaddr_t)psaddr,
		    &psaddr, sizeof (psaddr)) != PS_OK)
			return (TD_DBERR);
		if (psaddr == NULL) {
			/* primary linkmap in the tgt is not initialized */
			ta_p->bootstrap_addr = NULL;
			psaddr = (caddr32_t)uberdata_addr;
		}
		ta_p->uberdata_addr = (psaddr_t)psaddr;
#else
		return (TD_DBERR);
#endif	/* _SYSCALL32 */
	}

	if ((return_val = td_read_uberdata(ta_p)) != TD_OK)
		return (return_val);
	if (ta_p->bootstrap_addr == NULL)
		ta_p->initialized = 0;	/* not fully started; retry later */
	return (TD_OK);
}
3350Sstevel@tonic-gate
3360Sstevel@tonic-gate #pragma weak ps_kill
3370Sstevel@tonic-gate #pragma weak ps_lrolltoaddr
3380Sstevel@tonic-gate
3390Sstevel@tonic-gate /*
3400Sstevel@tonic-gate * Allocate a new agent process handle ("thread agent").
3410Sstevel@tonic-gate */
3420Sstevel@tonic-gate #pragma weak td_ta_new = __td_ta_new
/*
 * Create an agent for the process identified by ph_p and return it
 * through *ta_pp.  The process is stopped for the duration of the
 * setup and continued before returning, on both success and failure.
 */
td_err_e
__td_ta_new(struct ps_prochandle *ph_p, td_thragent_t **ta_pp)
{
	td_thragent_t *ta_p;
	int model;
	td_err_e return_val = TD_OK;

	if (ph_p == NULL)
		return (TD_BADPH);
	if (ta_pp == NULL)
		return (TD_ERR);
	*ta_pp = NULL;
	/* stop the process while we read its address space */
	if (ps_pstop(ph_p) != PS_OK)
		return (TD_DBERR);
	/*
	 * ps_pdmodel might not be defined if this is an older client.
	 * Make it a weak symbol and test if it exists before calling.
	 */
#pragma weak ps_pdmodel
	if (ps_pdmodel == NULL) {
		model = PR_MODEL_NATIVE;
	} else if (ps_pdmodel(ph_p, &model) != PS_OK) {
		(void) ps_pcontinue(ph_p);
		return (TD_ERR);
	}
	if ((ta_p = malloc(sizeof (*ta_p))) == NULL) {
		(void) ps_pcontinue(ph_p);
		return (TD_MALLOC);
	}

	/*
	 * Initialize the agent process handle.
	 * Pick up the symbol value we need from the target process.
	 */
	(void) memset(ta_p, 0, sizeof (*ta_p));
	ta_p->ph_p = ph_p;
	(void) rwlock_init(&ta_p->rwlock, USYNC_THREAD, NULL);
	ta_p->model = model;
	return_val = td_read_bootstrap_data(ta_p);

	/*
	 * Because the old libthread_db enabled lock tracking by default,
	 * we must also do it.  However, we do it only if the application
	 * provides the ps_kill() and ps_lrolltoaddr() interfaces.
	 * (dbx provides the ps_kill() and ps_lrolltoaddr() interfaces.)
	 */
	if (return_val == TD_OK && ps_kill != NULL && ps_lrolltoaddr != NULL) {
		register_sync_t oldenable;
		register_sync_t enable = REGISTER_SYNC_ENABLE;
		psaddr_t psaddr = ta_p->tdb_register_sync_addr;

		if (ps_pdread(ph_p, psaddr,
		    &oldenable, sizeof (oldenable)) != PS_OK)
			return_val = TD_DBERR;
		else if (oldenable != REGISTER_SYNC_OFF ||
		    ps_pdwrite(ph_p, psaddr,
		    &enable, sizeof (enable)) != PS_OK) {
			/*
			 * Lock tracking was already enabled or we
			 * failed to enable it, probably because we
			 * are examining a core file.  In either case
			 * set the sync_tracking flag non-zero to
			 * indicate that we should not attempt to
			 * disable lock tracking when we delete the
			 * agent process handle in td_ta_delete().
			 */
			ta_p->sync_tracking = 1;
		}
	}

	if (return_val == TD_OK)
		*ta_pp = ta_p;
	else
		free(ta_p);	/* failed: don't leak the agent */

	(void) ps_pcontinue(ph_p);
	return (return_val);
}
4210Sstevel@tonic-gate
4220Sstevel@tonic-gate /*
4230Sstevel@tonic-gate * Utility function to grab the readers lock and return the prochandle,
4240Sstevel@tonic-gate * given an agent process handle. Performs standard error checking.
4250Sstevel@tonic-gate * Returns non-NULL with the lock held, or NULL with the lock not held.
4260Sstevel@tonic-gate */
4270Sstevel@tonic-gate static struct ps_prochandle *
ph_lock_ta(td_thragent_t * ta_p,td_err_e * err)4280Sstevel@tonic-gate ph_lock_ta(td_thragent_t *ta_p, td_err_e *err)
4290Sstevel@tonic-gate {
4300Sstevel@tonic-gate struct ps_prochandle *ph_p = NULL;
4310Sstevel@tonic-gate td_err_e error;
4320Sstevel@tonic-gate
4330Sstevel@tonic-gate if (ta_p == NULL || ta_p->initialized == -1) {
4340Sstevel@tonic-gate *err = TD_BADTA;
4350Sstevel@tonic-gate } else if (rw_rdlock(&ta_p->rwlock) != 0) { /* can't happen? */
4360Sstevel@tonic-gate *err = TD_BADTA;
4370Sstevel@tonic-gate } else if ((ph_p = ta_p->ph_p) == NULL) {
4380Sstevel@tonic-gate (void) rw_unlock(&ta_p->rwlock);
4390Sstevel@tonic-gate *err = TD_BADPH;
4400Sstevel@tonic-gate } else if (ta_p->initialized != 2 &&
4410Sstevel@tonic-gate (error = td_read_bootstrap_data(ta_p)) != TD_OK) {
4420Sstevel@tonic-gate (void) rw_unlock(&ta_p->rwlock);
4430Sstevel@tonic-gate ph_p = NULL;
4440Sstevel@tonic-gate *err = error;
4450Sstevel@tonic-gate } else {
4460Sstevel@tonic-gate *err = TD_OK;
4470Sstevel@tonic-gate }
4480Sstevel@tonic-gate
4490Sstevel@tonic-gate return (ph_p);
4500Sstevel@tonic-gate }
4510Sstevel@tonic-gate
4520Sstevel@tonic-gate /*
4530Sstevel@tonic-gate * Utility function to grab the readers lock and return the prochandle,
4540Sstevel@tonic-gate * given an agent thread handle. Performs standard error checking.
4550Sstevel@tonic-gate * Returns non-NULL with the lock held, or NULL with the lock not held.
4560Sstevel@tonic-gate */
4570Sstevel@tonic-gate static struct ps_prochandle *
ph_lock_th(const td_thrhandle_t * th_p,td_err_e * err)4580Sstevel@tonic-gate ph_lock_th(const td_thrhandle_t *th_p, td_err_e *err)
4590Sstevel@tonic-gate {
4600Sstevel@tonic-gate if (th_p == NULL || th_p->th_unique == NULL) {
4610Sstevel@tonic-gate *err = TD_BADTH;
4620Sstevel@tonic-gate return (NULL);
4630Sstevel@tonic-gate }
4640Sstevel@tonic-gate return (ph_lock_ta(th_p->th_ta_p, err));
4650Sstevel@tonic-gate }
4660Sstevel@tonic-gate
4670Sstevel@tonic-gate /*
4680Sstevel@tonic-gate * Utility function to grab the readers lock and return the prochandle,
4690Sstevel@tonic-gate * given a synchronization object handle. Performs standard error checking.
4700Sstevel@tonic-gate * Returns non-NULL with the lock held, or NULL with the lock not held.
4710Sstevel@tonic-gate */
4720Sstevel@tonic-gate static struct ps_prochandle *
ph_lock_sh(const td_synchandle_t * sh_p,td_err_e * err)4730Sstevel@tonic-gate ph_lock_sh(const td_synchandle_t *sh_p, td_err_e *err)
4740Sstevel@tonic-gate {
4750Sstevel@tonic-gate if (sh_p == NULL || sh_p->sh_unique == NULL) {
4760Sstevel@tonic-gate *err = TD_BADSH;
4770Sstevel@tonic-gate return (NULL);
4780Sstevel@tonic-gate }
4790Sstevel@tonic-gate return (ph_lock_ta(sh_p->sh_ta_p, err));
4800Sstevel@tonic-gate }
4810Sstevel@tonic-gate
4820Sstevel@tonic-gate /*
4830Sstevel@tonic-gate * Unlock the agent process handle obtained from ph_lock_*().
4840Sstevel@tonic-gate */
static void
ph_unlock(td_thragent_t *ta_p)
{
	/* drop the readers lock taken by ph_lock_ta/th/sh() */
	(void) rw_unlock(&ta_p->rwlock);
}
4900Sstevel@tonic-gate
4910Sstevel@tonic-gate /*
4920Sstevel@tonic-gate * De-allocate an agent process handle,
4930Sstevel@tonic-gate * releasing all related resources.
4940Sstevel@tonic-gate *
4950Sstevel@tonic-gate * XXX -- This is hopelessly broken ---
4960Sstevel@tonic-gate * Storage for thread agent is not deallocated. The prochandle
4970Sstevel@tonic-gate * in the thread agent is set to NULL so that future uses of
4980Sstevel@tonic-gate * the thread agent can be detected and an error value returned.
4990Sstevel@tonic-gate * All functions in the external user interface that make
5000Sstevel@tonic-gate * use of the thread agent are expected
5010Sstevel@tonic-gate * to check for a NULL prochandle in the thread agent.
5020Sstevel@tonic-gate * All such functions are also expected to obtain a
5030Sstevel@tonic-gate * reader lock on the thread agent while it is using it.
5040Sstevel@tonic-gate */
5050Sstevel@tonic-gate #pragma weak td_ta_delete = __td_ta_delete
td_err_e
__td_ta_delete(td_thragent_t *ta_p)
{
	struct ps_prochandle *ph_p;

	/*
	 * This is the only place we grab the writer lock.
	 * We are going to NULL out the prochandle.
	 */
	if (ta_p == NULL || rw_wrlock(&ta_p->rwlock) != 0)
		return (TD_BADTA);
	if ((ph_p = ta_p->ph_p) == NULL) {	/* already deleted */
		(void) rw_unlock(&ta_p->rwlock);
		return (TD_BADPH);
	}
	/*
	 * If synch. tracking was disabled when td_ta_new() was called and
	 * if td_ta_sync_tracking_enable() was never called, then disable
	 * synch. tracking (it was enabled by default in td_ta_new()).
	 */
	if (ta_p->sync_tracking == 0 &&
	    ps_kill != NULL && ps_lrolltoaddr != NULL) {
		register_sync_t enable = REGISTER_SYNC_DISABLE;

		/* best effort; the write may fail on a core file */
		(void) ps_pdwrite(ph_p, ta_p->tdb_register_sync_addr,
		    &enable, sizeof (enable));
	}
	ta_p->ph_p = NULL;	/* all future uses of this agent now fail */
	(void) rw_unlock(&ta_p->rwlock);
	return (TD_OK);
}
5370Sstevel@tonic-gate
5380Sstevel@tonic-gate /*
5390Sstevel@tonic-gate * Map an agent process handle to a client prochandle.
5400Sstevel@tonic-gate * Currently unused by dbx.
5410Sstevel@tonic-gate */
5420Sstevel@tonic-gate #pragma weak td_ta_get_ph = __td_ta_get_ph
5430Sstevel@tonic-gate td_err_e
__td_ta_get_ph(td_thragent_t * ta_p,struct ps_prochandle ** ph_pp)5440Sstevel@tonic-gate __td_ta_get_ph(td_thragent_t *ta_p, struct ps_prochandle **ph_pp)
5450Sstevel@tonic-gate {
5460Sstevel@tonic-gate td_err_e return_val;
5470Sstevel@tonic-gate
5480Sstevel@tonic-gate if (ph_pp != NULL) /* protect stupid callers */
5490Sstevel@tonic-gate *ph_pp = NULL;
5500Sstevel@tonic-gate if (ph_pp == NULL)
5510Sstevel@tonic-gate return (TD_ERR);
5520Sstevel@tonic-gate if ((*ph_pp = ph_lock_ta(ta_p, &return_val)) == NULL)
5530Sstevel@tonic-gate return (return_val);
5540Sstevel@tonic-gate ph_unlock(ta_p);
5550Sstevel@tonic-gate return (TD_OK);
5560Sstevel@tonic-gate }
5570Sstevel@tonic-gate
5580Sstevel@tonic-gate /*
5590Sstevel@tonic-gate * Set the process's suggested concurrency level.
5600Sstevel@tonic-gate * This is a no-op in a one-level model.
5610Sstevel@tonic-gate * Currently unused by dbx.
5620Sstevel@tonic-gate */
5630Sstevel@tonic-gate #pragma weak td_ta_setconcurrency = __td_ta_setconcurrency
5640Sstevel@tonic-gate /* ARGSUSED1 */
5650Sstevel@tonic-gate td_err_e
__td_ta_setconcurrency(const td_thragent_t * ta_p,int level)5660Sstevel@tonic-gate __td_ta_setconcurrency(const td_thragent_t *ta_p, int level)
5670Sstevel@tonic-gate {
5680Sstevel@tonic-gate if (ta_p == NULL)
5690Sstevel@tonic-gate return (TD_BADTA);
5700Sstevel@tonic-gate if (ta_p->ph_p == NULL)
5710Sstevel@tonic-gate return (TD_BADPH);
5720Sstevel@tonic-gate return (TD_OK);
5730Sstevel@tonic-gate }
5740Sstevel@tonic-gate
5750Sstevel@tonic-gate /*
5760Sstevel@tonic-gate * Get the number of threads in the process.
5770Sstevel@tonic-gate */
5780Sstevel@tonic-gate #pragma weak td_ta_get_nthreads = __td_ta_get_nthreads
5790Sstevel@tonic-gate td_err_e
__td_ta_get_nthreads(td_thragent_t * ta_p,int * nthread_p)5800Sstevel@tonic-gate __td_ta_get_nthreads(td_thragent_t *ta_p, int *nthread_p)
5810Sstevel@tonic-gate {
5820Sstevel@tonic-gate struct ps_prochandle *ph_p;
5830Sstevel@tonic-gate td_err_e return_val;
5840Sstevel@tonic-gate int nthreads;
5850Sstevel@tonic-gate int nzombies;
5860Sstevel@tonic-gate psaddr_t nthreads_addr;
5870Sstevel@tonic-gate psaddr_t nzombies_addr;
5880Sstevel@tonic-gate
5890Sstevel@tonic-gate if (ta_p->model == PR_MODEL_NATIVE) {
5900Sstevel@tonic-gate nthreads_addr = ta_p->uberdata_addr +
5916247Sraf offsetof(uberdata_t, nthreads);
5920Sstevel@tonic-gate nzombies_addr = ta_p->uberdata_addr +
5936247Sraf offsetof(uberdata_t, nzombies);
5940Sstevel@tonic-gate } else {
5950Sstevel@tonic-gate #if defined(_LP64) && defined(_SYSCALL32)
5960Sstevel@tonic-gate nthreads_addr = ta_p->uberdata_addr +
5976247Sraf offsetof(uberdata32_t, nthreads);
5980Sstevel@tonic-gate nzombies_addr = ta_p->uberdata_addr +
5996247Sraf offsetof(uberdata32_t, nzombies);
6000Sstevel@tonic-gate #else
6010Sstevel@tonic-gate nthreads_addr = 0;
6020Sstevel@tonic-gate nzombies_addr = 0;
6030Sstevel@tonic-gate #endif /* _SYSCALL32 */
6040Sstevel@tonic-gate }
6050Sstevel@tonic-gate
6060Sstevel@tonic-gate if (nthread_p == NULL)
6070Sstevel@tonic-gate return (TD_ERR);
6080Sstevel@tonic-gate if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
6090Sstevel@tonic-gate return (return_val);
6100Sstevel@tonic-gate if (ps_pdread(ph_p, nthreads_addr, &nthreads, sizeof (int)) != PS_OK)
6110Sstevel@tonic-gate return_val = TD_DBERR;
6120Sstevel@tonic-gate if (ps_pdread(ph_p, nzombies_addr, &nzombies, sizeof (int)) != PS_OK)
6130Sstevel@tonic-gate return_val = TD_DBERR;
6140Sstevel@tonic-gate ph_unlock(ta_p);
6150Sstevel@tonic-gate if (return_val == TD_OK)
6160Sstevel@tonic-gate *nthread_p = nthreads + nzombies;
6170Sstevel@tonic-gate return (return_val);
6180Sstevel@tonic-gate }
6190Sstevel@tonic-gate
/*
 * Callback argument for td_mapper_id2thr(), below.
 */
typedef struct {
	thread_t tid;		/* thread id being searched for */
	int found;		/* set non-zero when a match is found */
	td_thrhandle_t th;	/* the matching thread handle */
} td_mapper_param_t;
6250Sstevel@tonic-gate
6260Sstevel@tonic-gate /*
6270Sstevel@tonic-gate * Check the value in data against the thread id.
6280Sstevel@tonic-gate * If it matches, return 1 to terminate iterations.
6290Sstevel@tonic-gate * This function is used by td_ta_map_id2thr() to map a tid to a thread handle.
6300Sstevel@tonic-gate */
6310Sstevel@tonic-gate static int
td_mapper_id2thr(td_thrhandle_t * th_p,td_mapper_param_t * data)6320Sstevel@tonic-gate td_mapper_id2thr(td_thrhandle_t *th_p, td_mapper_param_t *data)
6330Sstevel@tonic-gate {
6340Sstevel@tonic-gate td_thrinfo_t ti;
6350Sstevel@tonic-gate
6360Sstevel@tonic-gate if (__td_thr_get_info(th_p, &ti) == TD_OK &&
6370Sstevel@tonic-gate data->tid == ti.ti_tid) {
6380Sstevel@tonic-gate data->found = 1;
6390Sstevel@tonic-gate data->th = *th_p;
6400Sstevel@tonic-gate return (1);
6410Sstevel@tonic-gate }
6420Sstevel@tonic-gate return (0);
6430Sstevel@tonic-gate }
6440Sstevel@tonic-gate
/*
 * Given a thread identifier, return the corresponding thread handle.
 *
 * Returns:
 *	TD_OK		*th_p filled in
 *	TD_BADTA	ta_p is NULL
 *	TD_BADTH	th_p is NULL
 *	TD_NOTHR	tid is 0 or no such thread exists
 *	other		error propagated from __td_ta_thr_iter()
 */
#pragma weak td_ta_map_id2thr = __td_ta_map_id2thr
td_err_e
__td_ta_map_id2thr(td_thragent_t *ta_p, thread_t tid,
	td_thrhandle_t *th_p)
{
	td_err_e	return_val;
	td_mapper_param_t data;

	/*
	 * Fast path: if the process has exactly one thread, answer from
	 * the cached single_lwpid/single_ulwp_addr without iterating.
	 * Note that ta_p->initialized is deliberately tested twice:
	 * td_read_uberdata() can modify the agent's state, so the
	 * second test re-validates it after that call.
	 */
	if (th_p != NULL &&	/* optimize for a single thread */
	    ta_p != NULL &&
	    ta_p->initialized == 1 &&
	    (td_read_hash_size(ta_p) == 1 ||
	    td_read_uberdata(ta_p) == TD_OK) &&
	    ta_p->initialized == 1 &&
	    ta_p->single_lwpid == tid) {
		th_p->th_ta_p = ta_p;
		if ((th_p->th_unique = ta_p->single_ulwp_addr) == 0)
			return (TD_NOTHR);
		return (TD_OK);
	}

	/*
	 * LOCKING EXCEPTION - Locking is not required here because
	 * the locking and checking will be done in __td_ta_thr_iter.
	 */

	if (ta_p == NULL)
		return (TD_BADTA);
	if (th_p == NULL)
		return (TD_BADTH);
	if (tid == 0)
		return (TD_NOTHR);

	/*
	 * Slow path: walk every thread, letting td_mapper_id2thr()
	 * stop the walk when it finds a matching tid.  The function
	 * pointer cast adapts the typed callback to td_thr_iter_f.
	 */
	data.tid = tid;
	data.found = 0;
	return_val = __td_ta_thr_iter(ta_p,
	    (td_thr_iter_f *)td_mapper_id2thr, (void *)&data,
	    TD_THR_ANY_STATE, TD_THR_LOWEST_PRIORITY,
	    TD_SIGNO_MASK, TD_THR_ANY_USER_FLAGS);
	if (return_val == TD_OK) {
		/* iteration succeeded; distinguish "found" from "absent" */
		if (data.found == 0)
			return_val = TD_NOTHR;
		else
			*th_p = data.th;
	}

	return (return_val);
}
6960Sstevel@tonic-gate
6970Sstevel@tonic-gate /*
6980Sstevel@tonic-gate * Map the address of a synchronization object to a sync. object handle.
6990Sstevel@tonic-gate */
7000Sstevel@tonic-gate #pragma weak td_ta_map_addr2sync = __td_ta_map_addr2sync
7010Sstevel@tonic-gate td_err_e
__td_ta_map_addr2sync(td_thragent_t * ta_p,psaddr_t addr,td_synchandle_t * sh_p)7020Sstevel@tonic-gate __td_ta_map_addr2sync(td_thragent_t *ta_p, psaddr_t addr, td_synchandle_t *sh_p)
7030Sstevel@tonic-gate {
7040Sstevel@tonic-gate struct ps_prochandle *ph_p;
7050Sstevel@tonic-gate td_err_e return_val;
7060Sstevel@tonic-gate uint16_t sync_magic;
7070Sstevel@tonic-gate
7080Sstevel@tonic-gate if (sh_p == NULL)
7090Sstevel@tonic-gate return (TD_BADSH);
7100Sstevel@tonic-gate if (addr == NULL)
7110Sstevel@tonic-gate return (TD_ERR);
7120Sstevel@tonic-gate if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
7130Sstevel@tonic-gate return (return_val);
7140Sstevel@tonic-gate /*
7150Sstevel@tonic-gate * Check the magic number of the sync. object to make sure it's valid.
7160Sstevel@tonic-gate * The magic number is at the same offset for all sync. objects.
7170Sstevel@tonic-gate */
7180Sstevel@tonic-gate if (ps_pdread(ph_p, (psaddr_t)&((mutex_t *)addr)->mutex_magic,
7190Sstevel@tonic-gate &sync_magic, sizeof (sync_magic)) != PS_OK) {
7200Sstevel@tonic-gate ph_unlock(ta_p);
7210Sstevel@tonic-gate return (TD_BADSH);
7220Sstevel@tonic-gate }
7230Sstevel@tonic-gate ph_unlock(ta_p);
7240Sstevel@tonic-gate if (sync_magic != MUTEX_MAGIC && sync_magic != COND_MAGIC &&
7250Sstevel@tonic-gate sync_magic != SEMA_MAGIC && sync_magic != RWL_MAGIC)
7260Sstevel@tonic-gate return (TD_BADSH);
7270Sstevel@tonic-gate /*
7280Sstevel@tonic-gate * Just fill in the appropriate fields of the sync. handle.
7290Sstevel@tonic-gate */
7300Sstevel@tonic-gate sh_p->sh_ta_p = (td_thragent_t *)ta_p;
7310Sstevel@tonic-gate sh_p->sh_unique = addr;
7320Sstevel@tonic-gate return (TD_OK);
7330Sstevel@tonic-gate }
7340Sstevel@tonic-gate
/*
 * Iterate over the set of global TSD keys.
 * The call back function is called with three arguments,
 * a key, a pointer to the destructor function, and the cbdata pointer.
 * Currently unused by dbx.
 *
 * The target process is stopped for the duration of the walk and
 * resumed before returning.  The destructor array is copied out of
 * the target in one read, sized for the target's data model.
 */
#pragma weak td_ta_tsd_iter = __td_ta_tsd_iter
td_err_e
__td_ta_tsd_iter(td_thragent_t *ta_p, td_key_iter_f *cb, void *cbdata_p)
{
	struct ps_prochandle *ph_p;
	td_err_e	return_val;
	int		key;
	int		numkeys;	/* number of allocated TSD keys */
	psaddr_t	dest_addr;	/* target address of destructor array */
	psaddr_t	*destructors = NULL;	/* local copy of the array */
	PFrV		destructor;

	if (cb == NULL)
		return (TD_ERR);
	/* ph_lock_ta() sets return_val; TD_OK when the lock succeeds */
	if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
		return (return_val);
	if (ps_pstop(ph_p) != PS_OK) {
		ph_unlock(ta_p);
		return (TD_DBERR);
	}

	/*
	 * Read the tsd_metadata from the target's uberdata, using the
	 * layout that matches the target's data model (native vs. 32-bit
	 * target under a 64-bit debugger).
	 */
	if (ta_p->model == PR_MODEL_NATIVE) {
		tsd_metadata_t tsdm;

		if (ps_pdread(ph_p,
		    ta_p->uberdata_addr + offsetof(uberdata_t, tsd_metadata),
		    &tsdm, sizeof (tsdm)) != PS_OK)
			return_val = TD_DBERR;
		else {
			numkeys = tsdm.tsdm_nused;
			dest_addr = (psaddr_t)tsdm.tsdm_destro;
			if (numkeys > 0)
				destructors =
				    malloc(numkeys * sizeof (psaddr_t));
		}
	} else {
#if defined(_LP64) && defined(_SYSCALL32)
		tsd_metadata32_t tsdm;

		if (ps_pdread(ph_p,
		    ta_p->uberdata_addr + offsetof(uberdata32_t, tsd_metadata),
		    &tsdm, sizeof (tsdm)) != PS_OK)
			return_val = TD_DBERR;
		else {
			numkeys = tsdm.tsdm_nused;
			dest_addr = (psaddr_t)tsdm.tsdm_destro;
			if (numkeys > 0)
				destructors =
				    malloc(numkeys * sizeof (caddr32_t));
		}
#else
		return_val = TD_DBERR;
#endif	/* _SYSCALL32 */
	}

	/*
	 * NOTE: numkeys (and dest_addr) are assigned only on the
	 * return_val == TD_OK paths above; the short-circuit || below
	 * guarantees they are never read otherwise.
	 */
	if (return_val != TD_OK || numkeys <= 0) {
		(void) ps_pcontinue(ph_p);
		ph_unlock(ta_p);
		return (return_val);
	}

	if (destructors == NULL)
		return_val = TD_MALLOC;
	else if (ta_p->model == PR_MODEL_NATIVE) {
		if (ps_pdread(ph_p, dest_addr,
		    destructors, numkeys * sizeof (psaddr_t)) != PS_OK)
			return_val = TD_DBERR;
		else {
			/*
			 * Keys start at 1; slot 0 is presumably never
			 * handed out -- TODO confirm against the
			 * tsd_metadata allocation code in libc.
			 */
			for (key = 1; key < numkeys; key++) {
				destructor = (PFrV)destructors[key];
				if (destructor != TSD_UNALLOCATED &&
				    (*cb)(key, destructor, cbdata_p))
					break;	/* callback stopped us */
			}
		}
#if defined(_LP64) && defined(_SYSCALL32)
	} else {
		/* 32-bit target: reinterpret the buffer as caddr32_t[] */
		caddr32_t *destructors32 = (caddr32_t *)destructors;
		caddr32_t destruct32;

		if (ps_pdread(ph_p, dest_addr,
		    destructors32, numkeys * sizeof (caddr32_t)) != PS_OK)
			return_val = TD_DBERR;
		else {
			for (key = 1; key < numkeys; key++) {
				destruct32 = destructors32[key];
				if (destruct32 != (caddr32_t)TSD_UNALLOCATED &&
				    (*cb)(key, (PFrV)(uintptr_t)destruct32,
				    cbdata_p))
					break;
			}
		}
#endif	/* _SYSCALL32 */
	}

	if (destructors)
		free(destructors);
	(void) ps_pcontinue(ph_p);
	ph_unlock(ta_p);
	return (return_val);
}
8420Sstevel@tonic-gate
8430Sstevel@tonic-gate int
sigequalset(const sigset_t * s1,const sigset_t * s2)8440Sstevel@tonic-gate sigequalset(const sigset_t *s1, const sigset_t *s2)
8450Sstevel@tonic-gate {
8466247Sraf return (
8476247Sraf s1->__sigbits[0] == s2->__sigbits[0] &&
8486247Sraf s1->__sigbits[1] == s2->__sigbits[1] &&
8496247Sraf s1->__sigbits[2] == s2->__sigbits[2] &&
8506247Sraf s1->__sigbits[3] == s2->__sigbits[3]);
8510Sstevel@tonic-gate }
8520Sstevel@tonic-gate
8530Sstevel@tonic-gate /*
8540Sstevel@tonic-gate * Description:
8550Sstevel@tonic-gate * Iterate over all threads. For each thread call
8560Sstevel@tonic-gate * the function pointed to by "cb" with a pointer
8570Sstevel@tonic-gate * to a thread handle, and a pointer to data which
8580Sstevel@tonic-gate * can be NULL. Only call td_thr_iter_f() on threads
8590Sstevel@tonic-gate * which match the properties of state, ti_pri,
8600Sstevel@tonic-gate * ti_sigmask_p, and ti_user_flags. If cb returns
8610Sstevel@tonic-gate * a non-zero value, terminate iterations.
8620Sstevel@tonic-gate *
8630Sstevel@tonic-gate * Input:
8640Sstevel@tonic-gate * *ta_p - thread agent
8650Sstevel@tonic-gate * *cb - call back function defined by user.
8660Sstevel@tonic-gate * td_thr_iter_f() takes a thread handle and
8670Sstevel@tonic-gate * cbdata_p as a parameter.
8680Sstevel@tonic-gate * cbdata_p - parameter for td_thr_iter_f().
8690Sstevel@tonic-gate *
8700Sstevel@tonic-gate * state - state of threads of interest. A value of
8710Sstevel@tonic-gate * TD_THR_ANY_STATE from enum td_thr_state_e
8720Sstevel@tonic-gate * does not restrict iterations by state.
8730Sstevel@tonic-gate * ti_pri - lower bound of priorities of threads of
8740Sstevel@tonic-gate * interest. A value of TD_THR_LOWEST_PRIORITY
8750Sstevel@tonic-gate * defined in thread_db.h does not restrict
8760Sstevel@tonic-gate * iterations by priority. A thread with priority
8770Sstevel@tonic-gate * less than ti_pri will NOT be passed to the callback
8780Sstevel@tonic-gate * function.
8790Sstevel@tonic-gate * ti_sigmask_p - signal mask of threads of interest.
8800Sstevel@tonic-gate * A value of TD_SIGNO_MASK defined in thread_db.h
8810Sstevel@tonic-gate * does not restrict iterations by signal mask.
8820Sstevel@tonic-gate * ti_user_flags - user flags of threads of interest. A
8830Sstevel@tonic-gate * value of TD_THR_ANY_USER_FLAGS defined in thread_db.h
8840Sstevel@tonic-gate * does not restrict iterations by user flags.
8850Sstevel@tonic-gate */
#pragma weak td_ta_thr_iter = __td_ta_thr_iter
td_err_e
__td_ta_thr_iter(td_thragent_t *ta_p, td_thr_iter_f *cb,
	void *cbdata_p, td_thr_state_e state, int ti_pri,
	sigset_t *ti_sigmask_p, unsigned ti_user_flags)
{
	struct ps_prochandle *ph_p;
	psaddr_t	first_lwp_addr;		/* head of list being walked */
	psaddr_t	first_zombie_addr;	/* head of the zombie list */
	psaddr_t	curr_lwp_addr;
	psaddr_t	next_lwp_addr;
	td_thrhandle_t	th;
	ps_err_e	db_return;
	ps_err_e	db_return2;
	td_err_e	return_val;

	if (cb == NULL)
		return (TD_ERR);
	/*
	 * If state is not within bound, short circuit.
	 */
	if (state < TD_THR_ANY_STATE || state > TD_THR_STOPPED_ASLEEP)
		return (TD_OK);

	/* ph_lock_ta() sets return_val; TD_OK when the lock succeeds */
	if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
		return (return_val);
	if (ps_pstop(ph_p) != PS_OK) {
		ph_unlock(ta_p);
		return (TD_DBERR);
	}

	/*
	 * For each ulwp_t in the circular linked lists pointed
	 * to by "all_lwps" and "all_zombies":
	 * (1) Filter each thread.
	 * (2) Create the thread_object for each thread that passes.
	 * (3) Call the call back function on each thread.
	 */

	/*
	 * Fetch the heads of both lists from the target's uberdata,
	 * using the layout matching the target's data model.
	 */
	if (ta_p->model == PR_MODEL_NATIVE) {
		db_return = ps_pdread(ph_p,
		    ta_p->uberdata_addr + offsetof(uberdata_t, all_lwps),
		    &first_lwp_addr, sizeof (first_lwp_addr));
		db_return2 = ps_pdread(ph_p,
		    ta_p->uberdata_addr + offsetof(uberdata_t, all_zombies),
		    &first_zombie_addr, sizeof (first_zombie_addr));
	} else {
#if defined(_LP64) && defined(_SYSCALL32)
		caddr32_t addr32;

		db_return = ps_pdread(ph_p,
		    ta_p->uberdata_addr + offsetof(uberdata32_t, all_lwps),
		    &addr32, sizeof (addr32));
		first_lwp_addr = addr32;
		db_return2 = ps_pdread(ph_p,
		    ta_p->uberdata_addr + offsetof(uberdata32_t, all_zombies),
		    &addr32, sizeof (addr32));
		first_zombie_addr = addr32;
#else	/* _SYSCALL32 */
		db_return = PS_ERR;
		db_return2 = PS_ERR;
#endif	/* _SYSCALL32 */
	}
	/* collapse the two read results into one status */
	if (db_return == PS_OK)
		db_return = db_return2;

	/*
	 * If first_lwp_addr and first_zombie_addr are both NULL,
	 * libc must not yet be initialized or all threads have
	 * exited.  Return TD_NOTHR and all will be well.
	 */
	if (db_return == PS_OK &&
	    first_lwp_addr == NULL && first_zombie_addr == NULL) {
		(void) ps_pcontinue(ph_p);
		ph_unlock(ta_p);
		return (TD_NOTHR);
	}
	if (db_return != PS_OK) {
		(void) ps_pcontinue(ph_p);
		ph_unlock(ta_p);
		return (TD_DBERR);
	}

	/*
	 * Run down the lists of all living and dead lwps.
	 * If there are no live threads, start directly on the
	 * zombie list.
	 */
	if (first_lwp_addr == NULL)
		first_lwp_addr = first_zombie_addr;
	curr_lwp_addr = first_lwp_addr;
	for (;;) {
		td_thr_state_e ts_state;
		int userpri;
		unsigned userflags;
		sigset_t mask;

		/*
		 * Read the ulwp struct.  If the full-size read fails,
		 * retry with only REPLACEMENT_SIZE bytes (the comma
		 * expression zeroes the struct first): a dead thread's
		 * ulwp may have been reclaimed down to a smaller
		 * replacement block, so a shorter read can still
		 * succeed where the full read faulted.
		 */
		if (ta_p->model == PR_MODEL_NATIVE) {
			ulwp_t ulwp;

			if (ps_pdread(ph_p, curr_lwp_addr,
			    &ulwp, sizeof (ulwp)) != PS_OK &&
			    ((void) memset(&ulwp, 0, sizeof (ulwp)),
			    ps_pdread(ph_p, curr_lwp_addr,
			    &ulwp, REPLACEMENT_SIZE)) != PS_OK) {
				return_val = TD_DBERR;
				break;
			}
			next_lwp_addr = (psaddr_t)ulwp.ul_forw;

			ts_state = ulwp.ul_dead? TD_THR_ZOMBIE :
			    ulwp.ul_stop? TD_THR_STOPPED :
			    ulwp.ul_wchan? TD_THR_SLEEP :
			    TD_THR_ACTIVE;
			userpri = ulwp.ul_pri;
			userflags = ulwp.ul_usropts;
			if (ulwp.ul_dead)
				(void) sigemptyset(&mask);
			else
				mask = *(sigset_t *)&ulwp.ul_sigmask;
		} else {
#if defined(_LP64) && defined(_SYSCALL32)
			/* same logic as above, for a 32-bit target */
			ulwp32_t ulwp;

			if (ps_pdread(ph_p, curr_lwp_addr,
			    &ulwp, sizeof (ulwp)) != PS_OK &&
			    ((void) memset(&ulwp, 0, sizeof (ulwp)),
			    ps_pdread(ph_p, curr_lwp_addr,
			    &ulwp, REPLACEMENT_SIZE32)) != PS_OK) {
				return_val = TD_DBERR;
				break;
			}
			next_lwp_addr = (psaddr_t)ulwp.ul_forw;

			ts_state = ulwp.ul_dead? TD_THR_ZOMBIE :
			    ulwp.ul_stop? TD_THR_STOPPED :
			    ulwp.ul_wchan? TD_THR_SLEEP :
			    TD_THR_ACTIVE;
			userpri = ulwp.ul_pri;
			userflags = ulwp.ul_usropts;
			if (ulwp.ul_dead)
				(void) sigemptyset(&mask);
			else
				mask = *(sigset_t *)&ulwp.ul_sigmask;
#else	/* _SYSCALL32 */
			return_val = TD_ERR;
			break;
#endif	/* _SYSCALL32 */
		}

		/*
		 * Filter on state, priority, sigmask, and user flags.
		 */

		if ((state != ts_state) &&
		    (state != TD_THR_ANY_STATE))
			goto advance;

		/* threads below the requested priority are skipped */
		if (ti_pri > userpri)
			goto advance;

		if (ti_sigmask_p != TD_SIGNO_MASK &&
		    !sigequalset(ti_sigmask_p, &mask))
			goto advance;

		if (ti_user_flags != userflags &&
		    ti_user_flags != (unsigned)TD_THR_ANY_USER_FLAGS)
			goto advance;

		/*
		 * Call back - break if the return
		 * from the call back is non-zero.
		 */
		th.th_ta_p = (td_thragent_t *)ta_p;
		th.th_unique = curr_lwp_addr;
		if ((*cb)(&th, cbdata_p))
			break;

advance:
		if ((curr_lwp_addr = next_lwp_addr) == first_lwp_addr) {
			/*
			 * Switch to the zombie list, unless it is NULL
			 * or we have already been doing the zombie list,
			 * in which case terminate the loop.
			 */
			if (first_zombie_addr == NULL ||
			    first_lwp_addr == first_zombie_addr)
				break;
			curr_lwp_addr = first_lwp_addr = first_zombie_addr;
		}
	}

	(void) ps_pcontinue(ph_p);
	ph_unlock(ta_p);
	return (return_val);
}
10830Sstevel@tonic-gate
10840Sstevel@tonic-gate /*
10850Sstevel@tonic-gate * Enable or disable process synchronization object tracking.
10860Sstevel@tonic-gate * Currently unused by dbx.
10870Sstevel@tonic-gate */
10880Sstevel@tonic-gate #pragma weak td_ta_sync_tracking_enable = __td_ta_sync_tracking_enable
10890Sstevel@tonic-gate td_err_e
__td_ta_sync_tracking_enable(td_thragent_t * ta_p,int onoff)10900Sstevel@tonic-gate __td_ta_sync_tracking_enable(td_thragent_t *ta_p, int onoff)
10910Sstevel@tonic-gate {
10920Sstevel@tonic-gate struct ps_prochandle *ph_p;
10930Sstevel@tonic-gate td_err_e return_val;
10940Sstevel@tonic-gate register_sync_t enable;
10950Sstevel@tonic-gate
10960Sstevel@tonic-gate if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
10970Sstevel@tonic-gate return (return_val);
10980Sstevel@tonic-gate /*
10990Sstevel@tonic-gate * Values of tdb_register_sync in the victim process:
11000Sstevel@tonic-gate * REGISTER_SYNC_ENABLE enables registration of synch objects
11010Sstevel@tonic-gate * REGISTER_SYNC_DISABLE disables registration of synch objects
11020Sstevel@tonic-gate * These cause the table to be cleared and tdb_register_sync set to:
11030Sstevel@tonic-gate * REGISTER_SYNC_ON registration in effect
11040Sstevel@tonic-gate * REGISTER_SYNC_OFF registration not in effect
11050Sstevel@tonic-gate */
11060Sstevel@tonic-gate enable = onoff? REGISTER_SYNC_ENABLE : REGISTER_SYNC_DISABLE;
11070Sstevel@tonic-gate if (ps_pdwrite(ph_p, ta_p->tdb_register_sync_addr,
11080Sstevel@tonic-gate &enable, sizeof (enable)) != PS_OK)
11090Sstevel@tonic-gate return_val = TD_DBERR;
11100Sstevel@tonic-gate /*
11110Sstevel@tonic-gate * Remember that this interface was called (see td_ta_delete()).
11120Sstevel@tonic-gate */
11130Sstevel@tonic-gate ta_p->sync_tracking = 1;
11140Sstevel@tonic-gate ph_unlock(ta_p);
11150Sstevel@tonic-gate return (return_val);
11160Sstevel@tonic-gate }
11170Sstevel@tonic-gate
11180Sstevel@tonic-gate /*
11190Sstevel@tonic-gate * Iterate over all known synchronization variables.
11200Sstevel@tonic-gate * It is very possible that the list generated is incomplete,
11210Sstevel@tonic-gate * because the iterator can only find synchronization variables
11220Sstevel@tonic-gate * that have been registered by the process since synchronization
11230Sstevel@tonic-gate * object registration was enabled.
11240Sstevel@tonic-gate * The call back function cb is called for each synchronization
11250Sstevel@tonic-gate * variable with two arguments: a pointer to the synchronization
11260Sstevel@tonic-gate * handle and the passed-in argument cbdata.
11270Sstevel@tonic-gate * If cb returns a non-zero value, iterations are terminated.
11280Sstevel@tonic-gate */
11290Sstevel@tonic-gate #pragma weak td_ta_sync_iter = __td_ta_sync_iter
11300Sstevel@tonic-gate td_err_e
__td_ta_sync_iter(td_thragent_t * ta_p,td_sync_iter_f * cb,void * cbdata)11310Sstevel@tonic-gate __td_ta_sync_iter(td_thragent_t *ta_p, td_sync_iter_f *cb, void *cbdata)
11320Sstevel@tonic-gate {
11330Sstevel@tonic-gate struct ps_prochandle *ph_p;
11340Sstevel@tonic-gate td_err_e return_val;
11350Sstevel@tonic-gate int i;
11360Sstevel@tonic-gate register_sync_t enable;
11370Sstevel@tonic-gate psaddr_t next_desc;
11380Sstevel@tonic-gate tdb_sync_stats_t sync_stats;
11390Sstevel@tonic-gate td_synchandle_t synchandle;
11400Sstevel@tonic-gate psaddr_t psaddr;
11410Sstevel@tonic-gate void *vaddr;
11420Sstevel@tonic-gate uint64_t *sync_addr_hash = NULL;
11430Sstevel@tonic-gate
11440Sstevel@tonic-gate if (cb == NULL)
11450Sstevel@tonic-gate return (TD_ERR);
11460Sstevel@tonic-gate if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
11470Sstevel@tonic-gate return (return_val);
11480Sstevel@tonic-gate if (ps_pstop(ph_p) != PS_OK) {
11490Sstevel@tonic-gate ph_unlock(ta_p);
11500Sstevel@tonic-gate return (TD_DBERR);
11510Sstevel@tonic-gate }
11520Sstevel@tonic-gate if (ps_pdread(ph_p, ta_p->tdb_register_sync_addr,
11530Sstevel@tonic-gate &enable, sizeof (enable)) != PS_OK) {
11540Sstevel@tonic-gate return_val = TD_DBERR;
11550Sstevel@tonic-gate goto out;
11560Sstevel@tonic-gate }
11570Sstevel@tonic-gate if (enable != REGISTER_SYNC_ON)
11580Sstevel@tonic-gate goto out;
11590Sstevel@tonic-gate
11600Sstevel@tonic-gate /*
11610Sstevel@tonic-gate * First read the hash table.
11620Sstevel@tonic-gate * The hash table is large; allocate with mmap().
11630Sstevel@tonic-gate */
11640Sstevel@tonic-gate if ((vaddr = mmap(NULL, TDB_HASH_SIZE * sizeof (uint64_t),
11650Sstevel@tonic-gate PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANON, -1, (off_t)0))
11660Sstevel@tonic-gate == MAP_FAILED) {
11670Sstevel@tonic-gate return_val = TD_MALLOC;
11680Sstevel@tonic-gate goto out;
11690Sstevel@tonic-gate }
11700Sstevel@tonic-gate sync_addr_hash = vaddr;
11710Sstevel@tonic-gate
11720Sstevel@tonic-gate if (ta_p->model == PR_MODEL_NATIVE) {
11730Sstevel@tonic-gate if (ps_pdread(ph_p, ta_p->uberdata_addr +
11740Sstevel@tonic-gate offsetof(uberdata_t, tdb.tdb_sync_addr_hash),
11750Sstevel@tonic-gate &psaddr, sizeof (&psaddr)) != PS_OK) {
11760Sstevel@tonic-gate return_val = TD_DBERR;
11770Sstevel@tonic-gate goto out;
11780Sstevel@tonic-gate }
11790Sstevel@tonic-gate } else {
11800Sstevel@tonic-gate #ifdef _SYSCALL32
11810Sstevel@tonic-gate caddr32_t addr;
11820Sstevel@tonic-gate
11830Sstevel@tonic-gate if (ps_pdread(ph_p, ta_p->uberdata_addr +
11840Sstevel@tonic-gate offsetof(uberdata32_t, tdb.tdb_sync_addr_hash),
11850Sstevel@tonic-gate &addr, sizeof (addr)) != PS_OK) {
11860Sstevel@tonic-gate return_val = TD_DBERR;
11870Sstevel@tonic-gate goto out;
11880Sstevel@tonic-gate }
11890Sstevel@tonic-gate psaddr = addr;
11900Sstevel@tonic-gate #else
11910Sstevel@tonic-gate return_val = TD_ERR;
11920Sstevel@tonic-gate goto out;
11930Sstevel@tonic-gate #endif /* _SYSCALL32 */
11940Sstevel@tonic-gate }
11950Sstevel@tonic-gate
11960Sstevel@tonic-gate if (psaddr == NULL)
11970Sstevel@tonic-gate goto out;
11980Sstevel@tonic-gate if (ps_pdread(ph_p, psaddr, sync_addr_hash,
11990Sstevel@tonic-gate TDB_HASH_SIZE * sizeof (uint64_t)) != PS_OK) {
12000Sstevel@tonic-gate return_val = TD_DBERR;
12010Sstevel@tonic-gate goto out;
12020Sstevel@tonic-gate }
12030Sstevel@tonic-gate
12040Sstevel@tonic-gate /*
12050Sstevel@tonic-gate * Now scan the hash table.
12060Sstevel@tonic-gate */
12070Sstevel@tonic-gate for (i = 0; i < TDB_HASH_SIZE; i++) {
12080Sstevel@tonic-gate for (next_desc = (psaddr_t)sync_addr_hash[i];
12090Sstevel@tonic-gate next_desc != NULL;
12100Sstevel@tonic-gate next_desc = (psaddr_t)sync_stats.next) {
12110Sstevel@tonic-gate if (ps_pdread(ph_p, next_desc,
12120Sstevel@tonic-gate &sync_stats, sizeof (sync_stats)) != PS_OK) {
12130Sstevel@tonic-gate return_val = TD_DBERR;
12140Sstevel@tonic-gate goto out;
12150Sstevel@tonic-gate }
12160Sstevel@tonic-gate if (sync_stats.un.type == TDB_NONE) {
12170Sstevel@tonic-gate /* not registered since registration enabled */
12180Sstevel@tonic-gate continue;
12190Sstevel@tonic-gate }
12200Sstevel@tonic-gate synchandle.sh_ta_p = ta_p;
12210Sstevel@tonic-gate synchandle.sh_unique = (psaddr_t)sync_stats.sync_addr;
12220Sstevel@tonic-gate if ((*cb)(&synchandle, cbdata) != 0)
12230Sstevel@tonic-gate goto out;
12240Sstevel@tonic-gate }
12250Sstevel@tonic-gate }
12260Sstevel@tonic-gate
12270Sstevel@tonic-gate out:
12280Sstevel@tonic-gate if (sync_addr_hash != NULL)
12290Sstevel@tonic-gate (void) munmap((void *)sync_addr_hash,
12300Sstevel@tonic-gate TDB_HASH_SIZE * sizeof (uint64_t));
12310Sstevel@tonic-gate (void) ps_pcontinue(ph_p);
12320Sstevel@tonic-gate ph_unlock(ta_p);
12330Sstevel@tonic-gate return (return_val);
12340Sstevel@tonic-gate }
12350Sstevel@tonic-gate
/*
 * Enable process statistics collection.
 * Statistics collection is not implemented by this library,
 * so this always reports the capability as absent.
 */
#pragma weak td_ta_enable_stats = __td_ta_enable_stats
/* ARGSUSED */
td_err_e
__td_ta_enable_stats(const td_thragent_t *ta_p, int onoff)
{
	return (TD_NOCAPAB);
}
12460Sstevel@tonic-gate
/*
 * Reset process statistics.
 * Statistics collection is not implemented by this library,
 * so this always reports the capability as absent.
 */
#pragma weak td_ta_reset_stats = __td_ta_reset_stats
/* ARGSUSED */
td_err_e
__td_ta_reset_stats(const td_thragent_t *ta_p)
{
	return (TD_NOCAPAB);
}
12570Sstevel@tonic-gate
/*
 * Read process statistics.
 * Statistics collection is not implemented by this library,
 * so this always reports the capability as absent.
 */
#pragma weak td_ta_get_stats = __td_ta_get_stats
/* ARGSUSED */
td_err_e
__td_ta_get_stats(const td_thragent_t *ta_p, td_ta_stats_t *tstats)
{
	return (TD_NOCAPAB);
}
12680Sstevel@tonic-gate
/*
 * Transfer information from lwp struct to thread information struct.
 * XXX -- lots of this needs cleaning up.
 *
 * ts_addr is the target address of the (possibly replaced) ulwp
 * struct; ulwp is a local copy of it already read from the target.
 * Fills in *ti_p from scratch (memset first).
 */
static void
td_thr2to(td_thragent_t *ta_p, psaddr_t ts_addr,
	ulwp_t *ulwp, td_thrinfo_t *ti_p)
{
	lwpid_t lwpid;

	/*
	 * An lwpid of 0 is reported as 1 -- presumably this denotes
	 * the primordial thread; confirm against libc's lwpid usage.
	 */
	if ((lwpid = ulwp->ul_lwpid) == 0)
		lwpid = 1;
	(void) memset(ti_p, 0, sizeof (*ti_p));
	ti_p->ti_ta_p = ta_p;
	ti_p->ti_user_flags = ulwp->ul_usropts;
	ti_p->ti_tid = lwpid;
	ti_p->ti_exitval = ulwp->ul_rval;
	ti_p->ti_startfunc = (psaddr_t)ulwp->ul_startpc;
	if (!ulwp->ul_dead) {
		/*
		 * The bloody fools got this backwards!
		 * (ti_stkbase receives the stack TOP; the stack
		 * fields are only valid for live threads.)
		 */
		ti_p->ti_stkbase = (psaddr_t)ulwp->ul_stktop;
		ti_p->ti_stksize = ulwp->ul_stksiz;
	}
	ti_p->ti_ro_area = ts_addr;
	/* replaced (reclaimed) ulwps retain only REPLACEMENT_SIZE bytes */
	ti_p->ti_ro_size = ulwp->ul_replace?
	    REPLACEMENT_SIZE : sizeof (ulwp_t);
	ti_p->ti_state = ulwp->ul_dead? TD_THR_ZOMBIE :
	    ulwp->ul_stop? TD_THR_STOPPED :
	    ulwp->ul_wchan? TD_THR_SLEEP :
	    TD_THR_ACTIVE;
	ti_p->ti_db_suspended = 0;
	ti_p->ti_type = TD_THR_USER;
	ti_p->ti_sp = ulwp->ul_sp;
	ti_p->ti_flags = 0;
	ti_p->ti_pri = ulwp->ul_pri;
	ti_p->ti_lid = lwpid;
	/* a dead thread's sigmask is meaningless; leave it zeroed */
	if (!ulwp->ul_dead)
		ti_p->ti_sigmask = ulwp->ul_sigmask;
	ti_p->ti_traceme = 0;
	ti_p->ti_preemptflag = 0;
	ti_p->ti_pirecflag = 0;
	(void) sigemptyset(&ti_p->ti_pending);
	ti_p->ti_events = ulwp->ul_td_evbuf.eventmask;
}
13150Sstevel@tonic-gate
#if defined(_LP64) && defined(_SYSCALL32)
/*
 * 32-bit counterpart of td_thr2to(), for a 64-bit debugger
 * examining a 32-bit target process.
 */
static void
td_thr2to32(td_thragent_t *ta_p, psaddr_t ts_addr,
	ulwp32_t *ulwp, td_thrinfo_t *ti_p)
{
	lwpid_t lwpid = ulwp->ul_lwpid;

	/* an lwpid of 0 is mapped to 1, as elsewhere in this file */
	if (lwpid == 0)
		lwpid = 1;

	(void) memset(ti_p, 0, sizeof (*ti_p));

	ti_p->ti_ta_p = ta_p;
	ti_p->ti_tid = lwpid;
	ti_p->ti_lid = lwpid;
	ti_p->ti_user_flags = ulwp->ul_usropts;
	ti_p->ti_exitval = (void *)(uintptr_t)ulwp->ul_rval;
	ti_p->ti_startfunc = (psaddr_t)ulwp->ul_startpc;
	if (!ulwp->ul_dead) {
		/*
		 * The bloody fools got this backwards!
		 */
		ti_p->ti_stkbase = (psaddr_t)ulwp->ul_stktop;
		ti_p->ti_stksize = ulwp->ul_stksiz;
		ti_p->ti_sigmask = *(sigset_t *)&ulwp->ul_sigmask;
	}
	ti_p->ti_ro_area = ts_addr;
	if (ulwp->ul_replace)
		ti_p->ti_ro_size = REPLACEMENT_SIZE32;
	else
		ti_p->ti_ro_size = sizeof (ulwp32_t);
	if (ulwp->ul_dead)
		ti_p->ti_state = TD_THR_ZOMBIE;
	else if (ulwp->ul_stop)
		ti_p->ti_state = TD_THR_STOPPED;
	else if (ulwp->ul_wchan)
		ti_p->ti_state = TD_THR_SLEEP;
	else
		ti_p->ti_state = TD_THR_ACTIVE;
	ti_p->ti_db_suspended = 0;
	ti_p->ti_type = TD_THR_USER;
	ti_p->ti_sp = (uint32_t)ulwp->ul_sp;
	ti_p->ti_flags = 0;
	ti_p->ti_pri = ulwp->ul_pri;
	ti_p->ti_traceme = 0;
	ti_p->ti_preemptflag = 0;
	ti_p->ti_pirecflag = 0;
	(void) sigemptyset(&ti_p->ti_pending);
	ti_p->ti_events = ulwp->ul_td_evbuf.eventmask;
}
#endif	/* _SYSCALL32 */
13600Sstevel@tonic-gate
13610Sstevel@tonic-gate /*
13620Sstevel@tonic-gate * Get thread information.
13630Sstevel@tonic-gate */
13640Sstevel@tonic-gate #pragma weak td_thr_get_info = __td_thr_get_info
13650Sstevel@tonic-gate td_err_e
__td_thr_get_info(td_thrhandle_t * th_p,td_thrinfo_t * ti_p)13660Sstevel@tonic-gate __td_thr_get_info(td_thrhandle_t *th_p, td_thrinfo_t *ti_p)
13670Sstevel@tonic-gate {
13680Sstevel@tonic-gate struct ps_prochandle *ph_p;
13690Sstevel@tonic-gate td_thragent_t *ta_p;
13700Sstevel@tonic-gate td_err_e return_val;
13710Sstevel@tonic-gate psaddr_t psaddr;
13720Sstevel@tonic-gate
13730Sstevel@tonic-gate if (ti_p == NULL)
13740Sstevel@tonic-gate return (TD_ERR);
13750Sstevel@tonic-gate (void) memset(ti_p, NULL, sizeof (*ti_p));
13760Sstevel@tonic-gate
13770Sstevel@tonic-gate if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
13780Sstevel@tonic-gate return (return_val);
13790Sstevel@tonic-gate ta_p = th_p->th_ta_p;
13800Sstevel@tonic-gate if (ps_pstop(ph_p) != PS_OK) {
13810Sstevel@tonic-gate ph_unlock(ta_p);
13820Sstevel@tonic-gate return (TD_DBERR);
13830Sstevel@tonic-gate }
13840Sstevel@tonic-gate
13850Sstevel@tonic-gate /*
13860Sstevel@tonic-gate * Read the ulwp struct from the process.
13870Sstevel@tonic-gate * Transfer the ulwp struct to the thread information struct.
13880Sstevel@tonic-gate */
13890Sstevel@tonic-gate psaddr = th_p->th_unique;
13900Sstevel@tonic-gate if (ta_p->model == PR_MODEL_NATIVE) {
13910Sstevel@tonic-gate ulwp_t ulwp;
13920Sstevel@tonic-gate
13930Sstevel@tonic-gate if (ps_pdread(ph_p, psaddr, &ulwp, sizeof (ulwp)) != PS_OK &&
13940Sstevel@tonic-gate ((void) memset(&ulwp, 0, sizeof (ulwp)),
13950Sstevel@tonic-gate ps_pdread(ph_p, psaddr, &ulwp, REPLACEMENT_SIZE)) != PS_OK)
13960Sstevel@tonic-gate return_val = TD_DBERR;
13970Sstevel@tonic-gate else
13980Sstevel@tonic-gate td_thr2to(ta_p, psaddr, &ulwp, ti_p);
13990Sstevel@tonic-gate } else {
14000Sstevel@tonic-gate #if defined(_LP64) && defined(_SYSCALL32)
14010Sstevel@tonic-gate ulwp32_t ulwp;
14020Sstevel@tonic-gate
14030Sstevel@tonic-gate if (ps_pdread(ph_p, psaddr, &ulwp, sizeof (ulwp)) != PS_OK &&
14040Sstevel@tonic-gate ((void) memset(&ulwp, 0, sizeof (ulwp)),
14050Sstevel@tonic-gate ps_pdread(ph_p, psaddr, &ulwp, REPLACEMENT_SIZE32)) !=
14066247Sraf PS_OK)
14070Sstevel@tonic-gate return_val = TD_DBERR;
14080Sstevel@tonic-gate else
14090Sstevel@tonic-gate td_thr2to32(ta_p, psaddr, &ulwp, ti_p);
14100Sstevel@tonic-gate #else
14110Sstevel@tonic-gate return_val = TD_ERR;
14120Sstevel@tonic-gate #endif /* _SYSCALL32 */
14130Sstevel@tonic-gate }
14140Sstevel@tonic-gate
14150Sstevel@tonic-gate (void) ps_pcontinue(ph_p);
14160Sstevel@tonic-gate ph_unlock(ta_p);
14170Sstevel@tonic-gate return (return_val);
14180Sstevel@tonic-gate }
14190Sstevel@tonic-gate
14200Sstevel@tonic-gate /*
14210Sstevel@tonic-gate * Given a process and an event number, return information about
14220Sstevel@tonic-gate * an address in the process or at which a breakpoint can be set
14230Sstevel@tonic-gate * to monitor the event.
14240Sstevel@tonic-gate */
14250Sstevel@tonic-gate #pragma weak td_ta_event_addr = __td_ta_event_addr
14260Sstevel@tonic-gate td_err_e
__td_ta_event_addr(td_thragent_t * ta_p,td_event_e event,td_notify_t * notify_p)14270Sstevel@tonic-gate __td_ta_event_addr(td_thragent_t *ta_p, td_event_e event, td_notify_t *notify_p)
14280Sstevel@tonic-gate {
14290Sstevel@tonic-gate if (ta_p == NULL)
14300Sstevel@tonic-gate return (TD_BADTA);
14310Sstevel@tonic-gate if (event < TD_MIN_EVENT_NUM || event > TD_MAX_EVENT_NUM)
14320Sstevel@tonic-gate return (TD_NOEVENT);
14330Sstevel@tonic-gate if (notify_p == NULL)
14340Sstevel@tonic-gate return (TD_ERR);
14350Sstevel@tonic-gate
14360Sstevel@tonic-gate notify_p->type = NOTIFY_BPT;
14370Sstevel@tonic-gate notify_p->u.bptaddr = ta_p->tdb_events[event - TD_MIN_EVENT_NUM];
14380Sstevel@tonic-gate
14390Sstevel@tonic-gate return (TD_OK);
14400Sstevel@tonic-gate }
14410Sstevel@tonic-gate
14420Sstevel@tonic-gate /*
14430Sstevel@tonic-gate * Add the events in eventset 2 to eventset 1.
14440Sstevel@tonic-gate */
14450Sstevel@tonic-gate static void
eventsetaddset(td_thr_events_t * event1_p,td_thr_events_t * event2_p)14460Sstevel@tonic-gate eventsetaddset(td_thr_events_t *event1_p, td_thr_events_t *event2_p)
14470Sstevel@tonic-gate {
14480Sstevel@tonic-gate int i;
14490Sstevel@tonic-gate
14500Sstevel@tonic-gate for (i = 0; i < TD_EVENTSIZE; i++)
14510Sstevel@tonic-gate event1_p->event_bits[i] |= event2_p->event_bits[i];
14520Sstevel@tonic-gate }
14530Sstevel@tonic-gate
14540Sstevel@tonic-gate /*
14550Sstevel@tonic-gate * Delete the events in eventset 2 from eventset 1.
14560Sstevel@tonic-gate */
14570Sstevel@tonic-gate static void
eventsetdelset(td_thr_events_t * event1_p,td_thr_events_t * event2_p)14580Sstevel@tonic-gate eventsetdelset(td_thr_events_t *event1_p, td_thr_events_t *event2_p)
14590Sstevel@tonic-gate {
14600Sstevel@tonic-gate int i;
14610Sstevel@tonic-gate
14620Sstevel@tonic-gate for (i = 0; i < TD_EVENTSIZE; i++)
14630Sstevel@tonic-gate event1_p->event_bits[i] &= ~event2_p->event_bits[i];
14640Sstevel@tonic-gate }
14650Sstevel@tonic-gate
14660Sstevel@tonic-gate /*
14670Sstevel@tonic-gate * Either add or delete the given event set from a thread's event mask.
14680Sstevel@tonic-gate */
14690Sstevel@tonic-gate static td_err_e
mod_eventset(td_thrhandle_t * th_p,td_thr_events_t * events,int onoff)14700Sstevel@tonic-gate mod_eventset(td_thrhandle_t *th_p, td_thr_events_t *events, int onoff)
14710Sstevel@tonic-gate {
14720Sstevel@tonic-gate struct ps_prochandle *ph_p;
14730Sstevel@tonic-gate td_err_e return_val = TD_OK;
14740Sstevel@tonic-gate char enable;
14750Sstevel@tonic-gate td_thr_events_t evset;
14760Sstevel@tonic-gate psaddr_t psaddr_evset;
14770Sstevel@tonic-gate psaddr_t psaddr_enab;
14780Sstevel@tonic-gate
14790Sstevel@tonic-gate if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
14800Sstevel@tonic-gate return (return_val);
14810Sstevel@tonic-gate if (th_p->th_ta_p->model == PR_MODEL_NATIVE) {
14820Sstevel@tonic-gate ulwp_t *ulwp = (ulwp_t *)th_p->th_unique;
14830Sstevel@tonic-gate psaddr_evset = (psaddr_t)&ulwp->ul_td_evbuf.eventmask;
14840Sstevel@tonic-gate psaddr_enab = (psaddr_t)&ulwp->ul_td_events_enable;
14850Sstevel@tonic-gate } else {
14860Sstevel@tonic-gate #if defined(_LP64) && defined(_SYSCALL32)
14870Sstevel@tonic-gate ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique;
14880Sstevel@tonic-gate psaddr_evset = (psaddr_t)&ulwp->ul_td_evbuf.eventmask;
14890Sstevel@tonic-gate psaddr_enab = (psaddr_t)&ulwp->ul_td_events_enable;
14900Sstevel@tonic-gate #else
14910Sstevel@tonic-gate ph_unlock(th_p->th_ta_p);
14920Sstevel@tonic-gate return (TD_ERR);
14930Sstevel@tonic-gate #endif /* _SYSCALL32 */
14940Sstevel@tonic-gate }
14950Sstevel@tonic-gate if (ps_pstop(ph_p) != PS_OK) {
14960Sstevel@tonic-gate ph_unlock(th_p->th_ta_p);
14970Sstevel@tonic-gate return (TD_DBERR);
14980Sstevel@tonic-gate }
14990Sstevel@tonic-gate
15000Sstevel@tonic-gate if (ps_pdread(ph_p, psaddr_evset, &evset, sizeof (evset)) != PS_OK)
15010Sstevel@tonic-gate return_val = TD_DBERR;
15020Sstevel@tonic-gate else {
15030Sstevel@tonic-gate if (onoff)
15040Sstevel@tonic-gate eventsetaddset(&evset, events);
15050Sstevel@tonic-gate else
15060Sstevel@tonic-gate eventsetdelset(&evset, events);
15070Sstevel@tonic-gate if (ps_pdwrite(ph_p, psaddr_evset, &evset, sizeof (evset))
15080Sstevel@tonic-gate != PS_OK)
15090Sstevel@tonic-gate return_val = TD_DBERR;
15100Sstevel@tonic-gate else {
15110Sstevel@tonic-gate enable = 0;
15120Sstevel@tonic-gate if (td_eventismember(&evset, TD_EVENTS_ENABLE))
15130Sstevel@tonic-gate enable = 1;
15140Sstevel@tonic-gate if (ps_pdwrite(ph_p, psaddr_enab,
15150Sstevel@tonic-gate &enable, sizeof (enable)) != PS_OK)
15160Sstevel@tonic-gate return_val = TD_DBERR;
15170Sstevel@tonic-gate }
15180Sstevel@tonic-gate }
15190Sstevel@tonic-gate
15200Sstevel@tonic-gate (void) ps_pcontinue(ph_p);
15210Sstevel@tonic-gate ph_unlock(th_p->th_ta_p);
15220Sstevel@tonic-gate return (return_val);
15230Sstevel@tonic-gate }
15240Sstevel@tonic-gate
15250Sstevel@tonic-gate /*
15260Sstevel@tonic-gate * Enable or disable tracing for a given thread. Tracing
15270Sstevel@tonic-gate * is filtered based on the event mask of each thread. Tracing
15280Sstevel@tonic-gate * can be turned on/off for the thread without changing thread
15290Sstevel@tonic-gate * event mask.
15300Sstevel@tonic-gate * Currently unused by dbx.
15310Sstevel@tonic-gate */
15320Sstevel@tonic-gate #pragma weak td_thr_event_enable = __td_thr_event_enable
15330Sstevel@tonic-gate td_err_e
__td_thr_event_enable(td_thrhandle_t * th_p,int onoff)15340Sstevel@tonic-gate __td_thr_event_enable(td_thrhandle_t *th_p, int onoff)
15350Sstevel@tonic-gate {
15360Sstevel@tonic-gate td_thr_events_t evset;
15370Sstevel@tonic-gate
15380Sstevel@tonic-gate td_event_emptyset(&evset);
15390Sstevel@tonic-gate td_event_addset(&evset, TD_EVENTS_ENABLE);
15400Sstevel@tonic-gate return (mod_eventset(th_p, &evset, onoff));
15410Sstevel@tonic-gate }
15420Sstevel@tonic-gate
15430Sstevel@tonic-gate /*
15440Sstevel@tonic-gate * Set event mask to enable event. event is turned on in
15450Sstevel@tonic-gate * event mask for thread. If a thread encounters an event
15460Sstevel@tonic-gate * for which its event mask is on, notification will be sent
15470Sstevel@tonic-gate * to the debugger.
15480Sstevel@tonic-gate * Addresses for each event are provided to the
15490Sstevel@tonic-gate * debugger. It is assumed that a breakpoint of some type will
15500Sstevel@tonic-gate * be placed at that address. If the event mask for the thread
15510Sstevel@tonic-gate * is on, the instruction at the address will be executed.
15520Sstevel@tonic-gate * Otherwise, the instruction will be skipped.
15530Sstevel@tonic-gate */
15540Sstevel@tonic-gate #pragma weak td_thr_set_event = __td_thr_set_event
15550Sstevel@tonic-gate td_err_e
__td_thr_set_event(td_thrhandle_t * th_p,td_thr_events_t * events)15560Sstevel@tonic-gate __td_thr_set_event(td_thrhandle_t *th_p, td_thr_events_t *events)
15570Sstevel@tonic-gate {
15580Sstevel@tonic-gate return (mod_eventset(th_p, events, 1));
15590Sstevel@tonic-gate }
15600Sstevel@tonic-gate
15610Sstevel@tonic-gate /*
15620Sstevel@tonic-gate * Enable or disable a set of events in the process-global event mask,
15630Sstevel@tonic-gate * depending on the value of onoff.
15640Sstevel@tonic-gate */
15650Sstevel@tonic-gate static td_err_e
td_ta_mod_event(td_thragent_t * ta_p,td_thr_events_t * events,int onoff)15660Sstevel@tonic-gate td_ta_mod_event(td_thragent_t *ta_p, td_thr_events_t *events, int onoff)
15670Sstevel@tonic-gate {
15680Sstevel@tonic-gate struct ps_prochandle *ph_p;
15690Sstevel@tonic-gate td_thr_events_t targ_eventset;
15700Sstevel@tonic-gate td_err_e return_val;
15710Sstevel@tonic-gate
15720Sstevel@tonic-gate if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
15730Sstevel@tonic-gate return (return_val);
15740Sstevel@tonic-gate if (ps_pstop(ph_p) != PS_OK) {
15750Sstevel@tonic-gate ph_unlock(ta_p);
15760Sstevel@tonic-gate return (TD_DBERR);
15770Sstevel@tonic-gate }
15780Sstevel@tonic-gate if (ps_pdread(ph_p, ta_p->tdb_eventmask_addr,
15790Sstevel@tonic-gate &targ_eventset, sizeof (targ_eventset)) != PS_OK)
15800Sstevel@tonic-gate return_val = TD_DBERR;
15810Sstevel@tonic-gate else {
15820Sstevel@tonic-gate if (onoff)
15830Sstevel@tonic-gate eventsetaddset(&targ_eventset, events);
15840Sstevel@tonic-gate else
15850Sstevel@tonic-gate eventsetdelset(&targ_eventset, events);
15860Sstevel@tonic-gate if (ps_pdwrite(ph_p, ta_p->tdb_eventmask_addr,
15870Sstevel@tonic-gate &targ_eventset, sizeof (targ_eventset)) != PS_OK)
15880Sstevel@tonic-gate return_val = TD_DBERR;
15890Sstevel@tonic-gate }
15900Sstevel@tonic-gate (void) ps_pcontinue(ph_p);
15910Sstevel@tonic-gate ph_unlock(ta_p);
15920Sstevel@tonic-gate return (return_val);
15930Sstevel@tonic-gate }
15940Sstevel@tonic-gate
15950Sstevel@tonic-gate /*
15960Sstevel@tonic-gate * Enable a set of events in the process-global event mask.
15970Sstevel@tonic-gate */
15980Sstevel@tonic-gate #pragma weak td_ta_set_event = __td_ta_set_event
15990Sstevel@tonic-gate td_err_e
__td_ta_set_event(td_thragent_t * ta_p,td_thr_events_t * events)16000Sstevel@tonic-gate __td_ta_set_event(td_thragent_t *ta_p, td_thr_events_t *events)
16010Sstevel@tonic-gate {
16020Sstevel@tonic-gate return (td_ta_mod_event(ta_p, events, 1));
16030Sstevel@tonic-gate }
16040Sstevel@tonic-gate
16050Sstevel@tonic-gate /*
16060Sstevel@tonic-gate * Set event mask to disable the given event set; these events are cleared
16070Sstevel@tonic-gate * from the event mask of the thread. Events that occur for a thread
16080Sstevel@tonic-gate * with the event masked off will not cause notification to be
16090Sstevel@tonic-gate * sent to the debugger (see td_thr_set_event for fuller description).
16100Sstevel@tonic-gate */
16110Sstevel@tonic-gate #pragma weak td_thr_clear_event = __td_thr_clear_event
16120Sstevel@tonic-gate td_err_e
__td_thr_clear_event(td_thrhandle_t * th_p,td_thr_events_t * events)16130Sstevel@tonic-gate __td_thr_clear_event(td_thrhandle_t *th_p, td_thr_events_t *events)
16140Sstevel@tonic-gate {
16150Sstevel@tonic-gate return (mod_eventset(th_p, events, 0));
16160Sstevel@tonic-gate }
16170Sstevel@tonic-gate
16180Sstevel@tonic-gate /*
16190Sstevel@tonic-gate * Disable a set of events in the process-global event mask.
16200Sstevel@tonic-gate */
16210Sstevel@tonic-gate #pragma weak td_ta_clear_event = __td_ta_clear_event
16220Sstevel@tonic-gate td_err_e
__td_ta_clear_event(td_thragent_t * ta_p,td_thr_events_t * events)16230Sstevel@tonic-gate __td_ta_clear_event(td_thragent_t *ta_p, td_thr_events_t *events)
16240Sstevel@tonic-gate {
16250Sstevel@tonic-gate return (td_ta_mod_event(ta_p, events, 0));
16260Sstevel@tonic-gate }
16270Sstevel@tonic-gate
16280Sstevel@tonic-gate /*
16290Sstevel@tonic-gate * This function returns the most recent event message, if any,
16300Sstevel@tonic-gate * associated with a thread. Given a thread handle, return the message
16310Sstevel@tonic-gate * corresponding to the event encountered by the thread. Only one
16320Sstevel@tonic-gate * message per thread is saved. Messages from earlier events are lost
16330Sstevel@tonic-gate * when later events occur.
16340Sstevel@tonic-gate */
16350Sstevel@tonic-gate #pragma weak td_thr_event_getmsg = __td_thr_event_getmsg
16360Sstevel@tonic-gate td_err_e
__td_thr_event_getmsg(td_thrhandle_t * th_p,td_event_msg_t * msg)16370Sstevel@tonic-gate __td_thr_event_getmsg(td_thrhandle_t *th_p, td_event_msg_t *msg)
16380Sstevel@tonic-gate {
16390Sstevel@tonic-gate struct ps_prochandle *ph_p;
16400Sstevel@tonic-gate td_err_e return_val = TD_OK;
16410Sstevel@tonic-gate psaddr_t psaddr;
16420Sstevel@tonic-gate
16430Sstevel@tonic-gate if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
16440Sstevel@tonic-gate return (return_val);
16450Sstevel@tonic-gate if (ps_pstop(ph_p) != PS_OK) {
16460Sstevel@tonic-gate ph_unlock(th_p->th_ta_p);
16470Sstevel@tonic-gate return (TD_BADTA);
16480Sstevel@tonic-gate }
16490Sstevel@tonic-gate if (th_p->th_ta_p->model == PR_MODEL_NATIVE) {
16500Sstevel@tonic-gate ulwp_t *ulwp = (ulwp_t *)th_p->th_unique;
16510Sstevel@tonic-gate td_evbuf_t evbuf;
16520Sstevel@tonic-gate
16530Sstevel@tonic-gate psaddr = (psaddr_t)&ulwp->ul_td_evbuf;
16540Sstevel@tonic-gate if (ps_pdread(ph_p, psaddr, &evbuf, sizeof (evbuf)) != PS_OK) {
16550Sstevel@tonic-gate return_val = TD_DBERR;
16560Sstevel@tonic-gate } else if (evbuf.eventnum == TD_EVENT_NONE) {
16570Sstevel@tonic-gate return_val = TD_NOEVENT;
16580Sstevel@tonic-gate } else {
16590Sstevel@tonic-gate msg->event = evbuf.eventnum;
16600Sstevel@tonic-gate msg->th_p = (td_thrhandle_t *)th_p;
16610Sstevel@tonic-gate msg->msg.data = (uintptr_t)evbuf.eventdata;
16620Sstevel@tonic-gate /* "Consume" the message */
16630Sstevel@tonic-gate evbuf.eventnum = TD_EVENT_NONE;
16640Sstevel@tonic-gate evbuf.eventdata = NULL;
16650Sstevel@tonic-gate if (ps_pdwrite(ph_p, psaddr, &evbuf, sizeof (evbuf))
16660Sstevel@tonic-gate != PS_OK)
16670Sstevel@tonic-gate return_val = TD_DBERR;
16680Sstevel@tonic-gate }
16690Sstevel@tonic-gate } else {
16700Sstevel@tonic-gate #if defined(_LP64) && defined(_SYSCALL32)
16710Sstevel@tonic-gate ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique;
16720Sstevel@tonic-gate td_evbuf32_t evbuf;
16730Sstevel@tonic-gate
16740Sstevel@tonic-gate psaddr = (psaddr_t)&ulwp->ul_td_evbuf;
16750Sstevel@tonic-gate if (ps_pdread(ph_p, psaddr, &evbuf, sizeof (evbuf)) != PS_OK) {
16760Sstevel@tonic-gate return_val = TD_DBERR;
16770Sstevel@tonic-gate } else if (evbuf.eventnum == TD_EVENT_NONE) {
16780Sstevel@tonic-gate return_val = TD_NOEVENT;
16790Sstevel@tonic-gate } else {
16800Sstevel@tonic-gate msg->event = evbuf.eventnum;
16810Sstevel@tonic-gate msg->th_p = (td_thrhandle_t *)th_p;
16820Sstevel@tonic-gate msg->msg.data = (uintptr_t)evbuf.eventdata;
16830Sstevel@tonic-gate /* "Consume" the message */
16840Sstevel@tonic-gate evbuf.eventnum = TD_EVENT_NONE;
16850Sstevel@tonic-gate evbuf.eventdata = NULL;
16860Sstevel@tonic-gate if (ps_pdwrite(ph_p, psaddr, &evbuf, sizeof (evbuf))
16870Sstevel@tonic-gate != PS_OK)
16880Sstevel@tonic-gate return_val = TD_DBERR;
16890Sstevel@tonic-gate }
16900Sstevel@tonic-gate #else
16910Sstevel@tonic-gate return_val = TD_ERR;
16920Sstevel@tonic-gate #endif /* _SYSCALL32 */
16930Sstevel@tonic-gate }
16940Sstevel@tonic-gate
16950Sstevel@tonic-gate (void) ps_pcontinue(ph_p);
16960Sstevel@tonic-gate ph_unlock(th_p->th_ta_p);
16970Sstevel@tonic-gate return (return_val);
16980Sstevel@tonic-gate }
16990Sstevel@tonic-gate
17000Sstevel@tonic-gate /*
17010Sstevel@tonic-gate * The callback function td_ta_event_getmsg uses when looking for
17020Sstevel@tonic-gate * a thread with an event. A thin wrapper around td_thr_event_getmsg.
17030Sstevel@tonic-gate */
17040Sstevel@tonic-gate static int
event_msg_cb(const td_thrhandle_t * th_p,void * arg)17050Sstevel@tonic-gate event_msg_cb(const td_thrhandle_t *th_p, void *arg)
17060Sstevel@tonic-gate {
17070Sstevel@tonic-gate static td_thrhandle_t th;
17080Sstevel@tonic-gate td_event_msg_t *msg = arg;
17090Sstevel@tonic-gate
17100Sstevel@tonic-gate if (__td_thr_event_getmsg((td_thrhandle_t *)th_p, msg) == TD_OK) {
17110Sstevel@tonic-gate /*
17120Sstevel@tonic-gate * Got an event, stop iterating.
17130Sstevel@tonic-gate *
17140Sstevel@tonic-gate * Because of past mistakes in interface definition,
17150Sstevel@tonic-gate * we are forced to pass back a static local variable
17160Sstevel@tonic-gate * for the thread handle because th_p is a pointer
17170Sstevel@tonic-gate * to a local variable in __td_ta_thr_iter().
17180Sstevel@tonic-gate * Grr...
17190Sstevel@tonic-gate */
17200Sstevel@tonic-gate th = *th_p;
17210Sstevel@tonic-gate msg->th_p = &th;
17220Sstevel@tonic-gate return (1);
17230Sstevel@tonic-gate }
17240Sstevel@tonic-gate return (0);
17250Sstevel@tonic-gate }
17260Sstevel@tonic-gate
17270Sstevel@tonic-gate /*
17280Sstevel@tonic-gate * This function is just like td_thr_event_getmsg, except that it is
17290Sstevel@tonic-gate * passed a process handle rather than a thread handle, and returns
17300Sstevel@tonic-gate * an event message for some thread in the process that has an event
17310Sstevel@tonic-gate * message pending. If no thread has an event message pending, this
17320Sstevel@tonic-gate * routine returns TD_NOEVENT. Thus, all pending event messages may
17330Sstevel@tonic-gate * be collected from a process by repeatedly calling this routine
17340Sstevel@tonic-gate * until it returns TD_NOEVENT.
17350Sstevel@tonic-gate */
17360Sstevel@tonic-gate #pragma weak td_ta_event_getmsg = __td_ta_event_getmsg
17370Sstevel@tonic-gate td_err_e
__td_ta_event_getmsg(td_thragent_t * ta_p,td_event_msg_t * msg)17380Sstevel@tonic-gate __td_ta_event_getmsg(td_thragent_t *ta_p, td_event_msg_t *msg)
17390Sstevel@tonic-gate {
17400Sstevel@tonic-gate td_err_e return_val;
17410Sstevel@tonic-gate
17420Sstevel@tonic-gate if (ta_p == NULL)
17430Sstevel@tonic-gate return (TD_BADTA);
17440Sstevel@tonic-gate if (ta_p->ph_p == NULL)
17450Sstevel@tonic-gate return (TD_BADPH);
17460Sstevel@tonic-gate if (msg == NULL)
17470Sstevel@tonic-gate return (TD_ERR);
17480Sstevel@tonic-gate msg->event = TD_EVENT_NONE;
17490Sstevel@tonic-gate if ((return_val = __td_ta_thr_iter(ta_p, event_msg_cb, msg,
17500Sstevel@tonic-gate TD_THR_ANY_STATE, TD_THR_LOWEST_PRIORITY, TD_SIGNO_MASK,
17510Sstevel@tonic-gate TD_THR_ANY_USER_FLAGS)) != TD_OK)
17520Sstevel@tonic-gate return (return_val);
17530Sstevel@tonic-gate if (msg->event == TD_EVENT_NONE)
17540Sstevel@tonic-gate return (TD_NOEVENT);
17550Sstevel@tonic-gate return (TD_OK);
17560Sstevel@tonic-gate }
17570Sstevel@tonic-gate
17580Sstevel@tonic-gate static lwpid_t
thr_to_lwpid(const td_thrhandle_t * th_p)17590Sstevel@tonic-gate thr_to_lwpid(const td_thrhandle_t *th_p)
17600Sstevel@tonic-gate {
17610Sstevel@tonic-gate struct ps_prochandle *ph_p = th_p->th_ta_p->ph_p;
17620Sstevel@tonic-gate lwpid_t lwpid;
17630Sstevel@tonic-gate
17640Sstevel@tonic-gate /*
17650Sstevel@tonic-gate * The caller holds the prochandle lock
17660Sstevel@tonic-gate * and has already verfied everything.
17670Sstevel@tonic-gate */
17680Sstevel@tonic-gate if (th_p->th_ta_p->model == PR_MODEL_NATIVE) {
17690Sstevel@tonic-gate ulwp_t *ulwp = (ulwp_t *)th_p->th_unique;
17700Sstevel@tonic-gate
17710Sstevel@tonic-gate if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_lwpid,
17720Sstevel@tonic-gate &lwpid, sizeof (lwpid)) != PS_OK)
17730Sstevel@tonic-gate lwpid = 0;
17740Sstevel@tonic-gate else if (lwpid == 0)
17750Sstevel@tonic-gate lwpid = 1;
17760Sstevel@tonic-gate } else {
17770Sstevel@tonic-gate #if defined(_LP64) && defined(_SYSCALL32)
17780Sstevel@tonic-gate ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique;
17790Sstevel@tonic-gate
17800Sstevel@tonic-gate if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_lwpid,
17810Sstevel@tonic-gate &lwpid, sizeof (lwpid)) != PS_OK)
17820Sstevel@tonic-gate lwpid = 0;
17830Sstevel@tonic-gate else if (lwpid == 0)
17840Sstevel@tonic-gate lwpid = 1;
17850Sstevel@tonic-gate #else
17860Sstevel@tonic-gate lwpid = 0;
17870Sstevel@tonic-gate #endif /* _SYSCALL32 */
17880Sstevel@tonic-gate }
17890Sstevel@tonic-gate
17900Sstevel@tonic-gate return (lwpid);
17910Sstevel@tonic-gate }
17920Sstevel@tonic-gate
17930Sstevel@tonic-gate /*
17940Sstevel@tonic-gate * Suspend a thread.
17950Sstevel@tonic-gate * XXX: What does this mean in a one-level model?
17960Sstevel@tonic-gate */
17970Sstevel@tonic-gate #pragma weak td_thr_dbsuspend = __td_thr_dbsuspend
17980Sstevel@tonic-gate td_err_e
__td_thr_dbsuspend(const td_thrhandle_t * th_p)17990Sstevel@tonic-gate __td_thr_dbsuspend(const td_thrhandle_t *th_p)
18000Sstevel@tonic-gate {
18010Sstevel@tonic-gate struct ps_prochandle *ph_p;
18020Sstevel@tonic-gate td_err_e return_val;
18030Sstevel@tonic-gate
18040Sstevel@tonic-gate if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
18050Sstevel@tonic-gate return (return_val);
18060Sstevel@tonic-gate if (ps_lstop(ph_p, thr_to_lwpid(th_p)) != PS_OK)
18070Sstevel@tonic-gate return_val = TD_DBERR;
18080Sstevel@tonic-gate ph_unlock(th_p->th_ta_p);
18090Sstevel@tonic-gate return (return_val);
18100Sstevel@tonic-gate }
18110Sstevel@tonic-gate
18120Sstevel@tonic-gate /*
18130Sstevel@tonic-gate * Resume a suspended thread.
18140Sstevel@tonic-gate * XXX: What does this mean in a one-level model?
18150Sstevel@tonic-gate */
18160Sstevel@tonic-gate #pragma weak td_thr_dbresume = __td_thr_dbresume
18170Sstevel@tonic-gate td_err_e
__td_thr_dbresume(const td_thrhandle_t * th_p)18180Sstevel@tonic-gate __td_thr_dbresume(const td_thrhandle_t *th_p)
18190Sstevel@tonic-gate {
18200Sstevel@tonic-gate struct ps_prochandle *ph_p;
18210Sstevel@tonic-gate td_err_e return_val;
18220Sstevel@tonic-gate
18230Sstevel@tonic-gate if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
18240Sstevel@tonic-gate return (return_val);
18250Sstevel@tonic-gate if (ps_lcontinue(ph_p, thr_to_lwpid(th_p)) != PS_OK)
18260Sstevel@tonic-gate return_val = TD_DBERR;
18270Sstevel@tonic-gate ph_unlock(th_p->th_ta_p);
18280Sstevel@tonic-gate return (return_val);
18290Sstevel@tonic-gate }
18300Sstevel@tonic-gate
18310Sstevel@tonic-gate /*
18320Sstevel@tonic-gate * Set a thread's signal mask.
18330Sstevel@tonic-gate * Currently unused by dbx.
18340Sstevel@tonic-gate */
18350Sstevel@tonic-gate #pragma weak td_thr_sigsetmask = __td_thr_sigsetmask
18360Sstevel@tonic-gate /* ARGSUSED */
18370Sstevel@tonic-gate td_err_e
__td_thr_sigsetmask(const td_thrhandle_t * th_p,const sigset_t ti_sigmask)18380Sstevel@tonic-gate __td_thr_sigsetmask(const td_thrhandle_t *th_p, const sigset_t ti_sigmask)
18390Sstevel@tonic-gate {
18400Sstevel@tonic-gate return (TD_NOCAPAB);
18410Sstevel@tonic-gate }
18420Sstevel@tonic-gate
18430Sstevel@tonic-gate /*
18440Sstevel@tonic-gate * Set a thread's "signals-pending" set.
18450Sstevel@tonic-gate * Currently unused by dbx.
18460Sstevel@tonic-gate */
18470Sstevel@tonic-gate #pragma weak td_thr_setsigpending = __td_thr_setsigpending
18480Sstevel@tonic-gate /* ARGSUSED */
18490Sstevel@tonic-gate td_err_e
__td_thr_setsigpending(const td_thrhandle_t * th_p,uchar_t ti_pending_flag,const sigset_t ti_pending)18500Sstevel@tonic-gate __td_thr_setsigpending(const td_thrhandle_t *th_p,
18510Sstevel@tonic-gate uchar_t ti_pending_flag, const sigset_t ti_pending)
18520Sstevel@tonic-gate {
18530Sstevel@tonic-gate return (TD_NOCAPAB);
18540Sstevel@tonic-gate }
18550Sstevel@tonic-gate
18560Sstevel@tonic-gate /*
18570Sstevel@tonic-gate * Get a thread's general register set.
18580Sstevel@tonic-gate */
18590Sstevel@tonic-gate #pragma weak td_thr_getgregs = __td_thr_getgregs
18600Sstevel@tonic-gate td_err_e
__td_thr_getgregs(td_thrhandle_t * th_p,prgregset_t regset)18610Sstevel@tonic-gate __td_thr_getgregs(td_thrhandle_t *th_p, prgregset_t regset)
18620Sstevel@tonic-gate {
18630Sstevel@tonic-gate struct ps_prochandle *ph_p;
18640Sstevel@tonic-gate td_err_e return_val;
18650Sstevel@tonic-gate
18660Sstevel@tonic-gate if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
18670Sstevel@tonic-gate return (return_val);
18680Sstevel@tonic-gate if (ps_pstop(ph_p) != PS_OK) {
18690Sstevel@tonic-gate ph_unlock(th_p->th_ta_p);
18700Sstevel@tonic-gate return (TD_DBERR);
18710Sstevel@tonic-gate }
18720Sstevel@tonic-gate
18730Sstevel@tonic-gate if (ps_lgetregs(ph_p, thr_to_lwpid(th_p), regset) != PS_OK)
18740Sstevel@tonic-gate return_val = TD_DBERR;
18750Sstevel@tonic-gate
18760Sstevel@tonic-gate (void) ps_pcontinue(ph_p);
18770Sstevel@tonic-gate ph_unlock(th_p->th_ta_p);
18780Sstevel@tonic-gate return (return_val);
18790Sstevel@tonic-gate }
18800Sstevel@tonic-gate
18810Sstevel@tonic-gate /*
18820Sstevel@tonic-gate * Set a thread's general register set.
18830Sstevel@tonic-gate */
18840Sstevel@tonic-gate #pragma weak td_thr_setgregs = __td_thr_setgregs
18850Sstevel@tonic-gate td_err_e
__td_thr_setgregs(td_thrhandle_t * th_p,const prgregset_t regset)18860Sstevel@tonic-gate __td_thr_setgregs(td_thrhandle_t *th_p, const prgregset_t regset)
18870Sstevel@tonic-gate {
18880Sstevel@tonic-gate struct ps_prochandle *ph_p;
18890Sstevel@tonic-gate td_err_e return_val;
18900Sstevel@tonic-gate
18910Sstevel@tonic-gate if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
18920Sstevel@tonic-gate return (return_val);
18930Sstevel@tonic-gate if (ps_pstop(ph_p) != PS_OK) {
18940Sstevel@tonic-gate ph_unlock(th_p->th_ta_p);
18950Sstevel@tonic-gate return (TD_DBERR);
18960Sstevel@tonic-gate }
18970Sstevel@tonic-gate
18980Sstevel@tonic-gate if (ps_lsetregs(ph_p, thr_to_lwpid(th_p), regset) != PS_OK)
18990Sstevel@tonic-gate return_val = TD_DBERR;
19000Sstevel@tonic-gate
19010Sstevel@tonic-gate (void) ps_pcontinue(ph_p);
19020Sstevel@tonic-gate ph_unlock(th_p->th_ta_p);
19030Sstevel@tonic-gate return (return_val);
19040Sstevel@tonic-gate }
19050Sstevel@tonic-gate
19060Sstevel@tonic-gate /*
19070Sstevel@tonic-gate * Get a thread's floating-point register set.
19080Sstevel@tonic-gate */
19090Sstevel@tonic-gate #pragma weak td_thr_getfpregs = __td_thr_getfpregs
19100Sstevel@tonic-gate td_err_e
__td_thr_getfpregs(td_thrhandle_t * th_p,prfpregset_t * fpregset)19110Sstevel@tonic-gate __td_thr_getfpregs(td_thrhandle_t *th_p, prfpregset_t *fpregset)
19120Sstevel@tonic-gate {
19130Sstevel@tonic-gate struct ps_prochandle *ph_p;
19140Sstevel@tonic-gate td_err_e return_val;
19150Sstevel@tonic-gate
19160Sstevel@tonic-gate if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
19170Sstevel@tonic-gate return (return_val);
19180Sstevel@tonic-gate if (ps_pstop(ph_p) != PS_OK) {
19190Sstevel@tonic-gate ph_unlock(th_p->th_ta_p);
19200Sstevel@tonic-gate return (TD_DBERR);
19210Sstevel@tonic-gate }
19220Sstevel@tonic-gate
19230Sstevel@tonic-gate if (ps_lgetfpregs(ph_p, thr_to_lwpid(th_p), fpregset) != PS_OK)
19240Sstevel@tonic-gate return_val = TD_DBERR;
19250Sstevel@tonic-gate
19260Sstevel@tonic-gate (void) ps_pcontinue(ph_p);
19270Sstevel@tonic-gate ph_unlock(th_p->th_ta_p);
19280Sstevel@tonic-gate return (return_val);
19290Sstevel@tonic-gate }
19300Sstevel@tonic-gate
19310Sstevel@tonic-gate /*
19320Sstevel@tonic-gate * Set a thread's floating-point register set.
19330Sstevel@tonic-gate */
19340Sstevel@tonic-gate #pragma weak td_thr_setfpregs = __td_thr_setfpregs
19350Sstevel@tonic-gate td_err_e
__td_thr_setfpregs(td_thrhandle_t * th_p,const prfpregset_t * fpregset)19360Sstevel@tonic-gate __td_thr_setfpregs(td_thrhandle_t *th_p, const prfpregset_t *fpregset)
19370Sstevel@tonic-gate {
19380Sstevel@tonic-gate struct ps_prochandle *ph_p;
19390Sstevel@tonic-gate td_err_e return_val;
19400Sstevel@tonic-gate
19410Sstevel@tonic-gate if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
19420Sstevel@tonic-gate return (return_val);
19430Sstevel@tonic-gate if (ps_pstop(ph_p) != PS_OK) {
19440Sstevel@tonic-gate ph_unlock(th_p->th_ta_p);
19450Sstevel@tonic-gate return (TD_DBERR);
19460Sstevel@tonic-gate }
19470Sstevel@tonic-gate
19480Sstevel@tonic-gate if (ps_lsetfpregs(ph_p, thr_to_lwpid(th_p), fpregset) != PS_OK)
19490Sstevel@tonic-gate return_val = TD_DBERR;
19500Sstevel@tonic-gate
19510Sstevel@tonic-gate (void) ps_pcontinue(ph_p);
19520Sstevel@tonic-gate ph_unlock(th_p->th_ta_p);
19530Sstevel@tonic-gate return (return_val);
19540Sstevel@tonic-gate }
19550Sstevel@tonic-gate
19560Sstevel@tonic-gate /*
19570Sstevel@tonic-gate * Get the size of the extra state register set for this architecture.
19580Sstevel@tonic-gate * Currently unused by dbx.
19590Sstevel@tonic-gate */
19600Sstevel@tonic-gate #pragma weak td_thr_getxregsize = __td_thr_getxregsize
19610Sstevel@tonic-gate /* ARGSUSED */
19620Sstevel@tonic-gate td_err_e
__td_thr_getxregsize(td_thrhandle_t * th_p,int * xregsize)19630Sstevel@tonic-gate __td_thr_getxregsize(td_thrhandle_t *th_p, int *xregsize)
19640Sstevel@tonic-gate {
19650Sstevel@tonic-gate #if defined(__sparc)
19660Sstevel@tonic-gate struct ps_prochandle *ph_p;
19670Sstevel@tonic-gate td_err_e return_val;
19680Sstevel@tonic-gate
19690Sstevel@tonic-gate if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
19700Sstevel@tonic-gate return (return_val);
19710Sstevel@tonic-gate if (ps_pstop(ph_p) != PS_OK) {
19720Sstevel@tonic-gate ph_unlock(th_p->th_ta_p);
19730Sstevel@tonic-gate return (TD_DBERR);
19740Sstevel@tonic-gate }
19750Sstevel@tonic-gate
19760Sstevel@tonic-gate if (ps_lgetxregsize(ph_p, thr_to_lwpid(th_p), xregsize) != PS_OK)
19770Sstevel@tonic-gate return_val = TD_DBERR;
19780Sstevel@tonic-gate
19790Sstevel@tonic-gate (void) ps_pcontinue(ph_p);
19800Sstevel@tonic-gate ph_unlock(th_p->th_ta_p);
19810Sstevel@tonic-gate return (return_val);
19820Sstevel@tonic-gate #else /* __sparc */
19830Sstevel@tonic-gate return (TD_NOXREGS);
19840Sstevel@tonic-gate #endif /* __sparc */
19850Sstevel@tonic-gate }
19860Sstevel@tonic-gate
19870Sstevel@tonic-gate /*
19880Sstevel@tonic-gate * Get a thread's extra state register set.
19890Sstevel@tonic-gate */
19900Sstevel@tonic-gate #pragma weak td_thr_getxregs = __td_thr_getxregs
19910Sstevel@tonic-gate /* ARGSUSED */
19920Sstevel@tonic-gate td_err_e
__td_thr_getxregs(td_thrhandle_t * th_p,void * xregset)19930Sstevel@tonic-gate __td_thr_getxregs(td_thrhandle_t *th_p, void *xregset)
19940Sstevel@tonic-gate {
19950Sstevel@tonic-gate #if defined(__sparc)
19960Sstevel@tonic-gate struct ps_prochandle *ph_p;
19970Sstevel@tonic-gate td_err_e return_val;
19980Sstevel@tonic-gate
19990Sstevel@tonic-gate if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
20000Sstevel@tonic-gate return (return_val);
20010Sstevel@tonic-gate if (ps_pstop(ph_p) != PS_OK) {
20020Sstevel@tonic-gate ph_unlock(th_p->th_ta_p);
20030Sstevel@tonic-gate return (TD_DBERR);
20040Sstevel@tonic-gate }
20050Sstevel@tonic-gate
20060Sstevel@tonic-gate if (ps_lgetxregs(ph_p, thr_to_lwpid(th_p), (caddr_t)xregset) != PS_OK)
20070Sstevel@tonic-gate return_val = TD_DBERR;
20080Sstevel@tonic-gate
20090Sstevel@tonic-gate (void) ps_pcontinue(ph_p);
20100Sstevel@tonic-gate ph_unlock(th_p->th_ta_p);
20110Sstevel@tonic-gate return (return_val);
20120Sstevel@tonic-gate #else /* __sparc */
20130Sstevel@tonic-gate return (TD_NOXREGS);
20140Sstevel@tonic-gate #endif /* __sparc */
20150Sstevel@tonic-gate }
20160Sstevel@tonic-gate
20170Sstevel@tonic-gate /*
20180Sstevel@tonic-gate * Set a thread's extra state register set.
20190Sstevel@tonic-gate */
20200Sstevel@tonic-gate #pragma weak td_thr_setxregs = __td_thr_setxregs
20210Sstevel@tonic-gate /* ARGSUSED */
20220Sstevel@tonic-gate td_err_e
__td_thr_setxregs(td_thrhandle_t * th_p,const void * xregset)20230Sstevel@tonic-gate __td_thr_setxregs(td_thrhandle_t *th_p, const void *xregset)
20240Sstevel@tonic-gate {
20250Sstevel@tonic-gate #if defined(__sparc)
20260Sstevel@tonic-gate struct ps_prochandle *ph_p;
20270Sstevel@tonic-gate td_err_e return_val;
20280Sstevel@tonic-gate
20290Sstevel@tonic-gate if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
20300Sstevel@tonic-gate return (return_val);
20310Sstevel@tonic-gate if (ps_pstop(ph_p) != PS_OK) {
20320Sstevel@tonic-gate ph_unlock(th_p->th_ta_p);
20330Sstevel@tonic-gate return (TD_DBERR);
20340Sstevel@tonic-gate }
20350Sstevel@tonic-gate
20360Sstevel@tonic-gate if (ps_lsetxregs(ph_p, thr_to_lwpid(th_p), (caddr_t)xregset) != PS_OK)
20370Sstevel@tonic-gate return_val = TD_DBERR;
20380Sstevel@tonic-gate
20390Sstevel@tonic-gate (void) ps_pcontinue(ph_p);
20400Sstevel@tonic-gate ph_unlock(th_p->th_ta_p);
20410Sstevel@tonic-gate return (return_val);
20420Sstevel@tonic-gate #else /* __sparc */
20430Sstevel@tonic-gate return (TD_NOXREGS);
20440Sstevel@tonic-gate #endif /* __sparc */
20450Sstevel@tonic-gate }
20460Sstevel@tonic-gate
/*
 * Private state for td_searcher(): 'addr' is the thread address being
 * searched for and 'status' records whether it has been found.
 */
struct searcher {
	psaddr_t	addr;	/* th_unique value to match */
	int		status;	/* set to 1 when a matching thread is seen */
};
20510Sstevel@tonic-gate
20520Sstevel@tonic-gate /*
20530Sstevel@tonic-gate * Check the struct thread address in *th_p again first
20540Sstevel@tonic-gate * value in "data". If value in data is found, set second value
20550Sstevel@tonic-gate * in "data" to 1 and return 1 to terminate iterations.
20560Sstevel@tonic-gate * This function is used by td_thr_validate() to verify that
20570Sstevel@tonic-gate * a thread handle is valid.
20580Sstevel@tonic-gate */
20590Sstevel@tonic-gate static int
td_searcher(const td_thrhandle_t * th_p,void * data)20600Sstevel@tonic-gate td_searcher(const td_thrhandle_t *th_p, void *data)
20610Sstevel@tonic-gate {
20620Sstevel@tonic-gate struct searcher *searcher_data = (struct searcher *)data;
20630Sstevel@tonic-gate
20640Sstevel@tonic-gate if (searcher_data->addr == th_p->th_unique) {
20650Sstevel@tonic-gate searcher_data->status = 1;
20660Sstevel@tonic-gate return (1);
20670Sstevel@tonic-gate }
20680Sstevel@tonic-gate return (0);
20690Sstevel@tonic-gate }
20700Sstevel@tonic-gate
20710Sstevel@tonic-gate /*
20720Sstevel@tonic-gate * Validate the thread handle. Check that
20730Sstevel@tonic-gate * a thread exists in the thread agent/process that
20740Sstevel@tonic-gate * corresponds to thread with handle *th_p.
20750Sstevel@tonic-gate * Currently unused by dbx.
20760Sstevel@tonic-gate */
20770Sstevel@tonic-gate #pragma weak td_thr_validate = __td_thr_validate
20780Sstevel@tonic-gate td_err_e
__td_thr_validate(const td_thrhandle_t * th_p)20790Sstevel@tonic-gate __td_thr_validate(const td_thrhandle_t *th_p)
20800Sstevel@tonic-gate {
20810Sstevel@tonic-gate td_err_e return_val;
20820Sstevel@tonic-gate struct searcher searcher_data = {0, 0};
20830Sstevel@tonic-gate
20840Sstevel@tonic-gate if (th_p == NULL)
20850Sstevel@tonic-gate return (TD_BADTH);
20860Sstevel@tonic-gate if (th_p->th_unique == NULL || th_p->th_ta_p == NULL)
20870Sstevel@tonic-gate return (TD_BADTH);
20880Sstevel@tonic-gate
20890Sstevel@tonic-gate /*
20900Sstevel@tonic-gate * LOCKING EXCEPTION - Locking is not required
20910Sstevel@tonic-gate * here because no use of the thread agent is made (other
20920Sstevel@tonic-gate * than the sanity check) and checking of the thread
20930Sstevel@tonic-gate * agent will be done in __td_ta_thr_iter.
20940Sstevel@tonic-gate */
20950Sstevel@tonic-gate
20960Sstevel@tonic-gate searcher_data.addr = th_p->th_unique;
20970Sstevel@tonic-gate return_val = __td_ta_thr_iter(th_p->th_ta_p,
20986247Sraf td_searcher, &searcher_data,
20996247Sraf TD_THR_ANY_STATE, TD_THR_LOWEST_PRIORITY,
21006247Sraf TD_SIGNO_MASK, TD_THR_ANY_USER_FLAGS);
21010Sstevel@tonic-gate
21020Sstevel@tonic-gate if (return_val == TD_OK && searcher_data.status == 0)
21030Sstevel@tonic-gate return_val = TD_NOTHR;
21040Sstevel@tonic-gate
21050Sstevel@tonic-gate return (return_val);
21060Sstevel@tonic-gate }
21070Sstevel@tonic-gate
/*
 * Get a thread's private binding to a given thread specific
 * data(TSD) key(see thr_getspecific(3T).  If the thread doesn't
 * have a binding for a particular key, then NULL is returned.
 */
#pragma weak td_thr_tsd = __td_thr_tsd
td_err_e
__td_thr_tsd(td_thrhandle_t *th_p, thread_key_t key, void **data_pp)
{
	struct ps_prochandle *ph_p;
	td_thragent_t *ta_p;
	td_err_e return_val;
	int maxkey;		/* number of keys the process has in use */
	int nkey;		/* number of slots in this thread's tsd array */
	psaddr_t tsd_paddr;	/* target address of the thread's tsd array */

	if (data_pp == NULL)
		return (TD_ERR);
	/* Default result: no binding. */
	*data_pp = NULL;
	if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
		return (return_val);
	ta_p = th_p->th_ta_p;
	/* Stop the process so its tsd structures cannot change underfoot. */
	if (ps_pstop(ph_p) != PS_OK) {
		ph_unlock(ta_p);
		return (TD_DBERR);
	}

	if (ta_p->model == PR_MODEL_NATIVE) {
		ulwp_t *ulwp = (ulwp_t *)th_p->th_unique;
		tsd_metadata_t tsdm;
		tsd_t stsd;

		/* Read the process-wide tsd metadata from libc's uberdata. */
		if (ps_pdread(ph_p,
		    ta_p->uberdata_addr + offsetof(uberdata_t, tsd_metadata),
		    &tsdm, sizeof (tsdm)) != PS_OK)
			return_val = TD_DBERR;
		/* Read the thread's pointer to its slow tsd array. */
		else if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_stsd,
		    &tsd_paddr, sizeof (tsd_paddr)) != PS_OK)
			return_val = TD_DBERR;
		/* If the slow array exists, read its header for tsd_nalloc. */
		else if (tsd_paddr != NULL &&
		    ps_pdread(ph_p, tsd_paddr, &stsd, sizeof (stsd)) != PS_OK)
			return_val = TD_DBERR;
		else {
			maxkey = tsdm.tsdm_nused;
			nkey = tsd_paddr == NULL ? TSD_NFAST : stsd.tsd_nalloc;

			/* Small keys live in the per-ulwp fast tsd slots. */
			if (key < TSD_NFAST)
				tsd_paddr = (psaddr_t)&ulwp->ul_ftsd[0];
		}
	} else {
#if defined(_LP64) && defined(_SYSCALL32)
		/* Same dance, using the 32-bit target's layouts. */
		ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique;
		tsd_metadata32_t tsdm;
		tsd32_t stsd;
		caddr32_t addr;

		if (ps_pdread(ph_p,
		    ta_p->uberdata_addr + offsetof(uberdata32_t, tsd_metadata),
		    &tsdm, sizeof (tsdm)) != PS_OK)
			return_val = TD_DBERR;
		else if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_stsd,
		    &addr, sizeof (addr)) != PS_OK)
			return_val = TD_DBERR;
		else if (addr != NULL &&
		    ps_pdread(ph_p, addr, &stsd, sizeof (stsd)) != PS_OK)
			return_val = TD_DBERR;
		else {
			maxkey = tsdm.tsdm_nused;
			nkey = addr == NULL ? TSD_NFAST : stsd.tsd_nalloc;

			if (key < TSD_NFAST) {
				tsd_paddr = (psaddr_t)&ulwp->ul_ftsd[0];
			} else {
				tsd_paddr = addr;
			}
		}
#else
		return_val = TD_ERR;
#endif	/* _SYSCALL32 */
	}

	/* Keys are 1-based; reject anything the process never created. */
	if (return_val == TD_OK && (key < 1 || key >= maxkey))
		return_val = TD_NOTSD;
	/* A valid key beyond this thread's allocation simply has no binding. */
	if (return_val != TD_OK || key >= nkey) {
		/* NULL has already been stored in data_pp */
		(void) ps_pcontinue(ph_p);
		ph_unlock(ta_p);
		return (return_val);
	}

	/*
	 * Read the value from the thread's tsd array.
	 */
	if (ta_p->model == PR_MODEL_NATIVE) {
		void *value;

		if (ps_pdread(ph_p, tsd_paddr + key * sizeof (void *),
		    &value, sizeof (value)) != PS_OK)
			return_val = TD_DBERR;
		else
			*data_pp = value;
#if defined(_LP64) && defined(_SYSCALL32)
	} else {
		caddr32_t value32;

		if (ps_pdread(ph_p, tsd_paddr + key * sizeof (caddr32_t),
		    &value32, sizeof (value32)) != PS_OK)
			return_val = TD_DBERR;
		else
			*data_pp = (void *)(uintptr_t)value32;
#endif	/* _SYSCALL32 */
	}

	(void) ps_pcontinue(ph_p);
	ph_unlock(ta_p);
	return (return_val);
}
22250Sstevel@tonic-gate
/*
 * Get the base address of a thread's thread local storage (TLS) block
 * for the module (executable or shared object) identified by 'moduleid'.
 */
#pragma weak td_thr_tlsbase = __td_thr_tlsbase
td_err_e
__td_thr_tlsbase(td_thrhandle_t *th_p, ulong_t moduleid, psaddr_t *base)
{
	struct ps_prochandle *ph_p;
	td_thragent_t *ta_p;
	td_err_e return_val;

	if (base == NULL)
		return (TD_ERR);
	/* Default result in case of failure. */
	*base = NULL;
	if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
		return (return_val);
	ta_p = th_p->th_ta_p;
	/* Stop the process so TLS bookkeeping cannot change underfoot. */
	if (ps_pstop(ph_p) != PS_OK) {
		ph_unlock(ta_p);
		return (TD_DBERR);
	}

	if (ta_p->model == PR_MODEL_NATIVE) {
		ulwp_t *ulwp = (ulwp_t *)th_p->th_unique;
		tls_metadata_t tls_metadata;
		TLS_modinfo tlsmod;
		tls_t tls;

		/* Read the process-wide TLS metadata from libc's uberdata. */
		if (ps_pdread(ph_p,
		    ta_p->uberdata_addr + offsetof(uberdata_t, tls_metadata),
		    &tls_metadata, sizeof (tls_metadata)) != PS_OK)
			return_val = TD_DBERR;
		else if (moduleid >= tls_metadata.tls_modinfo.tls_size)
			return_val = TD_NOTLS;	/* no such module */
		/* Fetch the per-module TLS descriptor. */
		else if (ps_pdread(ph_p,
		    (psaddr_t)((TLS_modinfo *)
		    tls_metadata.tls_modinfo.tls_data + moduleid),
		    &tlsmod, sizeof (tlsmod)) != PS_OK)
			return_val = TD_DBERR;
		else if (tlsmod.tm_memsz == 0)
			return_val = TD_NOTLS;	/* module has no TLS */
		else if (tlsmod.tm_flags & TM_FLG_STATICTLS)
			/* Static TLS: fixed offset below the ulwp. */
			*base = (psaddr_t)ulwp - tlsmod.tm_stattlsoffset;
		/* Dynamic TLS: follow the thread's ul_tls vector. */
		else if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_tls,
		    &tls, sizeof (tls)) != PS_OK)
			return_val = TD_DBERR;
		else if (moduleid >= tls.tls_size)
			return_val = TD_TLSDEFER;	/* not yet allocated */
		else if (ps_pdread(ph_p,
		    (psaddr_t)((tls_t *)tls.tls_data + moduleid),
		    &tls, sizeof (tls)) != PS_OK)
			return_val = TD_DBERR;
		else if (tls.tls_size == 0)
			return_val = TD_TLSDEFER;	/* not yet allocated */
		else
			*base = (psaddr_t)tls.tls_data;
	} else {
#if defined(_LP64) && defined(_SYSCALL32)
		/* Same logic using the 32-bit target's layouts. */
		ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique;
		tls_metadata32_t tls_metadata;
		TLS_modinfo32 tlsmod;
		tls32_t tls;

		if (ps_pdread(ph_p,
		    ta_p->uberdata_addr + offsetof(uberdata32_t, tls_metadata),
		    &tls_metadata, sizeof (tls_metadata)) != PS_OK)
			return_val = TD_DBERR;
		else if (moduleid >= tls_metadata.tls_modinfo.tls_size)
			return_val = TD_NOTLS;
		else if (ps_pdread(ph_p,
		    (psaddr_t)((TLS_modinfo32 *)
		    (uintptr_t)tls_metadata.tls_modinfo.tls_data + moduleid),
		    &tlsmod, sizeof (tlsmod)) != PS_OK)
			return_val = TD_DBERR;
		else if (tlsmod.tm_memsz == 0)
			return_val = TD_NOTLS;
		else if (tlsmod.tm_flags & TM_FLG_STATICTLS)
			*base = (psaddr_t)ulwp - tlsmod.tm_stattlsoffset;
		else if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_tls,
		    &tls, sizeof (tls)) != PS_OK)
			return_val = TD_DBERR;
		else if (moduleid >= tls.tls_size)
			return_val = TD_TLSDEFER;
		else if (ps_pdread(ph_p,
		    (psaddr_t)((tls32_t *)(uintptr_t)tls.tls_data + moduleid),
		    &tls, sizeof (tls)) != PS_OK)
			return_val = TD_DBERR;
		else if (tls.tls_size == 0)
			return_val = TD_TLSDEFER;
		else
			*base = (psaddr_t)tls.tls_data;
#else
		return_val = TD_ERR;
#endif	/* _SYSCALL32 */
	}

	(void) ps_pcontinue(ph_p);
	ph_unlock(ta_p);
	return (return_val);
}
23270Sstevel@tonic-gate
/*
 * Change a thread's priority to the value specified by ti_pri.
 * Currently unused by dbx.
 */
#pragma weak td_thr_setprio = __td_thr_setprio
/* ARGSUSED */
td_err_e
__td_thr_setprio(td_thrhandle_t *th_p, int ti_pri)
{
	/*
	 * This operation is not supported by this implementation;
	 * unconditionally report no capability.
	 */
	return (TD_NOCAPAB);
}
23390Sstevel@tonic-gate
/*
 * This structure links td_thr_lockowner and the lowner_cb callback function.
 */
typedef struct {
	td_sync_iter_f	*owner_cb;	/* caller's per-lock callback */
	void		*owner_cb_arg;	/* opaque argument passed to owner_cb */
	td_thrhandle_t	*th_p;		/* thread whose owned locks we seek */
} lowner_cb_ctl_t;
23480Sstevel@tonic-gate
23490Sstevel@tonic-gate static int
lowner_cb(const td_synchandle_t * sh_p,void * arg)23500Sstevel@tonic-gate lowner_cb(const td_synchandle_t *sh_p, void *arg)
23510Sstevel@tonic-gate {
23520Sstevel@tonic-gate lowner_cb_ctl_t *ocb = arg;
23530Sstevel@tonic-gate int trunc = 0;
23540Sstevel@tonic-gate union {
23550Sstevel@tonic-gate rwlock_t rwl;
23560Sstevel@tonic-gate mutex_t mx;
23570Sstevel@tonic-gate } rw_m;
23580Sstevel@tonic-gate
23590Sstevel@tonic-gate if (ps_pdread(sh_p->sh_ta_p->ph_p, sh_p->sh_unique,
23600Sstevel@tonic-gate &rw_m, sizeof (rw_m)) != PS_OK) {
23610Sstevel@tonic-gate trunc = 1;
23620Sstevel@tonic-gate if (ps_pdread(sh_p->sh_ta_p->ph_p, sh_p->sh_unique,
23630Sstevel@tonic-gate &rw_m.mx, sizeof (rw_m.mx)) != PS_OK)
23640Sstevel@tonic-gate return (0);
23650Sstevel@tonic-gate }
23660Sstevel@tonic-gate if (rw_m.mx.mutex_magic == MUTEX_MAGIC &&
23670Sstevel@tonic-gate rw_m.mx.mutex_owner == ocb->th_p->th_unique)
23680Sstevel@tonic-gate return ((ocb->owner_cb)(sh_p, ocb->owner_cb_arg));
23690Sstevel@tonic-gate if (!trunc && rw_m.rwl.magic == RWL_MAGIC) {
23700Sstevel@tonic-gate mutex_t *rwlock = &rw_m.rwl.mutex;
23710Sstevel@tonic-gate if (rwlock->mutex_owner == ocb->th_p->th_unique)
23720Sstevel@tonic-gate return ((ocb->owner_cb)(sh_p, ocb->owner_cb_arg));
23730Sstevel@tonic-gate }
23740Sstevel@tonic-gate return (0);
23750Sstevel@tonic-gate }
23760Sstevel@tonic-gate
23770Sstevel@tonic-gate /*
23780Sstevel@tonic-gate * Iterate over the set of locks owned by a specified thread.
23790Sstevel@tonic-gate * If cb returns a non-zero value, terminate iterations.
23800Sstevel@tonic-gate */
23810Sstevel@tonic-gate #pragma weak td_thr_lockowner = __td_thr_lockowner
23820Sstevel@tonic-gate td_err_e
__td_thr_lockowner(const td_thrhandle_t * th_p,td_sync_iter_f * cb,void * cb_data)23830Sstevel@tonic-gate __td_thr_lockowner(const td_thrhandle_t *th_p, td_sync_iter_f *cb,
23840Sstevel@tonic-gate void *cb_data)
23850Sstevel@tonic-gate {
23860Sstevel@tonic-gate td_thragent_t *ta_p;
23870Sstevel@tonic-gate td_err_e return_val;
23880Sstevel@tonic-gate lowner_cb_ctl_t lcb;
23890Sstevel@tonic-gate
23900Sstevel@tonic-gate /*
23910Sstevel@tonic-gate * Just sanity checks.
23920Sstevel@tonic-gate */
23930Sstevel@tonic-gate if (ph_lock_th((td_thrhandle_t *)th_p, &return_val) == NULL)
23940Sstevel@tonic-gate return (return_val);
23950Sstevel@tonic-gate ta_p = th_p->th_ta_p;
23960Sstevel@tonic-gate ph_unlock(ta_p);
23970Sstevel@tonic-gate
23980Sstevel@tonic-gate lcb.owner_cb = cb;
23990Sstevel@tonic-gate lcb.owner_cb_arg = cb_data;
24000Sstevel@tonic-gate lcb.th_p = (td_thrhandle_t *)th_p;
24010Sstevel@tonic-gate return (__td_ta_sync_iter(ta_p, lowner_cb, &lcb));
24020Sstevel@tonic-gate }
24030Sstevel@tonic-gate
24040Sstevel@tonic-gate /*
24050Sstevel@tonic-gate * If a thread is asleep on a synchronization variable,
24060Sstevel@tonic-gate * then get the synchronization handle.
24070Sstevel@tonic-gate */
24080Sstevel@tonic-gate #pragma weak td_thr_sleepinfo = __td_thr_sleepinfo
24090Sstevel@tonic-gate td_err_e
__td_thr_sleepinfo(const td_thrhandle_t * th_p,td_synchandle_t * sh_p)24100Sstevel@tonic-gate __td_thr_sleepinfo(const td_thrhandle_t *th_p, td_synchandle_t *sh_p)
24110Sstevel@tonic-gate {
24120Sstevel@tonic-gate struct ps_prochandle *ph_p;
24130Sstevel@tonic-gate td_err_e return_val = TD_OK;
24140Sstevel@tonic-gate uintptr_t wchan;
24150Sstevel@tonic-gate
24160Sstevel@tonic-gate if (sh_p == NULL)
24170Sstevel@tonic-gate return (TD_ERR);
24180Sstevel@tonic-gate if ((ph_p = ph_lock_th((td_thrhandle_t *)th_p, &return_val)) == NULL)
24190Sstevel@tonic-gate return (return_val);
24200Sstevel@tonic-gate
24210Sstevel@tonic-gate /*
24220Sstevel@tonic-gate * No need to stop the process for a simple read.
24230Sstevel@tonic-gate */
24240Sstevel@tonic-gate if (th_p->th_ta_p->model == PR_MODEL_NATIVE) {
24250Sstevel@tonic-gate ulwp_t *ulwp = (ulwp_t *)th_p->th_unique;
24260Sstevel@tonic-gate
24270Sstevel@tonic-gate if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_wchan,
24280Sstevel@tonic-gate &wchan, sizeof (wchan)) != PS_OK)
24290Sstevel@tonic-gate return_val = TD_DBERR;
24300Sstevel@tonic-gate } else {
24310Sstevel@tonic-gate #if defined(_LP64) && defined(_SYSCALL32)
24320Sstevel@tonic-gate ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique;
24330Sstevel@tonic-gate caddr32_t wchan32;
24340Sstevel@tonic-gate
24350Sstevel@tonic-gate if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_wchan,
24360Sstevel@tonic-gate &wchan32, sizeof (wchan32)) != PS_OK)
24370Sstevel@tonic-gate return_val = TD_DBERR;
24380Sstevel@tonic-gate wchan = wchan32;
24390Sstevel@tonic-gate #else
24400Sstevel@tonic-gate return_val = TD_ERR;
24410Sstevel@tonic-gate #endif /* _SYSCALL32 */
24420Sstevel@tonic-gate }
24430Sstevel@tonic-gate
24440Sstevel@tonic-gate if (return_val != TD_OK || wchan == NULL) {
24450Sstevel@tonic-gate sh_p->sh_ta_p = NULL;
24460Sstevel@tonic-gate sh_p->sh_unique = NULL;
24470Sstevel@tonic-gate if (return_val == TD_OK)
24480Sstevel@tonic-gate return_val = TD_ERR;
24490Sstevel@tonic-gate } else {
24500Sstevel@tonic-gate sh_p->sh_ta_p = th_p->th_ta_p;
24510Sstevel@tonic-gate sh_p->sh_unique = (psaddr_t)wchan;
24520Sstevel@tonic-gate }
24530Sstevel@tonic-gate
24540Sstevel@tonic-gate ph_unlock(th_p->th_ta_p);
24550Sstevel@tonic-gate return (return_val);
24560Sstevel@tonic-gate }
24570Sstevel@tonic-gate
24580Sstevel@tonic-gate /*
24590Sstevel@tonic-gate * Which thread is running on an lwp?
24600Sstevel@tonic-gate */
24610Sstevel@tonic-gate #pragma weak td_ta_map_lwp2thr = __td_ta_map_lwp2thr
24620Sstevel@tonic-gate td_err_e
__td_ta_map_lwp2thr(td_thragent_t * ta_p,lwpid_t lwpid,td_thrhandle_t * th_p)24630Sstevel@tonic-gate __td_ta_map_lwp2thr(td_thragent_t *ta_p, lwpid_t lwpid,
24640Sstevel@tonic-gate td_thrhandle_t *th_p)
24650Sstevel@tonic-gate {
24660Sstevel@tonic-gate return (__td_ta_map_id2thr(ta_p, lwpid, th_p));
24670Sstevel@tonic-gate }
24680Sstevel@tonic-gate
24690Sstevel@tonic-gate /*
/*
 * Common code for td_sync_get_info() and td_sync_get_stats()
 */
static td_err_e
sync_get_info_common(const td_synchandle_t *sh_p, struct ps_prochandle *ph_p,
	td_syncinfo_t *si_p)
{
	int trunc = 0;		/* 1 if only the condvar portion was read */
	td_so_un_t generic_so;

	/*
	 * Determine the sync. object type; a little type fudgery here.
	 * First attempt to read the whole union.  If that fails, attempt
	 * to read just the condvar.  A condvar is the smallest sync. object.
	 */
	if (ps_pdread(ph_p, sh_p->sh_unique,
	    &generic_so, sizeof (generic_so)) != PS_OK) {
		trunc = 1;
		if (ps_pdread(ph_p, sh_p->sh_unique, &generic_so.condition,
		    sizeof (generic_so.condition)) != PS_OK)
			return (TD_DBERR);
	}

	/* All sync objects carry their magic in the same slot. */
	switch (generic_so.condition.cond_magic) {
	case MUTEX_MAGIC:
		/* If truncated above, re-read at full mutex size. */
		if (trunc && ps_pdread(ph_p, sh_p->sh_unique,
		    &generic_so.lock, sizeof (generic_so.lock)) != PS_OK)
			return (TD_DBERR);
		si_p->si_type = TD_SYNC_MUTEX;
		si_p->si_shared_type =
		    (generic_so.lock.mutex_type & USYNC_PROCESS);
		(void) memcpy(si_p->si_flags, &generic_so.lock.mutex_flag,
		    sizeof (generic_so.lock.mutex_flag));
		si_p->si_state.mutex_locked =
		    (generic_so.lock.mutex_lockw != 0);
		si_p->si_size = sizeof (generic_so.lock);
		si_p->si_has_waiters = generic_so.lock.mutex_waiters;
		si_p->si_rcount = generic_so.lock.mutex_rcount;
		si_p->si_prioceiling = generic_so.lock.mutex_ceiling;
		if (si_p->si_state.mutex_locked) {
			/* ownerpid applies only to process-shared mutexes */
			if (si_p->si_shared_type & USYNC_PROCESS)
				si_p->si_ownerpid =
				    generic_so.lock.mutex_ownerpid;
			si_p->si_owner.th_ta_p = sh_p->sh_ta_p;
			si_p->si_owner.th_unique = generic_so.lock.mutex_owner;
		}
		break;
	case COND_MAGIC:
		/* Condvar: the truncated read is already complete. */
		si_p->si_type = TD_SYNC_COND;
		si_p->si_shared_type =
		    (generic_so.condition.cond_type & USYNC_PROCESS);
		(void) memcpy(si_p->si_flags, generic_so.condition.flags.flag,
		    sizeof (generic_so.condition.flags.flag));
		si_p->si_size = sizeof (generic_so.condition);
		si_p->si_has_waiters =
		    (generic_so.condition.cond_waiters_user |
		    generic_so.condition.cond_waiters_kernel)? 1 : 0;
		break;
	case SEMA_MAGIC:
		/* If truncated above, re-read at full semaphore size. */
		if (trunc && ps_pdread(ph_p, sh_p->sh_unique,
		    &generic_so.semaphore, sizeof (generic_so.semaphore))
		    != PS_OK)
			return (TD_DBERR);
		si_p->si_type = TD_SYNC_SEMA;
		si_p->si_shared_type =
		    (generic_so.semaphore.type & USYNC_PROCESS);
		si_p->si_state.sem_count = generic_so.semaphore.count;
		si_p->si_size = sizeof (generic_so.semaphore);
		si_p->si_has_waiters =
		    ((lwp_sema_t *)&generic_so.semaphore)->flags[7];
		/* this is useless but the old interface provided it */
		si_p->si_data = (psaddr_t)generic_so.semaphore.count;
		break;
	case RWL_MAGIC:
	{
		uint32_t rwstate;

		/* If truncated above, re-read at full rwlock size. */
		if (trunc && ps_pdread(ph_p, sh_p->sh_unique,
		    &generic_so.rwlock, sizeof (generic_so.rwlock)) != PS_OK)
			return (TD_DBERR);
		si_p->si_type = TD_SYNC_RWLOCK;
		si_p->si_shared_type =
		    (generic_so.rwlock.rwlock_type & USYNC_PROCESS);
		si_p->si_size = sizeof (generic_so.rwlock);

		/* The state word encodes writer, reader and waiter bits. */
		rwstate = (uint32_t)generic_so.rwlock.rwlock_readers;
		if (rwstate & URW_WRITE_LOCKED) {
			/* Write-locked: nreaders of -1 flags the writer. */
			si_p->si_state.nreaders = -1;
			si_p->si_is_wlock = 1;
			si_p->si_owner.th_ta_p = sh_p->sh_ta_p;
			si_p->si_owner.th_unique =
			    generic_so.rwlock.rwlock_owner;
			if (si_p->si_shared_type & USYNC_PROCESS)
				si_p->si_ownerpid =
				    generic_so.rwlock.rwlock_ownerpid;
		} else {
			si_p->si_state.nreaders = (rwstate & URW_READERS_MASK);
		}
		si_p->si_has_waiters = ((rwstate & URW_HAS_WAITERS) != 0);

		/* this is useless but the old interface provided it */
		si_p->si_data = (psaddr_t)generic_so.rwlock.readers;
		break;
	}
	default:
		return (TD_BADSH);
	}

	si_p->si_ta_p = sh_p->sh_ta_p;
	si_p->si_sv_addr = sh_p->sh_unique;
	return (TD_OK);
}
25810Sstevel@tonic-gate
25820Sstevel@tonic-gate /*
25830Sstevel@tonic-gate * Given a synchronization handle, fill in the
25840Sstevel@tonic-gate * information for the synchronization variable into *si_p.
25850Sstevel@tonic-gate */
25860Sstevel@tonic-gate #pragma weak td_sync_get_info = __td_sync_get_info
25870Sstevel@tonic-gate td_err_e
__td_sync_get_info(const td_synchandle_t * sh_p,td_syncinfo_t * si_p)25880Sstevel@tonic-gate __td_sync_get_info(const td_synchandle_t *sh_p, td_syncinfo_t *si_p)
25890Sstevel@tonic-gate {
25900Sstevel@tonic-gate struct ps_prochandle *ph_p;
25910Sstevel@tonic-gate td_err_e return_val;
25920Sstevel@tonic-gate
25930Sstevel@tonic-gate if (si_p == NULL)
25940Sstevel@tonic-gate return (TD_ERR);
25950Sstevel@tonic-gate (void) memset(si_p, 0, sizeof (*si_p));
25960Sstevel@tonic-gate if ((ph_p = ph_lock_sh(sh_p, &return_val)) == NULL)
25970Sstevel@tonic-gate return (return_val);
25980Sstevel@tonic-gate if (ps_pstop(ph_p) != PS_OK) {
25990Sstevel@tonic-gate ph_unlock(sh_p->sh_ta_p);
26000Sstevel@tonic-gate return (TD_DBERR);
26010Sstevel@tonic-gate }
26020Sstevel@tonic-gate
26030Sstevel@tonic-gate return_val = sync_get_info_common(sh_p, ph_p, si_p);
26040Sstevel@tonic-gate
26050Sstevel@tonic-gate (void) ps_pcontinue(ph_p);
26060Sstevel@tonic-gate ph_unlock(sh_p->sh_ta_p);
26070Sstevel@tonic-gate return (return_val);
26080Sstevel@tonic-gate }
26090Sstevel@tonic-gate
26100Sstevel@tonic-gate static uint_t
tdb_addr_hash64(uint64_t addr)26110Sstevel@tonic-gate tdb_addr_hash64(uint64_t addr)
26120Sstevel@tonic-gate {
26130Sstevel@tonic-gate uint64_t value60 = (addr >> 4);
26140Sstevel@tonic-gate uint32_t value30 = (value60 >> 30) ^ (value60 & 0x3fffffff);
26150Sstevel@tonic-gate return ((value30 >> 15) ^ (value30 & 0x7fff));
26160Sstevel@tonic-gate }
26170Sstevel@tonic-gate
26180Sstevel@tonic-gate static uint_t
tdb_addr_hash32(uint64_t addr)26190Sstevel@tonic-gate tdb_addr_hash32(uint64_t addr)
26200Sstevel@tonic-gate {
26210Sstevel@tonic-gate uint32_t value30 = (addr >> 2); /* 30 bits */
26220Sstevel@tonic-gate return ((value30 >> 15) ^ (value30 & 0x7fff));
26230Sstevel@tonic-gate }
26240Sstevel@tonic-gate
26250Sstevel@tonic-gate static td_err_e
read_sync_stats(td_thragent_t * ta_p,psaddr_t hash_table,psaddr_t sync_obj_addr,tdb_sync_stats_t * sync_stats)26260Sstevel@tonic-gate read_sync_stats(td_thragent_t *ta_p, psaddr_t hash_table,
26270Sstevel@tonic-gate psaddr_t sync_obj_addr, tdb_sync_stats_t *sync_stats)
26280Sstevel@tonic-gate {
26290Sstevel@tonic-gate psaddr_t next_desc;
26300Sstevel@tonic-gate uint64_t first;
26310Sstevel@tonic-gate uint_t ix;
26320Sstevel@tonic-gate
26330Sstevel@tonic-gate /*
26340Sstevel@tonic-gate * Compute the hash table index from the synch object's address.
26350Sstevel@tonic-gate */
26360Sstevel@tonic-gate if (ta_p->model == PR_MODEL_LP64)
26370Sstevel@tonic-gate ix = tdb_addr_hash64(sync_obj_addr);
26380Sstevel@tonic-gate else
26390Sstevel@tonic-gate ix = tdb_addr_hash32(sync_obj_addr);
26400Sstevel@tonic-gate
26410Sstevel@tonic-gate /*
26420Sstevel@tonic-gate * Get the address of the first element in the linked list.
26430Sstevel@tonic-gate */
26440Sstevel@tonic-gate if (ps_pdread(ta_p->ph_p, hash_table + ix * sizeof (uint64_t),
26450Sstevel@tonic-gate &first, sizeof (first)) != PS_OK)
26460Sstevel@tonic-gate return (TD_DBERR);
26470Sstevel@tonic-gate
26480Sstevel@tonic-gate /*
26490Sstevel@tonic-gate * Search the linked list for an entry for the synch object..
26500Sstevel@tonic-gate */
26510Sstevel@tonic-gate for (next_desc = (psaddr_t)first; next_desc != NULL;
26520Sstevel@tonic-gate next_desc = (psaddr_t)sync_stats->next) {
26530Sstevel@tonic-gate if (ps_pdread(ta_p->ph_p, next_desc,
26540Sstevel@tonic-gate sync_stats, sizeof (*sync_stats)) != PS_OK)
26550Sstevel@tonic-gate return (TD_DBERR);
26560Sstevel@tonic-gate if (sync_stats->sync_addr == sync_obj_addr)
26570Sstevel@tonic-gate return (TD_OK);
26580Sstevel@tonic-gate }
26590Sstevel@tonic-gate
26600Sstevel@tonic-gate (void) memset(sync_stats, 0, sizeof (*sync_stats));
26610Sstevel@tonic-gate return (TD_OK);
26620Sstevel@tonic-gate }
26630Sstevel@tonic-gate
26640Sstevel@tonic-gate /*
26650Sstevel@tonic-gate * Given a synchronization handle, fill in the
26660Sstevel@tonic-gate * statistics for the synchronization variable into *ss_p.
26670Sstevel@tonic-gate */
26680Sstevel@tonic-gate #pragma weak td_sync_get_stats = __td_sync_get_stats
26690Sstevel@tonic-gate td_err_e
__td_sync_get_stats(const td_synchandle_t * sh_p,td_syncstats_t * ss_p)26700Sstevel@tonic-gate __td_sync_get_stats(const td_synchandle_t *sh_p, td_syncstats_t *ss_p)
26710Sstevel@tonic-gate {
26720Sstevel@tonic-gate struct ps_prochandle *ph_p;
26730Sstevel@tonic-gate td_thragent_t *ta_p;
26740Sstevel@tonic-gate td_err_e return_val;
26750Sstevel@tonic-gate register_sync_t enable;
26760Sstevel@tonic-gate psaddr_t hashaddr;
26770Sstevel@tonic-gate tdb_sync_stats_t sync_stats;
26780Sstevel@tonic-gate size_t ix;
26790Sstevel@tonic-gate
26800Sstevel@tonic-gate if (ss_p == NULL)
26810Sstevel@tonic-gate return (TD_ERR);
26820Sstevel@tonic-gate (void) memset(ss_p, 0, sizeof (*ss_p));
26830Sstevel@tonic-gate if ((ph_p = ph_lock_sh(sh_p, &return_val)) == NULL)
26840Sstevel@tonic-gate return (return_val);
26850Sstevel@tonic-gate ta_p = sh_p->sh_ta_p;
26860Sstevel@tonic-gate if (ps_pstop(ph_p) != PS_OK) {
26870Sstevel@tonic-gate ph_unlock(ta_p);
26880Sstevel@tonic-gate return (TD_DBERR);
26890Sstevel@tonic-gate }
26900Sstevel@tonic-gate
26910Sstevel@tonic-gate if ((return_val = sync_get_info_common(sh_p, ph_p, &ss_p->ss_info))
26920Sstevel@tonic-gate != TD_OK) {
26930Sstevel@tonic-gate if (return_val != TD_BADSH)
26940Sstevel@tonic-gate goto out;
26950Sstevel@tonic-gate /* we can correct TD_BADSH */
26960Sstevel@tonic-gate (void) memset(&ss_p->ss_info, 0, sizeof (ss_p->ss_info));
26970Sstevel@tonic-gate ss_p->ss_info.si_ta_p = sh_p->sh_ta_p;
26980Sstevel@tonic-gate ss_p->ss_info.si_sv_addr = sh_p->sh_unique;
26990Sstevel@tonic-gate /* we correct si_type and si_size below */
27000Sstevel@tonic-gate return_val = TD_OK;
27010Sstevel@tonic-gate }
27020Sstevel@tonic-gate if (ps_pdread(ph_p, ta_p->tdb_register_sync_addr,
27030Sstevel@tonic-gate &enable, sizeof (enable)) != PS_OK) {
27040Sstevel@tonic-gate return_val = TD_DBERR;
27050Sstevel@tonic-gate goto out;
27060Sstevel@tonic-gate }
27070Sstevel@tonic-gate if (enable != REGISTER_SYNC_ON)
27080Sstevel@tonic-gate goto out;
27090Sstevel@tonic-gate
27100Sstevel@tonic-gate /*
27110Sstevel@tonic-gate * Get the address of the hash table in the target process.
27120Sstevel@tonic-gate */
27130Sstevel@tonic-gate if (ta_p->model == PR_MODEL_NATIVE) {
27140Sstevel@tonic-gate if (ps_pdread(ph_p, ta_p->uberdata_addr +
27150Sstevel@tonic-gate offsetof(uberdata_t, tdb.tdb_sync_addr_hash),
27160Sstevel@tonic-gate &hashaddr, sizeof (&hashaddr)) != PS_OK) {
27170Sstevel@tonic-gate return_val = TD_DBERR;
27180Sstevel@tonic-gate goto out;
27190Sstevel@tonic-gate }
27200Sstevel@tonic-gate } else {
27210Sstevel@tonic-gate #if defined(_LP64) && defined(_SYSCALL32)
27220Sstevel@tonic-gate caddr32_t addr;
27230Sstevel@tonic-gate
27240Sstevel@tonic-gate if (ps_pdread(ph_p, ta_p->uberdata_addr +
27250Sstevel@tonic-gate offsetof(uberdata32_t, tdb.tdb_sync_addr_hash),
27260Sstevel@tonic-gate &addr, sizeof (addr)) != PS_OK) {
27270Sstevel@tonic-gate return_val = TD_DBERR;
27280Sstevel@tonic-gate goto out;
27290Sstevel@tonic-gate }
27300Sstevel@tonic-gate hashaddr = addr;
27310Sstevel@tonic-gate #else
27320Sstevel@tonic-gate return_val = TD_ERR;
27330Sstevel@tonic-gate goto out;
27340Sstevel@tonic-gate #endif /* _SYSCALL32 */
27350Sstevel@tonic-gate }
27360Sstevel@tonic-gate
27370Sstevel@tonic-gate if (hashaddr == 0)
27380Sstevel@tonic-gate return_val = TD_BADSH;
27390Sstevel@tonic-gate else
27400Sstevel@tonic-gate return_val = read_sync_stats(ta_p, hashaddr,
27416247Sraf sh_p->sh_unique, &sync_stats);
27420Sstevel@tonic-gate if (return_val != TD_OK)
27430Sstevel@tonic-gate goto out;
27440Sstevel@tonic-gate
27450Sstevel@tonic-gate /*
27460Sstevel@tonic-gate * We have the hash table entry. Transfer the data to
27470Sstevel@tonic-gate * the td_syncstats_t structure provided by the caller.
27480Sstevel@tonic-gate */
27490Sstevel@tonic-gate switch (sync_stats.un.type) {
27500Sstevel@tonic-gate case TDB_MUTEX:
27516247Sraf {
27520Sstevel@tonic-gate td_mutex_stats_t *msp = &ss_p->ss_un.mutex;
27530Sstevel@tonic-gate
27540Sstevel@tonic-gate ss_p->ss_info.si_type = TD_SYNC_MUTEX;
27550Sstevel@tonic-gate ss_p->ss_info.si_size = sizeof (mutex_t);
27560Sstevel@tonic-gate msp->mutex_lock =
27576247Sraf sync_stats.un.mutex.mutex_lock;
27580Sstevel@tonic-gate msp->mutex_sleep =
27596247Sraf sync_stats.un.mutex.mutex_sleep;
27600Sstevel@tonic-gate msp->mutex_sleep_time =
27616247Sraf sync_stats.un.mutex.mutex_sleep_time;
27620Sstevel@tonic-gate msp->mutex_hold_time =
27636247Sraf sync_stats.un.mutex.mutex_hold_time;
27640Sstevel@tonic-gate msp->mutex_try =
27656247Sraf sync_stats.un.mutex.mutex_try;
27660Sstevel@tonic-gate msp->mutex_try_fail =
27676247Sraf sync_stats.un.mutex.mutex_try_fail;
27680Sstevel@tonic-gate if (sync_stats.sync_addr >= ta_p->hash_table_addr &&
27690Sstevel@tonic-gate (ix = sync_stats.sync_addr - ta_p->hash_table_addr)
27700Sstevel@tonic-gate < ta_p->hash_size * sizeof (thr_hash_table_t))
27710Sstevel@tonic-gate msp->mutex_internal =
27726247Sraf ix / sizeof (thr_hash_table_t) + 1;
27730Sstevel@tonic-gate break;
27746247Sraf }
27750Sstevel@tonic-gate case TDB_COND:
27766247Sraf {
27770Sstevel@tonic-gate td_cond_stats_t *csp = &ss_p->ss_un.cond;
27780Sstevel@tonic-gate
27790Sstevel@tonic-gate ss_p->ss_info.si_type = TD_SYNC_COND;
27800Sstevel@tonic-gate ss_p->ss_info.si_size = sizeof (cond_t);
27810Sstevel@tonic-gate csp->cond_wait =
27826247Sraf sync_stats.un.cond.cond_wait;
27830Sstevel@tonic-gate csp->cond_timedwait =
27846247Sraf sync_stats.un.cond.cond_timedwait;
27850Sstevel@tonic-gate csp->cond_wait_sleep_time =
27866247Sraf sync_stats.un.cond.cond_wait_sleep_time;
27870Sstevel@tonic-gate csp->cond_timedwait_sleep_time =
27886247Sraf sync_stats.un.cond.cond_timedwait_sleep_time;
27890Sstevel@tonic-gate csp->cond_timedwait_timeout =
27906247Sraf sync_stats.un.cond.cond_timedwait_timeout;
27910Sstevel@tonic-gate csp->cond_signal =
27926247Sraf sync_stats.un.cond.cond_signal;
27930Sstevel@tonic-gate csp->cond_broadcast =
27946247Sraf sync_stats.un.cond.cond_broadcast;
27950Sstevel@tonic-gate if (sync_stats.sync_addr >= ta_p->hash_table_addr &&
27960Sstevel@tonic-gate (ix = sync_stats.sync_addr - ta_p->hash_table_addr)
27970Sstevel@tonic-gate < ta_p->hash_size * sizeof (thr_hash_table_t))
27980Sstevel@tonic-gate csp->cond_internal =
27996247Sraf ix / sizeof (thr_hash_table_t) + 1;
28000Sstevel@tonic-gate break;
28016247Sraf }
28020Sstevel@tonic-gate case TDB_RWLOCK:
28036247Sraf {
28040Sstevel@tonic-gate td_rwlock_stats_t *rwsp = &ss_p->ss_un.rwlock;
28050Sstevel@tonic-gate
28060Sstevel@tonic-gate ss_p->ss_info.si_type = TD_SYNC_RWLOCK;
28070Sstevel@tonic-gate ss_p->ss_info.si_size = sizeof (rwlock_t);
28080Sstevel@tonic-gate rwsp->rw_rdlock =
28096247Sraf sync_stats.un.rwlock.rw_rdlock;
28100Sstevel@tonic-gate rwsp->rw_rdlock_try =
28116247Sraf sync_stats.un.rwlock.rw_rdlock_try;
28120Sstevel@tonic-gate rwsp->rw_rdlock_try_fail =
28136247Sraf sync_stats.un.rwlock.rw_rdlock_try_fail;
28140Sstevel@tonic-gate rwsp->rw_wrlock =
28156247Sraf sync_stats.un.rwlock.rw_wrlock;
28160Sstevel@tonic-gate rwsp->rw_wrlock_hold_time =
28176247Sraf sync_stats.un.rwlock.rw_wrlock_hold_time;
28180Sstevel@tonic-gate rwsp->rw_wrlock_try =
28196247Sraf sync_stats.un.rwlock.rw_wrlock_try;
28200Sstevel@tonic-gate rwsp->rw_wrlock_try_fail =
28216247Sraf sync_stats.un.rwlock.rw_wrlock_try_fail;
28220Sstevel@tonic-gate break;
28236247Sraf }
28240Sstevel@tonic-gate case TDB_SEMA:
28256247Sraf {
28260Sstevel@tonic-gate td_sema_stats_t *ssp = &ss_p->ss_un.sema;
28270Sstevel@tonic-gate
28280Sstevel@tonic-gate ss_p->ss_info.si_type = TD_SYNC_SEMA;
28290Sstevel@tonic-gate ss_p->ss_info.si_size = sizeof (sema_t);
28300Sstevel@tonic-gate ssp->sema_wait =
28316247Sraf sync_stats.un.sema.sema_wait;
28320Sstevel@tonic-gate ssp->sema_wait_sleep =
28336247Sraf sync_stats.un.sema.sema_wait_sleep;
28340Sstevel@tonic-gate ssp->sema_wait_sleep_time =
28356247Sraf sync_stats.un.sema.sema_wait_sleep_time;
28360Sstevel@tonic-gate ssp->sema_trywait =
28376247Sraf sync_stats.un.sema.sema_trywait;
28380Sstevel@tonic-gate ssp->sema_trywait_fail =
28396247Sraf sync_stats.un.sema.sema_trywait_fail;
28400Sstevel@tonic-gate ssp->sema_post =
28416247Sraf sync_stats.un.sema.sema_post;
28420Sstevel@tonic-gate ssp->sema_max_count =
28436247Sraf sync_stats.un.sema.sema_max_count;
28440Sstevel@tonic-gate ssp->sema_min_count =
28456247Sraf sync_stats.un.sema.sema_min_count;
28460Sstevel@tonic-gate break;
28476247Sraf }
28480Sstevel@tonic-gate default:
28490Sstevel@tonic-gate return_val = TD_BADSH;
28500Sstevel@tonic-gate break;
28510Sstevel@tonic-gate }
28520Sstevel@tonic-gate
28530Sstevel@tonic-gate out:
28540Sstevel@tonic-gate (void) ps_pcontinue(ph_p);
28550Sstevel@tonic-gate ph_unlock(ta_p);
28560Sstevel@tonic-gate return (return_val);
28570Sstevel@tonic-gate }
28580Sstevel@tonic-gate
28590Sstevel@tonic-gate /*
28600Sstevel@tonic-gate * Change the state of a synchronization variable.
28610Sstevel@tonic-gate * 1) mutex lock state set to value
28620Sstevel@tonic-gate * 2) semaphore's count set to value
28634570Sraf * 3) writer's lock set by value < 0
28644570Sraf * 4) reader's lock number of readers set to value >= 0
28650Sstevel@tonic-gate * Currently unused by dbx.
28660Sstevel@tonic-gate */
28670Sstevel@tonic-gate #pragma weak td_sync_setstate = __td_sync_setstate
28680Sstevel@tonic-gate td_err_e
__td_sync_setstate(const td_synchandle_t * sh_p,long lvalue)28690Sstevel@tonic-gate __td_sync_setstate(const td_synchandle_t *sh_p, long lvalue)
28700Sstevel@tonic-gate {
28710Sstevel@tonic-gate struct ps_prochandle *ph_p;
28720Sstevel@tonic-gate int trunc = 0;
28730Sstevel@tonic-gate td_err_e return_val;
28740Sstevel@tonic-gate td_so_un_t generic_so;
28754570Sraf uint32_t *rwstate;
28760Sstevel@tonic-gate int value = (int)lvalue;
28770Sstevel@tonic-gate
28780Sstevel@tonic-gate if ((ph_p = ph_lock_sh(sh_p, &return_val)) == NULL)
28790Sstevel@tonic-gate return (return_val);
28800Sstevel@tonic-gate if (ps_pstop(ph_p) != PS_OK) {
28810Sstevel@tonic-gate ph_unlock(sh_p->sh_ta_p);
28820Sstevel@tonic-gate return (TD_DBERR);
28830Sstevel@tonic-gate }
28840Sstevel@tonic-gate
28850Sstevel@tonic-gate /*
28860Sstevel@tonic-gate * Read the synch. variable information.
28870Sstevel@tonic-gate * First attempt to read the whole union and if that fails
28880Sstevel@tonic-gate * fall back to reading only the smallest member, the condvar.
28890Sstevel@tonic-gate */
28900Sstevel@tonic-gate if (ps_pdread(ph_p, sh_p->sh_unique, &generic_so,
28910Sstevel@tonic-gate sizeof (generic_so)) != PS_OK) {
28920Sstevel@tonic-gate trunc = 1;
28930Sstevel@tonic-gate if (ps_pdread(ph_p, sh_p->sh_unique, &generic_so.condition,
28940Sstevel@tonic-gate sizeof (generic_so.condition)) != PS_OK) {
28950Sstevel@tonic-gate (void) ps_pcontinue(ph_p);
28960Sstevel@tonic-gate ph_unlock(sh_p->sh_ta_p);
28970Sstevel@tonic-gate return (TD_DBERR);
28980Sstevel@tonic-gate }
28990Sstevel@tonic-gate }
29000Sstevel@tonic-gate
29010Sstevel@tonic-gate /*
29020Sstevel@tonic-gate * Set the new value in the sync. variable, read the synch. variable
29030Sstevel@tonic-gate * information. from the process, reset its value and write it back.
29040Sstevel@tonic-gate */
29050Sstevel@tonic-gate switch (generic_so.condition.mutex_magic) {
29060Sstevel@tonic-gate case MUTEX_MAGIC:
29070Sstevel@tonic-gate if (trunc && ps_pdread(ph_p, sh_p->sh_unique,
29080Sstevel@tonic-gate &generic_so.lock, sizeof (generic_so.lock)) != PS_OK) {
29090Sstevel@tonic-gate return_val = TD_DBERR;
29100Sstevel@tonic-gate break;
29110Sstevel@tonic-gate }
29120Sstevel@tonic-gate generic_so.lock.mutex_lockw = (uint8_t)value;
29130Sstevel@tonic-gate if (ps_pdwrite(ph_p, sh_p->sh_unique, &generic_so.lock,
29140Sstevel@tonic-gate sizeof (generic_so.lock)) != PS_OK)
29150Sstevel@tonic-gate return_val = TD_DBERR;
29160Sstevel@tonic-gate break;
29170Sstevel@tonic-gate case SEMA_MAGIC:
29180Sstevel@tonic-gate if (trunc && ps_pdread(ph_p, sh_p->sh_unique,
29190Sstevel@tonic-gate &generic_so.semaphore, sizeof (generic_so.semaphore))
29200Sstevel@tonic-gate != PS_OK) {
29210Sstevel@tonic-gate return_val = TD_DBERR;
29220Sstevel@tonic-gate break;
29230Sstevel@tonic-gate }
29240Sstevel@tonic-gate generic_so.semaphore.count = value;
29250Sstevel@tonic-gate if (ps_pdwrite(ph_p, sh_p->sh_unique, &generic_so.semaphore,
29260Sstevel@tonic-gate sizeof (generic_so.semaphore)) != PS_OK)
29270Sstevel@tonic-gate return_val = TD_DBERR;
29280Sstevel@tonic-gate break;
29290Sstevel@tonic-gate case COND_MAGIC:
29300Sstevel@tonic-gate /* Operation not supported on a condition variable */
29310Sstevel@tonic-gate return_val = TD_ERR;
29320Sstevel@tonic-gate break;
29330Sstevel@tonic-gate case RWL_MAGIC:
29340Sstevel@tonic-gate if (trunc && ps_pdread(ph_p, sh_p->sh_unique,
29350Sstevel@tonic-gate &generic_so.rwlock, sizeof (generic_so.rwlock)) != PS_OK) {
29360Sstevel@tonic-gate return_val = TD_DBERR;
29370Sstevel@tonic-gate break;
29380Sstevel@tonic-gate }
29394570Sraf rwstate = (uint32_t *)&generic_so.rwlock.readers;
29404570Sraf *rwstate &= URW_HAS_WAITERS;
29414570Sraf if (value < 0)
29424570Sraf *rwstate |= URW_WRITE_LOCKED;
29434570Sraf else
29444570Sraf *rwstate |= (value & URW_READERS_MASK);
29450Sstevel@tonic-gate if (ps_pdwrite(ph_p, sh_p->sh_unique, &generic_so.rwlock,
29460Sstevel@tonic-gate sizeof (generic_so.rwlock)) != PS_OK)
29470Sstevel@tonic-gate return_val = TD_DBERR;
29480Sstevel@tonic-gate break;
29490Sstevel@tonic-gate default:
29500Sstevel@tonic-gate /* Bad sync. object type */
29510Sstevel@tonic-gate return_val = TD_BADSH;
29520Sstevel@tonic-gate break;
29530Sstevel@tonic-gate }
29540Sstevel@tonic-gate
29550Sstevel@tonic-gate (void) ps_pcontinue(ph_p);
29560Sstevel@tonic-gate ph_unlock(sh_p->sh_ta_p);
29570Sstevel@tonic-gate return (return_val);
29580Sstevel@tonic-gate }
29590Sstevel@tonic-gate
/*
 * Control block threaded through __td_ta_thr_iter() by
 * __td_sync_waiters(); waiters_cb() reads the target object's
 * address from it and reports errors back through errcode.
 */
typedef struct {
	td_thr_iter_f *waiter_cb;	/* user's per-waiter callback */
	psaddr_t sync_obj_addr;		/* sync. object being waited on */
	uint16_t sync_magic;		/* magic number read from the object */
	void *waiter_cb_arg;		/* opaque argument for waiter_cb */
	td_err_e errcode;		/* error detected during iteration */
} waiter_cb_ctl_t;
29670Sstevel@tonic-gate
29680Sstevel@tonic-gate static int
waiters_cb(const td_thrhandle_t * th_p,void * arg)29690Sstevel@tonic-gate waiters_cb(const td_thrhandle_t *th_p, void *arg)
29700Sstevel@tonic-gate {
29710Sstevel@tonic-gate td_thragent_t *ta_p = th_p->th_ta_p;
29720Sstevel@tonic-gate struct ps_prochandle *ph_p = ta_p->ph_p;
29730Sstevel@tonic-gate waiter_cb_ctl_t *wcb = arg;
29740Sstevel@tonic-gate caddr_t wchan;
29750Sstevel@tonic-gate
29760Sstevel@tonic-gate if (ta_p->model == PR_MODEL_NATIVE) {
29770Sstevel@tonic-gate ulwp_t *ulwp = (ulwp_t *)th_p->th_unique;
29780Sstevel@tonic-gate
29790Sstevel@tonic-gate if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_wchan,
29800Sstevel@tonic-gate &wchan, sizeof (wchan)) != PS_OK) {
29810Sstevel@tonic-gate wcb->errcode = TD_DBERR;
29820Sstevel@tonic-gate return (1);
29830Sstevel@tonic-gate }
29840Sstevel@tonic-gate } else {
29850Sstevel@tonic-gate #if defined(_LP64) && defined(_SYSCALL32)
29860Sstevel@tonic-gate ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique;
29870Sstevel@tonic-gate caddr32_t wchan32;
29880Sstevel@tonic-gate
29890Sstevel@tonic-gate if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_wchan,
29900Sstevel@tonic-gate &wchan32, sizeof (wchan32)) != PS_OK) {
29910Sstevel@tonic-gate wcb->errcode = TD_DBERR;
29920Sstevel@tonic-gate return (1);
29930Sstevel@tonic-gate }
29940Sstevel@tonic-gate wchan = (caddr_t)(uintptr_t)wchan32;
29950Sstevel@tonic-gate #else
29960Sstevel@tonic-gate wcb->errcode = TD_ERR;
29970Sstevel@tonic-gate return (1);
29980Sstevel@tonic-gate #endif /* _SYSCALL32 */
29990Sstevel@tonic-gate }
30000Sstevel@tonic-gate
30010Sstevel@tonic-gate if (wchan == NULL)
30020Sstevel@tonic-gate return (0);
30030Sstevel@tonic-gate
30040Sstevel@tonic-gate if (wchan == (caddr_t)wcb->sync_obj_addr)
30050Sstevel@tonic-gate return ((*wcb->waiter_cb)(th_p, wcb->waiter_cb_arg));
30060Sstevel@tonic-gate
30070Sstevel@tonic-gate return (0);
30080Sstevel@tonic-gate }
30090Sstevel@tonic-gate
30100Sstevel@tonic-gate /*
30110Sstevel@tonic-gate * For a given synchronization variable, iterate over the
30120Sstevel@tonic-gate * set of waiting threads. The call back function is passed
30130Sstevel@tonic-gate * two parameters, a pointer to a thread handle and a pointer
30140Sstevel@tonic-gate * to extra call back data.
30150Sstevel@tonic-gate */
30160Sstevel@tonic-gate #pragma weak td_sync_waiters = __td_sync_waiters
30170Sstevel@tonic-gate td_err_e
__td_sync_waiters(const td_synchandle_t * sh_p,td_thr_iter_f * cb,void * cb_data)30180Sstevel@tonic-gate __td_sync_waiters(const td_synchandle_t *sh_p, td_thr_iter_f *cb, void *cb_data)
30190Sstevel@tonic-gate {
30200Sstevel@tonic-gate struct ps_prochandle *ph_p;
30210Sstevel@tonic-gate waiter_cb_ctl_t wcb;
30220Sstevel@tonic-gate td_err_e return_val;
30230Sstevel@tonic-gate
30240Sstevel@tonic-gate if ((ph_p = ph_lock_sh(sh_p, &return_val)) == NULL)
30250Sstevel@tonic-gate return (return_val);
30260Sstevel@tonic-gate if (ps_pdread(ph_p,
30270Sstevel@tonic-gate (psaddr_t)&((mutex_t *)sh_p->sh_unique)->mutex_magic,
30280Sstevel@tonic-gate (caddr_t)&wcb.sync_magic, sizeof (wcb.sync_magic)) != PS_OK) {
30290Sstevel@tonic-gate ph_unlock(sh_p->sh_ta_p);
30300Sstevel@tonic-gate return (TD_DBERR);
30310Sstevel@tonic-gate }
30320Sstevel@tonic-gate ph_unlock(sh_p->sh_ta_p);
30330Sstevel@tonic-gate
30340Sstevel@tonic-gate switch (wcb.sync_magic) {
30350Sstevel@tonic-gate case MUTEX_MAGIC:
30360Sstevel@tonic-gate case COND_MAGIC:
30370Sstevel@tonic-gate case SEMA_MAGIC:
30380Sstevel@tonic-gate case RWL_MAGIC:
30390Sstevel@tonic-gate break;
30400Sstevel@tonic-gate default:
30410Sstevel@tonic-gate return (TD_BADSH);
30420Sstevel@tonic-gate }
30430Sstevel@tonic-gate
30440Sstevel@tonic-gate wcb.waiter_cb = cb;
30450Sstevel@tonic-gate wcb.sync_obj_addr = sh_p->sh_unique;
30460Sstevel@tonic-gate wcb.waiter_cb_arg = cb_data;
30470Sstevel@tonic-gate wcb.errcode = TD_OK;
30480Sstevel@tonic-gate return_val = __td_ta_thr_iter(sh_p->sh_ta_p, waiters_cb, &wcb,
30496247Sraf TD_THR_SLEEP, TD_THR_LOWEST_PRIORITY,
30506247Sraf TD_SIGNO_MASK, TD_THR_ANY_USER_FLAGS);
30510Sstevel@tonic-gate
30520Sstevel@tonic-gate if (return_val != TD_OK)
30530Sstevel@tonic-gate return (return_val);
30540Sstevel@tonic-gate
30550Sstevel@tonic-gate return (wcb.errcode);
30560Sstevel@tonic-gate }
3057