1*0Sstevel@tonic-gate /* 2*0Sstevel@tonic-gate * CDDL HEADER START 3*0Sstevel@tonic-gate * 4*0Sstevel@tonic-gate * The contents of this file are subject to the terms of the 5*0Sstevel@tonic-gate * Common Development and Distribution License, Version 1.0 only 6*0Sstevel@tonic-gate * (the "License"). You may not use this file except in compliance 7*0Sstevel@tonic-gate * with the License. 8*0Sstevel@tonic-gate * 9*0Sstevel@tonic-gate * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 10*0Sstevel@tonic-gate * or http://www.opensolaris.org/os/licensing. 11*0Sstevel@tonic-gate * See the License for the specific language governing permissions 12*0Sstevel@tonic-gate * and limitations under the License. 13*0Sstevel@tonic-gate * 14*0Sstevel@tonic-gate * When distributing Covered Code, include this CDDL HEADER in each 15*0Sstevel@tonic-gate * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 16*0Sstevel@tonic-gate * If applicable, add the following below this CDDL HEADER, with the 17*0Sstevel@tonic-gate * fields enclosed by brackets "[]" replaced with your own identifying 18*0Sstevel@tonic-gate * information: Portions Copyright [yyyy] [name of copyright owner] 19*0Sstevel@tonic-gate * 20*0Sstevel@tonic-gate * CDDL HEADER END 21*0Sstevel@tonic-gate */ 22*0Sstevel@tonic-gate /* 23*0Sstevel@tonic-gate * Copyright 2005 Sun Microsystems, Inc. All rights reserved. 24*0Sstevel@tonic-gate * Use is subject to license terms. 25*0Sstevel@tonic-gate */ 26*0Sstevel@tonic-gate 27*0Sstevel@tonic-gate #pragma ident "%Z%%M% %I% %E% SMI" 28*0Sstevel@tonic-gate 29*0Sstevel@tonic-gate #include <stdio.h> 30*0Sstevel@tonic-gate #include <stdlib.h> 31*0Sstevel@tonic-gate #include <stddef.h> 32*0Sstevel@tonic-gate #include <unistd.h> 33*0Sstevel@tonic-gate #include <thr_uberdata.h> 34*0Sstevel@tonic-gate #include <thread_db.h> 35*0Sstevel@tonic-gate #include <libc_int.h> 36*0Sstevel@tonic-gate 37*0Sstevel@tonic-gate /* 38*0Sstevel@tonic-gate * Private structures. 
 */

/*
 * Overlay of every synchronization-object type this library
 * operates on, so a sync. object can be read generically.
 */
typedef union {
	mutex_t		lock;
	rwlock_t	rwlock;
	sema_t		semaphore;
	cond_t		condition;
} td_so_un_t;

/*
 * The agent process handle: per-target-process state kept by
 * the thread debugging interface.
 */
struct td_thragent {
	rwlock_t	rwlock;		/* readers use ph_p; writer NULLs it */
	struct ps_prochandle *ph_p;	/* client's prochandle; NULL once deleted */
	int		initialized;	/* -1 in progress, 0 no, 1 partial, 2 full */
	int		sync_tracking;	/* non-zero: don't disable tracking on delete */
	int		model;		/* target data model (PR_MODEL_*) */
	int		primary_map;	/* copied from the target's uberdata */
	psaddr_t	bootstrap_addr;	/* address of _tdb_bootstrap in the target */
	psaddr_t	uberdata_addr;	/* address of the target's uberdata */
	psaddr_t	tdb_eventmask_addr;	/* target's global event mask */
	psaddr_t	tdb_register_sync_addr;	/* target's sync. tracking flag */
	psaddr_t	tdb_events[TD_MAX_EVENT_NUM - TD_MIN_EVENT_NUM + 1];
	psaddr_t	hash_table_addr;	/* target's thread hash table */
	int		hash_size;	/* 1 means single-threaded */
	lwpid_t		single_lwpid;	/* the lone lwpid, if single-threaded */
	psaddr_t	single_ulwp_addr;	/* its ulwp address */
};

/*
 * This is the name of the variable in libc that contains
 * the uberdata address that we will need.
 */
#define	TD_BOOTSTRAP_NAME	"_tdb_bootstrap"
/*
 * This is the actual name of uberdata, used in the event
 * that tdb_bootstrap has not yet been initialized.
 */
#define	TD_UBERDATA_NAME	"_uberdata"
/*
 * The library name should end with ".so.1", but older versions of
 * dbx expect the unadorned name and malfunction if ".1" is specified.
 * Unfortunately, if ".1" is not specified, mdb malfunctions when it
 * is applied to another instance of itself (due to the presence of
 * /usr/lib/mdb/proc/libc.so).  So we try it both ways.
 */
#define	TD_LIBRARY_NAME		"libc.so"
#define	TD_LIBRARY_NAME_1	"libc.so.1"

td_err_e __td_thr_get_info(td_thrhandle_t *th_p, td_thrinfo_t *ti_p);

td_err_e __td_ta_thr_iter(td_thragent_t *ta_p, td_thr_iter_f *cb,
	void *cbdata_p, td_thr_state_e state, int ti_pri,
	sigset_t *ti_sigmask_p, unsigned ti_user_flags);

/*
 * Initialize threads debugging interface.
 */
#pragma weak td_init = __td_init
td_err_e
__td_init()
{
	return (TD_OK);
}

/*
 * This function does nothing, and never did.
 * But the symbol is in the ABI, so we can't delete it.
 */
#pragma weak td_log = __td_log
void
__td_log()
{
}

/*
 * Short-cut to read just the hash table size from the process,
 * to avoid repeatedly reading the full uberdata structure when
 * dealing with a single-threaded process.
 */
static uint_t
td_read_hash_size(td_thragent_t *ta_p)
{
	psaddr_t addr;
	uint_t hash_size;

	switch (ta_p->initialized) {
	default:	/* uninitialized */
		return (0);
	case 1:		/* partially initialized */
		break;
	case 2:		/* fully initialized */
		return (ta_p->hash_size);
	}

	/* Compute the address of uberdata.hash_size in the target. */
	if (ta_p->model == PR_MODEL_NATIVE) {
		addr = ta_p->uberdata_addr + offsetof(uberdata_t, hash_size);
	} else {
#if defined(_LP64) && defined(_SYSCALL32)
		addr = ta_p->uberdata_addr + offsetof(uberdata32_t, hash_size);
#else
		addr = 0;
#endif
	}
	if (ps_pdread(ta_p->ph_p, addr, &hash_size, sizeof (hash_size))
	    != PS_OK)
		return (0);
	return (hash_size);
}

/*
 * Read the target's uberdata and cache the pieces this library
 * needs; classifies the target as single- or multi-threaded.
 */
static td_err_e
td_read_uberdata(td_thragent_t *ta_p)
{
	struct ps_prochandle *ph_p = ta_p->ph_p;

	if (ta_p->model == PR_MODEL_NATIVE) {
		uberdata_t uberdata;
#ifdef __ia64
		int i;
#endif

		if (ps_pdread(ph_p, ta_p->uberdata_addr,
		    &uberdata, sizeof (uberdata)) != PS_OK)
			return (TD_DBERR);

		/* Cache the fields and addresses we need from uberdata. */
		ta_p->primary_map = uberdata.primary_map;
		ta_p->tdb_eventmask_addr = ta_p->uberdata_addr +
		    offsetof(uberdata_t, tdb.tdb_ev_global_mask);
		ta_p->tdb_register_sync_addr = ta_p->uberdata_addr +
		    offsetof(uberdata_t, uberflags.uf_tdb_register_sync);
		ta_p->hash_table_addr = (psaddr_t)uberdata.thr_hash_table;
		ta_p->hash_size = uberdata.hash_size;
		if (ps_pdread(ph_p, (psaddr_t)uberdata.tdb.tdb_events,
		    ta_p->tdb_events, sizeof (ta_p->tdb_events)) != PS_OK)
			return (TD_DBERR);
#ifdef __ia64
		/*
		 * Deal with stupid ia64 function descriptors.
		 * We have to go indirect to get the actual function address.
		 */
		for (i = 0; i < TD_MAX_EVENT_NUM - TD_MIN_EVENT_NUM + 1; i++) {
			psaddr_t addr;
			if (ps_pdread(ph_p, ta_p->tdb_events[i],
			    &addr, sizeof (addr)) == PS_OK)
				ta_p->tdb_events[i] = addr;
		}
#endif	/* __ia64 */

	} else {
#if defined(_LP64) && defined(_SYSCALL32)
		/* Same as above, but through the 32-bit uberdata layout. */
		uberdata32_t uberdata;
		caddr32_t tdb_events[TD_MAX_EVENT_NUM - TD_MIN_EVENT_NUM + 1];
		int i;

		if (ps_pdread(ph_p, ta_p->uberdata_addr,
		    &uberdata, sizeof (uberdata)) != PS_OK)
			return (TD_DBERR);
		ta_p->primary_map = uberdata.primary_map;
		ta_p->tdb_eventmask_addr = ta_p->uberdata_addr +
		    offsetof(uberdata32_t, tdb.tdb_ev_global_mask);
		ta_p->tdb_register_sync_addr = ta_p->uberdata_addr +
		    offsetof(uberdata32_t, uberflags.uf_tdb_register_sync);
		ta_p->hash_table_addr = (psaddr_t)uberdata.thr_hash_table;
		ta_p->hash_size = uberdata.hash_size;
		if (ps_pdread(ph_p, (psaddr_t)uberdata.tdb.tdb_events,
		    tdb_events, sizeof (tdb_events)) != PS_OK)
			return (TD_DBERR);
		/* Widen the 32-bit event addresses into our psaddr_t array. */
		for (i = 0; i < TD_MAX_EVENT_NUM - TD_MIN_EVENT_NUM + 1; i++)
			ta_p->tdb_events[i] = tdb_events[i];
#else
		return (TD_DBERR);
#endif
	}
	if (ta_p->hash_size != 1) {		/* multi-threaded */
		ta_p->initialized = 2;
		ta_p->single_lwpid = 0;
		ta_p->single_ulwp_addr = NULL;
	} else {				/* single-threaded */
		ta_p->initialized = 1;
		/*
		 * Get the address and lwpid of the single thread/LWP.
		 * It may not be ulwp_one if this is a child of fork1().
		 */
		if (ta_p->model == PR_MODEL_NATIVE) {
			thr_hash_table_t head;
			lwpid_t lwpid = 0;

			if (ps_pdread(ph_p, ta_p->hash_table_addr,
			    &head, sizeof (head)) != PS_OK)
				return (TD_DBERR);
			/* An empty bucket means libc is not yet initialized. */
			if ((psaddr_t)head.hash_bucket == NULL)
				ta_p->initialized = 0;
			else if (ps_pdread(ph_p, (psaddr_t)head.hash_bucket +
			    offsetof(ulwp_t, ul_lwpid),
			    &lwpid, sizeof (lwpid)) != PS_OK)
				return (TD_DBERR);
			ta_p->single_lwpid = lwpid;
			ta_p->single_ulwp_addr = (psaddr_t)head.hash_bucket;
		} else {
#if defined(_LP64) && defined(_SYSCALL32)
			/* Same as above for a 32-bit target. */
			thr_hash_table32_t head;
			lwpid_t lwpid = 0;

			if (ps_pdread(ph_p, ta_p->hash_table_addr,
			    &head, sizeof (head)) != PS_OK)
				return (TD_DBERR);
			if ((psaddr_t)head.hash_bucket == NULL)
				ta_p->initialized = 0;
			else if (ps_pdread(ph_p, (psaddr_t)head.hash_bucket +
			    offsetof(ulwp32_t, ul_lwpid),
			    &lwpid, sizeof (lwpid)) != PS_OK)
				return (TD_DBERR);
			ta_p->single_lwpid = lwpid;
			ta_p->single_ulwp_addr = (psaddr_t)head.hash_bucket;
#else
			return (TD_DBERR);
#endif
		}
	}
	if (!ta_p->primary_map)
		ta_p->initialized = 0;
	return (TD_OK);
}

/*
 * Locate the target's _tdb_bootstrap/_uberdata symbols and finish
 * initializing the thread agent from the uberdata they lead to.
 */
static td_err_e
td_read_bootstrap_data(td_thragent_t *ta_p)
{
	struct ps_prochandle *ph_p = ta_p->ph_p;
	psaddr_t bootstrap_addr;
	psaddr_t uberdata_addr;
	ps_err_e db_return;
	td_err_e return_val;
	int do_1;

	switch (ta_p->initialized) {
	case 2:			/* fully initialized */
		return (TD_OK);
	case 1:			/* partially initialized */
		if (td_read_hash_size(ta_p) == 1)
			return (TD_OK);
		return (td_read_uberdata(ta_p));
	}

	/*
	 * Uninitialized -- do the startup work.
	 * We set ta_p->initialized to -1 to cut off recursive calls
	 * into libc_db by code in the provider of ps_pglobal_lookup().
	 */
	do_1 = 0;
	ta_p->initialized = -1;
	db_return = ps_pglobal_lookup(ph_p, TD_LIBRARY_NAME,
	    TD_BOOTSTRAP_NAME, &bootstrap_addr);
	if (db_return == PS_NOSYM) {
		/* Retry with the fully-versioned library name. */
		do_1 = 1;
		db_return = ps_pglobal_lookup(ph_p, TD_LIBRARY_NAME_1,
		    TD_BOOTSTRAP_NAME, &bootstrap_addr);
	}
	if (db_return == PS_NOSYM)	/* libc is not linked yet */
		return (TD_NOLIBTHREAD);
	if (db_return != PS_OK)
		return (TD_ERR);
	db_return = ps_pglobal_lookup(ph_p,
	    do_1?
	    TD_LIBRARY_NAME_1 : TD_LIBRARY_NAME,
	    TD_UBERDATA_NAME, &uberdata_addr);
	if (db_return == PS_NOSYM)	/* libc is not linked yet */
		return (TD_NOLIBTHREAD);
	if (db_return != PS_OK)
		return (TD_ERR);

	/*
	 * Read the uberdata address into the thread agent structure.
	 */
	if (ta_p->model == PR_MODEL_NATIVE) {
		psaddr_t psaddr;
		if (ps_pdread(ph_p, bootstrap_addr,
		    &psaddr, sizeof (psaddr)) != PS_OK)
			return (TD_DBERR);
		/* A NULL bootstrap pointer: fall back to _uberdata itself. */
		if ((ta_p->bootstrap_addr = psaddr) == NULL)
			psaddr = uberdata_addr;
		else if (ps_pdread(ph_p, psaddr,
		    &psaddr, sizeof (psaddr)) != PS_OK)
			return (TD_DBERR);
		ta_p->uberdata_addr = psaddr;
	} else {
#if defined(_LP64) && defined(_SYSCALL32)
		/* Same dance with 32-bit pointers for a 32-bit target. */
		caddr32_t psaddr;
		if (ps_pdread(ph_p, bootstrap_addr,
		    &psaddr, sizeof (psaddr)) != PS_OK)
			return (TD_DBERR);
		if ((ta_p->bootstrap_addr = (psaddr_t)psaddr) == NULL)
			psaddr = (caddr32_t)uberdata_addr;
		else if (ps_pdread(ph_p, (psaddr_t)psaddr,
		    &psaddr, sizeof (psaddr)) != PS_OK)
			return (TD_DBERR);
		ta_p->uberdata_addr = (psaddr_t)psaddr;
#else
		return (TD_DBERR);
#endif	/* _SYSCALL32 */
	}

	if ((return_val = td_read_uberdata(ta_p)) != TD_OK)
		return (return_val);
	if (ta_p->bootstrap_addr == NULL)
		ta_p->initialized = 0;
	return (TD_OK);
}

#pragma weak ps_kill
#pragma weak ps_lrolltoaddr

/*
 * Allocate a new agent process handle ("thread agent").
 */
#pragma weak td_ta_new = __td_ta_new
td_err_e
__td_ta_new(struct ps_prochandle *ph_p, td_thragent_t **ta_pp)
{
	td_thragent_t *ta_p;
	int model;
	td_err_e return_val = TD_OK;

	if (ph_p == NULL)
		return (TD_BADPH);
	if (ta_pp == NULL)
		return (TD_ERR);
	*ta_pp = NULL;
	/* Stop the target while we probe it; resumed before returning. */
	if (ps_pstop(ph_p) != PS_OK)
		return (TD_DBERR);
	/*
	 * ps_pdmodel might not be defined if this is an older client.
	 * Make it a weak symbol and test if it exists before calling.
	 */
#pragma weak ps_pdmodel
	if (ps_pdmodel == NULL) {
		model = PR_MODEL_NATIVE;
	} else if (ps_pdmodel(ph_p, &model) != PS_OK) {
		(void) ps_pcontinue(ph_p);
		return (TD_ERR);
	}
	if ((ta_p = malloc(sizeof (*ta_p))) == NULL) {
		(void) ps_pcontinue(ph_p);
		return (TD_MALLOC);
	}

	/*
	 * Initialize the agent process handle.
	 * Pick up the symbol value we need from the target process.
	 */
	(void) memset(ta_p, 0, sizeof (*ta_p));
	ta_p->ph_p = ph_p;
	(void) rwlock_init(&ta_p->rwlock, USYNC_THREAD, NULL);
	ta_p->model = model;
	return_val = td_read_bootstrap_data(ta_p);

	/*
	 * Because the old libthread_db enabled lock tracking by default,
	 * we must also do it.  However, we do it only if the application
	 * provides the ps_kill() and ps_lrolltoaddr() interfaces.
	 * (dbx provides the ps_kill() and ps_lrolltoaddr() interfaces.)
	 */
	if (return_val == TD_OK && ps_kill != NULL && ps_lrolltoaddr != NULL) {
		register_sync_t oldenable;
		register_sync_t enable = REGISTER_SYNC_ENABLE;
		psaddr_t psaddr = ta_p->tdb_register_sync_addr;

		if (ps_pdread(ph_p, psaddr,
		    &oldenable, sizeof (oldenable)) != PS_OK)
			return_val = TD_DBERR;
		else if (oldenable != REGISTER_SYNC_OFF ||
		    ps_pdwrite(ph_p, psaddr,
		    &enable, sizeof (enable)) != PS_OK) {
			/*
			 * Lock tracking was already enabled or we
			 * failed to enable it, probably because we
			 * are examining a core file.  In either case
			 * set the sync_tracking flag non-zero to
			 * indicate that we should not attempt to
			 * disable lock tracking when we delete the
			 * agent process handle in td_ta_delete().
			 */
			ta_p->sync_tracking = 1;
		}
	}

	if (return_val == TD_OK)
		*ta_pp = ta_p;
	else
		free(ta_p);

	(void) ps_pcontinue(ph_p);
	return (return_val);
}

/*
 * Utility function to grab the readers lock and return the prochandle,
 * given an agent process handle.  Performs standard error checking.
 * Returns non-NULL with the lock held, or NULL with the lock not held.
433*0Sstevel@tonic-gate */ 434*0Sstevel@tonic-gate static struct ps_prochandle * 435*0Sstevel@tonic-gate ph_lock_ta(td_thragent_t *ta_p, td_err_e *err) 436*0Sstevel@tonic-gate { 437*0Sstevel@tonic-gate struct ps_prochandle *ph_p = NULL; 438*0Sstevel@tonic-gate td_err_e error; 439*0Sstevel@tonic-gate 440*0Sstevel@tonic-gate if (ta_p == NULL || ta_p->initialized == -1) { 441*0Sstevel@tonic-gate *err = TD_BADTA; 442*0Sstevel@tonic-gate } else if (rw_rdlock(&ta_p->rwlock) != 0) { /* can't happen? */ 443*0Sstevel@tonic-gate *err = TD_BADTA; 444*0Sstevel@tonic-gate } else if ((ph_p = ta_p->ph_p) == NULL) { 445*0Sstevel@tonic-gate (void) rw_unlock(&ta_p->rwlock); 446*0Sstevel@tonic-gate *err = TD_BADPH; 447*0Sstevel@tonic-gate } else if (ta_p->initialized != 2 && 448*0Sstevel@tonic-gate (error = td_read_bootstrap_data(ta_p)) != TD_OK) { 449*0Sstevel@tonic-gate (void) rw_unlock(&ta_p->rwlock); 450*0Sstevel@tonic-gate ph_p = NULL; 451*0Sstevel@tonic-gate *err = error; 452*0Sstevel@tonic-gate } else { 453*0Sstevel@tonic-gate *err = TD_OK; 454*0Sstevel@tonic-gate } 455*0Sstevel@tonic-gate 456*0Sstevel@tonic-gate return (ph_p); 457*0Sstevel@tonic-gate } 458*0Sstevel@tonic-gate 459*0Sstevel@tonic-gate /* 460*0Sstevel@tonic-gate * Utility function to grab the readers lock and return the prochandle, 461*0Sstevel@tonic-gate * given an agent thread handle. Performs standard error checking. 462*0Sstevel@tonic-gate * Returns non-NULL with the lock held, or NULL with the lock not held. 
463*0Sstevel@tonic-gate */ 464*0Sstevel@tonic-gate static struct ps_prochandle * 465*0Sstevel@tonic-gate ph_lock_th(const td_thrhandle_t *th_p, td_err_e *err) 466*0Sstevel@tonic-gate { 467*0Sstevel@tonic-gate if (th_p == NULL || th_p->th_unique == NULL) { 468*0Sstevel@tonic-gate *err = TD_BADTH; 469*0Sstevel@tonic-gate return (NULL); 470*0Sstevel@tonic-gate } 471*0Sstevel@tonic-gate return (ph_lock_ta(th_p->th_ta_p, err)); 472*0Sstevel@tonic-gate } 473*0Sstevel@tonic-gate 474*0Sstevel@tonic-gate /* 475*0Sstevel@tonic-gate * Utility function to grab the readers lock and return the prochandle, 476*0Sstevel@tonic-gate * given a synchronization object handle. Performs standard error checking. 477*0Sstevel@tonic-gate * Returns non-NULL with the lock held, or NULL with the lock not held. 478*0Sstevel@tonic-gate */ 479*0Sstevel@tonic-gate static struct ps_prochandle * 480*0Sstevel@tonic-gate ph_lock_sh(const td_synchandle_t *sh_p, td_err_e *err) 481*0Sstevel@tonic-gate { 482*0Sstevel@tonic-gate if (sh_p == NULL || sh_p->sh_unique == NULL) { 483*0Sstevel@tonic-gate *err = TD_BADSH; 484*0Sstevel@tonic-gate return (NULL); 485*0Sstevel@tonic-gate } 486*0Sstevel@tonic-gate return (ph_lock_ta(sh_p->sh_ta_p, err)); 487*0Sstevel@tonic-gate } 488*0Sstevel@tonic-gate 489*0Sstevel@tonic-gate /* 490*0Sstevel@tonic-gate * Unlock the agent process handle obtained from ph_lock_*(). 491*0Sstevel@tonic-gate */ 492*0Sstevel@tonic-gate static void 493*0Sstevel@tonic-gate ph_unlock(td_thragent_t *ta_p) 494*0Sstevel@tonic-gate { 495*0Sstevel@tonic-gate (void) rw_unlock(&ta_p->rwlock); 496*0Sstevel@tonic-gate } 497*0Sstevel@tonic-gate 498*0Sstevel@tonic-gate /* 499*0Sstevel@tonic-gate * De-allocate an agent process handle, 500*0Sstevel@tonic-gate * releasing all related resources. 501*0Sstevel@tonic-gate * 502*0Sstevel@tonic-gate * XXX -- This is hopelessly broken --- 503*0Sstevel@tonic-gate * Storage for thread agent is not deallocated. 
 * The prochandle
 * in the thread agent is set to NULL so that future uses of
 * the thread agent can be detected and an error value returned.
 * All functions in the external user interface that make
 * use of the thread agent are expected
 * to check for a NULL prochandle in the thread agent.
 * All such functions are also expected to obtain a
 * reader lock on the thread agent while it is using it.
 */
#pragma weak td_ta_delete = __td_ta_delete
td_err_e
__td_ta_delete(td_thragent_t *ta_p)
{
	struct ps_prochandle *ph_p;

	/*
	 * This is the only place we grab the writer lock.
	 * We are going to NULL out the prochandle.
	 */
	if (ta_p == NULL || rw_wrlock(&ta_p->rwlock) != 0)
		return (TD_BADTA);
	if ((ph_p = ta_p->ph_p) == NULL) {
		(void) rw_unlock(&ta_p->rwlock);
		return (TD_BADPH);
	}
	/*
	 * If synch. tracking was disabled when td_ta_new() was called and
	 * if td_ta_sync_tracking_enable() was never called, then disable
	 * synch. tracking (it was enabled by default in td_ta_new()).
	 */
	if (ta_p->sync_tracking == 0 &&
	    ps_kill != NULL && ps_lrolltoaddr != NULL) {
		register_sync_t enable = REGISTER_SYNC_DISABLE;

		/* Best effort; the write fails harmlessly on a core file. */
		(void) ps_pdwrite(ph_p, ta_p->tdb_register_sync_addr,
		    &enable, sizeof (enable));
	}
	ta_p->ph_p = NULL;
	(void) rw_unlock(&ta_p->rwlock);
	return (TD_OK);
}

/*
 * Map an agent process handle to a client prochandle.
 * Currently unused by dbx.
 */
#pragma weak td_ta_get_ph = __td_ta_get_ph
td_err_e
__td_ta_get_ph(td_thragent_t *ta_p, struct ps_prochandle **ph_pp)
{
	td_err_e return_val;

	if (ph_pp != NULL)	/* protect stupid callers */
		*ph_pp = NULL;
	if (ph_pp == NULL)
		return (TD_ERR);
	/* Validate the agent via the usual lock protocol, then release. */
	if ((*ph_pp = ph_lock_ta(ta_p, &return_val)) == NULL)
		return (return_val);
	ph_unlock(ta_p);
	return (TD_OK);
}

/*
 * Set the process's suggested concurrency level.
 * This is a no-op in a one-level model.
 * Currently unused by dbx.
569*0Sstevel@tonic-gate */ 570*0Sstevel@tonic-gate #pragma weak td_ta_setconcurrency = __td_ta_setconcurrency 571*0Sstevel@tonic-gate /* ARGSUSED1 */ 572*0Sstevel@tonic-gate td_err_e 573*0Sstevel@tonic-gate __td_ta_setconcurrency(const td_thragent_t *ta_p, int level) 574*0Sstevel@tonic-gate { 575*0Sstevel@tonic-gate if (ta_p == NULL) 576*0Sstevel@tonic-gate return (TD_BADTA); 577*0Sstevel@tonic-gate if (ta_p->ph_p == NULL) 578*0Sstevel@tonic-gate return (TD_BADPH); 579*0Sstevel@tonic-gate return (TD_OK); 580*0Sstevel@tonic-gate } 581*0Sstevel@tonic-gate 582*0Sstevel@tonic-gate /* 583*0Sstevel@tonic-gate * Get the number of threads in the process. 584*0Sstevel@tonic-gate */ 585*0Sstevel@tonic-gate #pragma weak td_ta_get_nthreads = __td_ta_get_nthreads 586*0Sstevel@tonic-gate td_err_e 587*0Sstevel@tonic-gate __td_ta_get_nthreads(td_thragent_t *ta_p, int *nthread_p) 588*0Sstevel@tonic-gate { 589*0Sstevel@tonic-gate struct ps_prochandle *ph_p; 590*0Sstevel@tonic-gate td_err_e return_val; 591*0Sstevel@tonic-gate int nthreads; 592*0Sstevel@tonic-gate int nzombies; 593*0Sstevel@tonic-gate psaddr_t nthreads_addr; 594*0Sstevel@tonic-gate psaddr_t nzombies_addr; 595*0Sstevel@tonic-gate 596*0Sstevel@tonic-gate if (ta_p->model == PR_MODEL_NATIVE) { 597*0Sstevel@tonic-gate nthreads_addr = ta_p->uberdata_addr + 598*0Sstevel@tonic-gate offsetof(uberdata_t, nthreads); 599*0Sstevel@tonic-gate nzombies_addr = ta_p->uberdata_addr + 600*0Sstevel@tonic-gate offsetof(uberdata_t, nzombies); 601*0Sstevel@tonic-gate } else { 602*0Sstevel@tonic-gate #if defined(_LP64) && defined(_SYSCALL32) 603*0Sstevel@tonic-gate nthreads_addr = ta_p->uberdata_addr + 604*0Sstevel@tonic-gate offsetof(uberdata32_t, nthreads); 605*0Sstevel@tonic-gate nzombies_addr = ta_p->uberdata_addr + 606*0Sstevel@tonic-gate offsetof(uberdata32_t, nzombies); 607*0Sstevel@tonic-gate #else 608*0Sstevel@tonic-gate nthreads_addr = 0; 609*0Sstevel@tonic-gate nzombies_addr = 0; 610*0Sstevel@tonic-gate #endif /* _SYSCALL32 */ 
	}

	if (nthread_p == NULL)
		return (TD_ERR);
	if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
		return (return_val);
	if (ps_pdread(ph_p, nthreads_addr, &nthreads, sizeof (int)) != PS_OK)
		return_val = TD_DBERR;
	if (ps_pdread(ph_p, nzombies_addr, &nzombies, sizeof (int)) != PS_OK)
		return_val = TD_DBERR;
	ph_unlock(ta_p);
	if (return_val == TD_OK)
		*nthread_p = nthreads + nzombies;
	return (return_val);
}

/*
 * Callback state for td_mapper_id2thr(): the tid being sought,
 * a found flag, and the matching thread handle on success.
 */
typedef struct {
	thread_t	tid;
	int		found;
	td_thrhandle_t	th;
} td_mapper_param_t;

/*
 * Check the value in data against the thread id.
 * If it matches, return 1 to terminate iterations.
 * This function is used by td_ta_map_id2thr() to map a tid to a thread handle.
 */
static int
td_mapper_id2thr(td_thrhandle_t *th_p, td_mapper_param_t *data)
{
	td_thrinfo_t ti;

	if (__td_thr_get_info(th_p, &ti) == TD_OK &&
	    data->tid == ti.ti_tid) {
		data->found = 1;
		data->th = *th_p;
		return (1);
	}
	return (0);
}

/*
 * Given a thread identifier, return the corresponding thread handle.
 */
#pragma weak td_ta_map_id2thr = __td_ta_map_id2thr
td_err_e
__td_ta_map_id2thr(td_thragent_t *ta_p, thread_t tid,
	td_thrhandle_t *th_p)
{
	td_err_e		return_val;
	td_mapper_param_t	data;

	/*
	 * Fast path: if the process has exactly one thread, the agent
	 * caches its lwpid/ulwp address and no iteration is needed.
	 * ta_p->initialized is tested a second time because
	 * td_read_uberdata() runs between the two tests and may update
	 * the agent state — NOTE(review): confirm it can clear the flag.
	 */
	if (th_p != NULL &&	/* optimize for a single thread */
	    ta_p != NULL &&
	    ta_p->initialized == 1 &&
	    (td_read_hash_size(ta_p) == 1 ||
	    td_read_uberdata(ta_p) == TD_OK) &&
	    ta_p->initialized == 1 &&
	    ta_p->single_lwpid == tid) {
		th_p->th_ta_p = ta_p;
		if ((th_p->th_unique = ta_p->single_ulwp_addr) == 0)
			return (TD_NOTHR);
		return (TD_OK);
	}

	/*
	 * LOCKING EXCEPTION - Locking is not required here because
	 * the locking and checking will be done in __td_ta_thr_iter.
	 */

	if (ta_p == NULL)
		return (TD_BADTA);
	if (th_p == NULL)
		return (TD_BADTH);
	if (tid == 0)
		return (TD_NOTHR);

	/* Walk every thread; td_mapper_id2thr() stops at the match. */
	data.tid = tid;
	data.found = 0;
	return_val = __td_ta_thr_iter(ta_p,
	    (td_thr_iter_f *)td_mapper_id2thr, (void *)&data,
	    TD_THR_ANY_STATE, TD_THR_LOWEST_PRIORITY,
	    TD_SIGNO_MASK, TD_THR_ANY_USER_FLAGS);
	if (return_val == TD_OK) {
		if (data.found == 0)
			return_val = TD_NOTHR;
		else
			*th_p = data.th;
	}

	return (return_val);
}

/*
 * Map the address of a synchronization object to a sync. object handle.
 */
#pragma weak td_ta_map_addr2sync = __td_ta_map_addr2sync
td_err_e
__td_ta_map_addr2sync(td_thragent_t *ta_p, psaddr_t addr, td_synchandle_t *sh_p)
{
	struct ps_prochandle *ph_p;
	td_err_e return_val;
	uint16_t sync_magic;

	if (sh_p == NULL)
		return (TD_BADSH);
	if (addr == NULL)
		return (TD_ERR);
	if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
		return (return_val);
	/*
	 * Check the magic number of the sync. object to make sure it's valid.
	 * The magic number is at the same offset for all sync. objects.
	 * (mutex_t is used here only to compute that common offset.)
	 */
	if (ps_pdread(ph_p, (psaddr_t)&((mutex_t *)addr)->mutex_magic,
	    &sync_magic, sizeof (sync_magic)) != PS_OK) {
		ph_unlock(ta_p);
		return (TD_BADSH);
	}
	ph_unlock(ta_p);
	if (sync_magic != MUTEX_MAGIC && sync_magic != COND_MAGIC &&
	    sync_magic != SEMA_MAGIC && sync_magic != RWL_MAGIC)
		return (TD_BADSH);
	/*
	 * Just fill in the appropriate fields of the sync. handle.
	 */
	sh_p->sh_ta_p = (td_thragent_t *)ta_p;
	sh_p->sh_unique = addr;
	return (TD_OK);
}

/*
 * Iterate over the set of global TSD keys.
 * The call back function is called with three arguments,
 * a key, a pointer to the destructor function, and the cbdata pointer.
 * Currently unused by dbx.
 */
#pragma weak td_ta_tsd_iter = __td_ta_tsd_iter
td_err_e
__td_ta_tsd_iter(td_thragent_t *ta_p, td_key_iter_f *cb, void *cbdata_p)
{
	struct ps_prochandle *ph_p;
	td_err_e	return_val;
	int	key;
	int	numkeys;
	psaddr_t	dest_addr;	/* address of destructor array in victim */
	psaddr_t	*destructors = NULL;	/* local copy of that array */
	PFrV	destructor;

	if (cb == NULL)
		return (TD_ERR);
	if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
		return (return_val);
	if (ps_pstop(ph_p) != PS_OK) {
		ph_unlock(ta_p);
		return (TD_DBERR);
	}

	/*
	 * Read the TSD metadata from the victim, using the layout that
	 * matches the victim's data model (native vs. 32-bit on LP64).
	 */
	if (ta_p->model == PR_MODEL_NATIVE) {
		tsd_metadata_t tsdm;

		if (ps_pdread(ph_p,
		    ta_p->uberdata_addr + offsetof(uberdata_t, tsd_metadata),
		    &tsdm, sizeof (tsdm)) != PS_OK)
			return_val = TD_DBERR;
		else {
			numkeys = tsdm.tsdm_nused;
			dest_addr = (psaddr_t)tsdm.tsdm_destro;
			if (numkeys > 0)
				destructors =
				    malloc(numkeys * sizeof (psaddr_t));
		}
	} else {
#if defined(_LP64) && defined(_SYSCALL32)
		tsd_metadata32_t tsdm;

		if (ps_pdread(ph_p,
		    ta_p->uberdata_addr + offsetof(uberdata32_t, tsd_metadata),
		    &tsdm, sizeof (tsdm)) != PS_OK)
			return_val = TD_DBERR;
		else {
			numkeys = tsdm.tsdm_nused;
			dest_addr = (psaddr_t)tsdm.tsdm_destro;
			if (numkeys > 0)
				destructors =
				    malloc(numkeys * sizeof (caddr32_t));
		}
#else
		return_val = TD_DBERR;
#endif	/* _SYSCALL32 */
	}

	/*
	 * Note: numkeys is only examined when return_val == TD_OK,
	 * i.e. when the metadata read above succeeded and set it.
	 */
	if (return_val != TD_OK || numkeys <= 0) {
		(void) ps_pcontinue(ph_p);
		ph_unlock(ta_p);
		return (return_val);
	}

	if (destructors == NULL)
		return_val = TD_MALLOC;
	else if (ta_p->model == PR_MODEL_NATIVE) {
		if (ps_pdread(ph_p, dest_addr,
		    destructors, numkeys * sizeof (psaddr_t)) != PS_OK)
			return_val = TD_DBERR;
		else {
			/* keys are numbered from 1; slot 0 is unused */
			for (key = 1; key < numkeys; key++) {
				destructor = (PFrV)destructors[key];
				if (destructor != TSD_UNALLOCATED &&
				    (*cb)(key, destructor, cbdata_p))
					break;
			}
		}
#if defined(_LP64) && defined(_SYSCALL32)
	} else {
		/* 32-bit victim: reinterpret the buffer as caddr32_t[] */
		caddr32_t *destructors32 = (caddr32_t *)destructors;
		caddr32_t destruct32;

		if (ps_pdread(ph_p, dest_addr,
		    destructors32, numkeys * sizeof (caddr32_t)) != PS_OK)
			return_val = TD_DBERR;
		else {
			for (key = 1; key < numkeys; key++) {
				destruct32 = destructors32[key];
				if (destruct32 != (caddr32_t)TSD_UNALLOCATED &&
				    (*cb)(key, (PFrV)(uintptr_t)destruct32,
				    cbdata_p))
					break;
			}
		}
#endif	/* _SYSCALL32 */
	}

	if (destructors)
		free(destructors);
	(void) ps_pcontinue(ph_p);
	ph_unlock(ta_p);
	return (return_val);
}

/*
 * Return non-zero iff the two signal sets are identical
 * (compares all four words of the sigset_t).
 */
int
sigequalset(const sigset_t *s1, const sigset_t *s2)
{
	return (s1->__sigbits[0] == s2->__sigbits[0] &&
	    s1->__sigbits[1] == s2->__sigbits[1] &&
	    s1->__sigbits[2] == s2->__sigbits[2] &&
	    s1->__sigbits[3] == s2->__sigbits[3]);
}

/*
 * Description:
 *   Iterate over all threads. For each thread call
 * the function pointed to by "cb" with a pointer
 * to a thread handle, and a pointer to data which
 * can be NULL. Only call td_thr_iter_f() on threads
 * which match the properties of state, ti_pri,
 * ti_sigmask_p, and ti_user_flags.  If cb returns
 * a non-zero value, terminate iterations.
 *
 * Input:
 *   *ta_p - thread agent
 *   *cb - call back function defined by user.
 * td_thr_iter_f() takes a thread handle and
 * cbdata_p as a parameter.
 *   cbdata_p - parameter for td_thr_iter_f().
 *
 *   state - state of threads of interest.  A value of
 * TD_THR_ANY_STATE from enum td_thr_state_e
 * does not restrict iterations by state.
 *   ti_pri - lower bound of priorities of threads of
 * interest.  A value of TD_THR_LOWEST_PRIORITY
 * defined in thread_db.h does not restrict
 * iterations by priority.  A thread with priority
 * less than ti_pri will NOT be passed to the callback
 * function.
 *   ti_sigmask_p - signal mask of threads of interest.
 * A value of TD_SIGNO_MASK defined in thread_db.h
 * does not restrict iterations by signal mask.
 *   ti_user_flags - user flags of threads of interest.  A
 * value of TD_THR_ANY_USER_FLAGS defined in thread_db.h
 * does not restrict iterations by user flags.
 */
#pragma weak td_ta_thr_iter = __td_ta_thr_iter
td_err_e
__td_ta_thr_iter(td_thragent_t *ta_p, td_thr_iter_f *cb,
	void *cbdata_p, td_thr_state_e state, int ti_pri,
	sigset_t *ti_sigmask_p, unsigned ti_user_flags)
{
	struct ps_prochandle *ph_p;
	psaddr_t	first_lwp_addr;
	psaddr_t	first_zombie_addr;
	psaddr_t	curr_lwp_addr;
	psaddr_t	next_lwp_addr;
	td_thrhandle_t	th;
	ps_err_e	db_return;
	ps_err_e	db_return2;
	td_err_e	return_val;

	if (cb == NULL)
		return (TD_ERR);
	/*
	 * If state is not within bound, short circuit.
	 */
	if (state < TD_THR_ANY_STATE || state > TD_THR_STOPPED_ASLEEP)
		return (TD_OK);

	if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
		return (return_val);
	if (ps_pstop(ph_p) != PS_OK) {
		ph_unlock(ta_p);
		return (TD_DBERR);
	}

	/*
	 * For each ulwp_t in the circular linked lists pointed
	 * to by "all_lwps" and "all_zombies":
	 * (1) Filter each thread.
	 * (2) Create the thread_object for each thread that passes.
	 * (3) Call the call back function on each thread.
	 */

	/* Read the heads of both lists, per the victim's data model. */
	if (ta_p->model == PR_MODEL_NATIVE) {
		db_return = ps_pdread(ph_p,
		    ta_p->uberdata_addr + offsetof(uberdata_t, all_lwps),
		    &first_lwp_addr, sizeof (first_lwp_addr));
		db_return2 = ps_pdread(ph_p,
		    ta_p->uberdata_addr + offsetof(uberdata_t, all_zombies),
		    &first_zombie_addr, sizeof (first_zombie_addr));
	} else {
#if defined(_LP64) && defined(_SYSCALL32)
		caddr32_t addr32;

		db_return = ps_pdread(ph_p,
		    ta_p->uberdata_addr + offsetof(uberdata32_t, all_lwps),
		    &addr32, sizeof (addr32));
		first_lwp_addr = addr32;
		db_return2 = ps_pdread(ph_p,
		    ta_p->uberdata_addr + offsetof(uberdata32_t, all_zombies),
		    &addr32, sizeof (addr32));
		first_zombie_addr = addr32;
#else	/* _SYSCALL32 */
		db_return = PS_ERR;
		db_return2 = PS_ERR;
#endif	/* _SYSCALL32 */
	}
	/* Collapse the two read statuses into one. */
	if (db_return == PS_OK)
		db_return = db_return2;

	/*
	 * If first_lwp_addr and first_zombie_addr are both NULL,
	 * libc must not yet be initialized or all threads have
	 * exited.  Return TD_NOTHR and all will be well.
	 */
	if (db_return == PS_OK &&
	    first_lwp_addr == NULL && first_zombie_addr == NULL) {
		(void) ps_pcontinue(ph_p);
		ph_unlock(ta_p);
		return (TD_NOTHR);
	}
	if (db_return != PS_OK) {
		(void) ps_pcontinue(ph_p);
		ph_unlock(ta_p);
		return (TD_DBERR);
	}

	/*
	 * Run down the lists of all living and dead lwps.
	 */
	if (first_lwp_addr == NULL)
		first_lwp_addr = first_zombie_addr;
	curr_lwp_addr = first_lwp_addr;
	for (;;) {
		td_thr_state_e ts_state;
		int userpri;
		unsigned userflags;
		sigset_t mask;

		/*
		 * Read the ulwp struct.  If the full-size read fails,
		 * retry with the smaller REPLACEMENT_SIZE — presumably
		 * the size kept for defunct/replaced threads (see
		 * ul_replace in td_thr2to()) — TODO confirm.
		 */
		if (ta_p->model == PR_MODEL_NATIVE) {
			ulwp_t ulwp;

			if (ps_pdread(ph_p, curr_lwp_addr,
			    &ulwp, sizeof (ulwp)) != PS_OK &&
			    ((void) memset(&ulwp, 0, sizeof (ulwp)),
			    ps_pdread(ph_p, curr_lwp_addr,
			    &ulwp, REPLACEMENT_SIZE)) != PS_OK) {
				return_val = TD_DBERR;
				break;
			}
			next_lwp_addr = (psaddr_t)ulwp.ul_forw;

			ts_state = ulwp.ul_dead? TD_THR_ZOMBIE :
			    ulwp.ul_stop? TD_THR_STOPPED :
			    ulwp.ul_wchan? TD_THR_SLEEP :
			    TD_THR_ACTIVE;
			userpri = ulwp.ul_pri;
			userflags = ulwp.ul_usropts;
			if (ulwp.ul_dead)
				(void) sigemptyset(&mask);
			else
				mask = *(sigset_t *)&ulwp.ul_sigmask;
		} else {
#if defined(_LP64) && defined(_SYSCALL32)
			ulwp32_t ulwp;

			if (ps_pdread(ph_p, curr_lwp_addr,
			    &ulwp, sizeof (ulwp)) != PS_OK &&
			    ((void) memset(&ulwp, 0, sizeof (ulwp)),
			    ps_pdread(ph_p, curr_lwp_addr,
			    &ulwp, REPLACEMENT_SIZE32)) != PS_OK) {
				return_val = TD_DBERR;
				break;
			}
			next_lwp_addr = (psaddr_t)ulwp.ul_forw;

			ts_state = ulwp.ul_dead? TD_THR_ZOMBIE :
			    ulwp.ul_stop? TD_THR_STOPPED :
			    ulwp.ul_wchan? TD_THR_SLEEP :
			    TD_THR_ACTIVE;
			userpri = ulwp.ul_pri;
			userflags = ulwp.ul_usropts;
			if (ulwp.ul_dead)
				(void) sigemptyset(&mask);
			else
				mask = *(sigset_t *)&ulwp.ul_sigmask;
#else	/* _SYSCALL32 */
			return_val = TD_ERR;
			break;
#endif	/* _SYSCALL32 */
		}

		/*
		 * Filter on state, priority, sigmask, and user flags.
		 */

		if ((state != ts_state) &&
		    (state != TD_THR_ANY_STATE))
			goto advance;

		if (ti_pri > userpri)
			goto advance;

		if (ti_sigmask_p != TD_SIGNO_MASK &&
		    !sigequalset(ti_sigmask_p, &mask))
			goto advance;

		if (ti_user_flags != userflags &&
		    ti_user_flags != (unsigned)TD_THR_ANY_USER_FLAGS)
			goto advance;

		/*
		 * Call back - break if the return
		 * from the call back is non-zero.
		 */
		th.th_ta_p = (td_thragent_t *)ta_p;
		th.th_unique = curr_lwp_addr;
		if ((*cb)(&th, cbdata_p))
			break;

advance:
		if ((curr_lwp_addr = next_lwp_addr) == first_lwp_addr) {
			/*
			 * Switch to the zombie list, unless it is NULL
			 * or we have already been doing the zombie list,
			 * in which case terminate the loop.
			 */
			if (first_zombie_addr == NULL ||
			    first_lwp_addr == first_zombie_addr)
				break;
			curr_lwp_addr = first_lwp_addr = first_zombie_addr;
		}
	}

	(void) ps_pcontinue(ph_p);
	ph_unlock(ta_p);
	return (return_val);
}

/*
 * Enable or disable process synchronization object tracking.
 * Currently unused by dbx.
 */
#pragma weak td_ta_sync_tracking_enable = __td_ta_sync_tracking_enable
td_err_e
__td_ta_sync_tracking_enable(td_thragent_t *ta_p, int onoff)
{
	struct ps_prochandle *ph_p;
	td_err_e return_val;
	register_sync_t enable;

	if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
		return (return_val);
	/*
	 * Values of tdb_register_sync in the victim process:
	 *	REGISTER_SYNC_ENABLE	enables registration of synch objects
	 *	REGISTER_SYNC_DISABLE	disables registration of synch objects
	 * These cause the table to be cleared and tdb_register_sync set to:
	 *	REGISTER_SYNC_ON	registration in effect
	 *	REGISTER_SYNC_OFF	registration not in effect
	 */
	enable = onoff? REGISTER_SYNC_ENABLE : REGISTER_SYNC_DISABLE;
	if (ps_pdwrite(ph_p, ta_p->tdb_register_sync_addr,
	    &enable, sizeof (enable)) != PS_OK)
		return_val = TD_DBERR;
	/*
	 * Remember that this interface was called (see td_ta_delete()).
	 */
	ta_p->sync_tracking = 1;
	ph_unlock(ta_p);
	return (return_val);
}

/*
 * Iterate over all known synchronization variables.
 * It is very possible that the list generated is incomplete,
 * because the iterator can only find synchronization variables
 * that have been registered by the process since synchronization
 * object registration was enabled.
 * The call back function cb is called for each synchronization
 * variable with two arguments: a pointer to the synchronization
 * handle and the passed-in argument cbdata.
 * If cb returns a non-zero value, iterations are terminated.
1134*0Sstevel@tonic-gate */ 1135*0Sstevel@tonic-gate #pragma weak td_ta_sync_iter = __td_ta_sync_iter 1136*0Sstevel@tonic-gate td_err_e 1137*0Sstevel@tonic-gate __td_ta_sync_iter(td_thragent_t *ta_p, td_sync_iter_f *cb, void *cbdata) 1138*0Sstevel@tonic-gate { 1139*0Sstevel@tonic-gate struct ps_prochandle *ph_p; 1140*0Sstevel@tonic-gate td_err_e return_val; 1141*0Sstevel@tonic-gate int i; 1142*0Sstevel@tonic-gate register_sync_t enable; 1143*0Sstevel@tonic-gate psaddr_t next_desc; 1144*0Sstevel@tonic-gate tdb_sync_stats_t sync_stats; 1145*0Sstevel@tonic-gate td_synchandle_t synchandle; 1146*0Sstevel@tonic-gate psaddr_t psaddr; 1147*0Sstevel@tonic-gate void *vaddr; 1148*0Sstevel@tonic-gate uint64_t *sync_addr_hash = NULL; 1149*0Sstevel@tonic-gate 1150*0Sstevel@tonic-gate if (cb == NULL) 1151*0Sstevel@tonic-gate return (TD_ERR); 1152*0Sstevel@tonic-gate if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL) 1153*0Sstevel@tonic-gate return (return_val); 1154*0Sstevel@tonic-gate if (ps_pstop(ph_p) != PS_OK) { 1155*0Sstevel@tonic-gate ph_unlock(ta_p); 1156*0Sstevel@tonic-gate return (TD_DBERR); 1157*0Sstevel@tonic-gate } 1158*0Sstevel@tonic-gate if (ps_pdread(ph_p, ta_p->tdb_register_sync_addr, 1159*0Sstevel@tonic-gate &enable, sizeof (enable)) != PS_OK) { 1160*0Sstevel@tonic-gate return_val = TD_DBERR; 1161*0Sstevel@tonic-gate goto out; 1162*0Sstevel@tonic-gate } 1163*0Sstevel@tonic-gate if (enable != REGISTER_SYNC_ON) 1164*0Sstevel@tonic-gate goto out; 1165*0Sstevel@tonic-gate 1166*0Sstevel@tonic-gate /* 1167*0Sstevel@tonic-gate * First read the hash table. 1168*0Sstevel@tonic-gate * The hash table is large; allocate with mmap(). 
1169*0Sstevel@tonic-gate */ 1170*0Sstevel@tonic-gate if ((vaddr = mmap(NULL, TDB_HASH_SIZE * sizeof (uint64_t), 1171*0Sstevel@tonic-gate PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANON, -1, (off_t)0)) 1172*0Sstevel@tonic-gate == MAP_FAILED) { 1173*0Sstevel@tonic-gate return_val = TD_MALLOC; 1174*0Sstevel@tonic-gate goto out; 1175*0Sstevel@tonic-gate } 1176*0Sstevel@tonic-gate sync_addr_hash = vaddr; 1177*0Sstevel@tonic-gate 1178*0Sstevel@tonic-gate if (ta_p->model == PR_MODEL_NATIVE) { 1179*0Sstevel@tonic-gate if (ps_pdread(ph_p, ta_p->uberdata_addr + 1180*0Sstevel@tonic-gate offsetof(uberdata_t, tdb.tdb_sync_addr_hash), 1181*0Sstevel@tonic-gate &psaddr, sizeof (&psaddr)) != PS_OK) { 1182*0Sstevel@tonic-gate return_val = TD_DBERR; 1183*0Sstevel@tonic-gate goto out; 1184*0Sstevel@tonic-gate } 1185*0Sstevel@tonic-gate } else { 1186*0Sstevel@tonic-gate #ifdef _SYSCALL32 1187*0Sstevel@tonic-gate caddr32_t addr; 1188*0Sstevel@tonic-gate 1189*0Sstevel@tonic-gate if (ps_pdread(ph_p, ta_p->uberdata_addr + 1190*0Sstevel@tonic-gate offsetof(uberdata32_t, tdb.tdb_sync_addr_hash), 1191*0Sstevel@tonic-gate &addr, sizeof (addr)) != PS_OK) { 1192*0Sstevel@tonic-gate return_val = TD_DBERR; 1193*0Sstevel@tonic-gate goto out; 1194*0Sstevel@tonic-gate } 1195*0Sstevel@tonic-gate psaddr = addr; 1196*0Sstevel@tonic-gate #else 1197*0Sstevel@tonic-gate return_val = TD_ERR; 1198*0Sstevel@tonic-gate goto out; 1199*0Sstevel@tonic-gate #endif /* _SYSCALL32 */ 1200*0Sstevel@tonic-gate } 1201*0Sstevel@tonic-gate 1202*0Sstevel@tonic-gate if (psaddr == NULL) 1203*0Sstevel@tonic-gate goto out; 1204*0Sstevel@tonic-gate if (ps_pdread(ph_p, psaddr, sync_addr_hash, 1205*0Sstevel@tonic-gate TDB_HASH_SIZE * sizeof (uint64_t)) != PS_OK) { 1206*0Sstevel@tonic-gate return_val = TD_DBERR; 1207*0Sstevel@tonic-gate goto out; 1208*0Sstevel@tonic-gate } 1209*0Sstevel@tonic-gate 1210*0Sstevel@tonic-gate /* 1211*0Sstevel@tonic-gate * Now scan the hash table. 
1212*0Sstevel@tonic-gate */ 1213*0Sstevel@tonic-gate for (i = 0; i < TDB_HASH_SIZE; i++) { 1214*0Sstevel@tonic-gate for (next_desc = (psaddr_t)sync_addr_hash[i]; 1215*0Sstevel@tonic-gate next_desc != NULL; 1216*0Sstevel@tonic-gate next_desc = (psaddr_t)sync_stats.next) { 1217*0Sstevel@tonic-gate if (ps_pdread(ph_p, next_desc, 1218*0Sstevel@tonic-gate &sync_stats, sizeof (sync_stats)) != PS_OK) { 1219*0Sstevel@tonic-gate return_val = TD_DBERR; 1220*0Sstevel@tonic-gate goto out; 1221*0Sstevel@tonic-gate } 1222*0Sstevel@tonic-gate if (sync_stats.un.type == TDB_NONE) { 1223*0Sstevel@tonic-gate /* not registered since registration enabled */ 1224*0Sstevel@tonic-gate continue; 1225*0Sstevel@tonic-gate } 1226*0Sstevel@tonic-gate synchandle.sh_ta_p = ta_p; 1227*0Sstevel@tonic-gate synchandle.sh_unique = (psaddr_t)sync_stats.sync_addr; 1228*0Sstevel@tonic-gate if ((*cb)(&synchandle, cbdata) != 0) 1229*0Sstevel@tonic-gate goto out; 1230*0Sstevel@tonic-gate } 1231*0Sstevel@tonic-gate } 1232*0Sstevel@tonic-gate 1233*0Sstevel@tonic-gate out: 1234*0Sstevel@tonic-gate if (sync_addr_hash != NULL) 1235*0Sstevel@tonic-gate (void) munmap((void *)sync_addr_hash, 1236*0Sstevel@tonic-gate TDB_HASH_SIZE * sizeof (uint64_t)); 1237*0Sstevel@tonic-gate (void) ps_pcontinue(ph_p); 1238*0Sstevel@tonic-gate ph_unlock(ta_p); 1239*0Sstevel@tonic-gate return (return_val); 1240*0Sstevel@tonic-gate } 1241*0Sstevel@tonic-gate 1242*0Sstevel@tonic-gate /* 1243*0Sstevel@tonic-gate * Enable process statistics collection. 1244*0Sstevel@tonic-gate */ 1245*0Sstevel@tonic-gate #pragma weak td_ta_enable_stats = __td_ta_enable_stats 1246*0Sstevel@tonic-gate /* ARGSUSED */ 1247*0Sstevel@tonic-gate td_err_e 1248*0Sstevel@tonic-gate __td_ta_enable_stats(const td_thragent_t *ta_p, int onoff) 1249*0Sstevel@tonic-gate { 1250*0Sstevel@tonic-gate return (TD_NOCAPAB); 1251*0Sstevel@tonic-gate } 1252*0Sstevel@tonic-gate 1253*0Sstevel@tonic-gate /* 1254*0Sstevel@tonic-gate * Reset process statistics. 
 */
#pragma weak td_ta_reset_stats = __td_ta_reset_stats
/* ARGSUSED */
td_err_e
__td_ta_reset_stats(const td_thragent_t *ta_p)
{
	return (TD_NOCAPAB);
}

/*
 * Read process statistics.
 */
#pragma weak td_ta_get_stats = __td_ta_get_stats
/* ARGSUSED */
td_err_e
__td_ta_get_stats(const td_thragent_t *ta_p, td_ta_stats_t *tstats)
{
	return (TD_NOCAPAB);
}

/*
 * Transfer information from lwp struct to thread information struct.
 * XXX -- lots of this needs cleaning up.
 */
static void
td_thr2to(td_thragent_t *ta_p, psaddr_t ts_addr,
	ulwp_t *ulwp, td_thrinfo_t *ti_p)
{
	lwpid_t lwpid;

	/* lwpid 0 denotes the main thread; report it as tid 1 */
	if ((lwpid = ulwp->ul_lwpid) == 0)
		lwpid = 1;
	(void) memset(ti_p, 0, sizeof (*ti_p));
	ti_p->ti_ta_p = ta_p;
	ti_p->ti_user_flags = ulwp->ul_usropts;
	ti_p->ti_tid = lwpid;
	ti_p->ti_exitval = ulwp->ul_rval;
	ti_p->ti_startfunc = (psaddr_t)ulwp->ul_startpc;
	if (!ulwp->ul_dead) {
		/*
		 * The bloody fools got this backwards!
		 * (ti_stkbase is populated from ul_stktop.)
		 */
		ti_p->ti_stkbase = (psaddr_t)ulwp->ul_stktop;
		ti_p->ti_stksize = ulwp->ul_stksiz;
	}
	ti_p->ti_ro_area = ts_addr;
	/* replaced (defunct) threads retain only REPLACEMENT_SIZE bytes */
	ti_p->ti_ro_size = ulwp->ul_replace?
	    REPLACEMENT_SIZE : sizeof (ulwp_t);
	ti_p->ti_state = ulwp->ul_dead? TD_THR_ZOMBIE :
	    ulwp->ul_stop? TD_THR_STOPPED :
	    ulwp->ul_wchan? TD_THR_SLEEP :
	    TD_THR_ACTIVE;
	ti_p->ti_db_suspended = 0;
	ti_p->ti_type = TD_THR_USER;
	ti_p->ti_sp = ulwp->ul_sp;
	ti_p->ti_flags = 0;
	ti_p->ti_pri = ulwp->ul_pri;
	ti_p->ti_lid = lwpid;
	if (!ulwp->ul_dead)
		ti_p->ti_sigmask = ulwp->ul_sigmask;
	ti_p->ti_traceme = 0;
	ti_p->ti_preemptflag = 0;
	ti_p->ti_pirecflag = 0;
	(void) sigemptyset(&ti_p->ti_pending);
	ti_p->ti_events = ulwp->ul_td_evbuf.eventmask;
}

#if defined(_LP64) && defined(_SYSCALL32)
/*
 * 32-bit-victim variant of td_thr2to(): identical logic operating
 * on a ulwp32_t, with pointer-sized fields widened as needed.
 */
static void
td_thr2to32(td_thragent_t *ta_p, psaddr_t ts_addr,
	ulwp32_t *ulwp, td_thrinfo_t *ti_p)
{
	lwpid_t lwpid;

	/* lwpid 0 denotes the main thread; report it as tid 1 */
	if ((lwpid = ulwp->ul_lwpid) == 0)
		lwpid = 1;
	(void) memset(ti_p, 0, sizeof (*ti_p));
	ti_p->ti_ta_p = ta_p;
	ti_p->ti_user_flags = ulwp->ul_usropts;
	ti_p->ti_tid = lwpid;
	ti_p->ti_exitval = (void *)(uintptr_t)ulwp->ul_rval;
	ti_p->ti_startfunc = (psaddr_t)ulwp->ul_startpc;
	if (!ulwp->ul_dead) {
		/*
		 * The bloody fools got this backwards!
		 * (ti_stkbase is populated from ul_stktop.)
		 */
		ti_p->ti_stkbase = (psaddr_t)ulwp->ul_stktop;
		ti_p->ti_stksize = ulwp->ul_stksiz;
	}
	ti_p->ti_ro_area = ts_addr;
	/* replaced (defunct) threads retain only REPLACEMENT_SIZE32 bytes */
	ti_p->ti_ro_size = ulwp->ul_replace?
	    REPLACEMENT_SIZE32 : sizeof (ulwp32_t);
	ti_p->ti_state = ulwp->ul_dead? TD_THR_ZOMBIE :
	    ulwp->ul_stop? TD_THR_STOPPED :
	    ulwp->ul_wchan? TD_THR_SLEEP :
	    TD_THR_ACTIVE;
	ti_p->ti_db_suspended = 0;
	ti_p->ti_type = TD_THR_USER;
	ti_p->ti_sp = (uint32_t)ulwp->ul_sp;
	ti_p->ti_flags = 0;
	ti_p->ti_pri = ulwp->ul_pri;
	ti_p->ti_lid = lwpid;
	if (!ulwp->ul_dead)
		ti_p->ti_sigmask = *(sigset_t *)&ulwp->ul_sigmask;
	ti_p->ti_traceme = 0;
	ti_p->ti_preemptflag = 0;
	ti_p->ti_pirecflag = 0;
	(void) sigemptyset(&ti_p->ti_pending);
	ti_p->ti_events = ulwp->ul_td_evbuf.eventmask;
}
#endif	/* _SYSCALL32 */

/*
 * Get thread information.
1369*0Sstevel@tonic-gate */ 1370*0Sstevel@tonic-gate #pragma weak td_thr_get_info = __td_thr_get_info 1371*0Sstevel@tonic-gate td_err_e 1372*0Sstevel@tonic-gate __td_thr_get_info(td_thrhandle_t *th_p, td_thrinfo_t *ti_p) 1373*0Sstevel@tonic-gate { 1374*0Sstevel@tonic-gate struct ps_prochandle *ph_p; 1375*0Sstevel@tonic-gate td_thragent_t *ta_p; 1376*0Sstevel@tonic-gate td_err_e return_val; 1377*0Sstevel@tonic-gate psaddr_t psaddr; 1378*0Sstevel@tonic-gate 1379*0Sstevel@tonic-gate if (ti_p == NULL) 1380*0Sstevel@tonic-gate return (TD_ERR); 1381*0Sstevel@tonic-gate (void) memset(ti_p, NULL, sizeof (*ti_p)); 1382*0Sstevel@tonic-gate 1383*0Sstevel@tonic-gate if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL) 1384*0Sstevel@tonic-gate return (return_val); 1385*0Sstevel@tonic-gate ta_p = th_p->th_ta_p; 1386*0Sstevel@tonic-gate if (ps_pstop(ph_p) != PS_OK) { 1387*0Sstevel@tonic-gate ph_unlock(ta_p); 1388*0Sstevel@tonic-gate return (TD_DBERR); 1389*0Sstevel@tonic-gate } 1390*0Sstevel@tonic-gate 1391*0Sstevel@tonic-gate /* 1392*0Sstevel@tonic-gate * Read the ulwp struct from the process. 1393*0Sstevel@tonic-gate * Transfer the ulwp struct to the thread information struct. 
1394*0Sstevel@tonic-gate */ 1395*0Sstevel@tonic-gate psaddr = th_p->th_unique; 1396*0Sstevel@tonic-gate if (ta_p->model == PR_MODEL_NATIVE) { 1397*0Sstevel@tonic-gate ulwp_t ulwp; 1398*0Sstevel@tonic-gate 1399*0Sstevel@tonic-gate if (ps_pdread(ph_p, psaddr, &ulwp, sizeof (ulwp)) != PS_OK && 1400*0Sstevel@tonic-gate ((void) memset(&ulwp, 0, sizeof (ulwp)), 1401*0Sstevel@tonic-gate ps_pdread(ph_p, psaddr, &ulwp, REPLACEMENT_SIZE)) != PS_OK) 1402*0Sstevel@tonic-gate return_val = TD_DBERR; 1403*0Sstevel@tonic-gate else 1404*0Sstevel@tonic-gate td_thr2to(ta_p, psaddr, &ulwp, ti_p); 1405*0Sstevel@tonic-gate } else { 1406*0Sstevel@tonic-gate #if defined(_LP64) && defined(_SYSCALL32) 1407*0Sstevel@tonic-gate ulwp32_t ulwp; 1408*0Sstevel@tonic-gate 1409*0Sstevel@tonic-gate if (ps_pdread(ph_p, psaddr, &ulwp, sizeof (ulwp)) != PS_OK && 1410*0Sstevel@tonic-gate ((void) memset(&ulwp, 0, sizeof (ulwp)), 1411*0Sstevel@tonic-gate ps_pdread(ph_p, psaddr, &ulwp, REPLACEMENT_SIZE32)) != 1412*0Sstevel@tonic-gate PS_OK) 1413*0Sstevel@tonic-gate return_val = TD_DBERR; 1414*0Sstevel@tonic-gate else 1415*0Sstevel@tonic-gate td_thr2to32(ta_p, psaddr, &ulwp, ti_p); 1416*0Sstevel@tonic-gate #else 1417*0Sstevel@tonic-gate return_val = TD_ERR; 1418*0Sstevel@tonic-gate #endif /* _SYSCALL32 */ 1419*0Sstevel@tonic-gate } 1420*0Sstevel@tonic-gate 1421*0Sstevel@tonic-gate (void) ps_pcontinue(ph_p); 1422*0Sstevel@tonic-gate ph_unlock(ta_p); 1423*0Sstevel@tonic-gate return (return_val); 1424*0Sstevel@tonic-gate } 1425*0Sstevel@tonic-gate 1426*0Sstevel@tonic-gate /* 1427*0Sstevel@tonic-gate * Given a process and an event number, return information about 1428*0Sstevel@tonic-gate * an address in the process or at which a breakpoint can be set 1429*0Sstevel@tonic-gate * to monitor the event. 
1430*0Sstevel@tonic-gate */ 1431*0Sstevel@tonic-gate #pragma weak td_ta_event_addr = __td_ta_event_addr 1432*0Sstevel@tonic-gate td_err_e 1433*0Sstevel@tonic-gate __td_ta_event_addr(td_thragent_t *ta_p, td_event_e event, td_notify_t *notify_p) 1434*0Sstevel@tonic-gate { 1435*0Sstevel@tonic-gate if (ta_p == NULL) 1436*0Sstevel@tonic-gate return (TD_BADTA); 1437*0Sstevel@tonic-gate if (event < TD_MIN_EVENT_NUM || event > TD_MAX_EVENT_NUM) 1438*0Sstevel@tonic-gate return (TD_NOEVENT); 1439*0Sstevel@tonic-gate if (notify_p == NULL) 1440*0Sstevel@tonic-gate return (TD_ERR); 1441*0Sstevel@tonic-gate 1442*0Sstevel@tonic-gate notify_p->type = NOTIFY_BPT; 1443*0Sstevel@tonic-gate notify_p->u.bptaddr = ta_p->tdb_events[event - TD_MIN_EVENT_NUM]; 1444*0Sstevel@tonic-gate 1445*0Sstevel@tonic-gate return (TD_OK); 1446*0Sstevel@tonic-gate } 1447*0Sstevel@tonic-gate 1448*0Sstevel@tonic-gate /* 1449*0Sstevel@tonic-gate * Add the events in eventset 2 to eventset 1. 1450*0Sstevel@tonic-gate */ 1451*0Sstevel@tonic-gate static void 1452*0Sstevel@tonic-gate eventsetaddset(td_thr_events_t *event1_p, td_thr_events_t *event2_p) 1453*0Sstevel@tonic-gate { 1454*0Sstevel@tonic-gate int i; 1455*0Sstevel@tonic-gate 1456*0Sstevel@tonic-gate for (i = 0; i < TD_EVENTSIZE; i++) 1457*0Sstevel@tonic-gate event1_p->event_bits[i] |= event2_p->event_bits[i]; 1458*0Sstevel@tonic-gate } 1459*0Sstevel@tonic-gate 1460*0Sstevel@tonic-gate /* 1461*0Sstevel@tonic-gate * Delete the events in eventset 2 from eventset 1. 
1462*0Sstevel@tonic-gate */ 1463*0Sstevel@tonic-gate static void 1464*0Sstevel@tonic-gate eventsetdelset(td_thr_events_t *event1_p, td_thr_events_t *event2_p) 1465*0Sstevel@tonic-gate { 1466*0Sstevel@tonic-gate int i; 1467*0Sstevel@tonic-gate 1468*0Sstevel@tonic-gate for (i = 0; i < TD_EVENTSIZE; i++) 1469*0Sstevel@tonic-gate event1_p->event_bits[i] &= ~event2_p->event_bits[i]; 1470*0Sstevel@tonic-gate } 1471*0Sstevel@tonic-gate 1472*0Sstevel@tonic-gate /* 1473*0Sstevel@tonic-gate * Either add or delete the given event set from a thread's event mask. 1474*0Sstevel@tonic-gate */ 1475*0Sstevel@tonic-gate static td_err_e 1476*0Sstevel@tonic-gate mod_eventset(td_thrhandle_t *th_p, td_thr_events_t *events, int onoff) 1477*0Sstevel@tonic-gate { 1478*0Sstevel@tonic-gate struct ps_prochandle *ph_p; 1479*0Sstevel@tonic-gate td_err_e return_val = TD_OK; 1480*0Sstevel@tonic-gate char enable; 1481*0Sstevel@tonic-gate td_thr_events_t evset; 1482*0Sstevel@tonic-gate psaddr_t psaddr_evset; 1483*0Sstevel@tonic-gate psaddr_t psaddr_enab; 1484*0Sstevel@tonic-gate 1485*0Sstevel@tonic-gate if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL) 1486*0Sstevel@tonic-gate return (return_val); 1487*0Sstevel@tonic-gate if (th_p->th_ta_p->model == PR_MODEL_NATIVE) { 1488*0Sstevel@tonic-gate ulwp_t *ulwp = (ulwp_t *)th_p->th_unique; 1489*0Sstevel@tonic-gate psaddr_evset = (psaddr_t)&ulwp->ul_td_evbuf.eventmask; 1490*0Sstevel@tonic-gate psaddr_enab = (psaddr_t)&ulwp->ul_td_events_enable; 1491*0Sstevel@tonic-gate } else { 1492*0Sstevel@tonic-gate #if defined(_LP64) && defined(_SYSCALL32) 1493*0Sstevel@tonic-gate ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique; 1494*0Sstevel@tonic-gate psaddr_evset = (psaddr_t)&ulwp->ul_td_evbuf.eventmask; 1495*0Sstevel@tonic-gate psaddr_enab = (psaddr_t)&ulwp->ul_td_events_enable; 1496*0Sstevel@tonic-gate #else 1497*0Sstevel@tonic-gate ph_unlock(th_p->th_ta_p); 1498*0Sstevel@tonic-gate return (TD_ERR); 1499*0Sstevel@tonic-gate #endif /* _SYSCALL32 */ 
1500*0Sstevel@tonic-gate } 1501*0Sstevel@tonic-gate if (ps_pstop(ph_p) != PS_OK) { 1502*0Sstevel@tonic-gate ph_unlock(th_p->th_ta_p); 1503*0Sstevel@tonic-gate return (TD_DBERR); 1504*0Sstevel@tonic-gate } 1505*0Sstevel@tonic-gate 1506*0Sstevel@tonic-gate if (ps_pdread(ph_p, psaddr_evset, &evset, sizeof (evset)) != PS_OK) 1507*0Sstevel@tonic-gate return_val = TD_DBERR; 1508*0Sstevel@tonic-gate else { 1509*0Sstevel@tonic-gate if (onoff) 1510*0Sstevel@tonic-gate eventsetaddset(&evset, events); 1511*0Sstevel@tonic-gate else 1512*0Sstevel@tonic-gate eventsetdelset(&evset, events); 1513*0Sstevel@tonic-gate if (ps_pdwrite(ph_p, psaddr_evset, &evset, sizeof (evset)) 1514*0Sstevel@tonic-gate != PS_OK) 1515*0Sstevel@tonic-gate return_val = TD_DBERR; 1516*0Sstevel@tonic-gate else { 1517*0Sstevel@tonic-gate enable = 0; 1518*0Sstevel@tonic-gate if (td_eventismember(&evset, TD_EVENTS_ENABLE)) 1519*0Sstevel@tonic-gate enable = 1; 1520*0Sstevel@tonic-gate if (ps_pdwrite(ph_p, psaddr_enab, 1521*0Sstevel@tonic-gate &enable, sizeof (enable)) != PS_OK) 1522*0Sstevel@tonic-gate return_val = TD_DBERR; 1523*0Sstevel@tonic-gate } 1524*0Sstevel@tonic-gate } 1525*0Sstevel@tonic-gate 1526*0Sstevel@tonic-gate (void) ps_pcontinue(ph_p); 1527*0Sstevel@tonic-gate ph_unlock(th_p->th_ta_p); 1528*0Sstevel@tonic-gate return (return_val); 1529*0Sstevel@tonic-gate } 1530*0Sstevel@tonic-gate 1531*0Sstevel@tonic-gate /* 1532*0Sstevel@tonic-gate * Enable or disable tracing for a given thread. Tracing 1533*0Sstevel@tonic-gate * is filtered based on the event mask of each thread. Tracing 1534*0Sstevel@tonic-gate * can be turned on/off for the thread without changing thread 1535*0Sstevel@tonic-gate * event mask. 1536*0Sstevel@tonic-gate * Currently unused by dbx. 
1537*0Sstevel@tonic-gate */ 1538*0Sstevel@tonic-gate #pragma weak td_thr_event_enable = __td_thr_event_enable 1539*0Sstevel@tonic-gate td_err_e 1540*0Sstevel@tonic-gate __td_thr_event_enable(td_thrhandle_t *th_p, int onoff) 1541*0Sstevel@tonic-gate { 1542*0Sstevel@tonic-gate td_thr_events_t evset; 1543*0Sstevel@tonic-gate 1544*0Sstevel@tonic-gate td_event_emptyset(&evset); 1545*0Sstevel@tonic-gate td_event_addset(&evset, TD_EVENTS_ENABLE); 1546*0Sstevel@tonic-gate return (mod_eventset(th_p, &evset, onoff)); 1547*0Sstevel@tonic-gate } 1548*0Sstevel@tonic-gate 1549*0Sstevel@tonic-gate /* 1550*0Sstevel@tonic-gate * Set event mask to enable event. event is turned on in 1551*0Sstevel@tonic-gate * event mask for thread. If a thread encounters an event 1552*0Sstevel@tonic-gate * for which its event mask is on, notification will be sent 1553*0Sstevel@tonic-gate * to the debugger. 1554*0Sstevel@tonic-gate * Addresses for each event are provided to the 1555*0Sstevel@tonic-gate * debugger. It is assumed that a breakpoint of some type will 1556*0Sstevel@tonic-gate * be placed at that address. If the event mask for the thread 1557*0Sstevel@tonic-gate * is on, the instruction at the address will be executed. 1558*0Sstevel@tonic-gate * Otherwise, the instruction will be skipped. 1559*0Sstevel@tonic-gate */ 1560*0Sstevel@tonic-gate #pragma weak td_thr_set_event = __td_thr_set_event 1561*0Sstevel@tonic-gate td_err_e 1562*0Sstevel@tonic-gate __td_thr_set_event(td_thrhandle_t *th_p, td_thr_events_t *events) 1563*0Sstevel@tonic-gate { 1564*0Sstevel@tonic-gate return (mod_eventset(th_p, events, 1)); 1565*0Sstevel@tonic-gate } 1566*0Sstevel@tonic-gate 1567*0Sstevel@tonic-gate /* 1568*0Sstevel@tonic-gate * Enable or disable a set of events in the process-global event mask, 1569*0Sstevel@tonic-gate * depending on the value of onoff. 
1570*0Sstevel@tonic-gate */ 1571*0Sstevel@tonic-gate static td_err_e 1572*0Sstevel@tonic-gate td_ta_mod_event(td_thragent_t *ta_p, td_thr_events_t *events, int onoff) 1573*0Sstevel@tonic-gate { 1574*0Sstevel@tonic-gate struct ps_prochandle *ph_p; 1575*0Sstevel@tonic-gate td_thr_events_t targ_eventset; 1576*0Sstevel@tonic-gate td_err_e return_val; 1577*0Sstevel@tonic-gate 1578*0Sstevel@tonic-gate if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL) 1579*0Sstevel@tonic-gate return (return_val); 1580*0Sstevel@tonic-gate if (ps_pstop(ph_p) != PS_OK) { 1581*0Sstevel@tonic-gate ph_unlock(ta_p); 1582*0Sstevel@tonic-gate return (TD_DBERR); 1583*0Sstevel@tonic-gate } 1584*0Sstevel@tonic-gate if (ps_pdread(ph_p, ta_p->tdb_eventmask_addr, 1585*0Sstevel@tonic-gate &targ_eventset, sizeof (targ_eventset)) != PS_OK) 1586*0Sstevel@tonic-gate return_val = TD_DBERR; 1587*0Sstevel@tonic-gate else { 1588*0Sstevel@tonic-gate if (onoff) 1589*0Sstevel@tonic-gate eventsetaddset(&targ_eventset, events); 1590*0Sstevel@tonic-gate else 1591*0Sstevel@tonic-gate eventsetdelset(&targ_eventset, events); 1592*0Sstevel@tonic-gate if (ps_pdwrite(ph_p, ta_p->tdb_eventmask_addr, 1593*0Sstevel@tonic-gate &targ_eventset, sizeof (targ_eventset)) != PS_OK) 1594*0Sstevel@tonic-gate return_val = TD_DBERR; 1595*0Sstevel@tonic-gate } 1596*0Sstevel@tonic-gate (void) ps_pcontinue(ph_p); 1597*0Sstevel@tonic-gate ph_unlock(ta_p); 1598*0Sstevel@tonic-gate return (return_val); 1599*0Sstevel@tonic-gate } 1600*0Sstevel@tonic-gate 1601*0Sstevel@tonic-gate /* 1602*0Sstevel@tonic-gate * Enable a set of events in the process-global event mask. 
1603*0Sstevel@tonic-gate */ 1604*0Sstevel@tonic-gate #pragma weak td_ta_set_event = __td_ta_set_event 1605*0Sstevel@tonic-gate td_err_e 1606*0Sstevel@tonic-gate __td_ta_set_event(td_thragent_t *ta_p, td_thr_events_t *events) 1607*0Sstevel@tonic-gate { 1608*0Sstevel@tonic-gate return (td_ta_mod_event(ta_p, events, 1)); 1609*0Sstevel@tonic-gate } 1610*0Sstevel@tonic-gate 1611*0Sstevel@tonic-gate /* 1612*0Sstevel@tonic-gate * Set event mask to disable the given event set; these events are cleared 1613*0Sstevel@tonic-gate * from the event mask of the thread. Events that occur for a thread 1614*0Sstevel@tonic-gate * with the event masked off will not cause notification to be 1615*0Sstevel@tonic-gate * sent to the debugger (see td_thr_set_event for fuller description). 1616*0Sstevel@tonic-gate */ 1617*0Sstevel@tonic-gate #pragma weak td_thr_clear_event = __td_thr_clear_event 1618*0Sstevel@tonic-gate td_err_e 1619*0Sstevel@tonic-gate __td_thr_clear_event(td_thrhandle_t *th_p, td_thr_events_t *events) 1620*0Sstevel@tonic-gate { 1621*0Sstevel@tonic-gate return (mod_eventset(th_p, events, 0)); 1622*0Sstevel@tonic-gate } 1623*0Sstevel@tonic-gate 1624*0Sstevel@tonic-gate /* 1625*0Sstevel@tonic-gate * Disable a set of events in the process-global event mask. 1626*0Sstevel@tonic-gate */ 1627*0Sstevel@tonic-gate #pragma weak td_ta_clear_event = __td_ta_clear_event 1628*0Sstevel@tonic-gate td_err_e 1629*0Sstevel@tonic-gate __td_ta_clear_event(td_thragent_t *ta_p, td_thr_events_t *events) 1630*0Sstevel@tonic-gate { 1631*0Sstevel@tonic-gate return (td_ta_mod_event(ta_p, events, 0)); 1632*0Sstevel@tonic-gate } 1633*0Sstevel@tonic-gate 1634*0Sstevel@tonic-gate /* 1635*0Sstevel@tonic-gate * This function returns the most recent event message, if any, 1636*0Sstevel@tonic-gate * associated with a thread. Given a thread handle, return the message 1637*0Sstevel@tonic-gate * corresponding to the event encountered by the thread. 
Only one 1638*0Sstevel@tonic-gate * message per thread is saved. Messages from earlier events are lost 1639*0Sstevel@tonic-gate * when later events occur. 1640*0Sstevel@tonic-gate */ 1641*0Sstevel@tonic-gate #pragma weak td_thr_event_getmsg = __td_thr_event_getmsg 1642*0Sstevel@tonic-gate td_err_e 1643*0Sstevel@tonic-gate __td_thr_event_getmsg(td_thrhandle_t *th_p, td_event_msg_t *msg) 1644*0Sstevel@tonic-gate { 1645*0Sstevel@tonic-gate struct ps_prochandle *ph_p; 1646*0Sstevel@tonic-gate td_err_e return_val = TD_OK; 1647*0Sstevel@tonic-gate psaddr_t psaddr; 1648*0Sstevel@tonic-gate 1649*0Sstevel@tonic-gate if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL) 1650*0Sstevel@tonic-gate return (return_val); 1651*0Sstevel@tonic-gate if (ps_pstop(ph_p) != PS_OK) { 1652*0Sstevel@tonic-gate ph_unlock(th_p->th_ta_p); 1653*0Sstevel@tonic-gate return (TD_BADTA); 1654*0Sstevel@tonic-gate } 1655*0Sstevel@tonic-gate if (th_p->th_ta_p->model == PR_MODEL_NATIVE) { 1656*0Sstevel@tonic-gate ulwp_t *ulwp = (ulwp_t *)th_p->th_unique; 1657*0Sstevel@tonic-gate td_evbuf_t evbuf; 1658*0Sstevel@tonic-gate 1659*0Sstevel@tonic-gate psaddr = (psaddr_t)&ulwp->ul_td_evbuf; 1660*0Sstevel@tonic-gate if (ps_pdread(ph_p, psaddr, &evbuf, sizeof (evbuf)) != PS_OK) { 1661*0Sstevel@tonic-gate return_val = TD_DBERR; 1662*0Sstevel@tonic-gate } else if (evbuf.eventnum == TD_EVENT_NONE) { 1663*0Sstevel@tonic-gate return_val = TD_NOEVENT; 1664*0Sstevel@tonic-gate } else { 1665*0Sstevel@tonic-gate msg->event = evbuf.eventnum; 1666*0Sstevel@tonic-gate msg->th_p = (td_thrhandle_t *)th_p; 1667*0Sstevel@tonic-gate msg->msg.data = (uintptr_t)evbuf.eventdata; 1668*0Sstevel@tonic-gate /* "Consume" the message */ 1669*0Sstevel@tonic-gate evbuf.eventnum = TD_EVENT_NONE; 1670*0Sstevel@tonic-gate evbuf.eventdata = NULL; 1671*0Sstevel@tonic-gate if (ps_pdwrite(ph_p, psaddr, &evbuf, sizeof (evbuf)) 1672*0Sstevel@tonic-gate != PS_OK) 1673*0Sstevel@tonic-gate return_val = TD_DBERR; 1674*0Sstevel@tonic-gate } 
1675*0Sstevel@tonic-gate } else { 1676*0Sstevel@tonic-gate #if defined(_LP64) && defined(_SYSCALL32) 1677*0Sstevel@tonic-gate ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique; 1678*0Sstevel@tonic-gate td_evbuf32_t evbuf; 1679*0Sstevel@tonic-gate 1680*0Sstevel@tonic-gate psaddr = (psaddr_t)&ulwp->ul_td_evbuf; 1681*0Sstevel@tonic-gate if (ps_pdread(ph_p, psaddr, &evbuf, sizeof (evbuf)) != PS_OK) { 1682*0Sstevel@tonic-gate return_val = TD_DBERR; 1683*0Sstevel@tonic-gate } else if (evbuf.eventnum == TD_EVENT_NONE) { 1684*0Sstevel@tonic-gate return_val = TD_NOEVENT; 1685*0Sstevel@tonic-gate } else { 1686*0Sstevel@tonic-gate msg->event = evbuf.eventnum; 1687*0Sstevel@tonic-gate msg->th_p = (td_thrhandle_t *)th_p; 1688*0Sstevel@tonic-gate msg->msg.data = (uintptr_t)evbuf.eventdata; 1689*0Sstevel@tonic-gate /* "Consume" the message */ 1690*0Sstevel@tonic-gate evbuf.eventnum = TD_EVENT_NONE; 1691*0Sstevel@tonic-gate evbuf.eventdata = NULL; 1692*0Sstevel@tonic-gate if (ps_pdwrite(ph_p, psaddr, &evbuf, sizeof (evbuf)) 1693*0Sstevel@tonic-gate != PS_OK) 1694*0Sstevel@tonic-gate return_val = TD_DBERR; 1695*0Sstevel@tonic-gate } 1696*0Sstevel@tonic-gate #else 1697*0Sstevel@tonic-gate return_val = TD_ERR; 1698*0Sstevel@tonic-gate #endif /* _SYSCALL32 */ 1699*0Sstevel@tonic-gate } 1700*0Sstevel@tonic-gate 1701*0Sstevel@tonic-gate (void) ps_pcontinue(ph_p); 1702*0Sstevel@tonic-gate ph_unlock(th_p->th_ta_p); 1703*0Sstevel@tonic-gate return (return_val); 1704*0Sstevel@tonic-gate } 1705*0Sstevel@tonic-gate 1706*0Sstevel@tonic-gate /* 1707*0Sstevel@tonic-gate * The callback function td_ta_event_getmsg uses when looking for 1708*0Sstevel@tonic-gate * a thread with an event. A thin wrapper around td_thr_event_getmsg. 
1709*0Sstevel@tonic-gate */ 1710*0Sstevel@tonic-gate static int 1711*0Sstevel@tonic-gate event_msg_cb(const td_thrhandle_t *th_p, void *arg) 1712*0Sstevel@tonic-gate { 1713*0Sstevel@tonic-gate static td_thrhandle_t th; 1714*0Sstevel@tonic-gate td_event_msg_t *msg = arg; 1715*0Sstevel@tonic-gate 1716*0Sstevel@tonic-gate if (__td_thr_event_getmsg((td_thrhandle_t *)th_p, msg) == TD_OK) { 1717*0Sstevel@tonic-gate /* 1718*0Sstevel@tonic-gate * Got an event, stop iterating. 1719*0Sstevel@tonic-gate * 1720*0Sstevel@tonic-gate * Because of past mistakes in interface definition, 1721*0Sstevel@tonic-gate * we are forced to pass back a static local variable 1722*0Sstevel@tonic-gate * for the thread handle because th_p is a pointer 1723*0Sstevel@tonic-gate * to a local variable in __td_ta_thr_iter(). 1724*0Sstevel@tonic-gate * Grr... 1725*0Sstevel@tonic-gate */ 1726*0Sstevel@tonic-gate th = *th_p; 1727*0Sstevel@tonic-gate msg->th_p = &th; 1728*0Sstevel@tonic-gate return (1); 1729*0Sstevel@tonic-gate } 1730*0Sstevel@tonic-gate return (0); 1731*0Sstevel@tonic-gate } 1732*0Sstevel@tonic-gate 1733*0Sstevel@tonic-gate /* 1734*0Sstevel@tonic-gate * This function is just like td_thr_event_getmsg, except that it is 1735*0Sstevel@tonic-gate * passed a process handle rather than a thread handle, and returns 1736*0Sstevel@tonic-gate * an event message for some thread in the process that has an event 1737*0Sstevel@tonic-gate * message pending. If no thread has an event message pending, this 1738*0Sstevel@tonic-gate * routine returns TD_NOEVENT. Thus, all pending event messages may 1739*0Sstevel@tonic-gate * be collected from a process by repeatedly calling this routine 1740*0Sstevel@tonic-gate * until it returns TD_NOEVENT. 
1741*0Sstevel@tonic-gate */ 1742*0Sstevel@tonic-gate #pragma weak td_ta_event_getmsg = __td_ta_event_getmsg 1743*0Sstevel@tonic-gate td_err_e 1744*0Sstevel@tonic-gate __td_ta_event_getmsg(td_thragent_t *ta_p, td_event_msg_t *msg) 1745*0Sstevel@tonic-gate { 1746*0Sstevel@tonic-gate td_err_e return_val; 1747*0Sstevel@tonic-gate 1748*0Sstevel@tonic-gate if (ta_p == NULL) 1749*0Sstevel@tonic-gate return (TD_BADTA); 1750*0Sstevel@tonic-gate if (ta_p->ph_p == NULL) 1751*0Sstevel@tonic-gate return (TD_BADPH); 1752*0Sstevel@tonic-gate if (msg == NULL) 1753*0Sstevel@tonic-gate return (TD_ERR); 1754*0Sstevel@tonic-gate msg->event = TD_EVENT_NONE; 1755*0Sstevel@tonic-gate if ((return_val = __td_ta_thr_iter(ta_p, event_msg_cb, msg, 1756*0Sstevel@tonic-gate TD_THR_ANY_STATE, TD_THR_LOWEST_PRIORITY, TD_SIGNO_MASK, 1757*0Sstevel@tonic-gate TD_THR_ANY_USER_FLAGS)) != TD_OK) 1758*0Sstevel@tonic-gate return (return_val); 1759*0Sstevel@tonic-gate if (msg->event == TD_EVENT_NONE) 1760*0Sstevel@tonic-gate return (TD_NOEVENT); 1761*0Sstevel@tonic-gate return (TD_OK); 1762*0Sstevel@tonic-gate } 1763*0Sstevel@tonic-gate 1764*0Sstevel@tonic-gate static lwpid_t 1765*0Sstevel@tonic-gate thr_to_lwpid(const td_thrhandle_t *th_p) 1766*0Sstevel@tonic-gate { 1767*0Sstevel@tonic-gate struct ps_prochandle *ph_p = th_p->th_ta_p->ph_p; 1768*0Sstevel@tonic-gate lwpid_t lwpid; 1769*0Sstevel@tonic-gate 1770*0Sstevel@tonic-gate /* 1771*0Sstevel@tonic-gate * The caller holds the prochandle lock 1772*0Sstevel@tonic-gate * and has already verfied everything. 
1773*0Sstevel@tonic-gate */ 1774*0Sstevel@tonic-gate if (th_p->th_ta_p->model == PR_MODEL_NATIVE) { 1775*0Sstevel@tonic-gate ulwp_t *ulwp = (ulwp_t *)th_p->th_unique; 1776*0Sstevel@tonic-gate 1777*0Sstevel@tonic-gate if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_lwpid, 1778*0Sstevel@tonic-gate &lwpid, sizeof (lwpid)) != PS_OK) 1779*0Sstevel@tonic-gate lwpid = 0; 1780*0Sstevel@tonic-gate else if (lwpid == 0) 1781*0Sstevel@tonic-gate lwpid = 1; 1782*0Sstevel@tonic-gate } else { 1783*0Sstevel@tonic-gate #if defined(_LP64) && defined(_SYSCALL32) 1784*0Sstevel@tonic-gate ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique; 1785*0Sstevel@tonic-gate 1786*0Sstevel@tonic-gate if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_lwpid, 1787*0Sstevel@tonic-gate &lwpid, sizeof (lwpid)) != PS_OK) 1788*0Sstevel@tonic-gate lwpid = 0; 1789*0Sstevel@tonic-gate else if (lwpid == 0) 1790*0Sstevel@tonic-gate lwpid = 1; 1791*0Sstevel@tonic-gate #else 1792*0Sstevel@tonic-gate lwpid = 0; 1793*0Sstevel@tonic-gate #endif /* _SYSCALL32 */ 1794*0Sstevel@tonic-gate } 1795*0Sstevel@tonic-gate 1796*0Sstevel@tonic-gate return (lwpid); 1797*0Sstevel@tonic-gate } 1798*0Sstevel@tonic-gate 1799*0Sstevel@tonic-gate /* 1800*0Sstevel@tonic-gate * Suspend a thread. 1801*0Sstevel@tonic-gate * XXX: What does this mean in a one-level model? 
1802*0Sstevel@tonic-gate */ 1803*0Sstevel@tonic-gate #pragma weak td_thr_dbsuspend = __td_thr_dbsuspend 1804*0Sstevel@tonic-gate td_err_e 1805*0Sstevel@tonic-gate __td_thr_dbsuspend(const td_thrhandle_t *th_p) 1806*0Sstevel@tonic-gate { 1807*0Sstevel@tonic-gate struct ps_prochandle *ph_p; 1808*0Sstevel@tonic-gate td_err_e return_val; 1809*0Sstevel@tonic-gate 1810*0Sstevel@tonic-gate if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL) 1811*0Sstevel@tonic-gate return (return_val); 1812*0Sstevel@tonic-gate if (ps_lstop(ph_p, thr_to_lwpid(th_p)) != PS_OK) 1813*0Sstevel@tonic-gate return_val = TD_DBERR; 1814*0Sstevel@tonic-gate ph_unlock(th_p->th_ta_p); 1815*0Sstevel@tonic-gate return (return_val); 1816*0Sstevel@tonic-gate } 1817*0Sstevel@tonic-gate 1818*0Sstevel@tonic-gate /* 1819*0Sstevel@tonic-gate * Resume a suspended thread. 1820*0Sstevel@tonic-gate * XXX: What does this mean in a one-level model? 1821*0Sstevel@tonic-gate */ 1822*0Sstevel@tonic-gate #pragma weak td_thr_dbresume = __td_thr_dbresume 1823*0Sstevel@tonic-gate td_err_e 1824*0Sstevel@tonic-gate __td_thr_dbresume(const td_thrhandle_t *th_p) 1825*0Sstevel@tonic-gate { 1826*0Sstevel@tonic-gate struct ps_prochandle *ph_p; 1827*0Sstevel@tonic-gate td_err_e return_val; 1828*0Sstevel@tonic-gate 1829*0Sstevel@tonic-gate if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL) 1830*0Sstevel@tonic-gate return (return_val); 1831*0Sstevel@tonic-gate if (ps_lcontinue(ph_p, thr_to_lwpid(th_p)) != PS_OK) 1832*0Sstevel@tonic-gate return_val = TD_DBERR; 1833*0Sstevel@tonic-gate ph_unlock(th_p->th_ta_p); 1834*0Sstevel@tonic-gate return (return_val); 1835*0Sstevel@tonic-gate } 1836*0Sstevel@tonic-gate 1837*0Sstevel@tonic-gate /* 1838*0Sstevel@tonic-gate * Set a thread's signal mask. 1839*0Sstevel@tonic-gate * Currently unused by dbx. 
1840*0Sstevel@tonic-gate */ 1841*0Sstevel@tonic-gate #pragma weak td_thr_sigsetmask = __td_thr_sigsetmask 1842*0Sstevel@tonic-gate /* ARGSUSED */ 1843*0Sstevel@tonic-gate td_err_e 1844*0Sstevel@tonic-gate __td_thr_sigsetmask(const td_thrhandle_t *th_p, const sigset_t ti_sigmask) 1845*0Sstevel@tonic-gate { 1846*0Sstevel@tonic-gate return (TD_NOCAPAB); 1847*0Sstevel@tonic-gate } 1848*0Sstevel@tonic-gate 1849*0Sstevel@tonic-gate /* 1850*0Sstevel@tonic-gate * Set a thread's "signals-pending" set. 1851*0Sstevel@tonic-gate * Currently unused by dbx. 1852*0Sstevel@tonic-gate */ 1853*0Sstevel@tonic-gate #pragma weak td_thr_setsigpending = __td_thr_setsigpending 1854*0Sstevel@tonic-gate /* ARGSUSED */ 1855*0Sstevel@tonic-gate td_err_e 1856*0Sstevel@tonic-gate __td_thr_setsigpending(const td_thrhandle_t *th_p, 1857*0Sstevel@tonic-gate uchar_t ti_pending_flag, const sigset_t ti_pending) 1858*0Sstevel@tonic-gate { 1859*0Sstevel@tonic-gate return (TD_NOCAPAB); 1860*0Sstevel@tonic-gate } 1861*0Sstevel@tonic-gate 1862*0Sstevel@tonic-gate /* 1863*0Sstevel@tonic-gate * Get a thread's general register set. 
1864*0Sstevel@tonic-gate */ 1865*0Sstevel@tonic-gate #pragma weak td_thr_getgregs = __td_thr_getgregs 1866*0Sstevel@tonic-gate td_err_e 1867*0Sstevel@tonic-gate __td_thr_getgregs(td_thrhandle_t *th_p, prgregset_t regset) 1868*0Sstevel@tonic-gate { 1869*0Sstevel@tonic-gate struct ps_prochandle *ph_p; 1870*0Sstevel@tonic-gate td_err_e return_val; 1871*0Sstevel@tonic-gate 1872*0Sstevel@tonic-gate if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL) 1873*0Sstevel@tonic-gate return (return_val); 1874*0Sstevel@tonic-gate if (ps_pstop(ph_p) != PS_OK) { 1875*0Sstevel@tonic-gate ph_unlock(th_p->th_ta_p); 1876*0Sstevel@tonic-gate return (TD_DBERR); 1877*0Sstevel@tonic-gate } 1878*0Sstevel@tonic-gate 1879*0Sstevel@tonic-gate if (ps_lgetregs(ph_p, thr_to_lwpid(th_p), regset) != PS_OK) 1880*0Sstevel@tonic-gate return_val = TD_DBERR; 1881*0Sstevel@tonic-gate 1882*0Sstevel@tonic-gate (void) ps_pcontinue(ph_p); 1883*0Sstevel@tonic-gate ph_unlock(th_p->th_ta_p); 1884*0Sstevel@tonic-gate return (return_val); 1885*0Sstevel@tonic-gate } 1886*0Sstevel@tonic-gate 1887*0Sstevel@tonic-gate /* 1888*0Sstevel@tonic-gate * Set a thread's general register set. 
1889*0Sstevel@tonic-gate */ 1890*0Sstevel@tonic-gate #pragma weak td_thr_setgregs = __td_thr_setgregs 1891*0Sstevel@tonic-gate td_err_e 1892*0Sstevel@tonic-gate __td_thr_setgregs(td_thrhandle_t *th_p, const prgregset_t regset) 1893*0Sstevel@tonic-gate { 1894*0Sstevel@tonic-gate struct ps_prochandle *ph_p; 1895*0Sstevel@tonic-gate td_err_e return_val; 1896*0Sstevel@tonic-gate 1897*0Sstevel@tonic-gate if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL) 1898*0Sstevel@tonic-gate return (return_val); 1899*0Sstevel@tonic-gate if (ps_pstop(ph_p) != PS_OK) { 1900*0Sstevel@tonic-gate ph_unlock(th_p->th_ta_p); 1901*0Sstevel@tonic-gate return (TD_DBERR); 1902*0Sstevel@tonic-gate } 1903*0Sstevel@tonic-gate 1904*0Sstevel@tonic-gate if (ps_lsetregs(ph_p, thr_to_lwpid(th_p), regset) != PS_OK) 1905*0Sstevel@tonic-gate return_val = TD_DBERR; 1906*0Sstevel@tonic-gate 1907*0Sstevel@tonic-gate (void) ps_pcontinue(ph_p); 1908*0Sstevel@tonic-gate ph_unlock(th_p->th_ta_p); 1909*0Sstevel@tonic-gate return (return_val); 1910*0Sstevel@tonic-gate } 1911*0Sstevel@tonic-gate 1912*0Sstevel@tonic-gate /* 1913*0Sstevel@tonic-gate * Get a thread's floating-point register set. 
1914*0Sstevel@tonic-gate */ 1915*0Sstevel@tonic-gate #pragma weak td_thr_getfpregs = __td_thr_getfpregs 1916*0Sstevel@tonic-gate td_err_e 1917*0Sstevel@tonic-gate __td_thr_getfpregs(td_thrhandle_t *th_p, prfpregset_t *fpregset) 1918*0Sstevel@tonic-gate { 1919*0Sstevel@tonic-gate struct ps_prochandle *ph_p; 1920*0Sstevel@tonic-gate td_err_e return_val; 1921*0Sstevel@tonic-gate 1922*0Sstevel@tonic-gate if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL) 1923*0Sstevel@tonic-gate return (return_val); 1924*0Sstevel@tonic-gate if (ps_pstop(ph_p) != PS_OK) { 1925*0Sstevel@tonic-gate ph_unlock(th_p->th_ta_p); 1926*0Sstevel@tonic-gate return (TD_DBERR); 1927*0Sstevel@tonic-gate } 1928*0Sstevel@tonic-gate 1929*0Sstevel@tonic-gate if (ps_lgetfpregs(ph_p, thr_to_lwpid(th_p), fpregset) != PS_OK) 1930*0Sstevel@tonic-gate return_val = TD_DBERR; 1931*0Sstevel@tonic-gate 1932*0Sstevel@tonic-gate (void) ps_pcontinue(ph_p); 1933*0Sstevel@tonic-gate ph_unlock(th_p->th_ta_p); 1934*0Sstevel@tonic-gate return (return_val); 1935*0Sstevel@tonic-gate } 1936*0Sstevel@tonic-gate 1937*0Sstevel@tonic-gate /* 1938*0Sstevel@tonic-gate * Set a thread's floating-point register set. 
1939*0Sstevel@tonic-gate */ 1940*0Sstevel@tonic-gate #pragma weak td_thr_setfpregs = __td_thr_setfpregs 1941*0Sstevel@tonic-gate td_err_e 1942*0Sstevel@tonic-gate __td_thr_setfpregs(td_thrhandle_t *th_p, const prfpregset_t *fpregset) 1943*0Sstevel@tonic-gate { 1944*0Sstevel@tonic-gate struct ps_prochandle *ph_p; 1945*0Sstevel@tonic-gate td_err_e return_val; 1946*0Sstevel@tonic-gate 1947*0Sstevel@tonic-gate if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL) 1948*0Sstevel@tonic-gate return (return_val); 1949*0Sstevel@tonic-gate if (ps_pstop(ph_p) != PS_OK) { 1950*0Sstevel@tonic-gate ph_unlock(th_p->th_ta_p); 1951*0Sstevel@tonic-gate return (TD_DBERR); 1952*0Sstevel@tonic-gate } 1953*0Sstevel@tonic-gate 1954*0Sstevel@tonic-gate if (ps_lsetfpregs(ph_p, thr_to_lwpid(th_p), fpregset) != PS_OK) 1955*0Sstevel@tonic-gate return_val = TD_DBERR; 1956*0Sstevel@tonic-gate 1957*0Sstevel@tonic-gate (void) ps_pcontinue(ph_p); 1958*0Sstevel@tonic-gate ph_unlock(th_p->th_ta_p); 1959*0Sstevel@tonic-gate return (return_val); 1960*0Sstevel@tonic-gate } 1961*0Sstevel@tonic-gate 1962*0Sstevel@tonic-gate /* 1963*0Sstevel@tonic-gate * Get the size of the extra state register set for this architecture. 1964*0Sstevel@tonic-gate * Currently unused by dbx. 
1965*0Sstevel@tonic-gate */ 1966*0Sstevel@tonic-gate #pragma weak td_thr_getxregsize = __td_thr_getxregsize 1967*0Sstevel@tonic-gate /* ARGSUSED */ 1968*0Sstevel@tonic-gate td_err_e 1969*0Sstevel@tonic-gate __td_thr_getxregsize(td_thrhandle_t *th_p, int *xregsize) 1970*0Sstevel@tonic-gate { 1971*0Sstevel@tonic-gate #if defined(__sparc) 1972*0Sstevel@tonic-gate struct ps_prochandle *ph_p; 1973*0Sstevel@tonic-gate td_err_e return_val; 1974*0Sstevel@tonic-gate 1975*0Sstevel@tonic-gate if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL) 1976*0Sstevel@tonic-gate return (return_val); 1977*0Sstevel@tonic-gate if (ps_pstop(ph_p) != PS_OK) { 1978*0Sstevel@tonic-gate ph_unlock(th_p->th_ta_p); 1979*0Sstevel@tonic-gate return (TD_DBERR); 1980*0Sstevel@tonic-gate } 1981*0Sstevel@tonic-gate 1982*0Sstevel@tonic-gate if (ps_lgetxregsize(ph_p, thr_to_lwpid(th_p), xregsize) != PS_OK) 1983*0Sstevel@tonic-gate return_val = TD_DBERR; 1984*0Sstevel@tonic-gate 1985*0Sstevel@tonic-gate (void) ps_pcontinue(ph_p); 1986*0Sstevel@tonic-gate ph_unlock(th_p->th_ta_p); 1987*0Sstevel@tonic-gate return (return_val); 1988*0Sstevel@tonic-gate #else /* __sparc */ 1989*0Sstevel@tonic-gate return (TD_NOXREGS); 1990*0Sstevel@tonic-gate #endif /* __sparc */ 1991*0Sstevel@tonic-gate } 1992*0Sstevel@tonic-gate 1993*0Sstevel@tonic-gate /* 1994*0Sstevel@tonic-gate * Get a thread's extra state register set. 
1995*0Sstevel@tonic-gate */ 1996*0Sstevel@tonic-gate #pragma weak td_thr_getxregs = __td_thr_getxregs 1997*0Sstevel@tonic-gate /* ARGSUSED */ 1998*0Sstevel@tonic-gate td_err_e 1999*0Sstevel@tonic-gate __td_thr_getxregs(td_thrhandle_t *th_p, void *xregset) 2000*0Sstevel@tonic-gate { 2001*0Sstevel@tonic-gate #if defined(__sparc) 2002*0Sstevel@tonic-gate struct ps_prochandle *ph_p; 2003*0Sstevel@tonic-gate td_err_e return_val; 2004*0Sstevel@tonic-gate 2005*0Sstevel@tonic-gate if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL) 2006*0Sstevel@tonic-gate return (return_val); 2007*0Sstevel@tonic-gate if (ps_pstop(ph_p) != PS_OK) { 2008*0Sstevel@tonic-gate ph_unlock(th_p->th_ta_p); 2009*0Sstevel@tonic-gate return (TD_DBERR); 2010*0Sstevel@tonic-gate } 2011*0Sstevel@tonic-gate 2012*0Sstevel@tonic-gate if (ps_lgetxregs(ph_p, thr_to_lwpid(th_p), (caddr_t)xregset) != PS_OK) 2013*0Sstevel@tonic-gate return_val = TD_DBERR; 2014*0Sstevel@tonic-gate 2015*0Sstevel@tonic-gate (void) ps_pcontinue(ph_p); 2016*0Sstevel@tonic-gate ph_unlock(th_p->th_ta_p); 2017*0Sstevel@tonic-gate return (return_val); 2018*0Sstevel@tonic-gate #else /* __sparc */ 2019*0Sstevel@tonic-gate return (TD_NOXREGS); 2020*0Sstevel@tonic-gate #endif /* __sparc */ 2021*0Sstevel@tonic-gate } 2022*0Sstevel@tonic-gate 2023*0Sstevel@tonic-gate /* 2024*0Sstevel@tonic-gate * Set a thread's extra state register set. 
2025*0Sstevel@tonic-gate */ 2026*0Sstevel@tonic-gate #pragma weak td_thr_setxregs = __td_thr_setxregs 2027*0Sstevel@tonic-gate /* ARGSUSED */ 2028*0Sstevel@tonic-gate td_err_e 2029*0Sstevel@tonic-gate __td_thr_setxregs(td_thrhandle_t *th_p, const void *xregset) 2030*0Sstevel@tonic-gate { 2031*0Sstevel@tonic-gate #if defined(__sparc) 2032*0Sstevel@tonic-gate struct ps_prochandle *ph_p; 2033*0Sstevel@tonic-gate td_err_e return_val; 2034*0Sstevel@tonic-gate 2035*0Sstevel@tonic-gate if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL) 2036*0Sstevel@tonic-gate return (return_val); 2037*0Sstevel@tonic-gate if (ps_pstop(ph_p) != PS_OK) { 2038*0Sstevel@tonic-gate ph_unlock(th_p->th_ta_p); 2039*0Sstevel@tonic-gate return (TD_DBERR); 2040*0Sstevel@tonic-gate } 2041*0Sstevel@tonic-gate 2042*0Sstevel@tonic-gate if (ps_lsetxregs(ph_p, thr_to_lwpid(th_p), (caddr_t)xregset) != PS_OK) 2043*0Sstevel@tonic-gate return_val = TD_DBERR; 2044*0Sstevel@tonic-gate 2045*0Sstevel@tonic-gate (void) ps_pcontinue(ph_p); 2046*0Sstevel@tonic-gate ph_unlock(th_p->th_ta_p); 2047*0Sstevel@tonic-gate return (return_val); 2048*0Sstevel@tonic-gate #else /* __sparc */ 2049*0Sstevel@tonic-gate return (TD_NOXREGS); 2050*0Sstevel@tonic-gate #endif /* __sparc */ 2051*0Sstevel@tonic-gate } 2052*0Sstevel@tonic-gate 2053*0Sstevel@tonic-gate struct searcher { 2054*0Sstevel@tonic-gate psaddr_t addr; 2055*0Sstevel@tonic-gate int status; 2056*0Sstevel@tonic-gate }; 2057*0Sstevel@tonic-gate 2058*0Sstevel@tonic-gate /* 2059*0Sstevel@tonic-gate * Check the struct thread address in *th_p again first 2060*0Sstevel@tonic-gate * value in "data". If value in data is found, set second value 2061*0Sstevel@tonic-gate * in "data" to 1 and return 1 to terminate iterations. 2062*0Sstevel@tonic-gate * This function is used by td_thr_validate() to verify that 2063*0Sstevel@tonic-gate * a thread handle is valid. 
 */
static int
td_searcher(const td_thrhandle_t *th_p, void *data)
{
	struct searcher *searcher_data = (struct searcher *)data;

	if (searcher_data->addr == th_p->th_unique) {
		searcher_data->status = 1;
		return (1);	/* non-zero terminates __td_ta_thr_iter() */
	}
	return (0);
}

/*
 * Validate the thread handle.  Check that
 * a thread exists in the thread agent/process that
 * corresponds to thread with handle *th_p.
 * Currently unused by dbx.
 */
#pragma weak td_thr_validate = __td_thr_validate
td_err_e
__td_thr_validate(const td_thrhandle_t *th_p)
{
	td_err_e return_val;
	struct searcher searcher_data = {0, 0};

	if (th_p == NULL)
		return (TD_BADTH);
	if (th_p->th_unique == NULL || th_p->th_ta_p == NULL)
		return (TD_BADTH);

	/*
	 * LOCKING EXCEPTION - Locking is not required
	 * here because no use of the thread agent is made (other
	 * than the sanity check) and checking of the thread
	 * agent will be done in __td_ta_thr_iter.
	 */

	/* Walk every thread; td_searcher() sets status if *th_p is seen. */
	searcher_data.addr = th_p->th_unique;
	return_val = __td_ta_thr_iter(th_p->th_ta_p,
	    td_searcher, &searcher_data,
	    TD_THR_ANY_STATE, TD_THR_LOWEST_PRIORITY,
	    TD_SIGNO_MASK, TD_THR_ANY_USER_FLAGS);

	if (return_val == TD_OK && searcher_data.status == 0)
		return_val = TD_NOTHR;

	return (return_val);
}

/*
 * Get a thread's private binding to a given thread specific
 * data(TSD) key(see thr_getspecific(3T).  If the thread doesn't
 * have a binding for a particular key, then NULL is returned.
 */
#pragma weak td_thr_tsd = __td_thr_tsd
td_err_e
__td_thr_tsd(td_thrhandle_t *th_p, thread_key_t key, void **data_pp)
{
	struct ps_prochandle *ph_p;
	td_thragent_t *ta_p;
	td_err_e return_val;
	int maxkey;
	int nkey;
	psaddr_t tsd_paddr;

	if (data_pp == NULL)
		return (TD_ERR);
	*data_pp = NULL;
	if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
		return (return_val);
	ta_p = th_p->th_ta_p;
	/* Stop the process so the TSD structures cannot change underfoot. */
	if (ps_pstop(ph_p) != PS_OK) {
		ph_unlock(ta_p);
		return (TD_DBERR);
	}

	if (ta_p->model == PR_MODEL_NATIVE) {
		ulwp_t *ulwp = (ulwp_t *)th_p->th_unique;
		tsd_metadata_t tsdm;
		tsd_t stsd;

		/* Fetch the process-wide TSD metadata from the uberdata. */
		if (ps_pdread(ph_p,
		    ta_p->uberdata_addr + offsetof(uberdata_t, tsd_metadata),
		    &tsdm, sizeof (tsdm)) != PS_OK)
			return_val = TD_DBERR;
		/* Fetch the thread's pointer to its slow-TSD array. */
		else if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_stsd,
		    &tsd_paddr, sizeof (tsd_paddr)) != PS_OK)
			return_val = TD_DBERR;
		else if (tsd_paddr != NULL &&
		    ps_pdread(ph_p, tsd_paddr, &stsd, sizeof (stsd)) != PS_OK)
			return_val = TD_DBERR;
		else {
			/* valid keys lie in [1, maxkey); checked below */
			maxkey = tsdm.tsdm_nused;
			/* slots actually allocated for this thread */
			nkey = tsd_paddr == NULL ? TSD_NFAST : stsd.tsd_nalloc;

			/* the first TSD_NFAST slots live inside the ulwp */
			if (key < TSD_NFAST)
				tsd_paddr = (psaddr_t)&ulwp->ul_ftsd[0];
		}
	} else {
#if defined(_LP64) && defined(_SYSCALL32)
		/* Same as above, using the 32-bit target's layouts. */
		ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique;
		tsd_metadata32_t tsdm;
		tsd32_t stsd;
		caddr32_t addr;

		if (ps_pdread(ph_p,
		    ta_p->uberdata_addr + offsetof(uberdata32_t, tsd_metadata),
		    &tsdm, sizeof (tsdm)) != PS_OK)
			return_val = TD_DBERR;
		else if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_stsd,
		    &addr, sizeof (addr)) != PS_OK)
			return_val = TD_DBERR;
		else if (addr != NULL &&
		    ps_pdread(ph_p, addr, &stsd, sizeof (stsd)) != PS_OK)
			return_val = TD_DBERR;
		else {
			maxkey = tsdm.tsdm_nused;
			nkey = addr == NULL ? TSD_NFAST : stsd.tsd_nalloc;

			if (key < TSD_NFAST) {
				tsd_paddr = (psaddr_t)&ulwp->ul_ftsd[0];
			} else {
				tsd_paddr = addr;
			}
		}
#else
		return_val = TD_ERR;
#endif	/* _SYSCALL32 */
	}

	if (return_val == TD_OK && (key < 1 || key >= maxkey))
		return_val = TD_NOTSD;
	if (return_val != TD_OK || key >= nkey) {
		/* NULL has already been stored in data_pp */
		(void) ps_pcontinue(ph_p);
		ph_unlock(ta_p);
		return (return_val);
	}

	/*
	 * Read the value from the thread's tsd array.
	 */
	if (ta_p->model == PR_MODEL_NATIVE) {
		void *value;

		if (ps_pdread(ph_p, tsd_paddr + key * sizeof (void *),
		    &value, sizeof (value)) != PS_OK)
			return_val = TD_DBERR;
		else
			*data_pp = value;
#if defined(_LP64) && defined(_SYSCALL32)
	} else {
		caddr32_t value32;

		if (ps_pdread(ph_p, tsd_paddr + key * sizeof (caddr32_t),
		    &value32, sizeof (value32)) != PS_OK)
			return_val = TD_DBERR;
		else
			*data_pp = (void *)(uintptr_t)value32;
#endif	/* _SYSCALL32 */
	}

	(void) ps_pcontinue(ph_p);
	ph_unlock(ta_p);
	return (return_val);
}

/*
 * Get the base address of a thread's thread local storage (TLS) block
 * for the module (executable or shared object) identified by 'moduleid'.
 */
#pragma weak td_thr_tlsbase = __td_thr_tlsbase
td_err_e
__td_thr_tlsbase(td_thrhandle_t *th_p, ulong_t moduleid, psaddr_t *base)
{
	struct ps_prochandle *ph_p;
	td_thragent_t *ta_p;
	td_err_e return_val;

	if (base == NULL)
		return (TD_ERR);
	*base = NULL;
	if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
		return (return_val);
	ta_p = th_p->th_ta_p;
	/* Stop the process so the TLS structures cannot change underfoot. */
	if (ps_pstop(ph_p) != PS_OK) {
		ph_unlock(ta_p);
		return (TD_DBERR);
	}

	if (ta_p->model == PR_MODEL_NATIVE) {
		ulwp_t *ulwp = (ulwp_t *)th_p->th_unique;
		tls_metadata_t tls_metadata;
		TLS_modinfo tlsmod;
		tls_t tls;

		/* Fetch the process-wide TLS metadata from the uberdata. */
		if (ps_pdread(ph_p,
		    ta_p->uberdata_addr + offsetof(uberdata_t, tls_metadata),
		    &tls_metadata, sizeof (tls_metadata)) != PS_OK)
			return_val = TD_DBERR;
		else if (moduleid >= tls_metadata.tls_modinfo.tls_size)
			return_val = TD_NOTLS;
		/* Fetch this module's TLS_modinfo entry. */
		else if (ps_pdread(ph_p,
		    (psaddr_t)((TLS_modinfo *)
		    tls_metadata.tls_modinfo.tls_data + moduleid),
		    &tlsmod, sizeof (tlsmod)) != PS_OK)
			return_val = TD_DBERR;
		else if (tlsmod.tm_memsz == 0)
			return_val = TD_NOTLS;
		/* Static TLS: fixed negative offset from the ulwp address. */
		else if (tlsmod.tm_flags & TM_FLG_STATICTLS)
			*base = (psaddr_t)ulwp - tlsmod.tm_stattlsoffset;
		else if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_tls,
		    &tls, sizeof (tls)) != PS_OK)
			return_val = TD_DBERR;
		/* Dynamic TLS not yet allocated by this thread. */
		else if (moduleid >= tls.tls_size)
			return_val = TD_TLSDEFER;
		else if (ps_pdread(ph_p,
		    (psaddr_t)((tls_t *)tls.tls_data + moduleid),
		    &tls, sizeof (tls)) != PS_OK)
			return_val = TD_DBERR;
		else if (tls.tls_size == 0)
			return_val = TD_TLSDEFER;
		else
			*base = (psaddr_t)tls.tls_data;
	} else {
#if defined(_LP64) && defined(_SYSCALL32)
		/* Same as above, using the 32-bit target's layouts. */
		ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique;
		tls_metadata32_t tls_metadata;
		TLS_modinfo32 tlsmod;
		tls32_t tls;

		if (ps_pdread(ph_p,
		    ta_p->uberdata_addr + offsetof(uberdata32_t, tls_metadata),
		    &tls_metadata, sizeof (tls_metadata)) != PS_OK)
			return_val = TD_DBERR;
		else if (moduleid >= tls_metadata.tls_modinfo.tls_size)
			return_val = TD_NOTLS;
		else if (ps_pdread(ph_p,
		    (psaddr_t)((TLS_modinfo32 *)
		    (uintptr_t)tls_metadata.tls_modinfo.tls_data + moduleid),
		    &tlsmod, sizeof (tlsmod)) != PS_OK)
			return_val = TD_DBERR;
		else if (tlsmod.tm_memsz == 0)
			return_val = TD_NOTLS;
		else if (tlsmod.tm_flags & TM_FLG_STATICTLS)
			*base = (psaddr_t)ulwp - tlsmod.tm_stattlsoffset;
		else if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_tls,
		    &tls, sizeof (tls)) != PS_OK)
			return_val = TD_DBERR;
		else if (moduleid >= tls.tls_size)
			return_val = TD_TLSDEFER;
		else if (ps_pdread(ph_p,
		    (psaddr_t)((tls32_t *)(uintptr_t)tls.tls_data + moduleid),
		    &tls, sizeof (tls)) != PS_OK)
			return_val = TD_DBERR;
		else if (tls.tls_size == 0)
			return_val = TD_TLSDEFER;
		else
			*base = (psaddr_t)tls.tls_data;
#else
		return_val = TD_ERR;
#endif	/* _SYSCALL32 */
	}

	(void) ps_pcontinue(ph_p);
	ph_unlock(ta_p);
	return (return_val);
}

/*
 * Change a thread's priority to the value specified by ti_pri.
 * Currently unused by dbx.
2337*0Sstevel@tonic-gate */ 2338*0Sstevel@tonic-gate #pragma weak td_thr_setprio = __td_thr_setprio 2339*0Sstevel@tonic-gate td_err_e 2340*0Sstevel@tonic-gate __td_thr_setprio(td_thrhandle_t *th_p, int ti_pri) 2341*0Sstevel@tonic-gate { 2342*0Sstevel@tonic-gate struct ps_prochandle *ph_p; 2343*0Sstevel@tonic-gate pri_t priority = ti_pri; 2344*0Sstevel@tonic-gate td_err_e return_val = TD_OK; 2345*0Sstevel@tonic-gate 2346*0Sstevel@tonic-gate if (ti_pri < THREAD_MIN_PRIORITY || ti_pri > THREAD_MAX_PRIORITY) 2347*0Sstevel@tonic-gate return (TD_ERR); 2348*0Sstevel@tonic-gate if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL) 2349*0Sstevel@tonic-gate return (return_val); 2350*0Sstevel@tonic-gate 2351*0Sstevel@tonic-gate if (th_p->th_ta_p->model == PR_MODEL_NATIVE) { 2352*0Sstevel@tonic-gate ulwp_t *ulwp = (ulwp_t *)th_p->th_unique; 2353*0Sstevel@tonic-gate 2354*0Sstevel@tonic-gate if (ps_pdwrite(ph_p, (psaddr_t)&ulwp->ul_pri, 2355*0Sstevel@tonic-gate &priority, sizeof (priority)) != PS_OK) 2356*0Sstevel@tonic-gate return_val = TD_DBERR; 2357*0Sstevel@tonic-gate } else { 2358*0Sstevel@tonic-gate #if defined(_LP64) && defined(_SYSCALL32) 2359*0Sstevel@tonic-gate ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique; 2360*0Sstevel@tonic-gate 2361*0Sstevel@tonic-gate if (ps_pdwrite(ph_p, (psaddr_t)&ulwp->ul_pri, 2362*0Sstevel@tonic-gate &priority, sizeof (priority)) != PS_OK) 2363*0Sstevel@tonic-gate return_val = TD_DBERR; 2364*0Sstevel@tonic-gate #else 2365*0Sstevel@tonic-gate return_val = TD_ERR; 2366*0Sstevel@tonic-gate #endif /* _SYSCALL32 */ 2367*0Sstevel@tonic-gate } 2368*0Sstevel@tonic-gate 2369*0Sstevel@tonic-gate ph_unlock(th_p->th_ta_p); 2370*0Sstevel@tonic-gate return (return_val); 2371*0Sstevel@tonic-gate } 2372*0Sstevel@tonic-gate 2373*0Sstevel@tonic-gate /* 2374*0Sstevel@tonic-gate * This structure links td_thr_lockowner and the lowner_cb callback function. 
2375*0Sstevel@tonic-gate */ 2376*0Sstevel@tonic-gate typedef struct { 2377*0Sstevel@tonic-gate td_sync_iter_f *owner_cb; 2378*0Sstevel@tonic-gate void *owner_cb_arg; 2379*0Sstevel@tonic-gate td_thrhandle_t *th_p; 2380*0Sstevel@tonic-gate } lowner_cb_ctl_t; 2381*0Sstevel@tonic-gate 2382*0Sstevel@tonic-gate static int 2383*0Sstevel@tonic-gate lowner_cb(const td_synchandle_t *sh_p, void *arg) 2384*0Sstevel@tonic-gate { 2385*0Sstevel@tonic-gate lowner_cb_ctl_t *ocb = arg; 2386*0Sstevel@tonic-gate int trunc = 0; 2387*0Sstevel@tonic-gate union { 2388*0Sstevel@tonic-gate rwlock_t rwl; 2389*0Sstevel@tonic-gate mutex_t mx; 2390*0Sstevel@tonic-gate } rw_m; 2391*0Sstevel@tonic-gate 2392*0Sstevel@tonic-gate if (ps_pdread(sh_p->sh_ta_p->ph_p, sh_p->sh_unique, 2393*0Sstevel@tonic-gate &rw_m, sizeof (rw_m)) != PS_OK) { 2394*0Sstevel@tonic-gate trunc = 1; 2395*0Sstevel@tonic-gate if (ps_pdread(sh_p->sh_ta_p->ph_p, sh_p->sh_unique, 2396*0Sstevel@tonic-gate &rw_m.mx, sizeof (rw_m.mx)) != PS_OK) 2397*0Sstevel@tonic-gate return (0); 2398*0Sstevel@tonic-gate } 2399*0Sstevel@tonic-gate if (rw_m.mx.mutex_magic == MUTEX_MAGIC && 2400*0Sstevel@tonic-gate rw_m.mx.mutex_owner == ocb->th_p->th_unique) 2401*0Sstevel@tonic-gate return ((ocb->owner_cb)(sh_p, ocb->owner_cb_arg)); 2402*0Sstevel@tonic-gate if (!trunc && rw_m.rwl.magic == RWL_MAGIC) { 2403*0Sstevel@tonic-gate mutex_t *rwlock = &rw_m.rwl.mutex; 2404*0Sstevel@tonic-gate if (rwlock->mutex_owner == ocb->th_p->th_unique) 2405*0Sstevel@tonic-gate return ((ocb->owner_cb)(sh_p, ocb->owner_cb_arg)); 2406*0Sstevel@tonic-gate } 2407*0Sstevel@tonic-gate return (0); 2408*0Sstevel@tonic-gate } 2409*0Sstevel@tonic-gate 2410*0Sstevel@tonic-gate /* 2411*0Sstevel@tonic-gate * Iterate over the set of locks owned by a specified thread. 2412*0Sstevel@tonic-gate * If cb returns a non-zero value, terminate iterations. 
2413*0Sstevel@tonic-gate */ 2414*0Sstevel@tonic-gate #pragma weak td_thr_lockowner = __td_thr_lockowner 2415*0Sstevel@tonic-gate td_err_e 2416*0Sstevel@tonic-gate __td_thr_lockowner(const td_thrhandle_t *th_p, td_sync_iter_f *cb, 2417*0Sstevel@tonic-gate void *cb_data) 2418*0Sstevel@tonic-gate { 2419*0Sstevel@tonic-gate td_thragent_t *ta_p; 2420*0Sstevel@tonic-gate td_err_e return_val; 2421*0Sstevel@tonic-gate lowner_cb_ctl_t lcb; 2422*0Sstevel@tonic-gate 2423*0Sstevel@tonic-gate /* 2424*0Sstevel@tonic-gate * Just sanity checks. 2425*0Sstevel@tonic-gate */ 2426*0Sstevel@tonic-gate if (ph_lock_th((td_thrhandle_t *)th_p, &return_val) == NULL) 2427*0Sstevel@tonic-gate return (return_val); 2428*0Sstevel@tonic-gate ta_p = th_p->th_ta_p; 2429*0Sstevel@tonic-gate ph_unlock(ta_p); 2430*0Sstevel@tonic-gate 2431*0Sstevel@tonic-gate lcb.owner_cb = cb; 2432*0Sstevel@tonic-gate lcb.owner_cb_arg = cb_data; 2433*0Sstevel@tonic-gate lcb.th_p = (td_thrhandle_t *)th_p; 2434*0Sstevel@tonic-gate return (__td_ta_sync_iter(ta_p, lowner_cb, &lcb)); 2435*0Sstevel@tonic-gate } 2436*0Sstevel@tonic-gate 2437*0Sstevel@tonic-gate /* 2438*0Sstevel@tonic-gate * If a thread is asleep on a synchronization variable, 2439*0Sstevel@tonic-gate * then get the synchronization handle. 
2440*0Sstevel@tonic-gate */ 2441*0Sstevel@tonic-gate #pragma weak td_thr_sleepinfo = __td_thr_sleepinfo 2442*0Sstevel@tonic-gate td_err_e 2443*0Sstevel@tonic-gate __td_thr_sleepinfo(const td_thrhandle_t *th_p, td_synchandle_t *sh_p) 2444*0Sstevel@tonic-gate { 2445*0Sstevel@tonic-gate struct ps_prochandle *ph_p; 2446*0Sstevel@tonic-gate td_err_e return_val = TD_OK; 2447*0Sstevel@tonic-gate uintptr_t wchan; 2448*0Sstevel@tonic-gate 2449*0Sstevel@tonic-gate if (sh_p == NULL) 2450*0Sstevel@tonic-gate return (TD_ERR); 2451*0Sstevel@tonic-gate if ((ph_p = ph_lock_th((td_thrhandle_t *)th_p, &return_val)) == NULL) 2452*0Sstevel@tonic-gate return (return_val); 2453*0Sstevel@tonic-gate 2454*0Sstevel@tonic-gate /* 2455*0Sstevel@tonic-gate * No need to stop the process for a simple read. 2456*0Sstevel@tonic-gate */ 2457*0Sstevel@tonic-gate if (th_p->th_ta_p->model == PR_MODEL_NATIVE) { 2458*0Sstevel@tonic-gate ulwp_t *ulwp = (ulwp_t *)th_p->th_unique; 2459*0Sstevel@tonic-gate 2460*0Sstevel@tonic-gate if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_wchan, 2461*0Sstevel@tonic-gate &wchan, sizeof (wchan)) != PS_OK) 2462*0Sstevel@tonic-gate return_val = TD_DBERR; 2463*0Sstevel@tonic-gate } else { 2464*0Sstevel@tonic-gate #if defined(_LP64) && defined(_SYSCALL32) 2465*0Sstevel@tonic-gate ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique; 2466*0Sstevel@tonic-gate caddr32_t wchan32; 2467*0Sstevel@tonic-gate 2468*0Sstevel@tonic-gate if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_wchan, 2469*0Sstevel@tonic-gate &wchan32, sizeof (wchan32)) != PS_OK) 2470*0Sstevel@tonic-gate return_val = TD_DBERR; 2471*0Sstevel@tonic-gate wchan = wchan32; 2472*0Sstevel@tonic-gate #else 2473*0Sstevel@tonic-gate return_val = TD_ERR; 2474*0Sstevel@tonic-gate #endif /* _SYSCALL32 */ 2475*0Sstevel@tonic-gate } 2476*0Sstevel@tonic-gate 2477*0Sstevel@tonic-gate if (return_val != TD_OK || wchan == NULL) { 2478*0Sstevel@tonic-gate sh_p->sh_ta_p = NULL; 2479*0Sstevel@tonic-gate sh_p->sh_unique = NULL; 2480*0Sstevel@tonic-gate if 
(return_val == TD_OK) 2481*0Sstevel@tonic-gate return_val = TD_ERR; 2482*0Sstevel@tonic-gate } else { 2483*0Sstevel@tonic-gate sh_p->sh_ta_p = th_p->th_ta_p; 2484*0Sstevel@tonic-gate sh_p->sh_unique = (psaddr_t)wchan; 2485*0Sstevel@tonic-gate } 2486*0Sstevel@tonic-gate 2487*0Sstevel@tonic-gate ph_unlock(th_p->th_ta_p); 2488*0Sstevel@tonic-gate return (return_val); 2489*0Sstevel@tonic-gate } 2490*0Sstevel@tonic-gate 2491*0Sstevel@tonic-gate /* 2492*0Sstevel@tonic-gate * Which thread is running on an lwp? 2493*0Sstevel@tonic-gate */ 2494*0Sstevel@tonic-gate #pragma weak td_ta_map_lwp2thr = __td_ta_map_lwp2thr 2495*0Sstevel@tonic-gate td_err_e 2496*0Sstevel@tonic-gate __td_ta_map_lwp2thr(td_thragent_t *ta_p, lwpid_t lwpid, 2497*0Sstevel@tonic-gate td_thrhandle_t *th_p) 2498*0Sstevel@tonic-gate { 2499*0Sstevel@tonic-gate return (__td_ta_map_id2thr(ta_p, lwpid, th_p)); 2500*0Sstevel@tonic-gate } 2501*0Sstevel@tonic-gate 2502*0Sstevel@tonic-gate /* 2503*0Sstevel@tonic-gate * Common code for td_sync_get_info() and td_sync_get_stats() 2504*0Sstevel@tonic-gate */ 2505*0Sstevel@tonic-gate static td_err_e 2506*0Sstevel@tonic-gate sync_get_info_common(const td_synchandle_t *sh_p, struct ps_prochandle *ph_p, 2507*0Sstevel@tonic-gate td_syncinfo_t *si_p) 2508*0Sstevel@tonic-gate { 2509*0Sstevel@tonic-gate int trunc = 0; 2510*0Sstevel@tonic-gate td_so_un_t generic_so; 2511*0Sstevel@tonic-gate 2512*0Sstevel@tonic-gate /* 2513*0Sstevel@tonic-gate * Determine the sync. object type; a little type fudgery here. 2514*0Sstevel@tonic-gate * First attempt to read the whole union. If that fails, attempt 2515*0Sstevel@tonic-gate * to read just the condvar. A condvar is the smallest sync. object. 
2516*0Sstevel@tonic-gate */ 2517*0Sstevel@tonic-gate if (ps_pdread(ph_p, sh_p->sh_unique, 2518*0Sstevel@tonic-gate &generic_so, sizeof (generic_so)) != PS_OK) { 2519*0Sstevel@tonic-gate trunc = 1; 2520*0Sstevel@tonic-gate if (ps_pdread(ph_p, sh_p->sh_unique, &generic_so.condition, 2521*0Sstevel@tonic-gate sizeof (generic_so.condition)) != PS_OK) 2522*0Sstevel@tonic-gate return (TD_DBERR); 2523*0Sstevel@tonic-gate } 2524*0Sstevel@tonic-gate 2525*0Sstevel@tonic-gate switch (generic_so.condition.cond_magic) { 2526*0Sstevel@tonic-gate case MUTEX_MAGIC: 2527*0Sstevel@tonic-gate if (trunc && ps_pdread(ph_p, sh_p->sh_unique, 2528*0Sstevel@tonic-gate &generic_so.lock, sizeof (generic_so.lock)) != PS_OK) 2529*0Sstevel@tonic-gate return (TD_DBERR); 2530*0Sstevel@tonic-gate si_p->si_type = TD_SYNC_MUTEX; 2531*0Sstevel@tonic-gate si_p->si_shared_type = generic_so.lock.mutex_type; 2532*0Sstevel@tonic-gate (void) memcpy(si_p->si_flags, &generic_so.lock.mutex_flag, 2533*0Sstevel@tonic-gate sizeof (generic_so.lock.mutex_flag)); 2534*0Sstevel@tonic-gate si_p->si_state.mutex_locked = 2535*0Sstevel@tonic-gate (generic_so.lock.mutex_lockw != 0); 2536*0Sstevel@tonic-gate si_p->si_size = sizeof (generic_so.lock); 2537*0Sstevel@tonic-gate si_p->si_has_waiters = generic_so.lock.mutex_waiters; 2538*0Sstevel@tonic-gate si_p->si_rcount = generic_so.lock.mutex_rcount; 2539*0Sstevel@tonic-gate si_p->si_prioceiling = generic_so.lock.mutex_ceiling; 2540*0Sstevel@tonic-gate if (si_p->si_state.mutex_locked) { 2541*0Sstevel@tonic-gate if (si_p->si_shared_type & 2542*0Sstevel@tonic-gate (USYNC_PROCESS | USYNC_PROCESS_ROBUST)) 2543*0Sstevel@tonic-gate si_p->si_ownerpid = 2544*0Sstevel@tonic-gate generic_so.lock.mutex_ownerpid; 2545*0Sstevel@tonic-gate si_p->si_owner.th_ta_p = sh_p->sh_ta_p; 2546*0Sstevel@tonic-gate si_p->si_owner.th_unique = generic_so.lock.mutex_owner; 2547*0Sstevel@tonic-gate } 2548*0Sstevel@tonic-gate break; 2549*0Sstevel@tonic-gate case COND_MAGIC: 2550*0Sstevel@tonic-gate 
si_p->si_type = TD_SYNC_COND; 2551*0Sstevel@tonic-gate si_p->si_shared_type = generic_so.condition.cond_type; 2552*0Sstevel@tonic-gate (void) memcpy(si_p->si_flags, generic_so.condition.flags.flag, 2553*0Sstevel@tonic-gate sizeof (generic_so.condition.flags.flag)); 2554*0Sstevel@tonic-gate si_p->si_size = sizeof (generic_so.condition); 2555*0Sstevel@tonic-gate si_p->si_has_waiters = 2556*0Sstevel@tonic-gate (generic_so.condition.cond_waiters_user | 2557*0Sstevel@tonic-gate generic_so.condition.cond_waiters_kernel)? 1 : 0; 2558*0Sstevel@tonic-gate break; 2559*0Sstevel@tonic-gate case SEMA_MAGIC: 2560*0Sstevel@tonic-gate if (trunc && ps_pdread(ph_p, sh_p->sh_unique, 2561*0Sstevel@tonic-gate &generic_so.semaphore, sizeof (generic_so.semaphore)) 2562*0Sstevel@tonic-gate != PS_OK) 2563*0Sstevel@tonic-gate return (TD_DBERR); 2564*0Sstevel@tonic-gate si_p->si_type = TD_SYNC_SEMA; 2565*0Sstevel@tonic-gate si_p->si_shared_type = generic_so.semaphore.type; 2566*0Sstevel@tonic-gate si_p->si_state.sem_count = generic_so.semaphore.count; 2567*0Sstevel@tonic-gate si_p->si_size = sizeof (generic_so.semaphore); 2568*0Sstevel@tonic-gate si_p->si_has_waiters = 2569*0Sstevel@tonic-gate ((lwp_sema_t *)&generic_so.semaphore)->flags[7]; 2570*0Sstevel@tonic-gate /* this is useless but the old interface provided it */ 2571*0Sstevel@tonic-gate si_p->si_data = (psaddr_t)generic_so.semaphore.count; 2572*0Sstevel@tonic-gate break; 2573*0Sstevel@tonic-gate case RWL_MAGIC: 2574*0Sstevel@tonic-gate if (trunc && ps_pdread(ph_p, sh_p->sh_unique, 2575*0Sstevel@tonic-gate &generic_so.rwlock, sizeof (generic_so.rwlock)) != PS_OK) 2576*0Sstevel@tonic-gate return (TD_DBERR); 2577*0Sstevel@tonic-gate si_p->si_type = TD_SYNC_RWLOCK; 2578*0Sstevel@tonic-gate si_p->si_shared_type = generic_so.rwlock.rwlock_type; 2579*0Sstevel@tonic-gate si_p->si_size = sizeof (generic_so.rwlock); 2580*0Sstevel@tonic-gate if (generic_so.rwlock.rwlock_type == USYNC_PROCESS) { 2581*0Sstevel@tonic-gate uint32_t *rwstate = 
2582*0Sstevel@tonic-gate (uint32_t *)&si_p->si_state.nreaders; 2583*0Sstevel@tonic-gate 2584*0Sstevel@tonic-gate if (*rwstate & URW_WRITE_LOCKED) { 2585*0Sstevel@tonic-gate si_p->si_state.nreaders = -1; 2586*0Sstevel@tonic-gate si_p->si_is_wlock = 1; 2587*0Sstevel@tonic-gate si_p->si_owner.th_ta_p = sh_p->sh_ta_p; 2588*0Sstevel@tonic-gate si_p->si_owner.th_unique = 2589*0Sstevel@tonic-gate generic_so.rwlock.rwlock_owner; 2590*0Sstevel@tonic-gate } else if (*rwstate & URW_READERS_MASK) 2591*0Sstevel@tonic-gate si_p->si_state.nreaders = 2592*0Sstevel@tonic-gate *rwstate & URW_READERS_MASK; 2593*0Sstevel@tonic-gate else 2594*0Sstevel@tonic-gate si_p->si_state.nreaders = 0; 2595*0Sstevel@tonic-gate si_p->si_has_waiters = (*rwstate & URW_HAS_WAITERS); 2596*0Sstevel@tonic-gate } else { 2597*0Sstevel@tonic-gate si_p->si_state.nreaders = generic_so.rwlock.readers; 2598*0Sstevel@tonic-gate si_p->si_has_waiters = 2599*0Sstevel@tonic-gate generic_so.rwlock.rwlock_mwaiters; 2600*0Sstevel@tonic-gate if (si_p->si_state.nreaders == -1) { 2601*0Sstevel@tonic-gate si_p->si_is_wlock = 1; 2602*0Sstevel@tonic-gate si_p->si_owner.th_ta_p = sh_p->sh_ta_p; 2603*0Sstevel@tonic-gate si_p->si_owner.th_unique = 2604*0Sstevel@tonic-gate generic_so.rwlock.rwlock_mowner; 2605*0Sstevel@tonic-gate } 2606*0Sstevel@tonic-gate } 2607*0Sstevel@tonic-gate /* this is useless but the old interface provided it */ 2608*0Sstevel@tonic-gate si_p->si_data = (psaddr_t)generic_so.rwlock.readers; 2609*0Sstevel@tonic-gate break; 2610*0Sstevel@tonic-gate default: 2611*0Sstevel@tonic-gate return (TD_BADSH); 2612*0Sstevel@tonic-gate } 2613*0Sstevel@tonic-gate 2614*0Sstevel@tonic-gate si_p->si_ta_p = sh_p->sh_ta_p; 2615*0Sstevel@tonic-gate si_p->si_sv_addr = sh_p->sh_unique; 2616*0Sstevel@tonic-gate return (TD_OK); 2617*0Sstevel@tonic-gate } 2618*0Sstevel@tonic-gate 2619*0Sstevel@tonic-gate /* 2620*0Sstevel@tonic-gate * Given a synchronization handle, fill in the 2621*0Sstevel@tonic-gate * information for the 
synchronization variable into *si_p. 2622*0Sstevel@tonic-gate */ 2623*0Sstevel@tonic-gate #pragma weak td_sync_get_info = __td_sync_get_info 2624*0Sstevel@tonic-gate td_err_e 2625*0Sstevel@tonic-gate __td_sync_get_info(const td_synchandle_t *sh_p, td_syncinfo_t *si_p) 2626*0Sstevel@tonic-gate { 2627*0Sstevel@tonic-gate struct ps_prochandle *ph_p; 2628*0Sstevel@tonic-gate td_err_e return_val; 2629*0Sstevel@tonic-gate 2630*0Sstevel@tonic-gate if (si_p == NULL) 2631*0Sstevel@tonic-gate return (TD_ERR); 2632*0Sstevel@tonic-gate (void) memset(si_p, 0, sizeof (*si_p)); 2633*0Sstevel@tonic-gate if ((ph_p = ph_lock_sh(sh_p, &return_val)) == NULL) 2634*0Sstevel@tonic-gate return (return_val); 2635*0Sstevel@tonic-gate if (ps_pstop(ph_p) != PS_OK) { 2636*0Sstevel@tonic-gate ph_unlock(sh_p->sh_ta_p); 2637*0Sstevel@tonic-gate return (TD_DBERR); 2638*0Sstevel@tonic-gate } 2639*0Sstevel@tonic-gate 2640*0Sstevel@tonic-gate return_val = sync_get_info_common(sh_p, ph_p, si_p); 2641*0Sstevel@tonic-gate 2642*0Sstevel@tonic-gate (void) ps_pcontinue(ph_p); 2643*0Sstevel@tonic-gate ph_unlock(sh_p->sh_ta_p); 2644*0Sstevel@tonic-gate return (return_val); 2645*0Sstevel@tonic-gate } 2646*0Sstevel@tonic-gate 2647*0Sstevel@tonic-gate static uint_t 2648*0Sstevel@tonic-gate tdb_addr_hash64(uint64_t addr) 2649*0Sstevel@tonic-gate { 2650*0Sstevel@tonic-gate uint64_t value60 = (addr >> 4); 2651*0Sstevel@tonic-gate uint32_t value30 = (value60 >> 30) ^ (value60 & 0x3fffffff); 2652*0Sstevel@tonic-gate return ((value30 >> 15) ^ (value30 & 0x7fff)); 2653*0Sstevel@tonic-gate } 2654*0Sstevel@tonic-gate 2655*0Sstevel@tonic-gate static uint_t 2656*0Sstevel@tonic-gate tdb_addr_hash32(uint64_t addr) 2657*0Sstevel@tonic-gate { 2658*0Sstevel@tonic-gate uint32_t value30 = (addr >> 2); /* 30 bits */ 2659*0Sstevel@tonic-gate return ((value30 >> 15) ^ (value30 & 0x7fff)); 2660*0Sstevel@tonic-gate } 2661*0Sstevel@tonic-gate 2662*0Sstevel@tonic-gate static td_err_e 2663*0Sstevel@tonic-gate 
read_sync_stats(td_thragent_t *ta_p, psaddr_t hash_table,
	psaddr_t sync_obj_addr, tdb_sync_stats_t *sync_stats)
{
	/*
	 * Look up the tdb_sync_stats_t record for sync_obj_addr in the
	 * target process's synch-object hash table and copy it into
	 * *sync_stats.  If no record exists, *sync_stats is zeroed.
	 * Returns TD_OK unless reading the target's address space
	 * fails, in which case TD_DBERR is returned.
	 */
	psaddr_t next_desc;
	uint64_t first;
	uint_t ix;

	/*
	 * Compute the hash table index from the synch object's address,
	 * using the hash function that matches the target's data model.
	 */
	if (ta_p->model == PR_MODEL_LP64)
		ix = tdb_addr_hash64(sync_obj_addr);
	else
		ix = tdb_addr_hash32(sync_obj_addr);

	/*
	 * Get the address of the first element in the linked list.
	 * The table is an array of 64-bit bucket heads regardless of
	 * the target's data model.
	 */
	if (ps_pdread(ta_p->ph_p, hash_table + ix * sizeof (uint64_t),
	    &first, sizeof (first)) != PS_OK)
		return (TD_DBERR);

	/*
	 * Search the linked list for an entry for the synch object.
	 */
	for (next_desc = (psaddr_t)first; next_desc != NULL;
	    next_desc = (psaddr_t)sync_stats->next) {
		if (ps_pdread(ta_p->ph_p, next_desc,
		    sync_stats, sizeof (*sync_stats)) != PS_OK)
			return (TD_DBERR);
		if (sync_stats->sync_addr == sync_obj_addr)
			return (TD_OK);
	}

	/* No record for this object; report all-zero statistics. */
	(void) memset(sync_stats, 0, sizeof (*sync_stats));
	return (TD_OK);
}

/*
 * Given a synchronization handle, fill in the
 * statistics for the synchronization variable into *ss_p.
 */
#pragma weak td_sync_get_stats = __td_sync_get_stats
td_err_e
__td_sync_get_stats(const td_synchandle_t *sh_p, td_syncstats_t *ss_p)
{
	struct ps_prochandle *ph_p;
	td_thragent_t *ta_p;
	td_err_e return_val;
	register_sync_t enable;
	psaddr_t hashaddr;
	tdb_sync_stats_t sync_stats;
	size_t ix;

	/* Validate and clear the caller's buffer. */
	if (ss_p == NULL)
		return (TD_ERR);
	(void) memset(ss_p, 0, sizeof (*ss_p));

	/* Lock the agent and stop the target while we read its state. */
	if ((ph_p = ph_lock_sh(sh_p, &return_val)) == NULL)
		return (return_val);
	ta_p = sh_p->sh_ta_p;
	if (ps_pstop(ph_p) != PS_OK) {
		ph_unlock(ta_p);
		return (TD_DBERR);
	}
	/*
	 * First fill in the generic si_* information; a TD_BADSH result
	 * (unrecognized object) is corrected below once the recorded
	 * statistics tell us the object's actual type.
	 */
	if ((return_val = sync_get_info_common(sh_p, ph_p, &ss_p->ss_info))
	    != TD_OK) {
		if (return_val != TD_BADSH)
			goto out;
		/* we can correct TD_BADSH */
		(void) memset(&ss_p->ss_info, 0, sizeof (ss_p->ss_info));
		ss_p->ss_info.si_ta_p = sh_p->sh_ta_p;
		ss_p->ss_info.si_sv_addr = sh_p->sh_unique;
		/* we correct si_type and si_size below */
		return_val = TD_OK;
	}

	/*
	 * If statistics gathering is not enabled in the target,
	 * return the (zeroed) statistics as-is.
	 */
	if (ps_pdread(ph_p, ta_p->tdb_register_sync_addr,
	    &enable, sizeof (enable)) != PS_OK) {
		return_val = TD_DBERR;
		goto out;
	}
	if (enable != REGISTER_SYNC_ON)
		goto out;

	/*
	 * Get the address of the hash table in the target process.
2749*0Sstevel@tonic-gate */ 2750*0Sstevel@tonic-gate if (ta_p->model == PR_MODEL_NATIVE) { 2751*0Sstevel@tonic-gate if (ps_pdread(ph_p, ta_p->uberdata_addr + 2752*0Sstevel@tonic-gate offsetof(uberdata_t, tdb.tdb_sync_addr_hash), 2753*0Sstevel@tonic-gate &hashaddr, sizeof (&hashaddr)) != PS_OK) { 2754*0Sstevel@tonic-gate return_val = TD_DBERR; 2755*0Sstevel@tonic-gate goto out; 2756*0Sstevel@tonic-gate } 2757*0Sstevel@tonic-gate } else { 2758*0Sstevel@tonic-gate #if defined(_LP64) && defined(_SYSCALL32) 2759*0Sstevel@tonic-gate caddr32_t addr; 2760*0Sstevel@tonic-gate 2761*0Sstevel@tonic-gate if (ps_pdread(ph_p, ta_p->uberdata_addr + 2762*0Sstevel@tonic-gate offsetof(uberdata32_t, tdb.tdb_sync_addr_hash), 2763*0Sstevel@tonic-gate &addr, sizeof (addr)) != PS_OK) { 2764*0Sstevel@tonic-gate return_val = TD_DBERR; 2765*0Sstevel@tonic-gate goto out; 2766*0Sstevel@tonic-gate } 2767*0Sstevel@tonic-gate hashaddr = addr; 2768*0Sstevel@tonic-gate #else 2769*0Sstevel@tonic-gate return_val = TD_ERR; 2770*0Sstevel@tonic-gate goto out; 2771*0Sstevel@tonic-gate #endif /* _SYSCALL32 */ 2772*0Sstevel@tonic-gate } 2773*0Sstevel@tonic-gate 2774*0Sstevel@tonic-gate if (hashaddr == 0) 2775*0Sstevel@tonic-gate return_val = TD_BADSH; 2776*0Sstevel@tonic-gate else 2777*0Sstevel@tonic-gate return_val = read_sync_stats(ta_p, hashaddr, 2778*0Sstevel@tonic-gate sh_p->sh_unique, &sync_stats); 2779*0Sstevel@tonic-gate if (return_val != TD_OK) 2780*0Sstevel@tonic-gate goto out; 2781*0Sstevel@tonic-gate 2782*0Sstevel@tonic-gate /* 2783*0Sstevel@tonic-gate * We have the hash table entry. Transfer the data to 2784*0Sstevel@tonic-gate * the td_syncstats_t structure provided by the caller. 
	 */
	switch (sync_stats.un.type) {
	case TDB_MUTEX:
	    {
		td_mutex_stats_t *msp = &ss_p->ss_un.mutex;

		ss_p->ss_info.si_type = TD_SYNC_MUTEX;
		ss_p->ss_info.si_size = sizeof (mutex_t);
		msp->mutex_lock =
		    sync_stats.un.mutex.mutex_lock;
		msp->mutex_sleep =
		    sync_stats.un.mutex.mutex_sleep;
		msp->mutex_sleep_time =
		    sync_stats.un.mutex.mutex_sleep_time;
		msp->mutex_hold_time =
		    sync_stats.un.mutex.mutex_hold_time;
		msp->mutex_try =
		    sync_stats.un.mutex.mutex_try;
		msp->mutex_try_fail =
		    sync_stats.un.mutex.mutex_try_fail;
		/*
		 * If the mutex lives inside libc's internal hash table,
		 * record its 1-based bucket index in mutex_internal.
		 */
		if (sync_stats.sync_addr >= ta_p->hash_table_addr &&
		    (ix = sync_stats.sync_addr - ta_p->hash_table_addr)
		    < ta_p->hash_size * sizeof (thr_hash_table_t))
			msp->mutex_internal =
			    ix / sizeof (thr_hash_table_t) + 1;
		break;
	    }
	case TDB_COND:
	    {
		td_cond_stats_t *csp = &ss_p->ss_un.cond;

		ss_p->ss_info.si_type = TD_SYNC_COND;
		ss_p->ss_info.si_size = sizeof (cond_t);
		csp->cond_wait =
		    sync_stats.un.cond.cond_wait;
		csp->cond_timedwait =
		    sync_stats.un.cond.cond_timedwait;
		csp->cond_wait_sleep_time =
		    sync_stats.un.cond.cond_wait_sleep_time;
		csp->cond_timedwait_sleep_time =
		    sync_stats.un.cond.cond_timedwait_sleep_time;
		csp->cond_timedwait_timeout =
		    sync_stats.un.cond.cond_timedwait_timeout;
		csp->cond_signal =
		    sync_stats.un.cond.cond_signal;
		csp->cond_broadcast =
		    sync_stats.un.cond.cond_broadcast;
		/*
		 * If the condvar lives inside libc's internal hash table,
		 * record its 1-based bucket index in cond_internal.
		 */
		if (sync_stats.sync_addr >= ta_p->hash_table_addr &&
		    (ix = sync_stats.sync_addr - ta_p->hash_table_addr)
		    < ta_p->hash_size * sizeof (thr_hash_table_t))
			csp->cond_internal =
			    ix / sizeof (thr_hash_table_t) + 1;
		break;
	    }
	case TDB_RWLOCK:
	    {
		psaddr_t cond_addr;
		tdb_sync_stats_t cond_stats;
		td_rwlock_stats_t *rwsp = &ss_p->ss_un.rwlock;

		ss_p->ss_info.si_type = TD_SYNC_RWLOCK;
		ss_p->ss_info.si_size = sizeof (rwlock_t);
		rwsp->rw_rdlock =
		    sync_stats.un.rwlock.rw_rdlock;
		/*
		 * Reader-side sleep statistics are recorded against the
		 * rwlock's embedded readercv condvar; look them up there.
		 */
		cond_addr = (psaddr_t)&((rwlock_t *)sh_p->sh_unique)->readercv;
		if (read_sync_stats(ta_p, hashaddr, cond_addr, &cond_stats)
		    == TD_OK) {
			rwsp->rw_rdlock_sleep =
			    cond_stats.un.cond.cond_wait;
			rwsp->rw_rdlock_sleep_time =
			    cond_stats.un.cond.cond_wait_sleep_time;
		}
		rwsp->rw_rdlock_try =
		    sync_stats.un.rwlock.rw_rdlock_try;
		rwsp->rw_rdlock_try_fail =
		    sync_stats.un.rwlock.rw_rdlock_try_fail;
		rwsp->rw_wrlock =
		    sync_stats.un.rwlock.rw_wrlock;
		/*
		 * Writer-side sleep statistics are recorded against the
		 * rwlock's embedded writercv condvar; look them up there.
		 */
		cond_addr = (psaddr_t)&((rwlock_t *)sh_p->sh_unique)->writercv;
		if (read_sync_stats(ta_p, hashaddr, cond_addr, &cond_stats)
		    == TD_OK) {
			rwsp->rw_wrlock_sleep =
			    cond_stats.un.cond.cond_wait;
			rwsp->rw_wrlock_sleep_time =
			    cond_stats.un.cond.cond_wait_sleep_time;
		}
		rwsp->rw_wrlock_hold_time =
		    sync_stats.un.rwlock.rw_wrlock_hold_time;
		rwsp->rw_wrlock_try =
		    sync_stats.un.rwlock.rw_wrlock_try;
		rwsp->rw_wrlock_try_fail =
		    sync_stats.un.rwlock.rw_wrlock_try_fail;
		break;
	    }
	case TDB_SEMA:
	    {
		td_sema_stats_t *ssp = &ss_p->ss_un.sema;

		ss_p->ss_info.si_type = TD_SYNC_SEMA;
		ss_p->ss_info.si_size = sizeof (sema_t);
		ssp->sema_wait =
		    sync_stats.un.sema.sema_wait;
		ssp->sema_wait_sleep =
		    sync_stats.un.sema.sema_wait_sleep;
		ssp->sema_wait_sleep_time =
		    sync_stats.un.sema.sema_wait_sleep_time;
		ssp->sema_trywait =
		    sync_stats.un.sema.sema_trywait;
		ssp->sema_trywait_fail =
		    sync_stats.un.sema.sema_trywait_fail;
		ssp->sema_post =
		    sync_stats.un.sema.sema_post;
		ssp->sema_max_count =
		    sync_stats.un.sema.sema_max_count;
		ssp->sema_min_count =
		    sync_stats.un.sema.sema_min_count;
		break;
	    }
	default:
		/* Unrecognized statistics record; report a bad handle. */
		return_val = TD_BADSH;
		break;
	}

out:
	(void) ps_pcontinue(ph_p);
	ph_unlock(ta_p);
	return (return_val);
}

/*
 * Change the state of a synchronization variable.
 *	1) mutex lock state set to value
 *	2) semaphore's count set to value
 *	3) writer's lock set to value
 *	4) reader's lock number of readers set to value
 * Currently unused by dbx.
 */
#pragma weak td_sync_setstate = __td_sync_setstate
td_err_e
__td_sync_setstate(const td_synchandle_t *sh_p, long lvalue)
{
	struct ps_prochandle *ph_p;
	int trunc = 0;	/* set if only the condvar-sized prefix was read */
	td_err_e return_val;
	td_so_un_t generic_so;
	int value = (int)lvalue;

	if ((ph_p = ph_lock_sh(sh_p, &return_val)) == NULL)
		return (return_val);
	if (ps_pstop(ph_p) != PS_OK) {
		ph_unlock(sh_p->sh_ta_p);
		return (TD_DBERR);
	}

	/*
	 * Read the synch.
	 * variable information.
	 * First attempt to read the whole union and if that fails
	 * fall back to reading only the smallest member, the condvar.
	 */
	if (ps_pdread(ph_p, sh_p->sh_unique, &generic_so,
	    sizeof (generic_so)) != PS_OK) {
		trunc = 1;
		if (ps_pdread(ph_p, sh_p->sh_unique, &generic_so.condition,
		    sizeof (generic_so.condition)) != PS_OK) {
			(void) ps_pcontinue(ph_p);
			ph_unlock(sh_p->sh_ta_p);
			return (TD_DBERR);
		}
	}

	/*
	 * Set the new value in the sync. variable, read the synch. variable
	 * information. from the process, reset its value and write it back.
	 * The magic number is dispatched on through the condvar member;
	 * this relies on the magic field occupying a common offset in
	 * every synch object type.
	 */
	switch (generic_so.condition.mutex_magic) {
	case MUTEX_MAGIC:
		/* If only the condvar prefix was read, fetch the full mutex. */
		if (trunc && ps_pdread(ph_p, sh_p->sh_unique,
		    &generic_so.lock, sizeof (generic_so.lock)) != PS_OK) {
			return_val = TD_DBERR;
			break;
		}
		generic_so.lock.mutex_lockw = (uint8_t)value;
		if (ps_pdwrite(ph_p, sh_p->sh_unique, &generic_so.lock,
		    sizeof (generic_so.lock)) != PS_OK)
			return_val = TD_DBERR;
		break;
	case SEMA_MAGIC:
		/* If only the condvar prefix was read, fetch the semaphore. */
		if (trunc && ps_pdread(ph_p, sh_p->sh_unique,
		    &generic_so.semaphore, sizeof (generic_so.semaphore))
		    != PS_OK) {
			return_val = TD_DBERR;
			break;
		}
		generic_so.semaphore.count = value;
		if (ps_pdwrite(ph_p, sh_p->sh_unique, &generic_so.semaphore,
		    sizeof (generic_so.semaphore)) != PS_OK)
			return_val = TD_DBERR;
		break;
	case COND_MAGIC:
		/* Operation not supported on a condition variable */
		return_val = TD_ERR;
		break;
	case RWL_MAGIC:
		/* If only the condvar prefix was read, fetch the rwlock. */
		if (trunc && ps_pdread(ph_p, sh_p->sh_unique,
		    &generic_so.rwlock, sizeof (generic_so.rwlock)) != PS_OK) {
			return_val = TD_DBERR;
			break;
		}
		if (generic_so.rwlock.rwlock_type == USYNC_PROCESS) {
			/*
			 * Process-shared rwlocks keep their state in a
			 * packed 32-bit word: negative value means write-
			 * locked, positive is the reader count, 0 unlocks.
			 */
			uint32_t *rwstate =
			    (uint32_t *)&generic_so.rwlock.readers;
			if (value < 0)
				*rwstate = URW_WRITE_LOCKED;
			else if (value > 0)
				*rwstate = (value & URW_READERS_MASK);
			else
				*rwstate = 0;
		} else
			generic_so.rwlock.readers = value;

		if (ps_pdwrite(ph_p, sh_p->sh_unique, &generic_so.rwlock,
		    sizeof (generic_so.rwlock)) != PS_OK)
			return_val = TD_DBERR;
		break;
	default:
		/* Bad sync.
		   object type */
		return_val = TD_BADSH;
		break;
	}

	(void) ps_pcontinue(ph_p);
	ph_unlock(sh_p->sh_ta_p);
	return (return_val);
}

/*
 * Bookkeeping passed through __td_ta_thr_iter() to waiters_cb():
 * the user's callback and argument, the synch object being waited
 * on, and an error code propagated out of the iteration.
 */
typedef struct {
	td_thr_iter_f *waiter_cb;	/* user's per-waiter callback */
	psaddr_t sync_obj_addr;		/* synch object of interest */
	uint16_t sync_magic;		/* its magic number */
	void *waiter_cb_arg;		/* user's callback argument */
	td_err_e errcode;		/* first error seen, else TD_OK */
} waiter_cb_ctl_t;

/*
 * Per-thread callback: read the thread's wait-channel (ul_wchan) from
 * the target and, if it matches the synch object we are interested in,
 * invoke the user's callback for this thread.
 */
static int
waiters_cb(const td_thrhandle_t *th_p, void *arg)
{
	td_thragent_t *ta_p = th_p->th_ta_p;
	struct ps_prochandle *ph_p = ta_p->ph_p;
	waiter_cb_ctl_t *wcb = arg;
	caddr_t wchan;

	if (ta_p->model == PR_MODEL_NATIVE) {
		ulwp_t *ulwp = (ulwp_t *)th_p->th_unique;

		if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_wchan,
		    &wchan, sizeof (wchan)) != PS_OK) {
			wcb->errcode = TD_DBERR;
			return (1);	/* non-zero terminates iteration */
		}
	} else {
#if defined(_LP64) && defined(_SYSCALL32)
		/* 32-bit target under a 64-bit debugger. */
		ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique;
		caddr32_t wchan32;

		if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_wchan,
		    &wchan32, sizeof (wchan32)) != PS_OK) {
			wcb->errcode = TD_DBERR;
			return (1);	/* non-zero terminates iteration */
		}
		wchan = (caddr_t)(uintptr_t)wchan32;
#else
		wcb->errcode = TD_ERR;
		return (1);
#endif	/* _SYSCALL32 */
	}

	/* Thread is not blocked on anything. */
	if (wchan == NULL)
		return (0);

	/* Blocked on our synch object: hand it to the user's callback. */
	if (wchan == (caddr_t)wcb->sync_obj_addr)
		return ((*wcb->waiter_cb)(th_p, wcb->waiter_cb_arg));

	return (0);
}

/*
 * For a given synchronization variable, iterate over the
 * set of waiting threads.  The call back function is passed
 * two parameters, a pointer to a thread handle and a pointer
 * to extra call back data.
 */
#pragma weak td_sync_waiters = __td_sync_waiters
td_err_e
__td_sync_waiters(const td_synchandle_t *sh_p, td_thr_iter_f *cb, void *cb_data)
{
	struct ps_prochandle *ph_p;
	waiter_cb_ctl_t wcb;
	td_err_e return_val;

	/*
	 * Read the object's magic number to verify it really is a
	 * synch object; the magic field is read through a mutex_t
	 * overlay of the handle's address.
	 */
	if ((ph_p = ph_lock_sh(sh_p, &return_val)) == NULL)
		return (return_val);
	if (ps_pdread(ph_p,
	    (psaddr_t)&((mutex_t *)sh_p->sh_unique)->mutex_magic,
	    (caddr_t)&wcb.sync_magic, sizeof (wcb.sync_magic)) != PS_OK) {
		ph_unlock(sh_p->sh_ta_p);
		return (TD_DBERR);
	}
	ph_unlock(sh_p->sh_ta_p);

	switch (wcb.sync_magic) {
	case MUTEX_MAGIC:
	case COND_MAGIC:
	case SEMA_MAGIC:
	case RWL_MAGIC:
		break;
	default:
		/* Not a recognized synch object. */
		return (TD_BADSH);
	}

	/*
	 * Walk every sleeping thread in the process; waiters_cb()
	 * filters for those blocked on this synch object and invokes
	 * the user's callback on each.
	 */
	wcb.waiter_cb = cb;
	wcb.sync_obj_addr = sh_p->sh_unique;
	wcb.waiter_cb_arg = cb_data;
	wcb.errcode = TD_OK;
	return_val = __td_ta_thr_iter(sh_p->sh_ta_p, waiters_cb, &wcb,
	    TD_THR_SLEEP, TD_THR_LOWEST_PRIORITY,
	    TD_SIGNO_MASK, TD_THR_ANY_USER_FLAGS);

	if (return_val != TD_OK)
		return (return_val);

	/* Report any error waiters_cb() recorded during the walk. */
	return (wcb.errcode);
}