/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include "lint.h"
#include "thr_uberdata.h"
#include <procfs.h>
#include <sys/uio.h>
#include <ctype.h>

#undef errno
extern int errno;

int __threaded = 0;	/* zero until first thr_create() */

/*
 * thr_concurrency and pthread_concurrency are not used by the library.
 * They exist solely to hold and return the values set by calls to
 * thr_setconcurrency() and pthread_setconcurrency().
 * Because thr_concurrency is affected by the THR_NEW_LWP flag
 * to thr_create(), thr_concurrency is protected by link_lock.
 */
static int thr_concurrency = 1;
static int pthread_concurrency;

size_t _lpagesize;

#define	HASHTBLSZ	1024	/* must be a power of two */
#define	TIDHASH(tid, udp)	(tid & (udp)->hash_mask)

/* initial allocation, just enough for one lwp */
#pragma align 64(init_hash_table)
thr_hash_table_t init_hash_table[1] = {
	{ DEFAULTMUTEX, DEFAULTCV, NULL },
};

extern const Lc_interface rtld_funcs[];
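/*
 * Added note (not in the original source): TIDHASH() selects a bucket
 * by masking the thread id with hash_mask, which is kept equal to
 * hash_size - 1.  For example, once the full table of HASHTBLSZ (1024)
 * buckets is in place (hash_size 1024, hash_mask 0x3ff), a tid of
 * 0x1234 maps to bucket 0x234.  This works only because hash_size is
 * kept a power of two.
 */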
/*
 * The weak version is known to libc_db and mdb.
 */
#pragma weak _uberdata = __uberdata
uberdata_t __uberdata = {
	{ DEFAULTMUTEX, DEFAULTCV },	/* link_lock */
	{ DEFAULTMUTEX, DEFAULTCV },	/* fork_lock */
	{ DEFAULTMUTEX, DEFAULTCV },	/* tdb_hash_lock */
	{ 0, },				/* tdb_hash_lock_stats */
	{ { 0 }, },			/* siguaction[NSIG] */
	{{ DEFAULTMUTEX, NULL, 0 },	/* bucket[NBUCKETS] */
	{ DEFAULTMUTEX, NULL, 0 },
	{ DEFAULTMUTEX, NULL, 0 },
	{ DEFAULTMUTEX, NULL, 0 },
	{ DEFAULTMUTEX, NULL, 0 },
	{ DEFAULTMUTEX, NULL, 0 },
	{ DEFAULTMUTEX, NULL, 0 },
	{ DEFAULTMUTEX, NULL, 0 },
	{ DEFAULTMUTEX, NULL, 0 },
	{ DEFAULTMUTEX, NULL, 0 }},
	{ RECURSIVEMUTEX, NULL, NULL },		/* atexit_root */
	{ DEFAULTMUTEX, 0, 0, NULL },		/* tsd_metadata */
	{ DEFAULTMUTEX, {0, 0}, {0, 0} },	/* tls_metadata */
	0,			/* primary_map */
	0,			/* bucket_init */
	0,			/* pad[0] */
	0,			/* pad[1] */
	{ 0 },			/* uberflags */
	NULL,			/* queue_head */
	init_hash_table,	/* thr_hash_table */
	1,			/* hash_size: size of the hash table */
	0,			/* hash_mask: hash_size - 1 */
	NULL,			/* ulwp_one */
	NULL,			/* all_lwps */
	NULL,			/* all_zombies */
	0,			/* nthreads */
	0,			/* nzombies */
	0,			/* ndaemons */
	0,			/* pid */
	sigacthandler,		/* sigacthandler */
	NULL,			/* lwp_stacks */
	NULL,			/* lwp_laststack */
	0,			/* nfreestack */
	10,			/* thread_stack_cache */
	NULL,			/* ulwp_freelist */
	NULL,			/* ulwp_lastfree */
	NULL,			/* ulwp_replace_free */
	NULL,			/* ulwp_replace_last */
	NULL,			/* atforklist */
	NULL,			/* __tdb_bootstrap */
	{			/* tdb */
		NULL,		/* tdb_sync_addr_hash */
		0,		/* tdb_register_count */
		0,		/* tdb_hash_alloc_failed */
		NULL,		/* tdb_sync_addr_free */
		NULL,		/* tdb_sync_addr_last */
		0,		/* tdb_sync_alloc */
		{ 0, 0 },	/* tdb_ev_global_mask */
		tdb_events,	/* tdb_events array */
	},
};
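/*
 * Added note: the static initializer above must track the field order
 * of uberdata_t in thr_uberdata.h.  In particular, thr_hash_table
 * starts out as the single-bucket init_hash_table (hash_size 1,
 * hash_mask 0); _thrp_create() notices hash_size == 1 and calls
 * finish_init() to complete initialization before the first thread
 * is created.
 */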
/*
 * The weak version is known to libc_db and mdb.
 */
#pragma weak _tdb_bootstrap = __tdb_bootstrap
uberdata_t **__tdb_bootstrap = NULL;

int	thread_queue_fifo = 4;
int	thread_queue_dump = 0;
int	thread_cond_wait_defer = 0;
int	thread_error_detection = 0;
int	thread_async_safe = 0;
int	thread_stack_cache = 10;

int	thread_door_noreserve = 0;

static	ulwp_t	*ulwp_alloc(void);
static	void	ulwp_free(ulwp_t *);

/*
 * Insert the lwp into the hash table.
 */
void
hash_in_unlocked(ulwp_t *ulwp, int ix, uberdata_t *udp)
{
	ulwp->ul_hash = udp->thr_hash_table[ix].hash_bucket;
	udp->thr_hash_table[ix].hash_bucket = ulwp;
	ulwp->ul_ix = ix;
}

void
hash_in(ulwp_t *ulwp, uberdata_t *udp)
{
	int ix = TIDHASH(ulwp->ul_lwpid, udp);
	mutex_t *mp = &udp->thr_hash_table[ix].hash_lock;

	lmutex_lock(mp);
	hash_in_unlocked(ulwp, ix, udp);
	lmutex_unlock(mp);
}
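/*
 * Added note: the bucket chains are singly linked through ul_hash.
 * hash_out_unlocked() below uses the classic pointer-to-pointer walk:
 * ulwpp chases the chain until *ulwpp is the target, so unlinking is
 * simply *ulwpp = ulwp->ul_hash, with no special case for the head
 * of the chain.
 */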
/*
 * Delete the lwp from the hash table.
 */
void
hash_out_unlocked(ulwp_t *ulwp, int ix, uberdata_t *udp)
{
	ulwp_t **ulwpp;

	for (ulwpp = &udp->thr_hash_table[ix].hash_bucket;
	    ulwp != *ulwpp;
	    ulwpp = &(*ulwpp)->ul_hash)
		;
	*ulwpp = ulwp->ul_hash;
	ulwp->ul_hash = NULL;
	ulwp->ul_ix = -1;
}

void
hash_out(ulwp_t *ulwp, uberdata_t *udp)
{
	int ix;

	if ((ix = ulwp->ul_ix) >= 0) {
		mutex_t *mp = &udp->thr_hash_table[ix].hash_lock;

		lmutex_lock(mp);
		hash_out_unlocked(ulwp, ix, udp);
		lmutex_unlock(mp);
	}
}

static void
ulwp_clean(ulwp_t *ulwp)
{
	ulwp->ul_self = NULL;
	ulwp->ul_rval = NULL;
	ulwp->ul_lwpid = 0;
	ulwp->ul_pri = 0;
	ulwp->ul_mappedpri = 0;
	ulwp->ul_policy = 0;
	ulwp->ul_pri_mapped = 0;
	ulwp->ul_mutator = 0;
	ulwp->ul_pleasestop = 0;
	ulwp->ul_stop = 0;
	ulwp->ul_dead = 0;
	ulwp->ul_unwind = 0;
	ulwp->ul_detached = 0;
	ulwp->ul_stopping = 0;
	ulwp->ul_sp = 0;
	ulwp->ul_critical = 0;
	ulwp->ul_cancelable = 0;
	ulwp->ul_preempt = 0;
	ulwp->ul_sigsuspend = 0;
	ulwp->ul_cancel_pending = 0;
	ulwp->ul_cancel_disabled = 0;
	ulwp->ul_cancel_async = 0;
	ulwp->ul_save_async = 0;
	ulwp->ul_cursig = 0;
	ulwp->ul_created = 0;
	ulwp->ul_replace = 0;
	ulwp->ul_schedctl_called = NULL;
	ulwp->ul_errno = 0;
	ulwp->ul_errnop = NULL;
	ulwp->ul_clnup_hdr = NULL;
	ulwp->ul_schedctl = NULL;
	ulwp->ul_bindflags = 0;
	(void) _private_memset(&ulwp->ul_td_evbuf, 0,
	    sizeof (ulwp->ul_td_evbuf));
	ulwp->ul_td_events_enable = 0;
	ulwp->ul_qtype = 0;
	ulwp->ul_usropts = 0;
	ulwp->ul_startpc = NULL;
	ulwp->ul_startarg = NULL;
	ulwp->ul_wchan = NULL;
	ulwp->ul_link = NULL;
	ulwp->ul_sleepq = NULL;
	ulwp->ul_mxchain = NULL;
	ulwp->ul_epri = 0;
	ulwp->ul_emappedpri = 0;
	/* PROBE_SUPPORT begin */
	ulwp->ul_tpdp = NULL;
	/* PROBE_SUPPORT end */
	ulwp->ul_siglink = NULL;
	(void) _private_memset(ulwp->ul_ftsd, 0,
	    sizeof (void *) * TSD_NFAST);
	ulwp->ul_stsd = NULL;
	(void) _private_memset(&ulwp->ul_spinlock, 0,
	    sizeof (ulwp->ul_spinlock));
	ulwp->ul_spin_lock_spin = 0;
	ulwp->ul_spin_lock_spin2 = 0;
	ulwp->ul_spin_lock_sleep = 0;
	ulwp->ul_spin_lock_wakeup = 0;
	ulwp->ul_ex_unwind = NULL;
}

static int stackprot;

/*
 * Answer the question, "Is the lwp in question really dead?"
 * We must inquire of the operating system to be really sure
 * because the lwp may have called lwp_exit() but it has not
 * yet completed the exit.
 */
static int
dead_and_buried(ulwp_t *ulwp)
{
	if (ulwp->ul_lwpid == (lwpid_t)(-1))
		return (1);
	if (ulwp->ul_dead && ulwp->ul_detached &&
	    __lwp_kill(ulwp->ul_lwpid, 0) == ESRCH) {
		ulwp->ul_lwpid = (lwpid_t)(-1);
		return (1);
	}
	return (0);
}
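/*
 * Added overview: free stacks are kept on the singly-linked
 * udp->lwp_stacks list (appended at udp->lwp_laststack), with
 * udp->nfreestack counting the entries.  trim_stack_cache() below
 * walks that list and unmaps the stacks of lwps that are truly gone
 * until the count drops to cache_limit.
 */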
/*
 * Attempt to keep the stack cache within the specified cache limit.
 */
static void
trim_stack_cache(int cache_limit)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	ulwp_t *prev = NULL;
	ulwp_t **ulwpp = &udp->lwp_stacks;
	ulwp_t *ulwp;

	ASSERT(udp->nthreads <= 1 || MUTEX_OWNED(&udp->link_lock, self));

	while (udp->nfreestack > cache_limit && (ulwp = *ulwpp) != NULL) {
		if (dead_and_buried(ulwp)) {
			*ulwpp = ulwp->ul_next;
			if (ulwp == udp->lwp_laststack)
				udp->lwp_laststack = prev;
			hash_out(ulwp, udp);
			udp->nfreestack--;
			(void) _private_munmap(ulwp->ul_stk, ulwp->ul_mapsiz);
			/*
			 * Now put the free ulwp on the ulwp freelist.
			 */
			ulwp->ul_mapsiz = 0;
			ulwp->ul_next = NULL;
			if (udp->ulwp_freelist == NULL)
				udp->ulwp_freelist = udp->ulwp_lastfree = ulwp;
			else {
				udp->ulwp_lastfree->ul_next = ulwp;
				udp->ulwp_lastfree = ulwp;
			}
		} else {
			prev = ulwp;
			ulwpp = &ulwp->ul_next;
		}
	}
}

/*
 * Find an unused stack of the requested size
 * or create a new stack of the requested size.
 * Return a pointer to the ulwp_t structure referring to the stack, or NULL.
 * thr_exit() stores 1 in the ul_dead member.
 * thr_join() stores -1 in the ul_lwpid member.
 */
ulwp_t *
find_stack(size_t stksize, size_t guardsize)
{
	uberdata_t *udp = curthread->ul_uberdata;
	size_t mapsize;
	ulwp_t *prev;
	ulwp_t *ulwp;
	ulwp_t **ulwpp;
	void *stk;

	/*
	 * The stack is allocated PROT_READ|PROT_WRITE|PROT_EXEC
	 * unless overridden by the system's configuration.
	 */
	if (stackprot == 0) {	/* do this once */
		long lprot = _sysconf(_SC_STACK_PROT);
		if (lprot <= 0)
			lprot = (PROT_READ|PROT_WRITE|PROT_EXEC);
		stackprot = (int)lprot;
	}
	if (_lpagesize == 0)
		_lpagesize = _sysconf(_SC_PAGESIZE);
	/*
	 * One megabyte stacks by default, but subtract off
	 * two pages for the system-created red zones.
	 * Round up a non-zero stack size to a pagesize multiple.
	 */
	if (stksize == 0)
		stksize = DEFAULTSTACK - 2 * _lpagesize;
	else
		stksize = ((stksize + _lpagesize - 1) & -_lpagesize);
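	/*
	 * Added worked example: the round-up expression above relies on
	 * _lpagesize being a power of two.  With 8K (0x2000) pages, a
	 * request of 5000 bytes becomes
	 * (5000 + 0x1fff) & -0x2000 == 0x2000, i.e. exactly one page,
	 * since -0x2000 is the same bit pattern as the mask ~0x1fff.
	 */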
	/*
	 * Round up the mapping size to a multiple of pagesize.
	 * Note: mmap() provides at least one page of red zone
	 * so we deduct that from the value of guardsize.
	 */
	if (guardsize != 0)
		guardsize = ((guardsize + _lpagesize - 1) & -_lpagesize) -
		    _lpagesize;
	mapsize = stksize + guardsize;

	lmutex_lock(&udp->link_lock);
	for (prev = NULL, ulwpp = &udp->lwp_stacks;
	    (ulwp = *ulwpp) != NULL;
	    prev = ulwp, ulwpp = &ulwp->ul_next) {
		if (ulwp->ul_mapsiz == mapsize &&
		    ulwp->ul_guardsize == guardsize &&
		    dead_and_buried(ulwp)) {
			/*
			 * The previous lwp is gone; reuse the stack.
			 * Remove the ulwp from the stack list.
			 */
			*ulwpp = ulwp->ul_next;
			ulwp->ul_next = NULL;
			if (ulwp == udp->lwp_laststack)
				udp->lwp_laststack = prev;
			hash_out(ulwp, udp);
			udp->nfreestack--;
			lmutex_unlock(&udp->link_lock);
			ulwp_clean(ulwp);
			return (ulwp);
		}
	}

	/*
	 * None of the cached stacks matched our mapping size.
	 * Reduce the stack cache to get rid of possibly
	 * very old stacks that will never be reused.
	 */
	if (udp->nfreestack > udp->thread_stack_cache)
		trim_stack_cache(udp->thread_stack_cache);
	else if (udp->nfreestack > 0)
		trim_stack_cache(udp->nfreestack - 1);
	lmutex_unlock(&udp->link_lock);

	/*
	 * Create a new stack.
	 */
	if ((stk = _private_mmap(NULL, mapsize, stackprot,
	    MAP_PRIVATE|MAP_NORESERVE|MAP_ANON, -1, (off_t)0)) != MAP_FAILED) {
		/*
		 * We have allocated our stack.  Now allocate the ulwp.
		 */
		ulwp = ulwp_alloc();
		if (ulwp == NULL)
			(void) _private_munmap(stk, mapsize);
		else {
			ulwp->ul_stk = stk;
			ulwp->ul_mapsiz = mapsize;
			ulwp->ul_guardsize = guardsize;
			ulwp->ul_stktop = (uintptr_t)stk + mapsize;
			ulwp->ul_stksiz = stksize;
			ulwp->ul_ix = -1;
			if (guardsize)	/* protect the extra red zone */
				(void) _private_mprotect(stk,
				    guardsize, PROT_NONE);
		}
	}
	/*
	 * Note (added): on failure, ulwp is NULL here; the search loop
	 * above left it NULL when it ran off the end of the stack list,
	 * and ulwp_alloc() returns NULL on allocation failure.
	 */
	return (ulwp);
}
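/*
 * Added note: the mapping returned by find_stack() has the PROT_NONE
 * guard area at its low end (stacks grow downward on the supported
 * architectures), so ul_stktop, the address just past the mapping, is
 * where the new lwp's stack begins.  MAP_NORESERVE means no swap is
 * reserved until the pages are actually touched.
 */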
/*
 * Get a ulwp_t structure from the free list or allocate a new one.
 * Such ulwp_t's do not have a stack allocated by the library.
 */
static ulwp_t *
ulwp_alloc(void)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	size_t tls_size;
	ulwp_t *prev;
	ulwp_t *ulwp;
	ulwp_t **ulwpp;
	caddr_t data;

	lmutex_lock(&udp->link_lock);
	for (prev = NULL, ulwpp = &udp->ulwp_freelist;
	    (ulwp = *ulwpp) != NULL;
	    prev = ulwp, ulwpp = &ulwp->ul_next) {
		if (dead_and_buried(ulwp)) {
			*ulwpp = ulwp->ul_next;
			ulwp->ul_next = NULL;
			if (ulwp == udp->ulwp_lastfree)
				udp->ulwp_lastfree = prev;
			hash_out(ulwp, udp);
			lmutex_unlock(&udp->link_lock);
			ulwp_clean(ulwp);
			return (ulwp);
		}
	}
	lmutex_unlock(&udp->link_lock);

	tls_size = roundup64(udp->tls_metadata.static_tls.tls_size);
	data = lmalloc(sizeof (*ulwp) + tls_size);
	if (data != NULL) {
		/* LINTED pointer cast may result in improper alignment */
		ulwp = (ulwp_t *)(data + tls_size);
	}
	return (ulwp);
}
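/*
 * Added note: ulwp_alloc() places the static TLS block immediately
 * below the ulwp_t (the ulwp pointer is data + tls_size), so a
 * thread's static TLS sits at a fixed negative offset from its ulwp.
 * libc_init() uses the same layout for the main thread's ulwp.
 */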
/*
 * Free a ulwp structure.
 * If there is an associated stack, put it on the stack list and
 * munmap() previously freed stacks up to the residual cache limit.
 * Else put it on the ulwp free list and never call lfree() on it.
 */
static void
ulwp_free(ulwp_t *ulwp)
{
	uberdata_t *udp = curthread->ul_uberdata;

	ASSERT(udp->nthreads <= 1 || MUTEX_OWNED(&udp->link_lock, curthread));
	ulwp->ul_next = NULL;
	if (ulwp == udp->ulwp_one)	/* don't reuse the primordial stack */
		/*EMPTY*/;
	else if (ulwp->ul_mapsiz != 0) {
		if (udp->lwp_stacks == NULL)
			udp->lwp_stacks = udp->lwp_laststack = ulwp;
		else {
			udp->lwp_laststack->ul_next = ulwp;
			udp->lwp_laststack = ulwp;
		}
		if (++udp->nfreestack > udp->thread_stack_cache)
			trim_stack_cache(udp->thread_stack_cache);
	} else {
		if (udp->ulwp_freelist == NULL)
			udp->ulwp_freelist = udp->ulwp_lastfree = ulwp;
		else {
			udp->ulwp_lastfree->ul_next = ulwp;
			udp->ulwp_lastfree = ulwp;
		}
	}
}

/*
 * Find a named lwp and return a pointer to its hash list location.
 * On success, returns with the hash lock held.
 */
ulwp_t **
find_lwpp(thread_t tid)
{
	uberdata_t *udp = curthread->ul_uberdata;
	int ix = TIDHASH(tid, udp);
	mutex_t *mp = &udp->thr_hash_table[ix].hash_lock;
	ulwp_t *ulwp;
	ulwp_t **ulwpp;

	if (tid == 0)
		return (NULL);

	lmutex_lock(mp);
	for (ulwpp = &udp->thr_hash_table[ix].hash_bucket;
	    (ulwp = *ulwpp) != NULL;
	    ulwpp = &ulwp->ul_hash) {
		if (ulwp->ul_lwpid == tid)
			return (ulwpp);
	}
	lmutex_unlock(mp);
	return (NULL);
}

/*
 * Wake up all lwps waiting on this lwp for some reason.
 */
void
ulwp_broadcast(ulwp_t *ulwp)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;

	ASSERT(MUTEX_OWNED(ulwp_mutex(ulwp, udp), self));
	(void) cond_broadcast_internal(ulwp_condvar(ulwp, udp));
}
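/*
 * Added note on lock discipline: both find_lwpp() and find_lwp()
 * return (on success) holding the target's hash bucket lock, and
 * every caller must drop it, e.g. via ulwp_unlock(), as
 * _thr_detach() below does on both of its paths.
 */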
/*
 * Find a named lwp and return a pointer to it.
 * Returns with the hash lock held.
 */
ulwp_t *
find_lwp(thread_t tid)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	ulwp_t *ulwp = NULL;
	ulwp_t **ulwpp;

	if (self->ul_lwpid == tid) {
		ulwp = self;
		ulwp_lock(ulwp, udp);
	} else if ((ulwpp = find_lwpp(tid)) != NULL) {
		ulwp = *ulwpp;
	}

	if (ulwp && ulwp->ul_dead) {
		ulwp_unlock(ulwp, udp);
		ulwp = NULL;
	}

	return (ulwp);
}
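/*
 * Added usage sketch (hypothetical call): the public entry points all
 * funnel into _thrp_create().  For example,
 *	thr_create(NULL, 0, func, arg, THR_DETACHED, NULL);
 * requests a library-allocated default-size stack, no guard size
 * override, and a detached thread that inherits the creator's
 * priority and scheduling policy (see _thr_create() below).
 */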
int
_thrp_create(void *stk, size_t stksize, void *(*func)(void *), void *arg,
	long flags, thread_t *new_thread, pri_t priority, int policy,
	size_t guardsize)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	ucontext_t uc;
	uint_t lwp_flags;
	thread_t tid;
	int error = 0;
	ulwp_t *ulwp;

	/*
	 * Enforce the restriction of not creating any threads
	 * until the primary link map has been initialized.
	 * Also, disallow thread creation to a child of vfork().
	 */
	if (!self->ul_primarymap || self->ul_vfork)
		return (ENOTSUP);

	if (udp->hash_size == 1)
		finish_init();

	if (((stk || stksize) && stksize < MINSTACK) ||
	    priority < THREAD_MIN_PRIORITY || priority > THREAD_MAX_PRIORITY)
		return (EINVAL);

	if (stk == NULL) {
		if ((ulwp = find_stack(stksize, guardsize)) == NULL)
			return (ENOMEM);
		stksize = ulwp->ul_mapsiz - ulwp->ul_guardsize;
	} else {
		/* initialize the private stack */
		if ((ulwp = ulwp_alloc()) == NULL)
			return (ENOMEM);
		ulwp->ul_stk = stk;
		ulwp->ul_stktop = (uintptr_t)stk + stksize;
		ulwp->ul_stksiz = stksize;
		ulwp->ul_ix = -1;
	}
	ulwp->ul_errnop = &ulwp->ul_errno;

	lwp_flags = LWP_SUSPENDED;
	if (flags & (THR_DETACHED|THR_DAEMON)) {
		flags |= THR_DETACHED;
		lwp_flags |= LWP_DETACHED;
	}
	if (flags & THR_DAEMON)
		lwp_flags |= LWP_DAEMON;

	/* creating a thread: enforce mt-correctness in _mutex_lock() */
	self->ul_async_safe = 1;

	/* per-thread copies of global variables, for speed */
	ulwp->ul_queue_fifo = self->ul_queue_fifo;
	ulwp->ul_cond_wait_defer = self->ul_cond_wait_defer;
	ulwp->ul_error_detection = self->ul_error_detection;
	ulwp->ul_async_safe = self->ul_async_safe;
	ulwp->ul_max_spinners = self->ul_max_spinners;
	ulwp->ul_adaptive_spin = self->ul_adaptive_spin;
	ulwp->ul_release_spin = self->ul_release_spin;
	ulwp->ul_queue_spin = self->ul_queue_spin;
	ulwp->ul_door_noreserve = self->ul_door_noreserve;

	ulwp->ul_primarymap = self->ul_primarymap;
	ulwp->ul_self = ulwp;
	ulwp->ul_uberdata = udp;

	/* debugger support */
	ulwp->ul_usropts = flags;

#ifdef __sparc
	/*
	 * We cache several instructions in the thread structure for use
	 * by the fasttrap DTrace provider.  When changing this, read the
	 * comment in fasttrap.h for all the other places that must
	 * be changed.
	 */
	ulwp->ul_dsave = 0x9de04000;	/* save %g1, %g0, %sp */
	ulwp->ul_drestore = 0x81e80000;	/* restore %g0, %g0, %g0 */
	ulwp->ul_dftret = 0x91d0203a;	/* ta 0x3a */
	ulwp->ul_dreturn = 0x81ca0000;	/* return %o0 */
#endif

	ulwp->ul_startpc = func;
	ulwp->ul_startarg = arg;
	_fpinherit(ulwp);
	/*
	 * Defer signals on the new thread until its TLS constructors
	 * have been called.  _thr_setup() will call sigon() after
	 * it has called tls_setup().
	 */
	ulwp->ul_sigdefer = 1;

	if (setup_context(&uc, _thr_setup, ulwp,
	    (caddr_t)ulwp->ul_stk + ulwp->ul_guardsize, stksize) != 0)
		error = EAGAIN;

	/*
	 * Call enter_critical() to avoid being suspended until we
	 * have linked the new thread into the proper lists.
	 * This is necessary because forkall() and fork1() must
	 * suspend all threads and they must see a complete list.
	 */
	enter_critical(self);
	uc.uc_sigmask = ulwp->ul_sigmask = self->ul_sigmask;
	if (error != 0 ||
	    (error = __lwp_create(&uc, lwp_flags, &tid)) != 0) {
		exit_critical(self);
		ulwp->ul_lwpid = (lwpid_t)(-1);
		ulwp->ul_dead = 1;
		ulwp->ul_detached = 1;
		lmutex_lock(&udp->link_lock);
		ulwp_free(ulwp);
		lmutex_unlock(&udp->link_lock);
		return (error);
	}
	self->ul_nocancel = 0;	/* cancellation is now possible */
	ulwp->ul_nocancel = 0;
	udp->uberflags.uf_mt = 1;
	if (new_thread)
		*new_thread = tid;
	if (flags & THR_DETACHED)
		ulwp->ul_detached = 1;
	ulwp->ul_lwpid = tid;
	ulwp->ul_stop = TSTP_REGULAR;
	ulwp->ul_created = 1;
	ulwp->ul_policy = policy;
	ulwp->ul_pri = priority;

	lmutex_lock(&udp->link_lock);
	ulwp->ul_forw = udp->all_lwps;
	ulwp->ul_back = udp->all_lwps->ul_back;
	ulwp->ul_back->ul_forw = ulwp;
	ulwp->ul_forw->ul_back = ulwp;
	hash_in(ulwp, udp);
	udp->nthreads++;
	if (flags & THR_DAEMON)
		udp->ndaemons++;
	if (flags & THR_NEW_LWP)
		thr_concurrency++;
	__threaded = 1;		/* inform stdio */
	lmutex_unlock(&udp->link_lock);

	if (__td_event_report(self, TD_CREATE, udp)) {
		self->ul_td_evbuf.eventnum = TD_CREATE;
		self->ul_td_evbuf.eventdata = (void *)(uintptr_t)tid;
		tdb_event(TD_CREATE, udp);
	}
	if (!(flags & THR_SUSPENDED)) {
		ulwp->ul_created = 0;
		(void) _thrp_continue(tid, TSTP_REGULAR);
	}

	exit_critical(self);
	return (0);
}

#pragma weak thr_create = _thr_create
int
_thr_create(void *stk, size_t stksize, void *(*func)(void *), void *arg,
	long flags, thread_t *new_thread)
{
	return (_thrp_create(stk, stksize, func, arg, flags, new_thread,
	    curthread->ul_pri, curthread->ul_policy, 0));
}

/*
 * A special cancellation cleanup hook for DCE.
 * cleanuphndlr, when it is not NULL, will contain a callback
 * function to be called before a thread is terminated in
 * _thr_exit() as a result of being cancelled.
 */
static void (*cleanuphndlr)(void) = NULL;

/*
 * _pthread_setcleanupinit: sets the cleanup hook.
 */
int
_pthread_setcleanupinit(void (*func)(void))
{
	cleanuphndlr = func;
	return (0);
}
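/*
 * Added overview: _thrp_exit() is the final stage of thread exit.
 * It runs with signals deferred, tears down TSD and TLS, unlinks the
 * thread from all_lwps, and, for a joinable thread whose stack was
 * library-allocated, switches curthread to a small replacement ulwp_t
 * so the stack can be recycled while thr_join() can still fetch the
 * exit status.
 */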
void
_thrp_exit()
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	ulwp_t *replace = NULL;

	if (__td_event_report(self, TD_DEATH, udp)) {
		self->ul_td_evbuf.eventnum = TD_DEATH;
		tdb_event(TD_DEATH, udp);
	}

	ASSERT(self->ul_sigdefer != 0);

	lmutex_lock(&udp->link_lock);
	udp->nthreads--;
	if (self->ul_usropts & THR_NEW_LWP)
		thr_concurrency--;
	if (self->ul_usropts & THR_DAEMON)
		udp->ndaemons--;
	else if (udp->nthreads == udp->ndaemons) {
		/*
		 * We are the last non-daemon thread exiting.
		 * Exit the process.  We retain our TSD and TLS so
		 * that atexit() application functions can use them.
		 */
		lmutex_unlock(&udp->link_lock);
		exit(0);
		thr_panic("_thrp_exit(): exit(0) returned");
	}
	lmutex_unlock(&udp->link_lock);

	tsd_exit();	/* deallocate thread-specific data */
	tls_exit();	/* deallocate thread-local storage */

	/* block all signals to finish exiting */
	block_all_signals(self);
	/* also prevent ourself from being suspended */
	enter_critical(self);
	rwl_free(self);
	lmutex_lock(&udp->link_lock);
	ulwp_free(self);
	(void) ulwp_lock(self, udp);

	if (self->ul_mapsiz && !self->ul_detached) {
		/*
		 * We want to free the stack for reuse but must keep
		 * the ulwp_t struct for the benefit of thr_join().
		 * For this purpose we allocate a replacement ulwp_t.
		 */
		if ((replace = udp->ulwp_replace_free) == NULL)
			replace = lmalloc(REPLACEMENT_SIZE);
		else if ((udp->ulwp_replace_free = replace->ul_next) == NULL)
			udp->ulwp_replace_last = NULL;
	}

	if (udp->all_lwps == self)
		udp->all_lwps = self->ul_forw;
	if (udp->all_lwps == self)
		udp->all_lwps = NULL;
	else {
		self->ul_forw->ul_back = self->ul_back;
		self->ul_back->ul_forw = self->ul_forw;
	}
	self->ul_forw = self->ul_back = NULL;
	/* collect queue lock statistics before marking ourself dead */
	record_spin_locks(self);
	self->ul_dead = 1;
	self->ul_pleasestop = 0;
	if (replace != NULL) {
		int ix = self->ul_ix;		/* the hash index */
		(void) _private_memcpy(replace, self, REPLACEMENT_SIZE);
		replace->ul_self = replace;
		replace->ul_gs = 0;		/* clone does not carry %gs */
		replace->ul_next = NULL;	/* clone not on stack list */
		replace->ul_mapsiz = 0;		/* allows clone to be freed */
		replace->ul_replace = 1;	/* requires clone to be freed */
		hash_out_unlocked(self, ix, udp);
		hash_in_unlocked(replace, ix, udp);
		ASSERT(!(self->ul_detached));
		self->ul_detached = 1;		/* this frees the stack */
		self->ul_schedctl = NULL;
		self->ul_schedctl_called = &udp->uberflags;
		set_curthread(self = replace);
		/*
		 * Having just changed the address of curthread, we
		 * must reset the ownership of the locks we hold so
		 * that assertions will not fire when we release them.
		 */
		udp->link_lock.mutex_owner = (uintptr_t)self;
		ulwp_mutex(self, udp)->mutex_owner = (uintptr_t)self;
		/*
		 * NOTE:
		 * On i386, %gs still references the original, not the
		 * replacement, ulwp structure.  Fetching the replacement
		 * curthread pointer via %gs:0 works correctly since the
		 * original ulwp structure will not be reallocated until
		 * this lwp has completed its lwp_exit() system call (see
		 * dead_and_buried()), but from here on out, we must make
		 * no references to %gs:<offset> other than %gs:0.
		 */
	}
	/*
	 * Put non-detached terminated threads in the all_zombies list.
	 */
	if (!self->ul_detached) {
		udp->nzombies++;
		if (udp->all_zombies == NULL) {
			ASSERT(udp->nzombies == 1);
			udp->all_zombies = self->ul_forw = self->ul_back = self;
		} else {
			self->ul_forw = udp->all_zombies;
			self->ul_back = udp->all_zombies->ul_back;
			self->ul_back->ul_forw = self;
			self->ul_forw->ul_back = self;
		}
	}
	/*
	 * Notify everyone waiting for this thread.
	 */
	ulwp_broadcast(self);
	(void) ulwp_unlock(self, udp);
	/*
	 * Prevent any more references to the schedctl data.
	 * We are exiting and continue_fork() may not find us.
	 * Do this just before dropping link_lock, since fork
	 * serializes on link_lock.
	 */
	self->ul_schedctl = NULL;
	self->ul_schedctl_called = &udp->uberflags;
	lmutex_unlock(&udp->link_lock);

	ASSERT(self->ul_critical == 1);
	ASSERT(self->ul_preempt == 0);
	_lwp_terminate();	/* never returns */
	thr_panic("_thrp_exit(): _lwp_terminate() returned");
}

void
collect_queue_statistics()
{
	uberdata_t *udp = curthread->ul_uberdata;
	ulwp_t *ulwp;

	if (thread_queue_dump) {
		lmutex_lock(&udp->link_lock);
		if ((ulwp = udp->all_lwps) != NULL) {
			do {
				record_spin_locks(ulwp);
			} while ((ulwp = ulwp->ul_forw) != udp->all_lwps);
		}
		lmutex_unlock(&udp->link_lock);
	}
}

void
_thr_exit_common(void *status, int unwind)
{
	ulwp_t *self = curthread;
	int cancelled = (self->ul_cancel_pending && status == PTHREAD_CANCELED);

	ASSERT(self->ul_critical == 0 && self->ul_preempt == 0);

	/*
	 * Disable cancellation and call the special DCE cancellation
	 * cleanup hook if it is enabled.  Do nothing else before calling
	 * the DCE cancellation cleanup hook; it may call longjmp() and
	 * never return here.
	 */
	self->ul_cancel_disabled = 1;
	self->ul_cancel_async = 0;
	self->ul_save_async = 0;
	self->ul_cancelable = 0;
	self->ul_cancel_pending = 0;
	if (cancelled && cleanuphndlr != NULL)
		(*cleanuphndlr)();

	/*
	 * Block application signals while we are exiting.
	 * We call out to C++, TSD, and TLS destructors while exiting
	 * and these are application-defined, so we cannot be assured
	 * that they won't reset the signal mask.  We use sigoff() to
	 * defer any signals that may be received as a result of this
	 * bad behavior.  Such signals will be lost to the process
	 * when the thread finishes exiting.
	 */
	(void) _thr_sigsetmask(SIG_SETMASK, &maskset, NULL);
	sigoff(self);

	self->ul_rval = status;

	/*
	 * If thr_exit is being called from the places where
	 * C++ destructors are to be called such as cancellation
	 * points, then set this flag.  It is checked in _t_cancel()
	 * to decide whether _ex_unwind() is to be called or not.
	 */
	if (unwind)
		self->ul_unwind = 1;

	/*
	 * _thrp_unwind() will eventually call _thrp_exit().
	 * It never returns.
	 */
	_thrp_unwind(NULL);
	thr_panic("_thr_exit_common(): _thrp_unwind() returned");
}

/*
 * Called when a thread returns from its start function.
 * We are at the top of the stack; no unwinding is necessary.
 */
void
_thr_terminate(void *status)
{
	_thr_exit_common(status, 0);
}

#pragma weak thr_exit = _thr_exit
#pragma weak pthread_exit = _thr_exit
#pragma weak _pthread_exit = _thr_exit
void
_thr_exit(void *status)
{
	_thr_exit_common(status, 1);
}
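/*
 * Added note: cancellation terminates a thread through this same
 * path; acting on a pending cancel ultimately calls
 * _thr_exit(PTHREAD_CANCELED), which is why _thr_exit_common() above
 * compares status against PTHREAD_CANCELED to decide whether to run
 * the DCE cleanup hook.
 */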
int
_thrp_join(thread_t tid, thread_t *departed, void **status, int do_cancel)
{
	uberdata_t *udp = curthread->ul_uberdata;
	mutex_t *mp;
	void *rval;
	thread_t found;
	ulwp_t *ulwp;
	ulwp_t **ulwpp;
	int replace;
	int error;

	if (do_cancel)
		error = lwp_wait(tid, &found);
	else {
		while ((error = __lwp_wait(tid, &found)) == EINTR)
			;
	}
	if (error)
		return (error);

	/*
	 * We must hold link_lock to avoid a race condition with find_stack().
	 */
	lmutex_lock(&udp->link_lock);
	if ((ulwpp = find_lwpp(found)) == NULL) {
		/*
		 * lwp_wait() found an lwp that the library doesn't know
		 * about.  It must have been created with _lwp_create().
		 * Just return its lwpid; we can't know its status.
		 */
		lmutex_unlock(&udp->link_lock);
		rval = NULL;
	} else {
		/*
		 * Remove ulwp from the hash table.
		 */
		ulwp = *ulwpp;
		*ulwpp = ulwp->ul_hash;
		ulwp->ul_hash = NULL;
		/*
		 * Remove ulwp from all_zombies list.
		 */
		ASSERT(udp->nzombies >= 1);
		if (udp->all_zombies == ulwp)
			udp->all_zombies = ulwp->ul_forw;
		if (udp->all_zombies == ulwp)
			udp->all_zombies = NULL;
		else {
			ulwp->ul_forw->ul_back = ulwp->ul_back;
			ulwp->ul_back->ul_forw = ulwp->ul_forw;
		}
		ulwp->ul_forw = ulwp->ul_back = NULL;
		udp->nzombies--;
		ASSERT(ulwp->ul_dead && !ulwp->ul_detached &&
		    !(ulwp->ul_usropts & (THR_DETACHED|THR_DAEMON)));
		/*
		 * We can't call ulwp_unlock(ulwp) after we set
		 * ulwp->ul_ix = -1 so we have to get a pointer to the
		 * ulwp's hash table mutex now in order to unlock it below.
		 */
		mp = ulwp_mutex(ulwp, udp);
		ulwp->ul_lwpid = (lwpid_t)(-1);
		ulwp->ul_ix = -1;
		rval = ulwp->ul_rval;
		replace = ulwp->ul_replace;
		lmutex_unlock(mp);
		if (replace) {
			ulwp->ul_next = NULL;
			if (udp->ulwp_replace_free == NULL)
				udp->ulwp_replace_free =
				    udp->ulwp_replace_last = ulwp;
			else {
				udp->ulwp_replace_last->ul_next = ulwp;
				udp->ulwp_replace_last = ulwp;
			}
		}
		lmutex_unlock(&udp->link_lock);
	}

	if (departed != NULL)
		*departed = found;
	if (status != NULL)
		*status = rval;
	return (0);
}

#pragma weak thr_join = _thr_join
int
_thr_join(thread_t tid, thread_t *departed, void **status)
{
	int error = _thrp_join(tid, departed, status, 1);
	return ((error == EINVAL)? ESRCH : error);
}
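/*
 * Added note: a tid of 0 asks lwp_wait() to wait for any terminated
 * joinable lwp, which thr_join() permits but POSIX does not; that is
 * why _pthread_join() below rejects tid == 0 with ESRCH before
 * calling _thrp_join().
 */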
/*
 * pthread_join() differs from Solaris thr_join():
 * It does not return the departed thread's id
 * and hence does not have a "departed" argument.
 * It returns EINVAL if tid refers to a detached thread.
 */
#pragma weak pthread_join = _pthread_join
int
_pthread_join(pthread_t tid, void **status)
{
	return ((tid == 0)? ESRCH : _thrp_join(tid, NULL, status, 1));
}

#pragma weak pthread_detach = _thr_detach
#pragma weak _pthread_detach = _thr_detach
int
_thr_detach(thread_t tid)
{
	uberdata_t *udp = curthread->ul_uberdata;
	ulwp_t *ulwp;
	ulwp_t **ulwpp;
	int error = 0;

	if ((ulwpp = find_lwpp(tid)) == NULL)
		return (ESRCH);
	ulwp = *ulwpp;

	if (ulwp->ul_dead) {
		ulwp_unlock(ulwp, udp);
		error = _thrp_join(tid, NULL, NULL, 0);
	} else {
		error = __lwp_detach(tid);
		ulwp->ul_detached = 1;
		ulwp->ul_usropts |= THR_DETACHED;
		ulwp_unlock(ulwp, udp);
	}
	return (error);
}
/*
 * Static local string compare function to avoid calling strncmp()
 * (and hence the dynamic linker) during library initialization.
 */
static int
sncmp(const char *s1, const char *s2, size_t n)
{
	n++;
	while (--n != 0 && *s1 == *s2++)
		if (*s1++ == '\0')
			return (0);
	return (n == 0 ? 0 : *(uchar_t *)s1 - *(uchar_t *)--s2);
}

static const char *
ematch(const char *ev, const char *match)
{
	int c;

	while ((c = *match++) != '\0') {
		if (*ev++ != c)
			return (NULL);
	}
	if (*ev++ != '=')
		return (NULL);
	return (ev);
}

static int
envvar(const char *ev, const char *match, int limit)
{
	int val = -1;
	const char *ename;

	if ((ename = ematch(ev, match)) != NULL) {
		int c;
		for (val = 0; (c = *ename) != '\0'; ename++) {
			if (!isdigit(c)) {
				val = -1;
				break;
			}
			val = val * 10 + (c - '0');
			if (val > limit) {
				val = limit;
				break;
			}
		}
	}
	return (val);
}

static void
etest(const char *ev)
{
	int value;

	if ((value = envvar(ev, "QUEUE_SPIN", 1000000)) >= 0)
		thread_queue_spin = value;
	if ((value = envvar(ev, "ADAPTIVE_SPIN", 1000000)) >= 0) {
		thread_adaptive_spin = value;
		thread_release_spin = (value + 1) / 2;
	}
	if ((value = envvar(ev, "RELEASE_SPIN", 1000000)) >= 0)
		thread_release_spin = value;
	if ((value = envvar(ev, "MAX_SPINNERS", 100)) >= 0)
		thread_max_spinners = value;
	if ((value = envvar(ev, "QUEUE_FIFO", 8)) >= 0)
		thread_queue_fifo = value;
#if defined(THREAD_DEBUG)
	if ((value = envvar(ev, "QUEUE_VERIFY", 1)) >= 0)
		thread_queue_verify = value;
#endif
	if ((value = envvar(ev, "QUEUE_DUMP", 1)) >= 0)
		thread_queue_dump = value;
	if ((value = envvar(ev, "STACK_CACHE", 10000)) >= 0)
		thread_stack_cache = value;
	if ((value = envvar(ev, "COND_WAIT_DEFER", 1)) >= 0)
		thread_cond_wait_defer = value;
	if ((value = envvar(ev, "ERROR_DETECTION", 2)) >= 0)
		thread_error_detection = value;
	if ((value = envvar(ev, "ASYNC_SAFE", 1)) >= 0)
		thread_async_safe = value;
	if ((value = envvar(ev, "DOOR_NORESERVE", 1)) >= 0)
		thread_door_noreserve = value;
}
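/*
 * Added worked example: with "_THREAD_ADAPTIVE_SPIN=500" in the
 * environment, etest() sets thread_adaptive_spin to 500 and derives
 * thread_release_spin = (500 + 1) / 2 = 250; a separate
 * "_THREAD_RELEASE_SPIN=..." setting would override the derived value.
 */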

/*
 * Look for and evaluate environment variables of the form "_THREAD_*".
 * For compatibility with the past, we also look for environment
 * names of the form "LIBTHREAD_*".
 */
static void
set_thread_vars()
{
	extern const char **_environ;
	const char **pev;
	const char *ev;
	char c;

	if ((pev = _environ) == NULL)
		return;
	while ((ev = *pev++) != NULL) {
		c = *ev;
		if (c == '_' && sncmp(ev, "_THREAD_", 8) == 0)
			etest(ev + 8);
		if (c == 'L' && sncmp(ev, "LIBTHREAD_", 10) == 0)
			etest(ev + 10);
	}
}

/* PROBE_SUPPORT begin */
#pragma weak __tnf_probe_notify
extern void __tnf_probe_notify(void);
/* PROBE_SUPPORT end */

/* same as atexit() but private to the library */
extern int _atexit(void (*)(void));

/* same as _cleanup() but private to the library */
extern void __cleanup(void);

extern void atfork_init(void);

#ifdef __amd64
extern void __amd64id(void);
#endif

/*
 * libc_init() is called by ld.so.1 for library initialization.
 * We perform minimal initialization; enough to work with the main thread.
 */
void
libc_init(void)
{
	uberdata_t *udp = &__uberdata;
	ulwp_t *oldself = __curthread();
	ucontext_t uc;
	ulwp_t *self;
	struct rlimit rl;
	caddr_t data;
	size_t tls_size;
	int setmask;

	/*
	 * For the initial stage of initialization, we must be careful
	 * not to call any function that could possibly call _cerror().
	 * For this purpose, we call only the raw system call wrappers.
	 */

#ifdef __amd64
	/*
	 * Gather information about cache layouts for optimized
	 * AMD assembler strfoo() and memfoo() functions.
	 */
	__amd64id();
#endif

	/*
	 * Every libc, regardless of which link map, must register __cleanup().
	 */
	(void) _atexit(__cleanup);

	/*
	 * We keep our uberdata on one of (a) the first alternate link map
	 * or (b) the primary link map.  We switch to the primary link map
	 * and stay there once we see it.  All intermediate link maps are
	 * subject to being unloaded at any time.
	 */
	if (oldself != NULL && (oldself->ul_primarymap || !primary_link_map)) {
		__tdb_bootstrap = oldself->ul_uberdata->tdb_bootstrap;
		mutex_setup();
		atfork_init();	/* every link map needs atfork() processing */
		return;
	}

	/*
	 * To establish the main stack information, we have to get our context.
	 * This is also convenient to use for getting our signal mask.
	 */
	uc.uc_flags = UC_ALL;
	(void) __getcontext_syscall(&uc);
	ASSERT(uc.uc_link == NULL);

	tls_size = roundup64(udp->tls_metadata.static_tls.tls_size);
	ASSERT(primary_link_map || tls_size == 0);
	data = lmalloc(sizeof (ulwp_t) + tls_size);
	if (data == NULL)
		thr_panic("cannot allocate thread structure for main thread");
	/* LINTED pointer cast may result in improper alignment */
	self = (ulwp_t *)(data + tls_size);
	init_hash_table[0].hash_bucket = self;

	self->ul_sigmask = uc.uc_sigmask;
	delete_reserved_signals(&self->ul_sigmask);
	/*
	 * Are the old and new sets different?
	 * (This can happen if we are currently blocking SIGCANCEL.)
	 * If so, we must explicitly set our signal mask, below.
	 */
	setmask =
	    ((self->ul_sigmask.__sigbits[0] ^ uc.uc_sigmask.__sigbits[0]) |
	    (self->ul_sigmask.__sigbits[1] ^ uc.uc_sigmask.__sigbits[1]));

#ifdef __sparc
	/*
	 * We cache several instructions in the thread structure for use
	 * by the fasttrap DTrace provider.  When changing this, read the
	 * comment in fasttrap.h for all the other places that must
	 * be changed.
	 */
	self->ul_dsave = 0x9de04000;		/* save %g1, %g0, %sp */
	self->ul_drestore = 0x81e80000;		/* restore %g0, %g0, %g0 */
	self->ul_dftret = 0x91d0203a;		/* ta 0x3a */
	self->ul_dreturn = 0x81ca0000;		/* return %o0 */
#endif

	self->ul_stktop =
	    (uintptr_t)uc.uc_stack.ss_sp + uc.uc_stack.ss_size;
	(void) _private_getrlimit(RLIMIT_STACK, &rl);
	self->ul_stksiz = rl.rlim_cur;
	self->ul_stk = (caddr_t)(self->ul_stktop - self->ul_stksiz);

	self->ul_forw = self->ul_back = self;
	self->ul_hash = NULL;
	self->ul_ix = 0;
	self->ul_lwpid = 1;	/* __lwp_self() */
	self->ul_main = 1;
	self->ul_self = self;
	self->ul_uberdata = udp;
	if (oldself != NULL) {
		int i;

		ASSERT(primary_link_map);
		ASSERT(oldself->ul_main == 1);
		self->ul_stsd = oldself->ul_stsd;
		for (i = 0; i < TSD_NFAST; i++)
			self->ul_ftsd[i] = oldself->ul_ftsd[i];
		self->ul_tls = oldself->ul_tls;
		/*
		 * Retrieve all pointers to uberdata allocated
		 * while running on previous link maps.
		 * This is a giant structure assignment.
		 */
		*udp = *oldself->ul_uberdata;
		/*
		 * These items point to global data on the primary link map.
		 */
		udp->thr_hash_table = init_hash_table;
		udp->sigacthandler = sigacthandler;
		udp->tdb.tdb_events = tdb_events;
		ASSERT(udp->nthreads == 1 && !udp->uberflags.uf_mt);
		ASSERT(udp->lwp_stacks == NULL);
		ASSERT(udp->ulwp_freelist == NULL);
		ASSERT(udp->ulwp_replace_free == NULL);
		ASSERT(udp->hash_size == 1);
	}
	udp->all_lwps = self;
	udp->ulwp_one = self;
	udp->pid = _private_getpid();
	udp->nthreads = 1;
	/*
	 * In every link map, tdb_bootstrap points to the same piece of
	 * allocated memory.  When the primary link map is initialized,
	 * the allocated memory is assigned a pointer to the one true
	 * uberdata.  This allows libc_db to initialize itself regardless
	 * of which instance of libc it finds in the address space.
	 */
	if (udp->tdb_bootstrap == NULL)
		udp->tdb_bootstrap = lmalloc(sizeof (uberdata_t *));
	__tdb_bootstrap = udp->tdb_bootstrap;
	if (primary_link_map) {
		self->ul_primarymap = 1;
		udp->primary_map = 1;
		*udp->tdb_bootstrap = udp;
	}
	/*
	 * Cancellation can't happen until:
	 *	pthread_cancel() is called
	 * or:
	 *	another thread is created
	 * For now, as a single-threaded process, set the flag that tells
	 * PROLOGUE/EPILOGUE (in scalls.c) that cancellation can't happen.
	 */
	self->ul_nocancel = 1;

#if defined(__amd64)
	self->ul_gs = ___lwp_private(_LWP_SETPRIVATE, _LWP_FSBASE, self);
#elif defined(__i386)
	self->ul_gs = ___lwp_private(_LWP_SETPRIVATE, _LWP_GSBASE, self);
#endif	/* __i386 || __amd64 */
	set_curthread(self);	/* redundant on i386 */
	/*
	 * Now curthread is established and it is safe to call any
	 * function in libc except one that uses thread-local storage.
	 */
	self->ul_errnop = &errno;
	if (oldself != NULL) {
		/* tls_size was zero when oldself was allocated */
		lfree(oldself, sizeof (ulwp_t));
	}
	mutex_setup();
	atfork_init();
	signal_init();

	/*
	 * If the stack is unlimited, we set the size to zero to disable
	 * stack checking.
	 * XXX: Work harder here.  Get the stack size from /proc/self/rmap
	 */
	if (self->ul_stksiz == RLIM_INFINITY) {
		self->ul_ustack.ss_sp = (void *)self->ul_stktop;
		self->ul_ustack.ss_size = 0;
	} else {
		self->ul_ustack.ss_sp = self->ul_stk;
		self->ul_ustack.ss_size = self->ul_stksiz;
	}
	self->ul_ustack.ss_flags = 0;
	(void) _private_setustack(&self->ul_ustack);
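
	/*
	 * Illustrative sketch (not part of the original code): how an
	 * application could observe the stack bounds established above,
	 * via thr_stksegment() (defined later in this file).  When
	 * RLIMIT_STACK is RLIM_INFINITY, ss_size was set to zero above
	 * and stack checking is disabled.
	 */
#if 0
	{
		stack_t stk;

		if (thr_stksegment(&stk) == 0 && stk.ss_size == 0) {
			/* unlimited stack; no stack checking is done */
		}
	}
#endif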

	/*
	 * Get the variables that affect thread behavior from the environment.
	 */
	set_thread_vars();
	udp->uberflags.uf_thread_error_detection = (char)thread_error_detection;
	udp->thread_stack_cache = thread_stack_cache;

	/*
	 * Make per-thread copies of global variables, for speed.
	 */
	self->ul_queue_fifo = (char)thread_queue_fifo;
	self->ul_cond_wait_defer = (char)thread_cond_wait_defer;
	self->ul_error_detection = (char)thread_error_detection;
	self->ul_async_safe = (char)thread_async_safe;
	self->ul_door_noreserve = (char)thread_door_noreserve;
	self->ul_max_spinners = (uchar_t)thread_max_spinners;
	self->ul_adaptive_spin = thread_adaptive_spin;
	self->ul_release_spin = thread_release_spin;
	self->ul_queue_spin = thread_queue_spin;

	/*
	 * When we have initialized the primary link map, inform
	 * the dynamic linker about our interface functions.
	 */
	if (self->ul_primarymap)
		_ld_libc((void *)rtld_funcs);

	/*
	 * Defer signals until TLS constructors have been called.
	 */
	sigoff(self);
	tls_setup();
	sigon(self);
	if (setmask)
		(void) restore_signals(self);

	/* PROBE_SUPPORT begin */
	if (self->ul_primarymap && __tnf_probe_notify != NULL)
		__tnf_probe_notify();
	/* PROBE_SUPPORT end */
}

#pragma fini(libc_fini)
void
libc_fini()
{
	/*
	 * If we are doing fini processing for the instance of libc
	 * on the first alternate link map (this happens only when
	 * the dynamic linker rejects a bad audit library), then clear
	 * __curthread().  We abandon whatever memory was allocated by
	 * lmalloc() while running on this alternate link-map but we
	 * don't care (and can't find the memory in any case); we just
	 * want to protect the application from this bad audit library.
	 * No fini processing is done by libc in the normal case.
	 */

	uberdata_t *udp = curthread->ul_uberdata;

	if (udp->primary_map == 0 && udp == &__uberdata)
		set_curthread(NULL);
}

/*
 * finish_init is called when we are about to become multi-threaded,
 * that is, on the first call to thr_create().
 */
void
finish_init()
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	thr_hash_table_t *htp;
	void *data;
	int i;

	/*
	 * No locks needed here; we are single-threaded on the first call.
	 * We can be called only after the primary link map has been set up.
	 */
	ASSERT(self->ul_primarymap);
	ASSERT(self == udp->ulwp_one);
	ASSERT(!udp->uberflags.uf_mt);
	ASSERT(udp->hash_size == 1);

	/*
	 * First allocate the queue_head array if not already allocated.
	 */
	if (udp->queue_head == NULL)
		queue_alloc();

	/*
	 * Now allocate the thread hash table.
	 */
	if ((data = _private_mmap(NULL, HASHTBLSZ * sizeof (thr_hash_table_t),
	    PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, (off_t)0))
	    == MAP_FAILED)
		thr_panic("cannot allocate thread hash table");

	udp->thr_hash_table = htp = (thr_hash_table_t *)data;
	udp->hash_size = HASHTBLSZ;
	udp->hash_mask = HASHTBLSZ - 1;

	for (i = 0; i < HASHTBLSZ; i++, htp++) {
		htp->hash_lock.mutex_magic = MUTEX_MAGIC;
		htp->hash_cond.cond_magic = COND_MAGIC;
	}
	hash_in_unlocked(self, TIDHASH(self->ul_lwpid, udp), udp);

	/*
	 * Set up the SIGCANCEL handler for thread cancellation.
	 */
	init_sigcancel();

	/*
	 * Arrange to do special things on exit --
	 *	- collect queue statistics from all remaining active threads.
	 *	- grab assert_lock to ensure that assertion failures
	 *	  and a core dump take precedence over _exit().
	 *	- dump queue statistics to stderr if _THREAD_QUEUE_DUMP is set.
	 * (Functions are called in the reverse order of their registration.)
	 */
	(void) _atexit(dump_queue_statistics);
	(void) _atexit(grab_assert_lock);
	(void) _atexit(collect_queue_statistics);
}
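
/*
 * Worked example (for illustration only): with HASHTBLSZ == 1024 the
 * hash mask is 1023, so TIDHASH() just takes the low-order bits of the
 * thread id.  Thread id 1 hashes to bucket 1, and thread id 1025 also
 * hashes to bucket 1 (1025 & 1023 == 1); the two would appear on the
 * same hash_bucket chain, linked through ul_hash.
 */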

/*
 * Used only by _postfork1_child(), below.
 */
static void
mark_dead_and_buried(ulwp_t *ulwp)
{
	ulwp->ul_dead = 1;
	ulwp->ul_lwpid = (lwpid_t)(-1);
	ulwp->ul_hash = NULL;
	ulwp->ul_ix = -1;
	ulwp->ul_schedctl = NULL;
	ulwp->ul_schedctl_called = NULL;
}

/*
 * This is called from fork1() in the child.
 * Reset our data structures to reflect one lwp.
 */
void
_postfork1_child()
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	ulwp_t *next;
	ulwp_t *ulwp;
	int i;

	/* daemon threads shouldn't call fork1(), but oh well... */
	self->ul_usropts &= ~THR_DAEMON;
	udp->nthreads = 1;
	udp->ndaemons = 0;
	udp->uberflags.uf_mt = 0;
	__threaded = 0;
	for (i = 0; i < udp->hash_size; i++)
		udp->thr_hash_table[i].hash_bucket = NULL;
	self->ul_lwpid = __lwp_self();
	hash_in_unlocked(self, TIDHASH(self->ul_lwpid, udp), udp);

	/* no one in the child is on a sleep queue; reinitialize */
	if (udp->queue_head) {
		(void) _private_memset(udp->queue_head, 0,
		    2 * QHASHSIZE * sizeof (queue_head_t));
		for (i = 0; i < 2 * QHASHSIZE; i++)
			udp->queue_head[i].qh_lock.mutex_magic = MUTEX_MAGIC;
	}

	/*
	 * All lwps except ourself are gone.  Mark them so.
	 * First mark all of the lwps that have already been freed.
	 * Then mark and free all of the active lwps except ourself.
	 * Since we are single-threaded, no locks are required here.
	 */
	for (ulwp = udp->lwp_stacks; ulwp != NULL; ulwp = ulwp->ul_next)
		mark_dead_and_buried(ulwp);
	for (ulwp = udp->ulwp_freelist; ulwp != NULL; ulwp = ulwp->ul_next)
		mark_dead_and_buried(ulwp);
	for (ulwp = self->ul_forw; ulwp != self; ulwp = next) {
		next = ulwp->ul_forw;
		ulwp->ul_forw = ulwp->ul_back = NULL;
		mark_dead_and_buried(ulwp);
		tsd_free(ulwp);
		tls_free(ulwp);
		rwl_free(ulwp);
		ulwp_free(ulwp);
	}
	self->ul_forw = self->ul_back = udp->all_lwps = self;
	if (self != udp->ulwp_one)
		mark_dead_and_buried(udp->ulwp_one);
	if ((ulwp = udp->all_zombies) != NULL) {
		ASSERT(udp->nzombies != 0);
		do {
			next = ulwp->ul_forw;
			ulwp->ul_forw = ulwp->ul_back = NULL;
			mark_dead_and_buried(ulwp);
			udp->nzombies--;
			if (ulwp->ul_replace) {
				ulwp->ul_next = NULL;
				if (udp->ulwp_replace_free == NULL) {
					udp->ulwp_replace_free =
					    udp->ulwp_replace_last = ulwp;
				} else {
					udp->ulwp_replace_last->ul_next = ulwp;
					udp->ulwp_replace_last = ulwp;
				}
			}
		} while ((ulwp = next) != udp->all_zombies);
		ASSERT(udp->nzombies == 0);
		udp->all_zombies = NULL;
		udp->nzombies = 0;
	}
	trim_stack_cache(0);
}

#pragma weak thr_setprio = _thr_setprio
#pragma weak pthread_setschedprio = _thr_setprio
#pragma weak _pthread_setschedprio = _thr_setprio
int
_thr_setprio(thread_t tid, int priority)
{
	struct sched_param param;

	(void) _memset(&param, 0, sizeof (param));
	param.sched_priority = priority;
	return (_thread_setschedparam_main(tid, 0, &param, PRIO_SET_PRIO));
}

#pragma weak thr_getprio = _thr_getprio
int
_thr_getprio(thread_t tid, int *priority)
{
	uberdata_t *udp = curthread->ul_uberdata;
	ulwp_t *ulwp;
	int error = 0;

	if ((ulwp = find_lwp(tid)) == NULL)
		error = ESRCH;
	else {
		*priority = ulwp->ul_pri;
		ulwp_unlock(ulwp, udp);
	}
	return (error);
}
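
/*
 * Illustrative sketch (not part of the original code): round-tripping
 * a thread's priority through the two interfaces above.  The helper
 * name is hypothetical; whether the set succeeds depends on the
 * scheduling policy checks in _thread_setschedparam_main().
 */
#if 0
static void
prio_example(thread_t tid)
{
	int prio;

	if (thr_setprio(tid, 10) == 0 &&
	    thr_getprio(tid, &prio) == 0) {
		/* prio now holds the value recorded in ulwp->ul_pri */
	}
}
#endif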

lwpid_t
lwp_self(void)
{
	return (curthread->ul_lwpid);
}

#pragma weak _ti_thr_self = _thr_self
#pragma weak thr_self = _thr_self
#pragma weak pthread_self = _thr_self
#pragma weak _pthread_self = _thr_self
thread_t
_thr_self()
{
	return (curthread->ul_lwpid);
}

#pragma weak thr_main = _thr_main
int
_thr_main()
{
	ulwp_t *self = __curthread();

	return ((self == NULL)? -1 : self->ul_main);
}

int
_thrp_stksegment(ulwp_t *ulwp, stack_t *stk)
{
	stk->ss_sp = (void *)ulwp->ul_stktop;
	stk->ss_size = ulwp->ul_stksiz;
	stk->ss_flags = 0;
	return (0);
}

#pragma weak thr_stksegment = _thr_stksegment
int
_thr_stksegment(stack_t *stk)
{
	return (_thrp_stksegment(curthread, stk));
}

void
force_continue(ulwp_t *ulwp)
{
#if defined(THREAD_DEBUG)
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
#endif
	int error;
	timespec_t ts;

	ASSERT(MUTEX_OWNED(ulwp_mutex(ulwp, udp), self));

	for (;;) {
		error = __lwp_continue(ulwp->ul_lwpid);
		if (error != 0 && error != EINTR)
			break;
		error = 0;
		if (ulwp->ul_stopping) {	/* he is stopping himself */
			ts.tv_sec = 0;		/* give him a chance to run */
			ts.tv_nsec = 100000;	/* 100 usecs or clock tick */
			(void) ___nanosleep(&ts, NULL);
		}
		if (!ulwp->ul_stopping)		/* he is running now */
			break;			/* so we are done */
		/*
		 * He is marked as being in the process of stopping
		 * himself.  Loop around and continue him again.
		 * He may not have been stopped the first time.
		 */
	}
}

/*
 * Suspend an lwp with lwp_suspend(), then move it to a safe
 * point, that is, to a point where ul_critical is zero.
 * On return, the ulwp_lock() is dropped as with ulwp_unlock().
 * If 'link_dropped' is non-NULL, then 'link_lock' is held on entry.
 * If we have to drop link_lock, we store 1 through link_dropped.
 * If the lwp exits before it can be suspended, we return ESRCH.
 */
int
safe_suspend(ulwp_t *ulwp, uchar_t whystopped, int *link_dropped)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	cond_t *cvp = ulwp_condvar(ulwp, udp);
	mutex_t *mp = ulwp_mutex(ulwp, udp);
	thread_t tid = ulwp->ul_lwpid;
	int ix = ulwp->ul_ix;
	int error = 0;

	ASSERT(whystopped == TSTP_REGULAR ||
	    whystopped == TSTP_MUTATOR ||
	    whystopped == TSTP_FORK);
	ASSERT(ulwp != self);
	ASSERT(!ulwp->ul_stop);
	ASSERT(MUTEX_OWNED(mp, self));

	if (link_dropped != NULL)
		*link_dropped = 0;

	/*
	 * We must grab the target's spin lock before suspending it.
	 * See the comments below and in _thrp_suspend() for why.
	 */
	spin_lock_set(&ulwp->ul_spinlock);
	(void) ___lwp_suspend(tid);
	spin_lock_clear(&ulwp->ul_spinlock);

top:
	if (ulwp->ul_critical == 0 || ulwp->ul_stopping) {
		/* thread is already safe */
		ulwp->ul_stop |= whystopped;
	} else {
		/*
		 * Setting ul_pleasestop causes the target thread to stop
		 * itself in _thrp_suspend(), below, after we drop its lock.
		 * We must continue the critical thread before dropping
		 * link_lock because the critical thread may be holding
		 * the queue lock for link_lock.  This is delicate.
		 */
		ulwp->ul_pleasestop |= whystopped;
		force_continue(ulwp);
		if (link_dropped != NULL) {
			*link_dropped = 1;
			lmutex_unlock(&udp->link_lock);
			/* be sure to drop link_lock only once */
			link_dropped = NULL;
		}

		/*
		 * The thread may disappear by calling thr_exit() so we
		 * cannot rely on the ulwp pointer after dropping the lock.
		 * Instead, we search the hash table to find it again.
		 * When we return, we may find that the thread has been
		 * continued by some other thread.  The suspend/continue
		 * interfaces are prone to such race conditions by design.
		 */
		while (ulwp && !ulwp->ul_dead && !ulwp->ul_stop &&
		    (ulwp->ul_pleasestop & whystopped)) {
			(void) _cond_wait(cvp, mp);
			for (ulwp = udp->thr_hash_table[ix].hash_bucket;
			    ulwp != NULL; ulwp = ulwp->ul_hash) {
				if (ulwp->ul_lwpid == tid)
					break;
			}
		}

		if (ulwp == NULL || ulwp->ul_dead)
			error = ESRCH;
		else {
			/*
			 * Do another lwp_suspend() to make sure we don't
			 * return until the target thread is fully stopped
			 * in the kernel.  Don't apply lwp_suspend() until
			 * we know that the target is not holding any
			 * queue locks, that is, that it has completed
			 * ulwp_unlock(self) and has, or at least is
			 * about to, call lwp_suspend() on itself.  We do
			 * this by grabbing the target's spin lock.
			 */
			ASSERT(ulwp->ul_lwpid == tid);
			spin_lock_set(&ulwp->ul_spinlock);
			(void) ___lwp_suspend(tid);
			spin_lock_clear(&ulwp->ul_spinlock);
			/*
			 * If some other thread did a thr_continue()
			 * on the target thread we have to start over.
			 */
			if (!ulwp->ul_stopping || !(ulwp->ul_stop & whystopped))
				goto top;
		}
	}

	(void) cond_broadcast_internal(cvp);
	lmutex_unlock(mp);
	return (error);
}

int
_thrp_suspend(thread_t tid, uchar_t whystopped)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	ulwp_t *ulwp;
	int error = 0;

	ASSERT((whystopped & (TSTP_REGULAR|TSTP_MUTATOR|TSTP_FORK)) != 0);
	ASSERT((whystopped & ~(TSTP_REGULAR|TSTP_MUTATOR|TSTP_FORK)) == 0);

	/*
	 * We can't suspend anyone except ourself while a fork is happening.
	 * This also has the effect of allowing only one suspension at a time.
	 */
	if (tid != self->ul_lwpid)
		(void) fork_lock_enter(NULL);

	if ((ulwp = find_lwp(tid)) == NULL)
		error = ESRCH;
	else if (whystopped == TSTP_MUTATOR && !ulwp->ul_mutator) {
		ulwp_unlock(ulwp, udp);
		error = EINVAL;
	} else if (ulwp->ul_stop) {	/* already stopped */
		ulwp->ul_stop |= whystopped;
		ulwp_broadcast(ulwp);
		ulwp_unlock(ulwp, udp);
	} else if (ulwp != self) {
		/*
		 * After suspending the other thread, move it out of a
		 * critical section and deal with the schedctl mappings.
		 * safe_suspend() suspends the other thread, calls
		 * ulwp_broadcast(ulwp) and drops the ulwp lock.
		 */
		error = safe_suspend(ulwp, whystopped, NULL);
	} else {
		int schedctl_after_fork = 0;

		/*
		 * We are suspending ourself.  We must not take a signal
		 * until we return from lwp_suspend() and clear ul_stopping.
		 * This is to guard against siglongjmp().
		 */
		enter_critical(self);
		self->ul_sp = stkptr();
		_flush_windows();	/* sparc */
		self->ul_pleasestop = 0;
		self->ul_stop |= whystopped;
		/*
		 * Grab our spin lock before dropping ulwp_mutex(self).
		 * This prevents the suspending thread from applying
		 * lwp_suspend() to us before we emerge from
		 * lmutex_unlock(mp) and have dropped mp's queue lock.
		 */
		spin_lock_set(&self->ul_spinlock);
		self->ul_stopping = 1;
		ulwp_broadcast(self);
		ulwp_unlock(self, udp);
		/*
		 * From this point until we return from lwp_suspend(),
		 * we must not call any function that might invoke the
		 * dynamic linker, that is, we can only call functions
		 * private to the library.
		 *
		 * Also, this is a nasty race condition for a process
		 * that is undergoing a forkall() operation:
		 * Once we clear our spinlock (below), we are vulnerable
		 * to being suspended by the forkall() thread before
		 * we manage to suspend ourself in ___lwp_suspend().
		 * See safe_suspend() and force_continue().
		 *
		 * To avoid a SIGSEGV due to the disappearance
		 * of the schedctl mappings in the child process,
		 * which can happen in spin_lock_clear() if we
		 * are suspended while we are in the middle of
		 * its call to preempt(), we preemptively clear
		 * our own schedctl pointer before dropping our
		 * spinlock.  We reinstate it, in both the parent
		 * and (if this really is a forkall()) the child.
		 */
		if (whystopped & TSTP_FORK) {
			schedctl_after_fork = 1;
			self->ul_schedctl = NULL;
			self->ul_schedctl_called = &udp->uberflags;
		}
		spin_lock_clear(&self->ul_spinlock);
		(void) ___lwp_suspend(tid);
		/*
		 * Somebody else continued us.
		 * We can't grab ulwp_lock(self)
		 * until after clearing ul_stopping.
		 * force_continue() relies on this.
		 */
		self->ul_stopping = 0;
		self->ul_sp = 0;
		if (schedctl_after_fork) {
			self->ul_schedctl_called = NULL;
			self->ul_schedctl = NULL;
			(void) setup_schedctl();
		}
		ulwp_lock(self, udp);
		ulwp_broadcast(self);
		ulwp_unlock(self, udp);
		exit_critical(self);
	}

	if (tid != self->ul_lwpid)
		fork_lock_exit();

	return (error);
}

/*
 * Suspend all lwps other than ourself in preparation for fork.
 */
void
suspend_fork()
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	ulwp_t *ulwp;
	int link_dropped;

top:
	lmutex_lock(&udp->link_lock);

	for (ulwp = self->ul_forw; ulwp != self; ulwp = ulwp->ul_forw) {
		ulwp_lock(ulwp, udp);
		if (ulwp->ul_stop) {	/* already stopped */
			ulwp->ul_stop |= TSTP_FORK;
			ulwp_broadcast(ulwp);
			ulwp_unlock(ulwp, udp);
		} else {
			/*
			 * Move the stopped lwp out of a critical section.
			 */
			if (safe_suspend(ulwp, TSTP_FORK, &link_dropped) ||
			    link_dropped)
				goto top;
		}
	}

	lmutex_unlock(&udp->link_lock);
}

void
continue_fork(int child)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	ulwp_t *ulwp;

	/*
	 * Clear the schedctl pointers in the child of forkall().
	 */
	if (child) {
		for (ulwp = self->ul_forw; ulwp != self; ulwp = ulwp->ul_forw) {
			ulwp->ul_schedctl_called =
			    ulwp->ul_dead? &udp->uberflags : NULL;
			ulwp->ul_schedctl = NULL;
		}
	}

	/*
	 * Set all lwps that were stopped for fork() running again.
	 */
	lmutex_lock(&udp->link_lock);
	for (ulwp = self->ul_forw; ulwp != self; ulwp = ulwp->ul_forw) {
		mutex_t *mp = ulwp_mutex(ulwp, udp);
		lmutex_lock(mp);
		ASSERT(ulwp->ul_stop & TSTP_FORK);
		ulwp->ul_stop &= ~TSTP_FORK;
		ulwp_broadcast(ulwp);
		if (!ulwp->ul_stop)
			force_continue(ulwp);
		lmutex_unlock(mp);
	}
	lmutex_unlock(&udp->link_lock);
}

int
_thrp_continue(thread_t tid, uchar_t whystopped)
{
	uberdata_t *udp = curthread->ul_uberdata;
	ulwp_t *ulwp;
	mutex_t *mp;
	int error = 0;

	ASSERT(whystopped == TSTP_REGULAR ||
	    whystopped == TSTP_MUTATOR);

	if ((ulwp = find_lwp(tid)) == NULL)
		return (ESRCH);

	mp = ulwp_mutex(ulwp, udp);
	if ((whystopped == TSTP_MUTATOR && !ulwp->ul_mutator)) {
		error = EINVAL;
	} else if (ulwp->ul_stop & whystopped) {
		ulwp->ul_stop &= ~whystopped;
		ulwp_broadcast(ulwp);
		if (!ulwp->ul_stop) {
			if (whystopped == TSTP_REGULAR && ulwp->ul_created) {
				ulwp->ul_sp = 0;
				ulwp->ul_created = 0;
			}
			force_continue(ulwp);
		}
	}

	lmutex_unlock(mp);
	return (error);
}
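
/*
 * Illustrative sketch (not part of the original code): the
 * suspend/continue pair exported just below, built on _thrp_suspend()
 * and _thrp_continue().  As the comments in safe_suspend() note, these
 * interfaces are race-prone by design; some other thread may continue
 * the target at any time.
 */
#if 0
static void
suspend_example(thread_t tid)
{
	if (thr_suspend(tid) == 0) {
		/* tid is stopped (TSTP_REGULAR) at a safe point */
		(void) thr_continue(tid);
	}
}
#endif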

#pragma weak thr_suspend = _thr_suspend
int
_thr_suspend(thread_t tid)
{
	return (_thrp_suspend(tid, TSTP_REGULAR));
}

#pragma weak thr_continue = _thr_continue
int
_thr_continue(thread_t tid)
{
	return (_thrp_continue(tid, TSTP_REGULAR));
}

#pragma weak thr_yield = _thr_yield
void
_thr_yield()
{
	lwp_yield();
}

#pragma weak thr_kill = _thr_kill
#pragma weak pthread_kill = _thr_kill
#pragma weak _pthread_kill = _thr_kill
int
_thr_kill(thread_t tid, int sig)
{
	if (sig == SIGCANCEL)
		return (EINVAL);
	return (__lwp_kill(tid, sig));
}
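
/*
 * Illustrative sketch (not part of the original code): as with
 * kill(2), a signal number of zero can be used with the interface
 * above to check for the existence of a thread without delivering
 * a signal.  The helper name is hypothetical.
 */
#if 0
static int
thread_exists(thread_t tid)
{
	return (thr_kill(tid, 0) == 0);
}
#endif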

/*
 * Exit a critical section, take deferred actions if necessary.
 */
void
do_exit_critical()
{
	ulwp_t *self = curthread;
	int sig;

	ASSERT(self->ul_critical == 0);
	if (self->ul_dead)
		return;

	while (self->ul_pleasestop ||
	    (self->ul_cursig != 0 && self->ul_sigdefer == 0)) {
		/*
		 * Avoid a recursive call to exit_critical() in _thrp_suspend()
		 * by keeping self->ul_critical == 1 here.
		 */
		self->ul_critical++;
		while (self->ul_pleasestop) {
			/*
			 * Guard against suspending ourself while on a sleep
			 * queue.  See the comments in call_user_handler().
			 */
			unsleep_self();
			set_parking_flag(self, 0);
			(void) _thrp_suspend(self->ul_lwpid,
			    self->ul_pleasestop);
		}
		self->ul_critical--;

		if ((sig = self->ul_cursig) != 0 && self->ul_sigdefer == 0) {
			/*
			 * Clear ul_cursig before proceeding.
			 * This protects us from the dynamic linker's
			 * calls to bind_guard()/bind_clear() in the
			 * event that it is invoked to resolve a symbol
			 * like take_deferred_signal() below.
			 */
			self->ul_cursig = 0;
			take_deferred_signal(sig);
			ASSERT(self->ul_cursig == 0);
		}
	}
	ASSERT(self->ul_critical == 0);
}

int
_ti_bind_guard(int bindflag)
{
	ulwp_t *self = curthread;

	if ((self->ul_bindflags & bindflag) == bindflag)
		return (0);
	enter_critical(self);
	self->ul_bindflags |= bindflag;
	return (1);
}

int
_ti_bind_clear(int bindflag)
{
	ulwp_t *self = curthread;

	if ((self->ul_bindflags & bindflag) == 0)
		return (self->ul_bindflags);
	self->ul_bindflags &= ~bindflag;
	exit_critical(self);
	return (self->ul_bindflags);
}

/*
 * sigoff() and sigon() enable cond_wait() to behave (optionally) like
 * it does in the old libthread (see the comments in cond_wait_queue()).
 * Also, signals are deferred at thread startup until TLS constructors
 * have all been called, at which time _thr_setup() calls sigon().
 * Also, _sigoff() and _sigon() are called from dbx's run-time checking
 * (librtc.so) to defer signals during its critical sections (not to be
 * confused with libc critical sections [see exit_critical() above]).
 */
void
_sigoff(void)
{
	sigoff(curthread);
}

void
_sigon(void)
{
	sigon(curthread);
}

void
sigon(ulwp_t *self)
{
	int sig;

	ASSERT(self->ul_sigdefer > 0);
	if (--self->ul_sigdefer == 0) {
		if ((sig = self->ul_cursig) != 0 && self->ul_critical == 0) {
			self->ul_cursig = 0;
			take_deferred_signal(sig);
			ASSERT(self->ul_cursig == 0);
		}
	}
}

#pragma weak thr_getconcurrency = _thr_getconcurrency
int
_thr_getconcurrency()
{
	return (thr_concurrency);
}

#pragma weak pthread_getconcurrency = _pthread_getconcurrency
int
_pthread_getconcurrency()
{
	return (pthread_concurrency);
}

#pragma weak thr_setconcurrency = _thr_setconcurrency
int
_thr_setconcurrency(int new_level)
{
	uberdata_t *udp = curthread->ul_uberdata;

	if (new_level < 0)
		return (EINVAL);
	if (new_level > 65536)		/* 65536 is totally arbitrary */
		return (EAGAIN);
	lmutex_lock(&udp->link_lock);
	if (new_level > thr_concurrency)
		thr_concurrency = new_level;
	lmutex_unlock(&udp->link_lock);
	return (0);
}

#pragma weak pthread_setconcurrency = _pthread_setconcurrency
int
_pthread_setconcurrency(int new_level)
{
	if (new_level < 0)
		return (EINVAL);
	if (new_level > 65536)		/* 65536 is totally arbitrary */
		return (EAGAIN);
	pthread_concurrency = new_level;
	return (0);
}

#pragma weak thr_min_stack = _thr_min_stack
#pragma weak __pthread_min_stack = _thr_min_stack
size_t
_thr_min_stack(void)
{
	return (MINSTACK);
}

int
__nthreads(void)
{
	return (curthread->ul_uberdata->nthreads);
}
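
/*
 * Worked example (for illustration only): the concurrency interfaces
 * above are pure bookkeeping in this implementation.  After
 * pthread_setconcurrency(4), pthread_getconcurrency() returns 4;
 * _thr_setconcurrency() only ever raises the recorded value, so
 * thr_setconcurrency(2) after thr_setconcurrency(4) still leaves
 * thr_getconcurrency() returning 4.
 */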

/*
 * XXX
 * The remainder of this file implements the private interfaces to java for
 * garbage collection.  It is no longer used, at least by java 1.2.
 * It can all go away once all old JVMs have disappeared.
 */

int	suspendingallmutators;	/* when non-zero, suspending all mutators. */
int	suspendedallmutators;	/* when non-zero, all mutators suspended. */
int	mutatorsbarrier;	/* when non-zero, mutators barrier imposed. */
mutex_t	mutatorslock = DEFAULTMUTEX;	/* used to enforce mutators barrier. */
cond_t	mutatorscv = DEFAULTCV;		/* where non-mutators sleep. */

/*
 * Get the available register state for the target thread.
 * Return non-volatile registers: TRS_NONVOLATILE
 */
#pragma weak thr_getstate = _thr_getstate
int
_thr_getstate(thread_t tid, int *flag, lwpid_t *lwp, stack_t *ss, gregset_t rs)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	ulwp_t **ulwpp;
	ulwp_t *ulwp;
	int error = 0;
	int trs_flag = TRS_LWPID;

	if (tid == 0 || self->ul_lwpid == tid) {
		ulwp = self;
		ulwp_lock(ulwp, udp);
	} else if ((ulwpp = find_lwpp(tid)) != NULL) {
		ulwp = *ulwpp;
	} else {
		if (flag)
			*flag = TRS_INVALID;
		return (ESRCH);
	}

	if (ulwp->ul_dead) {
		trs_flag = TRS_INVALID;
	} else if (!ulwp->ul_stop && !suspendedallmutators) {
		error = EINVAL;
		trs_flag = TRS_INVALID;
	} else if (ulwp->ul_stop) {
		trs_flag = TRS_NONVOLATILE;
		getgregs(ulwp, rs);
	}

	if (flag)
		*flag = trs_flag;
	if (lwp)
		*lwp = tid;
	if (ss != NULL)
		(void) _thrp_stksegment(ulwp, ss);

	ulwp_unlock(ulwp, udp);
	return (error);
}
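
/*
 * Illustrative sketch (not part of the original code): a garbage-
 * collector-style caller of thr_getstate() above.  Only a stopped
 * thread (or one covered by suspendedallmutators) yields its
 * non-volatile registers; this assumes the usual array definition
 * of gregset_t.
 */
#if 0
static void
getstate_example(thread_t tid)
{
	int flag;
	lwpid_t lwpid;
	stack_t ss;
	gregset_t regs;

	if (thr_getstate(tid, &flag, &lwpid, &ss, regs) == 0 &&
	    flag == TRS_NONVOLATILE) {
		/* regs holds the target's non-volatile registers */
	}
}
#endif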

/*
 * Set the appropriate register state for the target thread.
 * This is not used by java.  It exists solely for the MSTC test suite.
 */
#pragma weak thr_setstate = _thr_setstate
int
_thr_setstate(thread_t tid, int flag, gregset_t rs)
{
	uberdata_t *udp = curthread->ul_uberdata;
	ulwp_t *ulwp;
	int error = 0;

	if ((ulwp = find_lwp(tid)) == NULL)
		return (ESRCH);

	if (!ulwp->ul_stop && !suspendedallmutators)
		error = EINVAL;
	else if (rs != NULL) {
		switch (flag) {
		case TRS_NONVOLATILE:
			/* do /proc stuff here? */
			if (ulwp->ul_stop)
				setgregs(ulwp, rs);
			else
				error = EINVAL;
			break;
		case TRS_LWPID:		/* do /proc stuff here? */
		default:
			error = EINVAL;
			break;
		}
	}

	ulwp_unlock(ulwp, udp);
	return (error);
}

int
getlwpstatus(thread_t tid, struct lwpstatus *sp)
{
	extern ssize_t _pread(int, void *, size_t, off_t);
	char buf[100];
	int fd;

	/* "/proc/self/lwp/%u/lwpstatus" w/o stdio */
	(void) strcpy(buf, "/proc/self/lwp/");
	ultos((uint64_t)tid, 10, buf + strlen(buf));
	(void) strcat(buf, "/lwpstatus");
	if ((fd = _open(buf, O_RDONLY, 0)) >= 0) {
		while (_pread(fd, sp, sizeof (*sp), 0) == sizeof (*sp)) {
			if (sp->pr_flags & PR_STOPPED) {
				(void) _close(fd);
				return (0);
			}
			lwp_yield();	/* give him a chance to stop */
		}
		(void) _close(fd);
	}
	return (-1);
}

int
putlwpregs(thread_t tid, prgregset_t prp)
{
	extern ssize_t _writev(int, const struct iovec *, int);
	char buf[100];
	int fd;
	long dstop_sreg[2];
	long run_null[2];
	iovec_t iov[3];

	/* "/proc/self/lwp/%u/lwpctl" w/o stdio */
	(void) strcpy(buf, "/proc/self/lwp/");
	ultos((uint64_t)tid, 10, buf + strlen(buf));
	(void) strcat(buf, "/lwpctl");
	if ((fd = _open(buf, O_WRONLY, 0)) >= 0) {
		dstop_sreg[0] = PCDSTOP;	/* direct it to stop */
		dstop_sreg[1] = PCSREG;		/* set the registers */
		iov[0].iov_base = (caddr_t)dstop_sreg;
		iov[0].iov_len = sizeof (dstop_sreg);
		iov[1].iov_base = (caddr_t)prp;	/* from the register set */
		iov[1].iov_len = sizeof (prgregset_t);
		run_null[0] = PCRUN;		/* make it runnable again */
		run_null[1] = 0;
		iov[2].iov_base = (caddr_t)run_null;
		iov[2].iov_len = sizeof (run_null);
		if (_writev(fd, iov, 3) >= 0) {
			(void) _close(fd);
			return (0);
		}
		(void) _close(fd);
	}
	return (-1);
}
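
/*
 * Illustrative sketch (not part of the original code): how the two
 * /proc helpers above combine, debugger-style.  getlwpstatus() polls
 * the lwpstatus file until the lwp reports PR_STOPPED; putlwpregs()
 * then writes a PCDSTOP/PCSREG/PCRUN sequence to the lwpctl file to
 * replace the register set and set the lwp running again.
 */
#if 0
static void
proc_example(thread_t tid, prgregset_t regs)
{
	struct lwpstatus status;

	if (getlwpstatus(tid, &status) == 0)
		(void) putlwpregs(tid, regs);
}
#endif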
int
putlwpregs(thread_t tid, prgregset_t prp)
{
	extern ssize_t _writev(int, const struct iovec *, int);
	char buf[100];
	int fd;
	long dstop_sreg[2];
	long run_null[2];
	iovec_t iov[3];

	/* "/proc/self/lwp/%u/lwpctl" w/o stdio */
	(void) strcpy(buf, "/proc/self/lwp/");
	ultos((uint64_t)tid, 10, buf + strlen(buf));
	(void) strcat(buf, "/lwpctl");
	if ((fd = _open(buf, O_WRONLY, 0)) >= 0) {
		dstop_sreg[0] = PCDSTOP;	/* direct it to stop */
		dstop_sreg[1] = PCSREG;		/* set the registers */
		iov[0].iov_base = (caddr_t)dstop_sreg;
		iov[0].iov_len = sizeof (dstop_sreg);
		iov[1].iov_base = (caddr_t)prp;	/* from the register set */
		iov[1].iov_len = sizeof (prgregset_t);
		run_null[0] = PCRUN;		/* make it runnable again */
		run_null[1] = 0;
		iov[2].iov_base = (caddr_t)run_null;
		iov[2].iov_len = sizeof (run_null);
		if (_writev(fd, iov, 3) >= 0) {
			(void) _close(fd);
			return (0);
		}
		(void) _close(fd);
	}
	return (-1);
}

static ulong_t
gettsp_slow(thread_t tid)
{
	char buf[100];
	struct lwpstatus status;

	if (getlwpstatus(tid, &status) != 0) {
		/* "__gettsp(%u): can't read lwpstatus" w/o stdio */
		(void) strcpy(buf, "__gettsp(");
		ultos((uint64_t)tid, 10, buf + strlen(buf));
		(void) strcat(buf, "): can't read lwpstatus");
		thr_panic(buf);
	}
	return (status.pr_reg[R_SP]);
}

ulong_t
__gettsp(thread_t tid)
{
	uberdata_t *udp = curthread->ul_uberdata;
	ulwp_t *ulwp;
	ulong_t result;

	if ((ulwp = find_lwp(tid)) == NULL)
		return (0);

	if (ulwp->ul_stop && (result = ulwp->ul_sp) != 0) {
		ulwp_unlock(ulwp, udp);
		return (result);
	}

	result = gettsp_slow(tid);
	ulwp_unlock(ulwp, udp);
	return (result);
}

/*
 * This tells java stack walkers how to find the ucontext
 * structure passed to signal handlers.
 */
#pragma weak thr_sighndlrinfo = _thr_sighndlrinfo
void
_thr_sighndlrinfo(void (**func)(), int *funcsize)
{
	*func = &__sighndlr;
	*funcsize = (char *)&__sighndlrend - (char *)&__sighndlr;
}
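/*
 * Example (illustrative only): how a stack walker might use
 * thr_sighndlrinfo().  A saved PC that lies inside the reported range
 * belongs to the common signal trampoline (__sighndlr), so the walker
 * knows a ucontext pointer was passed to the frame above it.  The
 * helper name pc_in_sighndlr() is hypothetical, and the extern
 * declaration is supplied here because this private interface may not
 * be declared in any public header.
 */
#if 0
#include <sys/types.h>

extern void thr_sighndlrinfo(void (**func)(), int *funcsize);

static int
pc_in_sighndlr(uintptr_t pc)
{
	void (*func)();
	int funcsize;

	thr_sighndlrinfo(&func, &funcsize);
	return (pc >= (uintptr_t)func &&
	    pc < (uintptr_t)func + (uintptr_t)funcsize);
}
#endif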
/*
 * Mark a thread a mutator or reset a mutator to being a default,
 * non-mutator thread.
 */
#pragma weak thr_setmutator = _thr_setmutator
int
_thr_setmutator(thread_t tid, int enabled)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	ulwp_t *ulwp;
	int error;

	enabled = enabled ? 1 : 0;
top:
	if (tid == 0) {
		ulwp = self;
		ulwp_lock(ulwp, udp);
	} else if ((ulwp = find_lwp(tid)) == NULL) {
		return (ESRCH);
	}

	/*
	 * The target thread should be the caller itself or a suspended thread.
	 * This prevents the target from also changing its ul_mutator field.
	 */
	error = 0;
	if (ulwp != self && !ulwp->ul_stop && enabled)
		error = EINVAL;
	else if (ulwp->ul_mutator != enabled) {
		lmutex_lock(&mutatorslock);
		if (mutatorsbarrier) {
			ulwp_unlock(ulwp, udp);
			while (mutatorsbarrier)
				(void) _cond_wait(&mutatorscv, &mutatorslock);
			lmutex_unlock(&mutatorslock);
			goto top;
		}
		ulwp->ul_mutator = enabled;
		lmutex_unlock(&mutatorslock);
	}

	ulwp_unlock(ulwp, udp);
	return (error);
}

/*
 * Establish a barrier against new mutators.  Any non-mutator trying
 * to become a mutator is suspended until the barrier is removed.
 */
#pragma weak thr_mutators_barrier = _thr_mutators_barrier
void
_thr_mutators_barrier(int enabled)
{
	int oldvalue;

	lmutex_lock(&mutatorslock);

	/*
	 * Wait if trying to set the barrier while it is already set.
	 */
	while (mutatorsbarrier && enabled)
		(void) _cond_wait(&mutatorscv, &mutatorslock);

	oldvalue = mutatorsbarrier;
	mutatorsbarrier = enabled;
	/*
	 * Wake up any blocked non-mutators when the barrier is removed.
	 */
	if (oldvalue && !enabled)
		(void) cond_broadcast_internal(&mutatorscv);
	lmutex_unlock(&mutatorslock);
}
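/*
 * Example (illustrative only): the intended interplay between
 * thr_setmutator() and thr_mutators_barrier().  A thread about to
 * touch collected storage declares itself a mutator (tid 0 means the
 * caller); while the barrier is up, that call blocks on mutatorscv.
 * Note the barrier only keeps the mutator set from growing -- it does
 * not stop existing mutators (see thr_suspend_allmutators() below).
 * The helper names are hypothetical.
 */
#if 0
#include <thread.h>

extern int thr_setmutator(thread_t, int);
extern void thr_mutators_barrier(int);

static int
become_mutator(void)
{
	return (thr_setmutator(0, 1));	/* may block while the barrier is up */
}

static void
freeze_mutator_set(void)
{
	thr_mutators_barrier(1);	/* no thread may become a mutator */
	/* ... existing mutators continue to run here ... */
	thr_mutators_barrier(0);	/* wake threads blocked in thr_setmutator() */
}
#endif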
/*
 * Suspend the set of all mutators except for the caller.  The list
 * of actively running threads is searched and only the mutators
 * in this list are suspended.  Actively running non-mutators remain
 * running.  Any other thread is suspended.
 */
#pragma weak thr_suspend_allmutators = _thr_suspend_allmutators
int
_thr_suspend_allmutators(void)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	ulwp_t *ulwp;
	int link_dropped;

	/*
	 * We single-thread the entire thread suspend mechanism.
	 */
	(void) fork_lock_enter(NULL);
top:
	lmutex_lock(&udp->link_lock);

	if (suspendingallmutators || suspendedallmutators) {
		lmutex_unlock(&udp->link_lock);
		fork_lock_exit();
		return (EINVAL);
	}
	suspendingallmutators = 1;

	for (ulwp = self->ul_forw; ulwp != self; ulwp = ulwp->ul_forw) {
		ulwp_lock(ulwp, udp);
		if (!ulwp->ul_mutator) {
			ulwp_unlock(ulwp, udp);
		} else if (ulwp->ul_stop) {	/* already stopped */
			ulwp->ul_stop |= TSTP_MUTATOR;
			ulwp_broadcast(ulwp);
			ulwp_unlock(ulwp, udp);
		} else {
			/*
			 * Suspend the lwp, making sure it is not left
			 * stopped within a critical section.
			 */
			if (safe_suspend(ulwp, TSTP_MUTATOR, &link_dropped) ||
			    link_dropped) {
				suspendingallmutators = 0;
				goto top;
			}
		}
	}

	suspendedallmutators = 1;
	suspendingallmutators = 0;
	lmutex_unlock(&udp->link_lock);
	fork_lock_exit();
	return (0);
}
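/*
 * Example (illustrative only): a complete stop-the-world pause as an
 * old JVM might have coded it against these interfaces.  The barrier
 * keeps the mutator set from growing while all current mutators are
 * stopped; thr_continue_allmutators() is defined below.  The function
 * name stop_the_world() is hypothetical.
 */
#if 0
extern void thr_mutators_barrier(int);
extern int thr_suspend_allmutators(void);
extern int thr_continue_allmutators(void);

static int
stop_the_world(void (*scan_roots)(void))
{
	int error;

	thr_mutators_barrier(1);
	if ((error = thr_suspend_allmutators()) != 0) {
		thr_mutators_barrier(0);
		return (error);	/* EINVAL: a suspension is already in progress */
	}
	scan_roots();		/* every mutator is now stopped */
	error = thr_continue_allmutators();
	thr_mutators_barrier(0);
	return (error);
}
#endif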
/*
 * Suspend the target mutator.  The caller is permitted to suspend
 * itself.  If a mutator barrier is enabled, the caller will suspend
 * itself as though it had been suspended by thr_suspend_allmutators().
 * When the barrier is removed, this thread will be resumed.  Any
 * suspended mutator, whether suspended by thr_suspend_mutator(), or by
 * thr_suspend_allmutators(), can be resumed by thr_continue_mutator().
 */
#pragma weak thr_suspend_mutator = _thr_suspend_mutator
int
_thr_suspend_mutator(thread_t tid)
{
	if (tid == 0)
		tid = curthread->ul_lwpid;
	return (_thrp_suspend(tid, TSTP_MUTATOR));
}

/*
 * Resume the set of all suspended mutators.
 */
#pragma weak thr_continue_allmutators = _thr_continue_allmutators
int
_thr_continue_allmutators(void)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	ulwp_t *ulwp;

	lmutex_lock(&udp->link_lock);
	if (!suspendedallmutators) {
		lmutex_unlock(&udp->link_lock);
		return (EINVAL);
	}
	suspendedallmutators = 0;

	for (ulwp = self->ul_forw; ulwp != self; ulwp = ulwp->ul_forw) {
		mutex_t *mp = ulwp_mutex(ulwp, udp);
		lmutex_lock(mp);
		if (ulwp->ul_stop & TSTP_MUTATOR) {
			ulwp->ul_stop &= ~TSTP_MUTATOR;
			ulwp_broadcast(ulwp);
			if (!ulwp->ul_stop)
				force_continue(ulwp);
		}
		lmutex_unlock(mp);
	}

	lmutex_unlock(&udp->link_lock);
	return (0);
}
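/*
 * Example (illustrative only): a mutator parking itself at a safepoint.
 * Passing tid 0 to thr_suspend_mutator() suspends the caller; it runs
 * again only after thr_continue_mutator() or thr_continue_allmutators()
 * releases it.  The helper name safepoint_park() is hypothetical.
 */
#if 0
#include <thread.h>

extern int thr_suspend_mutator(thread_t);

static void
safepoint_park(void)
{
	(void) thr_suspend_mutator(0);	/* returns once we are resumed */
}
#endif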
/*
 * Resume a suspended mutator.
 */
#pragma weak thr_continue_mutator = _thr_continue_mutator
int
_thr_continue_mutator(thread_t tid)
{
	return (_thrp_continue(tid, TSTP_MUTATOR));
}

/*
 * Wait for the target mutator to stop.  If dontwait is non-zero and the
 * mutator has not yet stopped, return EWOULDBLOCK rather than waiting.
 */
#pragma weak thr_wait_mutator = _thr_wait_mutator
int
_thr_wait_mutator(thread_t tid, int dontwait)
{
	uberdata_t *udp = curthread->ul_uberdata;
	ulwp_t *ulwp;
	int error = 0;

top:
	if ((ulwp = find_lwp(tid)) == NULL)
		return (ESRCH);

	if (!ulwp->ul_mutator)
		error = EINVAL;
	else if (dontwait) {
		if (!(ulwp->ul_stop & TSTP_MUTATOR))
			error = EWOULDBLOCK;
	} else if (!(ulwp->ul_stop & TSTP_MUTATOR)) {
		cond_t *cvp = ulwp_condvar(ulwp, udp);
		mutex_t *mp = ulwp_mutex(ulwp, udp);

		(void) _cond_wait(cvp, mp);
		(void) lmutex_unlock(mp);
		goto top;
	}

	ulwp_unlock(ulwp, udp);
	return (error);
}

/* PROBE_SUPPORT begin */

void
thr_probe_setup(void *data)
{
	curthread->ul_tpdp = data;
}

static void *
_thread_probe_getfunc(void)
{
	return (curthread->ul_tpdp);
}

void * (*thr_probe_getfunc_addr)(void) = _thread_probe_getfunc;

/* ARGSUSED */
void
_resume(ulwp_t *ulwp, caddr_t sp, int dontsave)
{
	/* never called */
}

/* ARGSUSED */
void
_resume_ret(ulwp_t *oldlwp)
{
	/* never called */
}

/* PROBE_SUPPORT end */
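/*
 * Example (illustrative only): pausing a single mutator from another
 * thread using the interfaces above.  thr_wait_mutator() with
 * dontwait == 0 blocks until the target has actually stopped with
 * TSTP_MUTATOR set; with dontwait non-zero it polls, returning
 * EWOULDBLOCK while the target is still running.  The helper name
 * pause_one_mutator() is hypothetical.
 */
#if 0
#include <thread.h>

extern int thr_suspend_mutator(thread_t);
extern int thr_wait_mutator(thread_t, int);
extern int thr_continue_mutator(thread_t);

static int
pause_one_mutator(thread_t tid, void (*inspect)(thread_t))
{
	int error;

	if ((error = thr_suspend_mutator(tid)) != 0)
		return (error);
	if ((error = thr_wait_mutator(tid, 0)) != 0)	/* block until stopped */
		return (error);
	inspect(tid);			/* target is stopped at this point */
	return (thr_continue_mutator(tid));
}
#endif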