xref: /onnv-gate/usr/src/lib/libc/port/threads/door_calls.c (revision 11102:b91faef0c984)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include "lint.h"
#include "thr_uberdata.h"
#include "libc.h"

#include <alloca.h>
#include <unistd.h>
#include <thread.h>
#include <pthread.h>
#include <stdio.h>
#include <errno.h>
#include <door.h>
#include <signal.h>
#include <ucred.h>
#include <strings.h>
#include <ucontext.h>
#include <sys/ucred.h>
#include <atomic.h>

static door_server_func_t door_create_server;

/*
 * Global state -- the non-statics are accessed from the __door_return()
 * syscall wrapper.
 */
static mutex_t		door_state_lock = DEFAULTMUTEX;
door_server_func_t	*door_server_func = door_create_server;
pid_t			door_create_pid = 0;
static pid_t		door_create_first_pid = 0;
static pid_t		door_create_unref_pid = 0;

/*
 * The raw system call interfaces
 */
extern int __door_create(void (*)(void *, char *, size_t, door_desc_t *,
    uint_t), void *, uint_t);
extern int __door_return(caddr_t, size_t, door_return_desc_t *, caddr_t,
    size_t);
extern int __door_ucred(ucred_t *);
extern int __door_unref(void);
extern int __door_unbind(void);

/*
 * Key for per-door data for doors created with door_xcreate.
 */
static pthread_key_t privdoor_key = PTHREAD_ONCE_KEY_NP;

/*
 * Each door_xcreate'd door has a struct privdoor_data allocated for it,
 * and each of the initial pool of service threads for the door
 * has TSD for the privdoor_key set to point to this structure.
 * When a thread in door_return decides it is time to perform a
 * thread depletion callback we can retrieve this door information
 * via a TSD lookup on the privdoor key.
 */
struct privdoor_data {
	int pd_dfd;
	door_id_t pd_uniqid;
	volatile uint32_t pd_refcnt;
	door_xcreate_server_func_t *pd_crf;
	void *pd_crcookie;
	door_xcreate_thrsetup_func_t *pd_setupf;
};

static int door_xcreate_n(door_info_t *, struct privdoor_data *, int);

/*
 * door_create_cmn holds the privdoor data before kicking off server
 * thread creation, all of which must succeed; if they don't then
 * they return leaving the refcnt unchanged overall, and door_create_cmn
 * releases its hold after revoking the door and we're done.  Otherwise
 * all n threads created add one each to the refcnt, and door_create_cmn
 * drops its hold.  If and when a server thread exits the key destructor
 * function will be called, and we use that to decrement the reference
 * count.  We also decrement the reference count on door_unbind().
 * If ever we get the reference count to 0 then we will free that data.
 */
static void
privdoor_data_hold(struct privdoor_data *pdd)
{
	atomic_inc_32(&pdd->pd_refcnt);
}

static void
privdoor_data_rele(struct privdoor_data *pdd)
{
	if (atomic_dec_32_nv(&pdd->pd_refcnt) == 0)
		free(pdd);
}

void
privdoor_destructor(void *data)
{
	privdoor_data_rele((struct privdoor_data *)data);
}

/*
 * We park ourselves in the kernel to serve as the "caller" for
 * unreferenced upcalls for this process.  If the call returns with
 * EINTR (e.g., someone did a forkall), we repeat as long as we're still
 * in the parent.  If the child creates an unref door it will create
 * a new thread.
 */
static void *
door_unref_func(void *arg)
{
	pid_t mypid = (pid_t)(uintptr_t)arg;

	sigset_t fillset;

	/* mask signals before diving into the kernel */
	(void) sigfillset(&fillset);
	(void) thr_sigsetmask(SIG_SETMASK, &fillset, NULL);

	while (getpid() == mypid && __door_unref() && errno == EINTR)
		continue;

	return (NULL);
}

static int
door_create_cmn(door_server_procedure_t *f, void *cookie, uint_t flags,
    door_xcreate_server_func_t *crf, door_xcreate_thrsetup_func_t *setupf,
    void *crcookie, int nthread)
{
	int d;

	int is_private = (flags & DOOR_PRIVATE);
	int is_unref = (flags & (DOOR_UNREF | DOOR_UNREF_MULTI));
	int do_create_first = 0;
	int do_create_unref = 0;

	ulwp_t *self = curthread;

	pid_t mypid;

	if (self->ul_vfork) {
		errno = ENOTSUP;
		return (-1);
	}

	if (crf)
		flags |= DOOR_PRIVCREATE;

	/*
	 * Doors are associated with the processes which created them.  In
	 * the face of forkall(), this gets quite complicated.  To simplify
	 * it somewhat, we include the call to __door_create() in a critical
	 * section, and figure out what additional actions to take while
	 * still in the critical section.
	 */
	enter_critical(self);
	if ((d = __door_create(f, cookie, flags)) < 0) {
		exit_critical(self);
		return (-1);	/* errno is set */
	}
	mypid = getpid();
	if (mypid != door_create_pid ||
	    (!is_private && mypid != door_create_first_pid) ||
	    (is_unref && mypid != door_create_unref_pid)) {

		lmutex_lock(&door_state_lock);
		door_create_pid = mypid;

		if (!is_private && mypid != door_create_first_pid) {
			do_create_first = 1;
			door_create_first_pid = mypid;
		}
		if (is_unref && mypid != door_create_unref_pid) {
			do_create_unref = 1;
			door_create_unref_pid = mypid;
		}
		lmutex_unlock(&door_state_lock);
	}
	exit_critical(self);

	if (do_create_unref) {
		/*
		 * Create an unref thread the first time we create an
		 * unref door for this process.  Create it as a daemon
		 * thread, so that it doesn't interfere with normal exit
		 * processing.
		 */
		(void) thr_create(NULL, 0, door_unref_func,
		    (void *)(uintptr_t)mypid, THR_DAEMON, NULL);
	}

	if (is_private) {
		door_info_t di;

		/*
		 * Create the first thread(s) for this private door.
		 */
		if (__door_info(d, &di) < 0)
			return (-1);	/* errno is set */

		/*
		 * This key must be available for lookup for all private
		 * door threads, whether associated with a door created via
		 * door_create or door_xcreate.
		 */
		(void) pthread_key_create_once_np(&privdoor_key,
		    privdoor_destructor);

		if (crf == NULL) {
			(*door_server_func)(&di);
		} else {
			struct privdoor_data *pdd = malloc(sizeof (*pdd));

			if (pdd == NULL) {
				(void) door_revoke(d);
				errno = ENOMEM;
				return (-1);
			}

			pdd->pd_dfd = d;
			pdd->pd_uniqid = di.di_uniquifier;
			pdd->pd_refcnt = 1; /* prevent free during xcreate_n */
			pdd->pd_crf = crf;
			pdd->pd_crcookie = crcookie;
			pdd->pd_setupf = setupf;

			if (!door_xcreate_n(&di, pdd, nthread)) {
				int errnocp = errno;

				(void) door_revoke(d);
				privdoor_data_rele(pdd);
				errno = errnocp;
				return (-1);
			} else {
				privdoor_data_rele(pdd);
			}
		}
	} else if (do_create_first) {
		/* First non-private door created in the process */
		(*door_server_func)(NULL);
	}

	return (d);
}

int
door_create(door_server_procedure_t *f, void *cookie, uint_t flags)
{
	if (flags & (DOOR_NO_DEPLETION_CB | DOOR_PRIVCREATE)) {
		errno = EINVAL;
		return (-1);
	}

	return (door_create_cmn(f, cookie, flags, NULL, NULL, NULL, 1));
}

int
door_xcreate(door_server_procedure_t *f, void *cookie, uint_t flags,
    door_xcreate_server_func_t *crf, door_xcreate_thrsetup_func_t *setupf,
    void *crcookie, int nthread)
{
	if (flags & DOOR_PRIVCREATE || nthread < 1 || crf == NULL) {
		errno = EINVAL;
		return (-1);
	}

	return (door_create_cmn(f, cookie, flags | DOOR_PRIVATE,
	    crf, setupf, crcookie, nthread));
}
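
/*
 * Illustrative sketch (not part of this file): a minimal server thread
 * creation function that a door_xcreate() caller might supply as crf.
 * The names my_create_server, my_servproc and my_setup are hypothetical.
 * libc hands the creation function a start function and argument
 * (door_xcreate_startf and its per-thread mailbox, defined below); the
 * creation function must start a thread on exactly those and return 1
 * if it created a thread, 0 to decline creating more, or -1 on failure,
 * matching the switch in door_xcreate_n().
 *
 *	static int
 *	my_create_server(door_info_t *dip, void *(*startf)(void *),
 *	    void *startfarg, void *cookie)
 *	{
 *		pthread_attr_t attr;
 *
 *		(void) pthread_attr_init(&attr);
 *		(void) pthread_attr_setdetachstate(&attr,
 *		    PTHREAD_CREATE_DETACHED);
 *		if (pthread_create(NULL, &attr, startf, startfarg) != 0)
 *			return (-1);
 *		return (1);
 *	}
 *
 *	fd = door_xcreate(my_servproc, NULL, 0,
 *	    my_create_server, my_setup, NULL, 2);
 */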

int
door_ucred(ucred_t **uc)
{
	ucred_t *ucp = *uc;

	if (ucp == NULL) {
		ucp = _ucred_alloc();
		if (ucp == NULL)
			return (-1);
	}

	if (__door_ucred(ucp) != 0) {
		if (*uc == NULL)
			ucred_free(ucp);
		return (-1);
	}

	*uc = ucp;

	return (0);
}
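
/*
 * Illustrative sketch (not part of this file): door_ucred() either fills
 * in a ucred_t the caller has already allocated or, if *uc is NULL,
 * allocates one on the caller's behalf, which the caller must later
 * ucred_free().
 *
 *	ucred_t *uc = NULL;
 *
 *	if (door_ucred(&uc) == 0) {
 *		uid_t euid = ucred_geteuid(uc);
 *		ucred_free(uc);
 *	}
 */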

int
door_cred(door_cred_t *dc)
{
	/*
	 * Ucred size is small and alloca is fast
	 * and cannot fail.
	 */
	ucred_t *ucp = alloca(ucred_size());
	int ret;

	if ((ret = __door_ucred(ucp)) == 0) {
		dc->dc_euid = ucred_geteuid(ucp);
		dc->dc_ruid = ucred_getruid(ucp);
		dc->dc_egid = ucred_getegid(ucp);
		dc->dc_rgid = ucred_getrgid(ucp);
		dc->dc_pid = ucred_getpid(ucp);
	}
	return (ret);
}

int
door_unbind(void)
{
	struct privdoor_data *pdd;
	int rv = __door_unbind();

	/*
	 * If we were indeed bound to the door then check to see whether
	 * we are part of a door_xcreate'd door by checking for our TSD.
	 * If so, then clear the TSD for this key to avoid destructor
	 * callback on future thread exit, and release the private door data.
	 */
	if (rv == 0 && (pdd = pthread_getspecific(privdoor_key)) != NULL) {
		(void) pthread_setspecific(privdoor_key, NULL);
		privdoor_data_rele(pdd);
	}

	return (rv);
}

int
door_return(char *data_ptr, size_t data_size,
    door_desc_t *desc_ptr, uint_t num_desc)
{
	caddr_t sp;
	size_t ssize;
	size_t reserve;
	ulwp_t *self = curthread;

	{
		stack_t s;
		if (thr_stksegment(&s) != 0) {
			errno = EINVAL;
			return (-1);
		}
		sp = s.ss_sp;
		ssize = s.ss_size;
	}

	if (!self->ul_door_noreserve) {
		/*
		 * When we return from the kernel, we must have enough stack
		 * available to handle the request.  Since the creator of
		 * the thread has control over its stack size, and larger
		 * stacks generally indicate bigger request queues, we
		 * use the heuristic of reserving 1/32nd of the stack size
		 * (up to the default stack size), with a minimum of 1/8th
		 * of MINSTACK.  Currently, this translates to:
		 *
		 *			_ILP32		_LP64
		 *	min resv	 512 bytes	1024 bytes
		 *	max resv	 32k bytes	 64k bytes
		 *
		 * This reservation can be disabled by setting
		 *	_THREAD_DOOR_NORESERVE=1
		 * in the environment, but shouldn't be.
		 */

#define	STACK_FRACTION		32
#define	MINSTACK_FRACTION	8

		if (ssize < (MINSTACK * (STACK_FRACTION/MINSTACK_FRACTION)))
			reserve = MINSTACK / MINSTACK_FRACTION;
		else if (ssize < DEFAULTSTACK)
			reserve = ssize / STACK_FRACTION;
		else
			reserve = DEFAULTSTACK / STACK_FRACTION;

#undef STACK_FRACTION
#undef MINSTACK_FRACTION

		if (ssize > reserve)
			ssize -= reserve;
		else
			ssize = 0;
	}

	/*
	 * Historically, the __door_return() syscall wrapper subtracted
	 * some "slop" from the stack pointer before trapping into the
	 * kernel.  We now do this here, so that ssize can be adjusted
	 * correctly.  Eventually, this should be removed, since it is
	 * unnecessary.  (note that TNF on x86 currently relies upon this
	 * idiocy)
	 */
#if defined(__sparc)
	reserve = SA(MINFRAME);
#elif defined(__x86)
	reserve = SA(512);
#else
#error need to define stack base reserve
#endif

#ifdef _STACK_GROWS_DOWNWARD
	sp -= reserve;
#else
#error stack does not grow downwards, routine needs update
#endif

	if (ssize > reserve)
		ssize -= reserve;
	else
		ssize = 0;

	/*
	 * Normally, the above will leave plenty of space in sp for a
	 * request.  Just in case some bozo overrides thr_stksegment() to
	 * return an uncommonly small stack size, we turn off stack size
	 * checking if there is less than 1k remaining.
	 */
#define	MIN_DOOR_STACK	1024
	if (ssize < MIN_DOOR_STACK)
		ssize = 0;

#undef MIN_DOOR_STACK

	/*
	 * We have to wrap the desc_* arguments for the syscall.  If there are
	 * no descriptors being returned, we can skip the wrapping.
	 */
	if (num_desc != 0) {
		door_return_desc_t d;

		d.desc_ptr = desc_ptr;
		d.desc_num = num_desc;
		return (__door_return(data_ptr, data_size, &d, sp, ssize));
	}
	return (__door_return(data_ptr, data_size, NULL, sp, ssize));
}
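
/*
 * Illustrative sketch (not part of this file): a server procedure hands
 * its reply to door_return(), which on success does not return to the
 * procedure; the thread waits in the kernel for the next invocation.
 * my_servproc and its reply value are hypothetical.
 *
 *	static void
 *	my_servproc(void *cookie, char *argp, size_t arg_size,
 *	    door_desc_t *dp, uint_t n_desc)
 *	{
 *		int result = 0;
 *
 *		(void) door_return((char *)&result, sizeof (result), NULL, 0);
 *	}
 */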

/*
 * To start and synchronize a number of door service threads at once
 * we use a struct door_xsync_shared shared by all threads, and
 * a struct door_xsync for each thread.  While each thread
 * has its own startup state, all such state is protected by the same
 * shared lock.  This could cause a little contention but it is a one-off
 * cost at door creation.
 */
enum door_xsync_state {
	DOOR_XSYNC_CREATEWAIT = 0x1c8c8c80,	/* awaits creation handshake */
	DOOR_XSYNC_ABORT,		/* aborting door_xcreate */
	DOOR_XSYNC_ABORTED,		/* thread heeded abort request */
	DOOR_XSYNC_MAXCONCUR,		/* create func decided no more */
	DOOR_XSYNC_CREATEFAIL,		/* thr_create/pthread_create failure */
	DOOR_XSYNC_SETSPEC_FAIL,	/* setspecific failed */
	DOOR_XSYNC_BINDFAIL,		/* door_bind failed */
	DOOR_XSYNC_BOUND,		/* door_bind succeeded */
	DOOR_XSYNC_ENTER_SERVICE	/* Go on to door_return */
};

/* These stats are incremented non-atomically - indicative only */
uint64_t door_xcreate_n_stats[DOOR_XSYNC_ENTER_SERVICE -
    DOOR_XSYNC_CREATEWAIT + 1];

struct door_xsync_shared {
	pthread_mutex_t lock;
	pthread_cond_t cv_m2s;
	pthread_cond_t cv_s2m;
	struct privdoor_data *pdd;
	volatile uint32_t waiting;
};

struct door_xsync {
	volatile enum door_xsync_state state;
	struct door_xsync_shared *sharedp;
};

/*
 * Thread start function that xcreated private doors must use in
 * thr_create or pthread_create.  They must also use the argument we
 * provide.  We:
 *
 *	o call a thread setup function if supplied, or apply sensible defaults
 *	o bind the newly-created thread to the door it will service
 *	o synchronize with door_xcreate to indicate that we have successfully
 *	  bound to the door;  door_xcreate will not return until all
 *	  requested threads have at least bound
 *	o enter service with door_return quoting magic sentinel args
 */
void *
door_xcreate_startf(void *arg)
{
	struct door_xsync *xsp = (struct door_xsync *)arg;
	struct door_xsync_shared *xssp = xsp->sharedp;
	struct privdoor_data *pdd = xssp->pdd;
	enum door_xsync_state next_state;

	privdoor_data_hold(pdd);
	if (pthread_setspecific(privdoor_key, (const void *)pdd) != 0) {
		next_state = DOOR_XSYNC_SETSPEC_FAIL;
		privdoor_data_rele(pdd);
		goto handshake;
	}

	if (pdd->pd_setupf != NULL) {
		(pdd->pd_setupf)(pdd->pd_crcookie);
	} else {
		(void) pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, NULL);
		(void) pthread_setcanceltype(PTHREAD_CANCEL_DEFERRED, NULL);
	}

	if (door_bind(pdd->pd_dfd) == 0)
		next_state = DOOR_XSYNC_BOUND;
	else
		next_state = DOOR_XSYNC_BINDFAIL;

handshake:
	(void) pthread_mutex_lock(&xssp->lock);

	ASSERT(xsp->state == DOOR_XSYNC_CREATEWAIT ||
	    xsp->state == DOOR_XSYNC_ABORT);

	if (xsp->state == DOOR_XSYNC_ABORT)
		next_state = DOOR_XSYNC_ABORTED;

	xsp->state = next_state;

	if (--xssp->waiting == 0)
		(void) pthread_cond_signal(&xssp->cv_s2m);

	if (next_state != DOOR_XSYNC_BOUND) {
		(void) pthread_mutex_unlock(&xssp->lock);
		return (NULL);	/* thread exits, key destructor called */
	}

	while (xsp->state == DOOR_XSYNC_BOUND)
		(void) pthread_cond_wait(&xssp->cv_m2s, &xssp->lock);

	next_state = xsp->state;
	ASSERT(next_state == DOOR_XSYNC_ENTER_SERVICE ||
	    next_state == DOOR_XSYNC_ABORT);

	if (--xssp->waiting == 0)
		(void) pthread_cond_signal(&xssp->cv_s2m);

	(void) pthread_mutex_unlock(&xssp->lock); /* xssp/xsp can be freed */

	if (next_state == DOOR_XSYNC_ABORT)
		return (NULL);	/* thread exits, key destructor called */

	(void) door_return(NULL, 0, NULL, 0);
	return (NULL);
}

static int
door_xcreate_n(door_info_t *dip, struct privdoor_data *pdd, int n)
{
	struct door_xsync_shared *xssp;
	struct door_xsync *xsp;
	int i, failidx = -1;
	int isdepcb = 0;
	int failerrno;
	int bound = 0;
#ifdef _STACK_GROWS_DOWNWARD
	int stkdir = -1;
#else
	int stkdir = 1;
#endif
	int rv = 0;

	/*
	 * If we're called during door creation then we have the
	 * privdoor_data.  If we're called as part of a depletion callback
	 * then the current thread has the privdoor_data as TSD.
	 */
	if (pdd == NULL) {
		isdepcb = 1;
		if ((pdd = pthread_getspecific(privdoor_key)) == NULL)
			thr_panic("door_xcreate_n - no privdoor_data "
			    "on existing server thread");
	}

	/*
	 * Allocate on our stack.  We'll pass pointers to this to the
	 * newly-created threads, therefore this function must not return until
	 * we have synced with server threads that are created.
	 * We do not limit the number of threads so begin by checking
	 * that we have space on the stack for this.
	 */
	{
		size_t sz = sizeof (*xssp) + n * sizeof (*xsp) + 32;
		char dummy;

		if (!stack_inbounds(&dummy + stkdir * sz)) {
			errno = E2BIG;
			return (0);
		}
	}

	if ((xssp = alloca(sizeof (*xssp))) == NULL ||
	    (xsp = alloca(n * sizeof (*xsp))) == NULL) {
		errno = E2BIG;
		return (0);
	}

	(void) pthread_mutex_init(&xssp->lock, NULL);
	(void) pthread_cond_init(&xssp->cv_m2s, NULL);
	(void) pthread_cond_init(&xssp->cv_s2m, NULL);
	xssp->pdd = pdd;
	xssp->waiting = 0;

	(void) pthread_mutex_lock(&xssp->lock);

	for (i = 0; failidx == -1 && i < n; i++) {
		xsp[i].sharedp = xssp;
		membar_producer();	/* xssp and xsp[i] for new thread */

		switch ((pdd->pd_crf)(dip, door_xcreate_startf,
		    (void *)&xsp[i], pdd->pd_crcookie)) {
		case 1:
			/*
			 * Thread successfully created.  Set mailbox
			 * state and increment the number we have to
			 * sync with.
			 */
			xsp[i].state = DOOR_XSYNC_CREATEWAIT;
			xssp->waiting++;
			break;
		case 0:
			/*
			 * Elected to create no further threads.  OK for
			 * a depletion callback, but not during door_xcreate.
			 */
			xsp[i].state = DOOR_XSYNC_MAXCONCUR;
			if (!isdepcb) {
				failidx = i;
				failerrno = EINVAL;
			}
			break;
		case -1:
			/*
			 * Thread creation was attempted but failed.
			 */
			xsp[i].state = DOOR_XSYNC_CREATEFAIL;
			failidx = i;
			failerrno = EPIPE;
			break;
		default:
			/*
			 * The application-supplied function did not return
			 * -1/0/1 - best we can do is panic because anything
			 * else is harder to debug.
			 */
			thr_panic("door server create function illegal return");
			/*NOTREACHED*/
		}
	}

	/*
	 * On initial creation all must succeed; if not then abort
	 */
	if (!isdepcb && failidx != -1) {
		for (i = 0; i < failidx; i++)
			if (xsp[i].state == DOOR_XSYNC_CREATEWAIT)
				xsp[i].state = DOOR_XSYNC_ABORT;
	}

	/*
	 * Wait for thread startup handshake to complete for all threads
	 */
	while (xssp->waiting)
		(void) pthread_cond_wait(&xssp->cv_s2m, &xssp->lock);

	/*
	 * If we are aborting for a failed thread create in door_xcreate
	 * then we're done.
	 */
	if (!isdepcb && failidx != -1) {
		rv = 0;
		goto out;	/* lock held, failerrno is set */
	}

	/*
	 * Did we all succeed in binding?
	 */
	for (i = 0; i < n; i++) {
		int statidx = xsp[i].state - DOOR_XSYNC_CREATEWAIT;

		door_xcreate_n_stats[statidx]++;
		if (xsp[i].state == DOOR_XSYNC_BOUND)
			bound++;
	}

	if (bound == n) {
		rv = 1;
	} else {
		failerrno = EBADF;
		rv = 0;
	}

	/*
	 * During door_xcreate all must succeed in binding - if not then
	 * we command even those that did bind to abort.  Threads that
	 * did not get as far as binding have already exited.
	 */
	for (i = 0; i < n; i++) {
		if (xsp[i].state == DOOR_XSYNC_BOUND) {
			xsp[i].state = (rv == 1 || isdepcb) ?
			    DOOR_XSYNC_ENTER_SERVICE : DOOR_XSYNC_ABORT;
			xssp->waiting++;
		}
	}

	(void) pthread_cond_broadcast(&xssp->cv_m2s);

	while (xssp->waiting)
		(void) pthread_cond_wait(&xssp->cv_s2m, &xssp->lock);

out:
	(void) pthread_mutex_unlock(&xssp->lock);
	(void) pthread_mutex_destroy(&xssp->lock);
	(void) pthread_cond_destroy(&xssp->cv_m2s);
	(void) pthread_cond_destroy(&xssp->cv_s2m);

	if (rv == 0)
		errno = failerrno;

	return (rv);
}

/*
 * Call the server creation function to give it the opportunity to
 * create more threads.  Called during a door invocation when we
 * return from door_return(NULL, 0, NULL, 0) and notice that we're
 * running on the last available thread.
 */
void
door_depletion_cb(door_info_t *dip)
{
	if (dip == NULL) {
		/*
		 * Non-private doors always use door_server_func.
		 */
		(*door_server_func)(NULL);
		return;
	}

	if (dip->di_attributes & DOOR_NO_DEPLETION_CB) {
		/*
		 * Private, door_xcreate'd door specified no callbacks.
		 */
		return;
	} else if (!(dip->di_attributes & DOOR_PRIVCREATE)) {
		/*
		 * Private door with standard/legacy creation semantics.
		 */
		dip->di_attributes |= DOOR_DEPLETION_CB;
		(*door_server_func)(dip);
		return;
	} else {
		/*
		 * Private, door_xcreate'd door.
		 */
		dip->di_attributes |= DOOR_DEPLETION_CB;
		(void) door_xcreate_n(dip, NULL, 1);
	}
}

/*
 * Install a new server creation function.  The appointed function
 * will receive depletion callbacks for non-private doors and private
 * doors created with door_create(..., DOOR_PRIVATE).
 */
door_server_func_t *
door_server_create(door_server_func_t *create_func)
{
	door_server_func_t *prev;

	lmutex_lock(&door_state_lock);
	prev = door_server_func;
	door_server_func = create_func;
	lmutex_unlock(&door_state_lock);

	return (prev);
}
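
/*
 * Illustrative sketch (not part of this file): a process may install its
 * own creation function, e.g. to bound the number of server threads it
 * runs.  my_server_thread, my_door_server_create, my_nthreads and
 * MY_MAX_THREADS are hypothetical; the previous function is returned so
 * it can be chained or restored later.
 *
 *	static void *
 *	my_server_thread(void *arg)
 *	{
 *		(void) pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, NULL);
 *		(void) door_return(NULL, 0, NULL, 0);
 *		return (arg);
 *	}
 *
 *	static void
 *	my_door_server_create(door_info_t *dip)
 *	{
 *		if (my_nthreads < MY_MAX_THREADS)
 *			(void) thr_create(NULL, 0, my_server_thread,
 *			    NULL, THR_DETACHED, NULL);
 *	}
 *
 *	(void) door_server_create(my_door_server_create);
 */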

/*
 * Thread start function for door_create_server() below.
 * Create door server threads with cancellation(5) disabled.
 */
static void *
door_create_func(void *arg)
{
	(void) pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, NULL);
	(void) door_return(NULL, 0, NULL, 0);

	return (arg);
}

/*
 * The default door_server_func_t.
 */
/* ARGSUSED */
static void
door_create_server(door_info_t *dip)
{
	(void) thr_create(NULL, 0, door_create_func, NULL, THR_DETACHED, NULL);
	yield();	/* Gives server thread a chance to run */
}