xref: /onnv-gate/usr/src/uts/common/fs/doorfs/door_sys.c (revision 6997:056043f166c6)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 /*
30  * System call I/F to doors (outside of vnodes I/F) and misc support
31  * routines
32  */
33 #include <sys/types.h>
34 #include <sys/systm.h>
35 #include <sys/door.h>
36 #include <sys/door_data.h>
37 #include <sys/proc.h>
38 #include <sys/thread.h>
39 #include <sys/class.h>
40 #include <sys/cred.h>
41 #include <sys/kmem.h>
42 #include <sys/cmn_err.h>
43 #include <sys/stack.h>
44 #include <sys/debug.h>
45 #include <sys/cpuvar.h>
46 #include <sys/file.h>
47 #include <sys/fcntl.h>
48 #include <sys/vnode.h>
49 #include <sys/vfs.h>
50 #include <sys/vfs_opreg.h>
51 #include <sys/sobject.h>
52 #include <sys/schedctl.h>
53 #include <sys/callb.h>
54 #include <sys/ucred.h>
55 
56 #include <sys/mman.h>
57 #include <sys/sysmacros.h>
58 #include <sys/vmsystm.h>
59 #include <vm/as.h>
60 #include <vm/hat.h>
61 #include <vm/page.h>
62 #include <vm/seg.h>
63 #include <vm/seg_vn.h>
65 
66 #include <sys/modctl.h>
67 #include <sys/syscall.h>
68 #include <sys/pathname.h>
69 #include <sys/rctl.h>
70 
71 /*
72  * The maximum amount of data (in bytes) that will be transferred using
73  * an intermediate kernel buffer.  For sizes greater than this we map
74  * in the destination pages and perform a 1-copy transfer.
75  */
76 size_t	door_max_arg = 16 * 1024;
77 
78 /*
79  * Maximum amount of data that will be transferred in a reply to a
80  * door_upcall.  Need to guard against a process returning huge amounts
81  * of data and getting the kernel stuck in kmem_alloc.
82  */
83 size_t	door_max_upcall_reply = 1024 * 1024;
84 
85 /*
86  * Maximum number of descriptors allowed to be passed in a single
87  * door_call or door_return.  We need to allocate kernel memory
88  * for all of them at once, so we can't let it scale without limit.
89  */
90 uint_t door_max_desc = 1024;
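
/*
 * Configuration sketch: the three limits above are ordinary module
 * globals, so they could presumably be tuned like other doorfs
 * variables, e.g. from /etc/system (illustrative value):
 *
 *	set doorfs:door_max_arg = 0x8000
 */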
91 
92 /*
93  * Definition of a door handle, used by other kernel subsystems when
94  * calling door functions.  This is really a file structure but we
95  * want to hide that fact.
96  */
97 struct __door_handle {
98 	file_t dh_file;
99 };
100 
101 #define	DHTOF(dh) ((file_t *)(dh))
102 #define	FTODH(fp) ((door_handle_t)(fp))
103 
104 static int doorfs(long, long, long, long, long, long);
105 
106 static struct sysent door_sysent = {
107 	6,
108 	SE_ARGC | SE_NOUNLOAD,
109 	(int (*)())doorfs,
110 };
111 
112 static struct modlsys modlsys = {
113 	&mod_syscallops, "doors", &door_sysent
114 };
115 
116 #ifdef _SYSCALL32_IMPL
117 
118 static int
119 doorfs32(int32_t arg1, int32_t arg2, int32_t arg3, int32_t arg4,
120     int32_t arg5, int32_t subcode);
121 
122 static struct sysent door_sysent32 = {
123 	6,
124 	SE_ARGC | SE_NOUNLOAD,
125 	(int (*)())doorfs32,
126 };
127 
128 static struct modlsys modlsys32 = {
129 	&mod_syscallops32,
130 	"32-bit door syscalls",
131 	&door_sysent32
132 };
133 #endif
134 
135 static struct modlinkage modlinkage = {
136 	MODREV_1,
137 	&modlsys,
138 #ifdef _SYSCALL32_IMPL
139 	&modlsys32,
140 #endif
141 	NULL
142 };
143 
144 dev_t	doordev;
145 
146 extern	struct vfs door_vfs;
147 extern	struct vnodeops *door_vnodeops;
148 
149 int
150 _init(void)
151 {
152 	static const fs_operation_def_t door_vfsops_template[] = {
153 		NULL, NULL
154 	};
155 	extern const fs_operation_def_t door_vnodeops_template[];
156 	vfsops_t *door_vfsops;
157 	major_t major;
158 	int error;
159 
160 	mutex_init(&door_knob, NULL, MUTEX_DEFAULT, NULL);
161 	if ((major = getudev()) == (major_t)-1)
162 		return (ENXIO);
163 	doordev = makedevice(major, 0);
164 
165 	/* Create a dummy vfs */
166 	error = vfs_makefsops(door_vfsops_template, &door_vfsops);
167 	if (error != 0) {
168 		cmn_err(CE_WARN, "door init: bad vfs ops");
169 		return (error);
170 	}
171 	VFS_INIT(&door_vfs, door_vfsops, NULL);
172 	door_vfs.vfs_flag = VFS_RDONLY;
173 	door_vfs.vfs_dev = doordev;
174 	vfs_make_fsid(&(door_vfs.vfs_fsid), doordev, 0);
175 
176 	error = vn_make_ops("doorfs", door_vnodeops_template, &door_vnodeops);
177 	if (error != 0) {
178 		vfs_freevfsops(door_vfsops);
179 		cmn_err(CE_WARN, "door init: bad vnode ops");
180 		return (error);
181 	}
182 	return (mod_install(&modlinkage));
183 }
184 
185 int
186 _info(struct modinfo *modinfop)
187 {
188 	return (mod_info(&modlinkage, modinfop));
189 }
190 
191 /* system call functions */
192 static int door_call(int, void *);
193 static int door_return(caddr_t, size_t, door_desc_t *, uint_t, caddr_t, size_t);
194 static int door_create(void (*pc_cookie)(void *, char *, size_t, door_desc_t *,
195     uint_t), void *data_cookie, uint_t);
196 static int door_revoke(int);
197 static int door_info(int, struct door_info *);
198 static int door_ucred(struct ucred_s *);
199 static int door_bind(int);
200 static int door_unbind(void);
201 static int door_unref(void);
202 static int door_getparam(int, int, size_t *);
203 static int door_setparam(int, int, size_t);
204 
205 #define	DOOR_RETURN_OLD	4		/* historic value, for s10 */
206 
207 /*
208  * System call wrapper for all door related system calls
209  */
210 static int
211 doorfs(long arg1, long arg2, long arg3, long arg4, long arg5, long subcode)
212 {
213 	switch (subcode) {
214 	case DOOR_CALL:
215 		return (door_call(arg1, (void *)arg2));
216 	case DOOR_RETURN: {
217 		door_return_desc_t *drdp = (door_return_desc_t *)arg3;
218 
219 		if (drdp != NULL) {
220 			door_return_desc_t drd;
221 			if (copyin(drdp, &drd, sizeof (drd)))
222 				return (EFAULT);
223 			return (door_return((caddr_t)arg1, arg2, drd.desc_ptr,
224 			    drd.desc_num, (caddr_t)arg4, arg5));
225 		}
226 		return (door_return((caddr_t)arg1, arg2, NULL,
227 		    0, (caddr_t)arg4, arg5));
228 	}
229 	case DOOR_RETURN_OLD:
230 		/*
231 		 * In order to support the S10 runtime environment, we
232 		 * still respond to the old syscall subcode for door_return.
233 		 * We treat it as having no stack limits.  This code should
234 		 * be removed when such support is no longer needed.
235 		 */
236 		return (door_return((caddr_t)arg1, arg2, (door_desc_t *)arg3,
237 		    arg4, (caddr_t)arg5, 0));
238 	case DOOR_CREATE:
239 		return (door_create((void (*)())arg1, (void *)arg2, arg3));
240 	case DOOR_REVOKE:
241 		return (door_revoke(arg1));
242 	case DOOR_INFO:
243 		return (door_info(arg1, (struct door_info *)arg2));
244 	case DOOR_BIND:
245 		return (door_bind(arg1));
246 	case DOOR_UNBIND:
247 		return (door_unbind());
248 	case DOOR_UNREFSYS:
249 		return (door_unref());
250 	case DOOR_UCRED:
251 		return (door_ucred((struct ucred_s *)arg1));
252 	case DOOR_GETPARAM:
253 		return (door_getparam(arg1, arg2, (size_t *)arg3));
254 	case DOOR_SETPARAM:
255 		return (door_setparam(arg1, arg2, arg3));
256 	default:
257 		return (set_errno(EINVAL));
258 	}
259 }
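
/*
 * Illustrative user-level view of the DOOR_CALL path above (hypothetical
 * client code; only door_arg_t and its fields come from the doors API):
 *
 *	door_arg_t da;
 *
 *	da.data_ptr = (char *)&req;	da.data_size = sizeof (req);
 *	da.desc_ptr = NULL;		da.desc_num = 0;
 *	da.rbuf = (char *)&reply;	da.rsize = sizeof (reply);
 *	if (door_call(fd, &da) < 0)
 *		return (errno);
 *
 * On return, da.rbuf may point to a freshly mapped buffer rather than
 * &reply if the results did not fit in rsize (see door_overflow()).
 */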
260 
261 #ifdef _SYSCALL32_IMPL
262 /*
263  * System call wrapper for all door related system calls from 32-bit programs.
264  * Needed at the moment because of the casts - they undo some damage
265  * that truss causes (sign-extending the stack pointer) when truss'ing
266  * a 32-bit program using doors.
267  */
268 static int
269 doorfs32(int32_t arg1, int32_t arg2, int32_t arg3,
270     int32_t arg4, int32_t arg5, int32_t subcode)
271 {
272 	switch (subcode) {
273 	case DOOR_CALL:
274 		return (door_call(arg1, (void *)(uintptr_t)(caddr32_t)arg2));
275 	case DOOR_RETURN: {
276 		door_return_desc32_t *drdp =
277 		    (door_return_desc32_t *)(uintptr_t)(caddr32_t)arg3;
278 		if (drdp != NULL) {
279 			door_return_desc32_t drd;
280 			if (copyin(drdp, &drd, sizeof (drd)))
281 				return (EFAULT);
282 			return (door_return(
283 			    (caddr_t)(uintptr_t)(caddr32_t)arg1, arg2,
284 			    (door_desc_t *)(uintptr_t)drd.desc_ptr,
285 			    drd.desc_num, (caddr_t)(uintptr_t)(caddr32_t)arg4,
286 			    (size_t)(uintptr_t)(size32_t)arg5));
287 		}
288 		return (door_return((caddr_t)(uintptr_t)(caddr32_t)arg1,
289 		    arg2, NULL, 0, (caddr_t)(uintptr_t)(caddr32_t)arg4,
290 		    (size_t)(uintptr_t)(size32_t)arg5));
291 	}
292 	case DOOR_RETURN_OLD:
293 		/*
294 		 * In order to support the S10 runtime environment, we
295 		 * still respond to the old syscall subcode for door_return.
296 		 * We treat it as having no stack limits.  This code should
297 		 * be removed when such support is no longer needed.
298 		 */
299 		return (door_return((caddr_t)(uintptr_t)(caddr32_t)arg1, arg2,
300 		    (door_desc_t *)(uintptr_t)(caddr32_t)arg3, arg4,
301 		    (caddr_t)(uintptr_t)(caddr32_t)arg5, 0));
302 	case DOOR_CREATE:
303 		return (door_create((void (*)())(uintptr_t)(caddr32_t)arg1,
304 		    (void *)(uintptr_t)(caddr32_t)arg2, arg3));
305 	case DOOR_REVOKE:
306 		return (door_revoke(arg1));
307 	case DOOR_INFO:
308 		return (door_info(arg1,
309 		    (struct door_info *)(uintptr_t)(caddr32_t)arg2));
310 	case DOOR_BIND:
311 		return (door_bind(arg1));
312 	case DOOR_UNBIND:
313 		return (door_unbind());
314 	case DOOR_UNREFSYS:
315 		return (door_unref());
316 	case DOOR_UCRED:
317 		return (door_ucred(
318 		    (struct ucred_s *)(uintptr_t)(caddr32_t)arg1));
319 	case DOOR_GETPARAM:
320 		return (door_getparam(arg1, arg2,
321 		    (size_t *)(uintptr_t)(caddr32_t)arg3));
322 	case DOOR_SETPARAM:
323 		return (door_setparam(arg1, arg2, (size_t)(size32_t)arg3));
324 
325 	default:
326 		return (set_errno(EINVAL));
327 	}
328 }
329 #endif
330 
331 void shuttle_resume(kthread_t *, kmutex_t *);
332 void shuttle_swtch(kmutex_t *);
333 void shuttle_sleep(kthread_t *);
334 
335 /*
336  * Support routines
337  */
338 static int door_create_common(void (*)(), void *, uint_t, int, int *,
339     file_t **);
340 static int door_overflow(kthread_t *, caddr_t, size_t, door_desc_t *, uint_t);
341 static int door_args(kthread_t *, int);
342 static int door_results(kthread_t *, caddr_t, size_t, door_desc_t *, uint_t);
343 static int door_copy(struct as *, caddr_t, caddr_t, uint_t);
344 static void	door_server_exit(proc_t *, kthread_t *);
345 static void	door_release_server(door_node_t *, kthread_t *);
346 static kthread_t	*door_get_server(door_node_t *);
347 static door_node_t	*door_lookup(int, file_t **);
348 static int	door_translate_in(void);
349 static int	door_translate_out(void);
350 static void	door_fd_rele(door_desc_t *, uint_t, int);
351 static void	door_list_insert(door_node_t *);
352 static void	door_info_common(door_node_t *, door_info_t *, file_t *);
353 static int	door_release_fds(door_desc_t *, uint_t);
354 static void	door_fd_close(door_desc_t *, uint_t);
355 static void	door_fp_close(struct file **, uint_t);
356 
357 static door_data_t *
358 door_my_data(int create_if_missing)
359 {
360 	door_data_t *ddp;
361 
362 	ddp = curthread->t_door;
363 	if (create_if_missing && ddp == NULL)
364 		ddp = curthread->t_door = kmem_zalloc(sizeof (*ddp), KM_SLEEP);
365 
366 	return (ddp);
367 }
368 
369 static door_server_t *
370 door_my_server(int create_if_missing)
371 {
372 	door_data_t *ddp = door_my_data(create_if_missing);
373 
374 	return ((ddp != NULL)? DOOR_SERVER(ddp) : NULL);
375 }
376 
377 static door_client_t *
378 door_my_client(int create_if_missing)
379 {
380 	door_data_t *ddp = door_my_data(create_if_missing);
381 
382 	return ((ddp != NULL)? DOOR_CLIENT(ddp) : NULL);
383 }
384 
385 /*
386  * System call to create a door
387  */
388 int
389 door_create(void (*pc_cookie)(), void *data_cookie, uint_t attributes)
390 {
391 	int fd;
392 	int err;
393 
394 	if ((attributes & ~DOOR_CREATE_MASK) ||
395 	    ((attributes & (DOOR_UNREF | DOOR_UNREF_MULTI)) ==
396 	    (DOOR_UNREF | DOOR_UNREF_MULTI)))
397 		return (set_errno(EINVAL));
398 
399 	if ((err = door_create_common(pc_cookie, data_cookie, attributes, 0,
400 	    &fd, NULL)) != 0)
401 		return (set_errno(err));
402 
403 	f_setfd(fd, FD_CLOEXEC);
404 	return (fd);
405 }
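
/*
 * Illustrative user-level counterpart of the system call above
 * (hypothetical names; the server procedure signature matches the
 * pc_cookie prototype declared earlier in this file):
 *
 *	static void
 *	my_servproc(void *cookie, char *argp, size_t arg_size,
 *	    door_desc_t *dp, uint_t n_desc)
 *	{
 *		...
 *		(void) door_return(rbuf, rlen, NULL, 0);
 *	}
 *
 *	int fd = door_create(my_servproc, my_cookie, 0);
 *
 * The descriptor is created with FD_CLOEXEC set, as above; publishing it
 * (e.g. with fattach(3C)) is up to the caller.
 */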
406 
407 /*
408  * Common code for creating user and kernel doors.  If a door was
409  * created, stores a file structure pointer in the location pointed
410  * to by fpp (if fpp is non-NULL) and returns 0.  Also, if a non-NULL
411  * pointer to a file descriptor is passed in as fdp, allocates a file
412  * descriptor representing the door.  If a door could not be created,
413  * returns an error.
414  */
415 static int
416 door_create_common(void (*pc_cookie)(), void *data_cookie, uint_t attributes,
417     int from_kernel, int *fdp, file_t **fpp)
418 {
419 	door_node_t	*dp;
420 	vnode_t		*vp;
421 	struct file	*fp;
422 	static door_id_t index = 0;
423 	proc_t		*p = (from_kernel)? &p0 : curproc;
424 
425 	dp = kmem_zalloc(sizeof (door_node_t), KM_SLEEP);
426 
427 	dp->door_vnode = vn_alloc(KM_SLEEP);
428 	dp->door_target = p;
429 	dp->door_data = data_cookie;
430 	dp->door_pc = pc_cookie;
431 	dp->door_flags = attributes;
432 #ifdef _SYSCALL32_IMPL
433 	if (!from_kernel && get_udatamodel() != DATAMODEL_NATIVE)
434 		dp->door_data_max = UINT32_MAX;
435 	else
436 #endif
437 		dp->door_data_max = SIZE_MAX;
438 	dp->door_data_min = 0UL;
439 	dp->door_desc_max = (attributes & DOOR_REFUSE_DESC)? 0 : INT_MAX;
440 
441 	vp = DTOV(dp);
442 	vn_setops(vp, door_vnodeops);
443 	vp->v_type = VDOOR;
444 	vp->v_vfsp = &door_vfs;
445 	vp->v_data = (caddr_t)dp;
446 	mutex_enter(&door_knob);
447 	dp->door_index = index++;
448 	/* add to per-process door list */
449 	door_list_insert(dp);
450 	mutex_exit(&door_knob);
451 
452 	if (falloc(vp, FREAD | FWRITE, &fp, fdp)) {
453 		/*
454 		 * If the file table is full, remove the door from the
455 		 * per-process list, free the door, and return EMFILE.
456 		 */
457 		mutex_enter(&door_knob);
458 		door_list_delete(dp);
459 		mutex_exit(&door_knob);
460 		vn_free(vp);
461 		kmem_free(dp, sizeof (door_node_t));
462 		return (EMFILE);
463 	}
464 	vn_exists(vp);
465 	if (fdp != NULL)
466 		setf(*fdp, fp);
467 	mutex_exit(&fp->f_tlock);
468 
469 	if (fpp != NULL)
470 		*fpp = fp;
471 	return (0);
472 }
473 
474 static int
475 door_check_limits(door_node_t *dp, door_arg_t *da, int upcall)
476 {
477 	ASSERT(MUTEX_HELD(&door_knob));
478 
479 	/* we allow unref upcalls through, despite any minimum */
480 	if (da->data_size < dp->door_data_min &&
481 	    !(upcall && da->data_ptr == DOOR_UNREF_DATA))
482 		return (ENOBUFS);
483 
484 	if (da->data_size > dp->door_data_max)
485 		return (ENOBUFS);
486 
487 	if (da->desc_num > 0 && (dp->door_flags & DOOR_REFUSE_DESC))
488 		return (ENOTSUP);
489 
490 	if (da->desc_num > dp->door_desc_max)
491 		return (ENFILE);
492 
493 	return (0);
494 }
495 
496 /*
497  * Door invocation.
498  */
499 int
500 door_call(int did, void *args)
501 {
502 	/* Locals */
503 	door_node_t	*dp;
504 	kthread_t	*server_thread;
505 	int		error = 0;
506 	klwp_t		*lwp;
507 	door_client_t	*ct;		/* curthread door_data */
508 	door_server_t	*st;		/* server thread door_data */
509 	door_desc_t	*start = NULL;
510 	uint_t		ncopied = 0;
511 	size_t		dsize;
512 	/* destructor for data returned by a kernel server */
513 	void		(*destfn)() = NULL;
514 	void		*destarg;
515 	model_t		datamodel;
516 	int		gotresults = 0;
517 	int		needcleanup = 0;
518 	int		cancel_pending;
519 
520 	lwp = ttolwp(curthread);
521 	datamodel = lwp_getdatamodel(lwp);
522 
523 	ct = door_my_client(1);
524 
525 	/*
526 	 * Get the arguments
527 	 */
528 	if (args) {
529 		if (datamodel == DATAMODEL_NATIVE) {
530 			if (copyin(args, &ct->d_args, sizeof (door_arg_t)) != 0)
531 				return (set_errno(EFAULT));
532 		} else {
533 			door_arg32_t    da32;
534 
535 			if (copyin(args, &da32, sizeof (door_arg32_t)) != 0)
536 				return (set_errno(EFAULT));
537 			ct->d_args.data_ptr =
538 			    (char *)(uintptr_t)da32.data_ptr;
539 			ct->d_args.data_size = da32.data_size;
540 			ct->d_args.desc_ptr =
541 			    (door_desc_t *)(uintptr_t)da32.desc_ptr;
542 			ct->d_args.desc_num = da32.desc_num;
543 			ct->d_args.rbuf =
544 			    (char *)(uintptr_t)da32.rbuf;
545 			ct->d_args.rsize = da32.rsize;
546 		}
547 	} else {
548 		/* No arguments, and no results allowed */
549 		ct->d_noresults = 1;
550 		ct->d_args.data_size = 0;
551 		ct->d_args.desc_num = 0;
552 		ct->d_args.rsize = 0;
553 	}
554 
555 	if ((dp = door_lookup(did, NULL)) == NULL)
556 		return (set_errno(EBADF));
557 
558 	/*
559 	 * We don't want to hold the door FD over the entire operation;
560 	 * instead, we put a hold on the door vnode and release the FD
561 	 * immediately
562 	 */
563 	VN_HOLD(DTOV(dp));
564 	releasef(did);
565 
566 	mutex_enter(&door_knob);
567 	if (DOOR_INVALID(dp)) {
568 		mutex_exit(&door_knob);
569 		error = EBADF;
570 		goto out;
571 	}
572 
573 	/*
574 	 * before we do anything, check that we are not overflowing the
575 	 * required limits.
576 	 */
577 	error = door_check_limits(dp, &ct->d_args, 0);
578 	if (error != 0) {
579 		mutex_exit(&door_knob);
580 		goto out;
581 	}
582 
583 	/*
584 	 * Check for in-kernel door server.
585 	 */
586 	if (dp->door_target == &p0) {
587 		caddr_t rbuf = ct->d_args.rbuf;
588 		size_t rsize = ct->d_args.rsize;
589 
590 		dp->door_active++;
591 		ct->d_kernel = 1;
592 		ct->d_error = DOOR_WAIT;
593 		mutex_exit(&door_knob);
594 		/* translate file descriptors to vnodes */
595 		if (ct->d_args.desc_num) {
596 			error = door_translate_in();
597 			if (error)
598 				goto out;
599 		}
600 		/*
601 		 * Call kernel door server.  Arguments are passed and
602 		 * returned as a door_arg pointer.  When called, data_ptr
603 		 * points to user data and desc_ptr points to a kernel list
604 		 * of door descriptors that have been converted to file
605 		 * structure pointers.  It's the server function's
606 		 * responsibility to copyin the data pointed to by data_ptr
607 		 * (this avoids extra copying in some cases).  On return,
608 		 * data_ptr points to a user buffer of data, and desc_ptr
609 		 * points to a kernel list of door descriptors representing
610 		 * files.  When a reference is passed to a kernel server,
611 		 * it is the server's responsibility to release the reference
612 		 * (by calling closef).  When the server includes a
613 		 * reference in its reply, it is released as part of the
614 		 * call (the server must duplicate the reference if
615 		 * it wants to retain a copy).  The destfn, if set to
616 		 * non-NULL, is a destructor to be called when the returned
617 		 * kernel data (if any) is no longer needed (has all been
618 		 * translated and copied to user level).
619 		 */
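		/*
		 * For illustration only: a kernel door server reached
		 * through this path (one whose door came from the
		 * from_kernel case of door_create_common()) would have
		 * the shape below; the name is hypothetical, the
		 * signature follows from the call made just below.
		 *
		 *	static void
		 *	my_kdoor_server(void *cookie, door_arg_t *da,
		 *	    void (**destfnp)(), void **destargp, int *errp)
		 */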
620 		(*(dp->door_pc))(dp->door_data, &ct->d_args,
621 		    &destfn, &destarg, &error);
622 		mutex_enter(&door_knob);
623 		/* not implemented yet */
624 		if (--dp->door_active == 0 && (dp->door_flags & DOOR_DELAY))
625 			door_deliver_unref(dp);
626 		mutex_exit(&door_knob);
627 		if (error)
628 			goto out;
629 
630 		/* translate vnodes to files */
631 		if (ct->d_args.desc_num) {
632 			error = door_translate_out();
633 			if (error)
634 				goto out;
635 		}
636 		ct->d_buf = ct->d_args.rbuf;
637 		ct->d_bufsize = ct->d_args.rsize;
638 		if (rsize < (ct->d_args.data_size +
639 		    (ct->d_args.desc_num * sizeof (door_desc_t)))) {
640 			/* handle overflow */
641 			error = door_overflow(curthread, ct->d_args.data_ptr,
642 			    ct->d_args.data_size, ct->d_args.desc_ptr,
643 			    ct->d_args.desc_num);
644 			if (error)
645 				goto out;
646 			/* door_overflow sets d_args rbuf and rsize */
647 		} else {
648 			ct->d_args.rbuf = rbuf;
649 			ct->d_args.rsize = rsize;
650 		}
651 		goto results;
652 	}
653 
654 	/*
655 	 * Get a server thread from the target domain
656 	 */
657 	if ((server_thread = door_get_server(dp)) == NULL) {
658 		if (DOOR_INVALID(dp))
659 			error = EBADF;
660 		else
661 			error = EAGAIN;
662 		mutex_exit(&door_knob);
663 		goto out;
664 	}
665 
666 	st = DOOR_SERVER(server_thread->t_door);
667 	if (ct->d_args.desc_num || ct->d_args.data_size) {
668 		int is_private = (dp->door_flags & DOOR_PRIVATE);
669 		/*
670 		 * Move data from client to server
671 		 */
672 		DOOR_T_HOLD(st);
673 		mutex_exit(&door_knob);
674 		error = door_args(server_thread, is_private);
675 		mutex_enter(&door_knob);
676 		DOOR_T_RELEASE(st);
677 		if (error) {
678 			/*
679 			 * We're not going to resume this thread after all
680 			 */
681 			door_release_server(dp, server_thread);
682 			shuttle_sleep(server_thread);
683 			mutex_exit(&door_knob);
684 			goto out;
685 		}
686 	}
687 
688 	dp->door_active++;
689 	ct->d_error = DOOR_WAIT;
690 	ct->d_args_done = 0;
691 	st->d_caller = curthread;
692 	st->d_active = dp;
693 
694 	shuttle_resume(server_thread, &door_knob);
695 
696 	mutex_enter(&door_knob);
697 shuttle_return:
698 	if ((error = ct->d_error) < 0) {	/* DOOR_WAIT or DOOR_EXIT */
699 		/*
700 		 * Premature wakeup. Find out why (stop, forkall, sig, exit ...)
701 		 */
702 		mutex_exit(&door_knob);		/* May block in ISSIG */
703 		cancel_pending = 0;
704 		if (ISSIG(curthread, FORREAL) || lwp->lwp_sysabort ||
705 		    MUSTRETURN(curproc, curthread) ||
706 		    (cancel_pending = schedctl_cancel_pending()) != 0) {
707 			/* Signal, forkall, ... */
708 			lwp->lwp_sysabort = 0;
709 			if (cancel_pending)
710 				schedctl_cancel_eintr();
711 			mutex_enter(&door_knob);
712 			error = EINTR;
713 			/*
714 			 * If the server has finished processing our call,
715 			 * or exited (calling door_slam()), then d_error
716 			 * will have changed.  If the server hasn't finished
717 			 * yet, d_error will still be DOOR_WAIT, and we
718 			 * let it know we are not interested in any
719 			 * results by sending a SIGCANCEL, unless the door
720 			 * is marked with DOOR_NO_CANCEL.
721 			 */
722 			if (ct->d_error == DOOR_WAIT &&
723 			    st->d_caller == curthread) {
724 				proc_t	*p = ttoproc(server_thread);
725 
726 				st->d_active = NULL;
727 				st->d_caller = NULL;
728 
729 				if (!(dp->door_flags & DOOR_NO_CANCEL)) {
730 					DOOR_T_HOLD(st);
731 					mutex_exit(&door_knob);
732 
733 					mutex_enter(&p->p_lock);
734 					sigtoproc(p, server_thread, SIGCANCEL);
735 					mutex_exit(&p->p_lock);
736 
737 					mutex_enter(&door_knob);
738 					DOOR_T_RELEASE(st);
739 				}
740 			}
741 		} else {
742 			/*
743 			 * Return from stop(), server exit...
744 			 *
745 			 * Note that the server could have done a
746 			 * door_return while the client was in stop state
747 			 * (ISSIG), in which case the error condition
748 			 * is updated by the server.
749 			 */
750 			mutex_enter(&door_knob);
751 			if (ct->d_error == DOOR_WAIT) {
752 				/* Still waiting for a reply */
753 				shuttle_swtch(&door_knob);
754 				mutex_enter(&door_knob);
755 				lwp->lwp_asleep = 0;
756 				goto	shuttle_return;
757 			} else if (ct->d_error == DOOR_EXIT) {
758 				/* Server exit */
759 				error = EINTR;
760 			} else {
761 				/* Server did a door_return during ISSIG */
762 				error = ct->d_error;
763 			}
764 		}
765 		/*
766 		 * Can't exit if the server is currently copying
767 		 * results for me.
768 		 */
769 		while (DOOR_T_HELD(ct))
770 			cv_wait(&ct->d_cv, &door_knob);
771 
772 		/*
773 		 * If the server has not processed our message, free the
774 		 * descriptors.
775 		 */
776 		if (!ct->d_args_done) {
777 			needcleanup = 1;
778 			ct->d_args_done = 1;
779 		}
780 
781 		/*
782 		 * Find out if results were successfully copied.
783 		 */
784 		if (ct->d_error == 0)
785 			gotresults = 1;
786 	}
787 	ASSERT(ct->d_args_done);
788 	lwp->lwp_asleep = 0;		/* /proc */
789 	lwp->lwp_sysabort = 0;		/* /proc */
790 	if (--dp->door_active == 0 && (dp->door_flags & DOOR_DELAY))
791 		door_deliver_unref(dp);
792 	mutex_exit(&door_knob);
793 
794 	if (needcleanup)
795 		door_fp_close(ct->d_fpp, ct->d_args.desc_num);
796 
797 results:
798 	/*
799 	 * Move the results to userland (if any)
800 	 */
801 
802 	if (ct->d_noresults)
803 		goto out;
804 
805 	if (error) {
806 		/*
807 		 * If server returned results successfully, then we've
808 		 * been interrupted and may need to clean up.
809 		 */
810 		if (gotresults) {
811 			ASSERT(error == EINTR);
812 			door_fp_close(ct->d_fpp, ct->d_args.desc_num);
813 		}
814 		goto out;
815 	}
816 
817 	/*
818 	 * Copy back data if we haven't caused an overflow (already
819 	 * handled) and we are using a 2 copy transfer, or we are
820 	 * returning data from a kernel server.
821 	 */
822 	if (ct->d_args.data_size) {
823 		ct->d_args.data_ptr = ct->d_args.rbuf;
824 		if (ct->d_kernel || (!ct->d_overflow &&
825 		    ct->d_args.data_size <= door_max_arg)) {
826 			if (copyout_nowatch(ct->d_buf, ct->d_args.rbuf,
827 			    ct->d_args.data_size)) {
828 				door_fp_close(ct->d_fpp, ct->d_args.desc_num);
829 				error = EFAULT;
830 				goto out;
831 			}
832 		}
833 	}
834 
835 	/*
836 	 * stuff returned doors into our proc, copyout the descriptors
837 	 */
838 	if (ct->d_args.desc_num) {
839 		struct file	**fpp;
840 		door_desc_t	*didpp;
841 		uint_t		n = ct->d_args.desc_num;
842 
843 		dsize = n * sizeof (door_desc_t);
844 		start = didpp = kmem_alloc(dsize, KM_SLEEP);
845 		fpp = ct->d_fpp;
846 
847 		while (n--) {
848 			if (door_insert(*fpp, didpp) == -1) {
849 				/* Close remaining files */
850 				door_fp_close(fpp, n + 1);
851 				error = EMFILE;
852 				goto out;
853 			}
854 			fpp++; didpp++; ncopied++;
855 		}
856 
857 		ct->d_args.desc_ptr = (door_desc_t *)(ct->d_args.rbuf +
858 		    roundup(ct->d_args.data_size, sizeof (door_desc_t)));
859 
860 		if (copyout_nowatch(start, ct->d_args.desc_ptr, dsize)) {
861 			error = EFAULT;
862 			goto out;
863 		}
864 	}
865 
866 	/*
867 	 * Return the results
868 	 */
869 	if (datamodel == DATAMODEL_NATIVE) {
870 		if (copyout_nowatch(&ct->d_args, args,
871 		    sizeof (door_arg_t)) != 0)
872 			error = EFAULT;
873 	} else {
874 		door_arg32_t    da32;
875 
876 		da32.data_ptr = (caddr32_t)(uintptr_t)ct->d_args.data_ptr;
877 		da32.data_size = ct->d_args.data_size;
878 		da32.desc_ptr = (caddr32_t)(uintptr_t)ct->d_args.desc_ptr;
879 		da32.desc_num = ct->d_args.desc_num;
880 		da32.rbuf = (caddr32_t)(uintptr_t)ct->d_args.rbuf;
881 		da32.rsize = ct->d_args.rsize;
882 		if (copyout_nowatch(&da32, args, sizeof (door_arg32_t)) != 0) {
883 			error = EFAULT;
884 		}
885 	}
886 
887 out:
888 	ct->d_noresults = 0;
889 
890 	/* clean up the overflow buffer if an error occurred */
891 	if (error != 0 && ct->d_overflow) {
892 		(void) as_unmap(curproc->p_as, ct->d_args.rbuf,
893 		    ct->d_args.rsize);
894 	}
895 	ct->d_overflow = 0;
896 
897 	/* call destructor */
898 	if (destfn) {
899 		ASSERT(ct->d_kernel);
900 		(*destfn)(dp->door_data, destarg);
901 		ct->d_buf = NULL;
902 		ct->d_bufsize = 0;
903 	}
904 
905 	if (dp)
906 		VN_RELE(DTOV(dp));
907 
908 	if (ct->d_buf) {
909 		ASSERT(!ct->d_kernel);
910 		kmem_free(ct->d_buf, ct->d_bufsize);
911 		ct->d_buf = NULL;
912 		ct->d_bufsize = 0;
913 	}
914 	ct->d_kernel = 0;
915 
916 	/* clean up the descriptor copyout buffer */
917 	if (start != NULL) {
918 		if (error != 0)
919 			door_fd_close(start, ncopied);
920 		kmem_free(start, dsize);
921 	}
922 
923 	if (ct->d_fpp) {
924 		kmem_free(ct->d_fpp, ct->d_fpp_size);
925 		ct->d_fpp = NULL;
926 		ct->d_fpp_size = 0;
927 	}
928 
929 	if (error)
930 		return (set_errno(error));
931 
932 	return (0);
933 }
934 
935 static int
936 door_setparam_common(door_node_t *dp, int from_kernel, int type, size_t val)
937 {
938 	int error = 0;
939 
940 	mutex_enter(&door_knob);
941 
942 	if (DOOR_INVALID(dp)) {
943 		mutex_exit(&door_knob);
944 		return (EBADF);
945 	}
946 
947 	/*
948 	 * door_ki_setparam() can only affect kernel doors.
949 	 * door_setparam() can only affect doors attached to the current
950 	 * process.
951 	 */
952 	if ((from_kernel && dp->door_target != &p0) ||
953 	    (!from_kernel && dp->door_target != curproc)) {
954 		mutex_exit(&door_knob);
955 		return (EPERM);
956 	}
957 
958 	switch (type) {
959 	case DOOR_PARAM_DESC_MAX:
960 		if (val > INT_MAX)
961 			error = ERANGE;
962 		else if ((dp->door_flags & DOOR_REFUSE_DESC) && val != 0)
963 			error = ENOTSUP;
964 		else
965 			dp->door_desc_max = (uint_t)val;
966 		break;
967 
968 	case DOOR_PARAM_DATA_MIN:
969 		if (val > dp->door_data_max)
970 			error = EINVAL;
971 		else
972 			dp->door_data_min = val;
973 		break;
974 
975 	case DOOR_PARAM_DATA_MAX:
976 		if (val < dp->door_data_min)
977 			error = EINVAL;
978 		else
979 			dp->door_data_max = val;
980 		break;
981 
982 	default:
983 		error = EINVAL;
984 		break;
985 	}
986 
987 	mutex_exit(&door_knob);
988 	return (error);
989 }
990 
991 static int
992 door_getparam_common(door_node_t *dp, int type, size_t *out)
993 {
994 	int error = 0;
995 
996 	mutex_enter(&door_knob);
997 	switch (type) {
998 	case DOOR_PARAM_DESC_MAX:
999 		*out = (size_t)dp->door_desc_max;
1000 		break;
1001 	case DOOR_PARAM_DATA_MIN:
1002 		*out = dp->door_data_min;
1003 		break;
1004 	case DOOR_PARAM_DATA_MAX:
1005 		*out = dp->door_data_max;
1006 		break;
1007 	default:
1008 		error = EINVAL;
1009 		break;
1010 	}
1011 	mutex_exit(&door_knob);
1012 	return (error);
1013 }
1014 
1015 int
1016 door_setparam(int did, int type, size_t val)
1017 {
1018 	door_node_t *dp;
1019 	int error = 0;
1020 
1021 	if ((dp = door_lookup(did, NULL)) == NULL)
1022 		return (set_errno(EBADF));
1023 
1024 	error = door_setparam_common(dp, 0, type, val);
1025 
1026 	releasef(did);
1027 
1028 	if (error)
1029 		return (set_errno(error));
1030 
1031 	return (0);
1032 }
1033 
1034 int
1035 door_getparam(int did, int type, size_t *out)
1036 {
1037 	door_node_t *dp;
1038 	size_t val = 0;
1039 	int error = 0;
1040 
1041 	if ((dp = door_lookup(did, NULL)) == NULL)
1042 		return (set_errno(EBADF));
1043 
1044 	error = door_getparam_common(dp, type, &val);
1045 
1046 	releasef(did);
1047 
1048 	if (error)
1049 		return (set_errno(error));
1050 
1051 	if (get_udatamodel() == DATAMODEL_NATIVE) {
1052 		if (copyout(&val, out, sizeof (val)))
1053 			return (set_errno(EFAULT));
1054 #ifdef _SYSCALL32_IMPL
1055 	} else {
1056 		size32_t val32 = (size32_t)val;
1057 
1058 		if (val != val32)
1059 			return (set_errno(EOVERFLOW));
1060 
1061 		if (copyout(&val32, out, sizeof (val32)))
1062 			return (set_errno(EFAULT));
1063 #endif /* _SYSCALL32_IMPL */
1064 	}
1065 
1066 	return (0);
1067 }
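
/*
 * Illustrative use of the two calls above from a door server process
 * (hypothetical values; the DOOR_PARAM_* names appear in the common
 * routines above):
 *
 *	size_t max;
 *
 *	(void) door_setparam(fd, DOOR_PARAM_DATA_MAX, 4096);
 *	(void) door_getparam(fd, DOOR_PARAM_DATA_MAX, &max);
 */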
1068 
1069 /*
1070  * A copyout() which proceeds from high addresses to low addresses.  This way,
1071  * stack guard pages are effective.
1072  *
1073  * Note that we use copyout_nowatch();  this is called while the client is
1074  * held.
1075  */
1076 static int
1077 door_stack_copyout(const void *kaddr, void *uaddr, size_t count)
1078 {
1079 	const char *kbase = (const char *)kaddr;
1080 	uintptr_t ubase = (uintptr_t)uaddr;
1081 	size_t pgsize = PAGESIZE;
1082 
1083 	if (count <= pgsize)
1084 		return (copyout_nowatch(kaddr, uaddr, count));
1085 
1086 	while (count > 0) {
1087 		uintptr_t start, end, offset, amount;
1088 
1089 		end = ubase + count;
1090 		start = P2ALIGN(end - 1, pgsize);
1091 		if (P2ALIGN(ubase, pgsize) == start)
1092 			start = ubase;
1093 
1094 		offset = start - ubase;
1095 		amount = end - start;
1096 
1097 		ASSERT(amount > 0 && amount <= count && amount <= pgsize);
1098 
1099 		if (copyout_nowatch(kbase + offset, (void *)start, amount))
1100 			return (1);
1101 		count -= amount;
1102 	}
1103 	return (0);
1104 }
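
/*
 * Worked example of the loop above, assuming a 0x1000-byte PAGESIZE:
 * for uaddr 0x1000 and count 0x2800, the chunks are copied highest
 * address first as [0x3000, 0x3800), then [0x2000, 0x3000), then
 * [0x1000, 0x2000).  No chunk spans a page boundary, and the pages
 * closest to the base of the stack are touched first.
 */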
1105 
1106 /*
1107  * Writes the stack layout for door_return() into the door_server_t of the
1108  * server thread.
1109  */
1110 static int
1111 door_layout(kthread_t *tp, size_t data_size, uint_t ndesc, int info_needed)
1112 {
1113 	door_server_t *st = DOOR_SERVER(tp->t_door);
1114 	door_layout_t *out = &st->d_layout;
1115 	uintptr_t base_sp = (uintptr_t)st->d_sp;
1116 	size_t ssize = st->d_ssize;
1117 	size_t descsz;
1118 	uintptr_t descp, datap, infop, resultsp, finalsp;
1119 	size_t align = STACK_ALIGN;
1120 	size_t results_sz = sizeof (struct door_results);
1121 	model_t datamodel = lwp_getdatamodel(ttolwp(tp));
1122 
1123 	ASSERT(!st->d_layout_done);
1124 
1125 #ifndef _STACK_GROWS_DOWNWARD
1126 #error stack does not grow downward, door_layout() must change
1127 #endif
1128 
1129 #ifdef _SYSCALL32_IMPL
1130 	if (datamodel != DATAMODEL_NATIVE) {
1131 		align = STACK_ALIGN32;
1132 		results_sz = sizeof (struct door_results32);
1133 	}
1134 #endif
1135 
1136 	descsz = ndesc * sizeof (door_desc_t);
1137 
1138 	/*
1139 	 * To speed up the overflow checking, we do an initial check
1140 	 * that the passed in data size won't cause us to wrap past
1141 	 * base_sp.  Since door_max_desc limits descsz, we can
1142 	 * safely use it here.  65535 is an arbitrary 'bigger than
1143 	 * we need, small enough to not cause trouble' constant;
1144 	 * the only constraint is that it must be greater than:
1145 	 *
1146 	 *	5 * STACK_ALIGN +
1147 	 *	    sizeof (door_info_t) +
1148 	 *	    sizeof (door_results_t) +
1149 	 *	    (max adjustment from door_final_sp())
1150 	 *
1151 	 * After we compute the layout, we can safely do a "did we wrap
1152 	 * around" check, followed by a check against the recorded
1153 	 * stack size.
1154 	 */
1155 	if (data_size >= SIZE_MAX - (size_t)65535UL - descsz)
1156 		return (E2BIG);		/* overflow */
1157 
1158 	descp = P2ALIGN(base_sp - descsz, align);
1159 	datap = P2ALIGN(descp - data_size, align);
1160 
1161 	if (info_needed)
1162 		infop = P2ALIGN(datap - sizeof (door_info_t), align);
1163 	else
1164 		infop = datap;
1165 
1166 	resultsp = P2ALIGN(infop - results_sz, align);
1167 	finalsp = door_final_sp(resultsp, align, datamodel);
1168 
1169 	if (finalsp > base_sp)
1170 		return (E2BIG);		/* overflow */
1171 
1172 	if (ssize != 0 && (base_sp - finalsp) > ssize)
1173 		return (E2BIG);		/* doesn't fit in stack */
1174 
1175 	out->dl_descp = (ndesc != 0)? (caddr_t)descp : 0;
1176 	out->dl_datap = (data_size != 0)? (caddr_t)datap : 0;
1177 	out->dl_infop = info_needed? (caddr_t)infop : 0;
1178 	out->dl_resultsp = (caddr_t)resultsp;
1179 	out->dl_sp = (caddr_t)finalsp;
1180 
1181 	st->d_layout_done = 1;
1182 	return (0);
1183 }
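
/*
 * The layout computed above, from high addresses to low (each boundary
 * aligned down to STACK_ALIGN, or STACK_ALIGN32 for 32-bit servers):
 *
 *	base_sp (st->d_sp)	top of the server thread's stack
 *	dl_descp		ndesc door_desc_t's, if any
 *	dl_datap		data_size bytes of argument data, if any
 *	dl_infop		door_info_t, only when info_needed
 *	dl_resultsp		struct door_results (or door_results32)
 *	dl_sp			final stack pointer from door_final_sp()
 */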
1184 
1185 static int
1186 door_server_dispatch(door_client_t *ct, door_node_t *dp)
1187 {
1188 	door_server_t *st = DOOR_SERVER(curthread->t_door);
1189 	door_layout_t *layout = &st->d_layout;
1190 	int error = 0;
1191 
1192 	int is_private = (dp->door_flags & DOOR_PRIVATE);
1193 
1194 	door_pool_t *pool = (is_private)? &dp->door_servers :
1195 	    &curproc->p_server_threads;
1196 
1197 	int empty_pool = (pool->dp_threads == NULL);
1198 
1199 	caddr_t infop = NULL;
1200 	char *datap = NULL;
1201 	size_t datasize = 0;
1202 	size_t descsize;
1203 
1204 	file_t **fpp = ct->d_fpp;
1205 	door_desc_t *start = NULL;
1206 	uint_t ndesc = 0;
1207 	uint_t ncopied = 0;
1208 
1209 	if (ct != NULL) {
1210 		datap = ct->d_args.data_ptr;
1211 		datasize = ct->d_args.data_size;
1212 		ndesc = ct->d_args.desc_num;
1213 	}
1214 
1215 	descsize = ndesc * sizeof (door_desc_t);
1216 
1217 	/*
1218 	 * Reset datap to NULL if we aren't passing any data.  Be careful
1219 	 * to let unref notifications through, though.
1220 	 */
1221 	if (datap == DOOR_UNREF_DATA) {
1222 		if (ct->d_upcall != NULL)
1223 			datasize = 0;
1224 		else
1225 			datap = NULL;
1226 	} else if (datasize == 0) {
1227 		datap = NULL;
1228 	}
1229 
1230 	/*
1231 	 * Get the stack layout, if it hasn't already been done.
1232 	 */
1233 	if (!st->d_layout_done) {
1234 		error = door_layout(curthread, datasize, ndesc,
1235 		    (is_private && empty_pool));
1236 		if (error != 0)
1237 			goto fail;
1238 	}
1239 
1240 	/*
1241 	 * fill out the stack, starting from the top.  Layout was already
1242 	 * filled in by door_args() or door_translate_out().
1243 	 */
1244 	if (layout->dl_descp != NULL) {
1245 		ASSERT(ndesc != 0);
1246 		start = kmem_alloc(descsize, KM_SLEEP);
1247 
1248 		while (ndesc > 0) {
1249 			if (door_insert(*fpp, &start[ncopied]) == -1) {
1250 				error = EMFILE;
1251 				goto fail;
1252 			}
1253 			ndesc--;
1254 			ncopied++;
1255 			fpp++;
1256 		}
1257 		if (door_stack_copyout(start, layout->dl_descp, descsize)) {
1258 			error = E2BIG;
1259 			goto fail;
1260 		}
1261 	}
1262 	fpp = NULL;			/* finished processing */
1263 
1264 	if (layout->dl_datap != NULL) {
1265 		ASSERT(datasize != 0);
1266 		datap = layout->dl_datap;
1267 		if (ct->d_upcall != NULL || datasize <= door_max_arg) {
1268 			if (door_stack_copyout(ct->d_buf, datap, datasize)) {
1269 				error = E2BIG;
1270 				goto fail;
1271 			}
1272 		}
1273 	}
1274 
1275 	if (is_private && empty_pool) {
1276 		door_info_t di;
1277 
1278 		infop = layout->dl_infop;
1279 		ASSERT(infop != NULL);
1280 
1281 		di.di_target = curproc->p_pid;
1282 		di.di_proc = (door_ptr_t)(uintptr_t)dp->door_pc;
1283 		di.di_data = (door_ptr_t)(uintptr_t)dp->door_data;
1284 		di.di_uniquifier = dp->door_index;
1285 		di.di_attributes = (dp->door_flags & DOOR_ATTR_MASK) |
1286 		    DOOR_LOCAL;
1287 
1288 		if (door_stack_copyout(&di, infop, sizeof (di))) {
1289 			error = E2BIG;
1290 			goto fail;
1291 		}
1292 	}
1293 
1294 	if (get_udatamodel() == DATAMODEL_NATIVE) {
1295 		struct door_results dr;
1296 
1297 		dr.cookie = dp->door_data;
1298 		dr.data_ptr = datap;
1299 		dr.data_size = datasize;
1300 		dr.desc_ptr = (door_desc_t *)layout->dl_descp;
1301 		dr.desc_num = ncopied;
1302 		dr.pc = dp->door_pc;
1303 		dr.nservers = !empty_pool;
1304 		dr.door_info = (door_info_t *)infop;
1305 
1306 		if (door_stack_copyout(&dr, layout->dl_resultsp, sizeof (dr))) {
1307 			error = E2BIG;
1308 			goto fail;
1309 		}
1310 #ifdef _SYSCALL32_IMPL
1311 	} else {
1312 		struct door_results32 dr32;
1313 
1314 		dr32.cookie = (caddr32_t)(uintptr_t)dp->door_data;
1315 		dr32.data_ptr = (caddr32_t)(uintptr_t)datap;
1316 		dr32.data_size = (size32_t)datasize;
1317 		dr32.desc_ptr = (caddr32_t)(uintptr_t)layout->dl_descp;
1318 		dr32.desc_num = ncopied;
1319 		dr32.pc = (caddr32_t)(uintptr_t)dp->door_pc;
1320 		dr32.nservers = !empty_pool;
1321 		dr32.door_info = (caddr32_t)(uintptr_t)infop;
1322 
1323 		if (door_stack_copyout(&dr32, layout->dl_resultsp,
1324 		    sizeof (dr32))) {
1325 			error = E2BIG;
1326 			goto fail;
1327 		}
1328 #endif
1329 	}
1330 
1331 	error = door_finish_dispatch(layout->dl_sp);
1332 fail:
1333 	if (start != NULL) {
1334 		if (error != 0)
1335 			door_fd_close(start, ncopied);
1336 		kmem_free(start, descsize);
1337 	}
1338 	if (fpp != NULL)
1339 		door_fp_close(fpp, ndesc);
1340 
1341 	return (error);
1342 }
1343 
1344 /*
1345  * Return the results (if any) to the caller (if any) and wait for the
1346  * next invocation on a door.
1347  */
1348 int
1349 door_return(caddr_t data_ptr, size_t data_size,
1350     door_desc_t *desc_ptr, uint_t desc_num, caddr_t sp, size_t ssize)
1351 {
1352 	kthread_t	*caller;
1353 	klwp_t		*lwp;
1354 	int		error = 0;
1355 	door_node_t	*dp;
1356 	door_server_t	*st;		/* curthread door_data */
1357 	door_client_t	*ct;		/* caller door_data */
1358 	int		cancel_pending;
1359 
1360 	st = door_my_server(1);
1361 
1362 	/*
1363 	 * If thread was bound to a door that no longer exists, return
1364 	 * an error.  This can happen if a thread is bound to a door
1365 	 * before the process calls forkall(); in the child, the door
1366 	 * doesn't exist and door_fork() sets the d_invbound flag.
1367 	 */
1368 	if (st->d_invbound)
1369 		return (set_errno(EINVAL));
1370 
1371 	st->d_sp = sp;			/* Save base of stack. */
1372 	st->d_ssize = ssize;		/* and its size */
1373 
1374 	/*
1375 	 * before we release our stack to the whims of our next caller,
1376 	 * copy in the syscall arguments if we're being traced by /proc.
1377 	 */
1378 	if (curthread->t_post_sys && PTOU(ttoproc(curthread))->u_systrap)
1379 		(void) save_syscall_args();
1380 
1381 	/* Make sure the caller hasn't gone away */
1382 	mutex_enter(&door_knob);
1383 	if ((caller = st->d_caller) == NULL || caller->t_door == NULL) {
1384 		if (desc_num != 0) {
1385 			/* close any DOOR_RELEASE descriptors */
1386 			mutex_exit(&door_knob);
1387 			error = door_release_fds(desc_ptr, desc_num);
1388 			if (error)
1389 				return (set_errno(error));
1390 			mutex_enter(&door_knob);
1391 		}
1392 		goto out;
1393 	}
1394 	ct = DOOR_CLIENT(caller->t_door);
1395 
1396 	ct->d_args.data_size = data_size;
1397 	ct->d_args.desc_num = desc_num;
1398 	/*
1399 	 * Transfer results, if any, to the client
1400 	 */
1401 	if (data_size != 0 || desc_num != 0) {
1402 		/*
1403 		 * Prevent the client from exiting until we have finished
1404 		 * moving results.
1405 		 */
1406 		DOOR_T_HOLD(ct);
1407 		mutex_exit(&door_knob);
1408 		error = door_results(caller, data_ptr, data_size,
1409 		    desc_ptr, desc_num);
1410 		mutex_enter(&door_knob);
1411 		DOOR_T_RELEASE(ct);
1412 		/*
1413 		 * Pass EOVERFLOW errors back to the client
1414 		 */
1415 		if (error && error != EOVERFLOW) {
1416 			mutex_exit(&door_knob);
1417 			return (set_errno(error));
1418 		}
1419 	}
1420 out:
1421 	/* Put ourselves on the available server thread list */
1422 	door_release_server(st->d_pool, curthread);
1423 
1424 	/*
1425 	 * Make sure the caller is still waiting to be resumed
1426 	 */
1427 	if (caller) {
1428 		disp_lock_t *tlp;
1429 
1430 		thread_lock(caller);
1431 		ct->d_error = error;		/* Return any errors */
1432 		if (caller->t_state == TS_SLEEP &&
1433 		    SOBJ_TYPE(caller->t_sobj_ops) == SOBJ_SHUTTLE) {
1434 			cpu_t *cp = CPU;
1435 
1436 			tlp = caller->t_lockp;
1437 			/*
1438 			 * Setting t_disp_queue prevents erroneous preemptions
1439 			 * if this thread is still in execution on another
1440 			 * processor
1441 			 */
1442 			caller->t_disp_queue = cp->cpu_disp;
1443 			CL_ACTIVE(caller);
1444 			/*
1445 			 * We are calling thread_onproc() instead of
1446 			 * THREAD_ONPROC() because the compiler can reorder
1447 			 * the two stores of t_state and t_lockp in
1448 			 * THREAD_ONPROC().
1449 			 */
1450 			thread_onproc(caller, cp);
1451 			disp_lock_exit_high(tlp);
1452 			shuttle_resume(caller, &door_knob);
1453 		} else {
1454 			/* May have been setrun or in stop state */
1455 			thread_unlock(caller);
1456 			shuttle_swtch(&door_knob);
1457 		}
1458 	} else {
1459 		shuttle_swtch(&door_knob);
1460 	}
1461 
1462 	/*
1463 	 * We've sprung to life. Determine if we are part of a door
1464 	 * invocation, or just interrupted
1465 	 */
1466 	lwp = ttolwp(curthread);
1467 	mutex_enter(&door_knob);
1468 	if ((dp = st->d_active) != NULL) {
1469 		/*
1470 		 * Normal door invocation. Return any error condition
1471 		 * encountered while trying to pass args to the server
1472 		 * thread.
1473 		 */
1474 		lwp->lwp_asleep = 0;
1475 		/*
1476 		 * Prevent the caller from leaving us while we
1477 		 * are copying out the arguments from its buffer.
1478 		 */
1479 		ASSERT(st->d_caller != NULL);
1480 		ct = DOOR_CLIENT(st->d_caller->t_door);
1481 
1482 		DOOR_T_HOLD(ct);
1483 		mutex_exit(&door_knob);
1484 		error = door_server_dispatch(ct, dp);
1485 		mutex_enter(&door_knob);
1486 		DOOR_T_RELEASE(ct);
1487 
1488 		/* let the client know we have processed his message */
1489 		ct->d_args_done = 1;
1490 
1491 		if (error) {
1492 			caller = st->d_caller;
1493 			if (caller)
1494 				ct = DOOR_CLIENT(caller->t_door);
1495 			else
1496 				ct = NULL;
1497 			goto out;
1498 		}
1499 		mutex_exit(&door_knob);
1500 		return (0);
1501 	} else {
1502 		/*
1503 		 * We are not involved in a door_invocation.
1504 		 * Check for /proc related activity...
1505 		 */
1506 		st->d_caller = NULL;
1507 		door_server_exit(curproc, curthread);
1508 		mutex_exit(&door_knob);
1509 		cancel_pending = 0;
1510 		if (ISSIG(curthread, FORREAL) || lwp->lwp_sysabort ||
1511 		    MUSTRETURN(curproc, curthread) ||
1512 		    (cancel_pending = schedctl_cancel_pending()) != 0) {
1513 			if (cancel_pending)
1514 				schedctl_cancel_eintr();
1515 			lwp->lwp_asleep = 0;
1516 			lwp->lwp_sysabort = 0;
1517 			return (set_errno(EINTR));
1518 		}
1519 		/* Go back and wait for another request */
1520 		lwp->lwp_asleep = 0;
1521 		mutex_enter(&door_knob);
1522 		caller = NULL;
1523 		goto out;
1524 	}
1525 }
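
/*
 * Sketch of the user-level view of door_return (hypothetical server
 * code; the libc wrapper, not this file, supplies the sp/ssize
 * arguments seen above):
 *
 *	(void) door_return(rbuf, rlen, NULL, 0);
 *
 * On success the call does not return to this point; the thread is
 * next resumed to service another invocation.
 */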
1526 
1527 /*
1528  * Revoke any future invocations on this door
1529  */
1530 int
1531 door_revoke(int did)
1532 {
1533 	door_node_t	*d;
1534 	int		error;
1535 
1536 	if ((d = door_lookup(did, NULL)) == NULL)
1537 		return (set_errno(EBADF));
1538 
1539 	mutex_enter(&door_knob);
1540 	if (d->door_target != curproc) {
1541 		mutex_exit(&door_knob);
1542 		releasef(did);
1543 		return (set_errno(EPERM));
1544 	}
1545 	d->door_flags |= DOOR_REVOKED;
1546 	if (d->door_flags & DOOR_PRIVATE)
1547 		cv_broadcast(&d->door_servers.dp_cv);
1548 	else
1549 		cv_broadcast(&curproc->p_server_threads.dp_cv);
1550 	mutex_exit(&door_knob);
1551 	releasef(did);
1552 	/* Invalidate the descriptor */
1553 	if ((error = closeandsetf(did, NULL)) != 0)
1554 		return (set_errno(error));
1555 	return (0);
1556 }
1557 
1558 int
1559 door_info(int did, struct door_info *d_info)
1560 {
1561 	door_node_t	*dp;
1562 	door_info_t	di;
1563 	door_server_t	*st;
1564 	file_t		*fp = NULL;
1565 
1566 	if (did == DOOR_QUERY) {
1567 		/* Get information on door current thread is bound to */
1568 		if ((st = door_my_server(0)) == NULL ||
1569 		    (dp = st->d_pool) == NULL)
1570 			/* Thread isn't bound to a door */
1571 			return (set_errno(EBADF));
1572 	} else if ((dp = door_lookup(did, &fp)) == NULL) {
1573 		/* Not a door */
1574 		return (set_errno(EBADF));
1575 	}
1576 
1577 	door_info_common(dp, &di, fp);
1578 
1579 	if (did != DOOR_QUERY)
1580 		releasef(did);
1581 
1582 	if (copyout(&di, d_info, sizeof (struct door_info)))
1583 		return (set_errno(EFAULT));
1584 	return (0);
1585 }
1586 
1587 /*
1588  * Common code for getting information about a door either via the
1589  * door_info system call or the door_ki_info kernel call.
1590  */
1591 void
1592 door_info_common(door_node_t *dp, struct door_info *dip, file_t *fp)
1593 {
1594 	int unref_count;
1595 
1596 	bzero(dip, sizeof (door_info_t));
1597 
1598 	mutex_enter(&door_knob);
1599 	if (dp->door_target == NULL)
1600 		dip->di_target = -1;
1601 	else
1602 		dip->di_target = dp->door_target->p_pid;
1603 
1604 	dip->di_attributes = dp->door_flags & DOOR_ATTR_MASK;
1605 	if (dp->door_target == curproc)
1606 		dip->di_attributes |= DOOR_LOCAL;
1607 	dip->di_proc = (door_ptr_t)(uintptr_t)dp->door_pc;
1608 	dip->di_data = (door_ptr_t)(uintptr_t)dp->door_data;
1609 	dip->di_uniquifier = dp->door_index;
1610 	/*
1611 	 * If this door is in the middle of having an unreferenced
1612 	 * notification delivered, don't count the VN_HOLD by
1613 	 * door_deliver_unref in determining if it is unreferenced.
1614 	 * This handles the case where door_info is called from the
1615 	 * thread delivering the unref notification.
1616 	 */
1617 	if (dp->door_flags & DOOR_UNREF_ACTIVE)
1618 		unref_count = 2;
1619 	else
1620 		unref_count = 1;
1621 	mutex_exit(&door_knob);
1622 
1623 	if (fp == NULL) {
1624 		/*
1625 		 * If this thread is bound to the door, then we can just
1626 		 * check the vnode; a ref count of 1 (or 2 if this is
1627 		 * handling an unref notification) means that the hold
1628 		 * from the door_bind is the only reference to the door
1629 		 * (no file descriptor refers to it).
1630 		 */
1631 		if (DTOV(dp)->v_count == unref_count)
1632 			dip->di_attributes |= DOOR_IS_UNREF;
1633 	} else {
1634 		/*
1635 		 * If we're working from a file descriptor or door handle
1636 		 * we need to look at the file structure count.  We don't
1637 		 * need to hold the vnode lock since this is just a snapshot.
1638 		 */
1639 		mutex_enter(&fp->f_tlock);
1640 		if (fp->f_count == 1 && DTOV(dp)->v_count == unref_count)
1641 			dip->di_attributes |= DOOR_IS_UNREF;
1642 		mutex_exit(&fp->f_tlock);
1643 	}
1644 }
1645 
1646 /*
1647  * Return credentials of the door caller (if any) for this invocation
1648  */
1649 int
1650 door_ucred(struct ucred_s *uch)
1651 {
1652 	kthread_t	*caller;
1653 	door_server_t	*st;
1654 	door_client_t	*ct;
1655 	door_upcall_t	*dup;
1656 	struct proc	*p;
1657 	struct ucred_s	*res;
1658 	int		err;
1659 
1660 	mutex_enter(&door_knob);
1661 	if ((st = door_my_server(0)) == NULL ||
1662 	    (caller = st->d_caller) == NULL) {
1663 		mutex_exit(&door_knob);
1664 		return (set_errno(EINVAL));
1665 	}
1666 
1667 	ASSERT(caller->t_door != NULL);
1668 	ct = DOOR_CLIENT(caller->t_door);
1669 
1670 	/* Prevent caller from exiting while we examine the cred */
1671 	DOOR_T_HOLD(ct);
1672 	mutex_exit(&door_knob);
1673 
1674 	p = ttoproc(caller);
1675 
1676 	/*
1677 	 * If the credentials are not specified by the client, get the one
1678 	 * associated with the calling process.
1679 	 */
1680 	if ((dup = ct->d_upcall) != NULL)
1681 		res = cred2ucred(dup->du_cred, p0.p_pid, NULL, CRED());
1682 	else
1683 		res = cred2ucred(caller->t_cred, p->p_pid, NULL, CRED());
1684 
1685 	mutex_enter(&door_knob);
1686 	DOOR_T_RELEASE(ct);
1687 	mutex_exit(&door_knob);
1688 
1689 	err = copyout(res, uch, res->uc_size);
1690 
1691 	kmem_free(res, res->uc_size);
1692 
1693 	if (err != 0)
1694 		return (set_errno(EFAULT));
1695 
1696 	return (0);
1697 }
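
/*
 * Illustrative use from within a user-level server procedure (the
 * ucred_t accessors are part of ucred(3C), not this file):
 *
 *	ucred_t *uc = NULL;
 *
 *	if (door_ucred(&uc) == 0) {
 *		uid_t euid = ucred_geteuid(uc);
 *		...
 *		ucred_free(uc);
 *	}
 */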
1698 
1699 /*
1700  * Bind the current lwp to the server thread pool associated with 'did'
1701  */
1702 int
1703 door_bind(int did)
1704 {
1705 	door_node_t	*dp;
1706 	door_server_t	*st;
1707 
1708 	if ((dp = door_lookup(did, NULL)) == NULL) {
1709 		/* Not a door */
1710 		return (set_errno(EBADF));
1711 	}
1712 
1713 	/*
1714 	 * Can't bind to a non-private door, and can't bind to a door
1715 	 * served by another process.
1716 	 */
1717 	if ((dp->door_flags & DOOR_PRIVATE) == 0 ||
1718 	    dp->door_target != curproc) {
1719 		releasef(did);
1720 		return (set_errno(EINVAL));
1721 	}
1722 
1723 	st = door_my_server(1);
1724 	if (st->d_pool)
1725 		door_unbind_thread(st->d_pool);
1726 	st->d_pool = dp;
1727 	st->d_invbound = 0;
1728 	door_bind_thread(dp);
1729 	releasef(did);
1730 
1731 	return (0);
1732 }
1733 
1734 /*
1735  * Unbind the current lwp from its server thread pool
1736  */
1737 int
1738 door_unbind(void)
1739 {
1740 	door_server_t *st;
1741 
1742 	if ((st = door_my_server(0)) == NULL)
1743 		return (set_errno(EBADF));
1744 
1745 	if (st->d_invbound) {
1746 		ASSERT(st->d_pool == NULL);
1747 		st->d_invbound = 0;
1748 		return (0);
1749 	}
1750 	if (st->d_pool == NULL)
1751 		return (set_errno(EBADF));
1752 	door_unbind_thread(st->d_pool);
1753 	st->d_pool = NULL;
1754 	return (0);
1755 }
1756 
1757 /*
1758  * Create a descriptor for the associated file and fill in the
1759  * attributes associated with it.
1760  *
1761  * Return 0 for success, -1 otherwise.
1762  */
1763 int
1764 door_insert(struct file *fp, door_desc_t *dp)
1765 {
1766 	struct vnode *vp;
1767 	int	fd;
1768 	door_attr_t attributes = DOOR_DESCRIPTOR;
1769 
1770 	ASSERT(MUTEX_NOT_HELD(&door_knob));
1771 	if ((fd = ufalloc(0)) == -1)
1772 		return (-1);
1773 	setf(fd, fp);
1774 	dp->d_data.d_desc.d_descriptor = fd;
1775 
1776 	/* Fill in the attributes */
1777 	if (VOP_REALVP(fp->f_vnode, &vp, NULL))
1778 		vp = fp->f_vnode;
1779 	if (vp && vp->v_type == VDOOR) {
1780 		if (VTOD(vp)->door_target == curproc)
1781 			attributes |= DOOR_LOCAL;
1782 		attributes |= VTOD(vp)->door_flags & DOOR_ATTR_MASK;
1783 		dp->d_data.d_desc.d_id = VTOD(vp)->door_index;
1784 	}
1785 	dp->d_attributes = attributes;
1786 	return (0);
1787 }
1788 
1789 /*
1790  * Return an available thread for this server.  A NULL return value indicates
1791  * that either:
1792  *	The door has been revoked, or
1793  *	a signal was received.
1794  * The two conditions can be differentiated using DOOR_INVALID(dp).
1795  */
1796 static kthread_t *
1797 door_get_server(door_node_t *dp)
1798 {
1799 	kthread_t **ktp;
1800 	kthread_t *server_t;
1801 	door_pool_t *pool;
1802 	door_server_t *st;
1803 	int signalled;
1804 
1805 	disp_lock_t *tlp;
1806 	cpu_t *cp;
1807 
1808 	ASSERT(MUTEX_HELD(&door_knob));
1809 
1810 	if (dp->door_flags & DOOR_PRIVATE)
1811 		pool = &dp->door_servers;
1812 	else
1813 		pool = &dp->door_target->p_server_threads;
1814 
1815 	for (;;) {
1816 		/*
1817 		 * We search the thread pool, looking for a server thread
1818 		 * ready to take an invocation (i.e. one which is still
1819 		 * sleeping on a shuttle object).  If none are available,
1820 		 * we sleep on the pool's CV, and will be signaled when a
1821 		 * thread is added to the pool.
1822 		 *
1823 		 * This relies on the fact that once a thread in the thread
1824 		 * pool wakes up, it *must* remove and add itself to the pool
1825 		 * before it can receive door calls.
1826 		 */
1827 		if (DOOR_INVALID(dp))
1828 			return (NULL);	/* Target has become invalid */
1829 
1830 		for (ktp = &pool->dp_threads;
1831 		    (server_t = *ktp) != NULL;
1832 		    ktp = &st->d_servers) {
1833 			st = DOOR_SERVER(server_t->t_door);
1834 
1835 			thread_lock(server_t);
1836 			if (server_t->t_state == TS_SLEEP &&
1837 			    SOBJ_TYPE(server_t->t_sobj_ops) == SOBJ_SHUTTLE)
1838 				break;
1839 			thread_unlock(server_t);
1840 		}
1841 		if (server_t != NULL)
1842 			break;		/* we've got a live one! */
1843 
1844 		if (!cv_wait_sig_swap_core(&pool->dp_cv, &door_knob,
1845 		    &signalled)) {
1846 			/*
1847 			 * If we were signaled and the door is still
1848 			 * valid, pass the signal on to another waiter.
1849 			 */
1850 			if (signalled && !DOOR_INVALID(dp))
1851 				cv_signal(&pool->dp_cv);
1852 			return (NULL);	/* Got a signal */
1853 		}
1854 	}
1855 
1856 	/*
1857 	 * We've got a thread_lock()ed thread which is still on the
1858 	 * shuttle.  Take it off the list of available server threads
1859 	 * and mark it as ONPROC.  We are committed to resuming this
1860 	 * thread now.
1861 	 */
1862 	tlp = server_t->t_lockp;
1863 	cp = CPU;
1864 
1865 	*ktp = st->d_servers;
1866 	st->d_servers = NULL;
1867 	/*
1868 	 * Setting t_disp_queue prevents erroneous preemptions
1869 	 * if this thread is still in execution on another processor
1870 	 */
1871 	server_t->t_disp_queue = cp->cpu_disp;
1872 	CL_ACTIVE(server_t);
1873 	/*
1874 	 * We are calling thread_onproc() instead of
1875 	 * THREAD_ONPROC() because the compiler can reorder
1876 	 * the two stores of t_state and t_lockp in
1877 	 * THREAD_ONPROC().
1878 	 */
1879 	thread_onproc(server_t, cp);
1880 	disp_lock_exit(tlp);
1881 	return (server_t);
1882 }
1883 
1884 /*
1885  * Put a server thread back in the pool.
1886  */
1887 static void
1888 door_release_server(door_node_t *dp, kthread_t *t)
1889 {
1890 	door_server_t *st = DOOR_SERVER(t->t_door);
1891 	door_pool_t *pool;
1892 
1893 	ASSERT(MUTEX_HELD(&door_knob));
1894 	st->d_active = NULL;
1895 	st->d_caller = NULL;
1896 	st->d_layout_done = 0;
1897 	if (dp && (dp->door_flags & DOOR_PRIVATE)) {
1898 		ASSERT(dp->door_target == NULL ||
1899 		    dp->door_target == ttoproc(t));
1900 		pool = &dp->door_servers;
1901 	} else {
1902 		pool = &ttoproc(t)->p_server_threads;
1903 	}
1904 
1905 	st->d_servers = pool->dp_threads;
1906 	pool->dp_threads = t;
1907 
1908 	/* If someone is waiting for a server thread, wake him up */
1909 	cv_signal(&pool->dp_cv);
1910 }
1911 
1912 /*
1913  * Remove a server thread from the pool if present.
1914  */
1915 static void
1916 door_server_exit(proc_t *p, kthread_t *t)
1917 {
1918 	door_pool_t *pool;
1919 	kthread_t **next;
1920 	door_server_t *st = DOOR_SERVER(t->t_door);
1921 
1922 	ASSERT(MUTEX_HELD(&door_knob));
1923 	if (st->d_pool != NULL) {
1924 		ASSERT(st->d_pool->door_flags & DOOR_PRIVATE);
1925 		pool = &st->d_pool->door_servers;
1926 	} else {
1927 		pool = &p->p_server_threads;
1928 	}
1929 
1930 	next = &pool->dp_threads;
1931 	while (*next != NULL) {
1932 		if (*next == t) {
1933 			*next = DOOR_SERVER(t->t_door)->d_servers;
1934 			return;
1935 		}
1936 		next = &(DOOR_SERVER((*next)->t_door)->d_servers);
1937 	}
1938 }
1939 
1940 /*
1941  * Lookup the door descriptor. Caller must call releasef when finished
1942  * with associated door.
1943  */
1944 static door_node_t *
1945 door_lookup(int did, file_t **fpp)
1946 {
1947 	vnode_t	*vp;
1948 	file_t *fp;
1949 
1950 	ASSERT(MUTEX_NOT_HELD(&door_knob));
1951 	if ((fp = getf(did)) == NULL)
1952 		return (NULL);
1953 	/*
1954 	 * Use the underlying vnode (we may be namefs mounted)
1955 	 */
1956 	if (VOP_REALVP(fp->f_vnode, &vp, NULL))
1957 		vp = fp->f_vnode;
1958 
1959 	if (vp == NULL || vp->v_type != VDOOR) {
1960 		releasef(did);
1961 		return (NULL);
1962 	}
1963 
1964 	if (fpp)
1965 		*fpp = fp;
1966 
1967 	return (VTOD(vp));
1968 }
1969 
1970 /*
1971  * The current thread is exiting, so clean up any pending
1972  * invocation details
1973  */
1974 void
1975 door_slam(void)
1976 {
1977 	door_node_t *dp;
1978 	door_data_t *dt;
1979 	door_client_t *ct;
1980 	door_server_t *st;
1981 
1982 	/*
1983 	 * If we are an active door server, notify our
1984 	 * client that we are exiting and revoke our door.
1985 	 */
1986 	if ((dt = door_my_data(0)) == NULL)
1987 		return;
1988 	ct = DOOR_CLIENT(dt);
1989 	st = DOOR_SERVER(dt);
1990 
1991 	mutex_enter(&door_knob);
1992 	for (;;) {
1993 		if (DOOR_T_HELD(ct))
1994 			cv_wait(&ct->d_cv, &door_knob);
1995 		else if (DOOR_T_HELD(st))
1996 			cv_wait(&st->d_cv, &door_knob);
1997 		else
1998 			break;			/* neither flag is set */
1999 	}
2000 	curthread->t_door = NULL;
2001 	if ((dp = st->d_active) != NULL) {
2002 		kthread_t *t = st->d_caller;
2003 		proc_t *p = curproc;
2004 
2005 		/* Revoke our door if the process is exiting */
2006 		if (dp->door_target == p && (p->p_flag & SEXITING)) {
2007 			door_list_delete(dp);
2008 			dp->door_target = NULL;
2009 			dp->door_flags |= DOOR_REVOKED;
2010 			if (dp->door_flags & DOOR_PRIVATE)
2011 				cv_broadcast(&dp->door_servers.dp_cv);
2012 			else
2013 				cv_broadcast(&p->p_server_threads.dp_cv);
2014 		}
2015 
2016 		if (t != NULL) {
2017 			/*
2018 			 * Let the caller know we are gone
2019 			 */
2020 			DOOR_CLIENT(t->t_door)->d_error = DOOR_EXIT;
2021 			thread_lock(t);
2022 			if (t->t_state == TS_SLEEP &&
2023 			    SOBJ_TYPE(t->t_sobj_ops) == SOBJ_SHUTTLE)
2024 				setrun_locked(t);
2025 			thread_unlock(t);
2026 		}
2027 	}
2028 	mutex_exit(&door_knob);
2029 	if (st->d_pool)
2030 		door_unbind_thread(st->d_pool);	/* Implicit door_unbind */
2031 	kmem_free(dt, sizeof (door_data_t));
2032 }
2033 
2034 /*
2035  * Set DOOR_REVOKED for all doors of the current process. This is called
2036  * on exit, before all lwps are terminated, so that door calls will
2037  * return with an error.
2038  */
2039 void
2040 door_revoke_all()
2041 {
2042 	door_node_t *dp;
2043 	proc_t *p = ttoproc(curthread);
2044 
2045 	mutex_enter(&door_knob);
2046 	for (dp = p->p_door_list; dp != NULL; dp = dp->door_list) {
2047 		ASSERT(dp->door_target == p);
2048 		dp->door_flags |= DOOR_REVOKED;
2049 		if (dp->door_flags & DOOR_PRIVATE)
2050 			cv_broadcast(&dp->door_servers.dp_cv);
2051 	}
2052 	cv_broadcast(&p->p_server_threads.dp_cv);
2053 	mutex_exit(&door_knob);
2054 }
2055 
2056 /*
2057  * The process is exiting, and all doors it created need to be revoked.
2058  */
2059 void
2060 door_exit(void)
2061 {
2062 	door_node_t *dp;
2063 	proc_t *p = ttoproc(curthread);
2064 
2065 	ASSERT(p->p_lwpcnt == 1);
2066 	/*
2067 	 * Walk the list of active doors created by this process and
2068 	 * revoke them all.
2069 	 */
2070 	mutex_enter(&door_knob);
2071 	for (dp = p->p_door_list; dp != NULL; dp = dp->door_list) {
2072 		dp->door_target = NULL;
2073 		dp->door_flags |= DOOR_REVOKED;
2074 		if (dp->door_flags & DOOR_PRIVATE)
2075 			cv_broadcast(&dp->door_servers.dp_cv);
2076 	}
2077 	cv_broadcast(&p->p_server_threads.dp_cv);
2078 	/* Clear the list */
2079 	p->p_door_list = NULL;
2080 
2081 	/* Clean up the unref list */
2082 	while ((dp = p->p_unref_list) != NULL) {
2083 		p->p_unref_list = dp->door_ulist;
2084 		dp->door_ulist = NULL;
2085 		mutex_exit(&door_knob);
2086 		VN_RELE(DTOV(dp));
2087 		mutex_enter(&door_knob);
2088 	}
2089 	mutex_exit(&door_knob);
2090 }
2091 
2092 
2093 /*
2094  * The process is executing forkall(), and we need to flag threads that
2095  * are bound to a door in the child.  This causes door_return to fail
2096  * in the child threads unless they call door_unbind first.
2097  */
2098 void
2099 door_fork(kthread_t *parent, kthread_t *child)
2100 {
2101 	door_data_t *pt = parent->t_door;
2102 	door_server_t *st = DOOR_SERVER(pt);
2103 	door_data_t *dt;
2104 
2105 	ASSERT(MUTEX_NOT_HELD(&door_knob));
2106 	if (pt != NULL && (st->d_pool != NULL || st->d_invbound)) {
2107 		/* parent thread is bound to a door */
2108 		dt = child->t_door =
2109 		    kmem_zalloc(sizeof (door_data_t), KM_SLEEP);
2110 		DOOR_SERVER(dt)->d_invbound = 1;
2111 	}
2112 }
2113 
2114 /*
2115  * Deliver queued unrefs to the appropriate door server.
2116  */
2117 static int
2118 door_unref(void)
2119 {
2120 	door_node_t	*dp;
2121 	static door_arg_t unref_args = { DOOR_UNREF_DATA, 0, 0, 0, 0, 0 };
2122 	proc_t *p = ttoproc(curthread);
2123 
2124 	/* make sure there's only one unref thread per process */
2125 	mutex_enter(&door_knob);
2126 	if (p->p_unref_thread) {
2127 		mutex_exit(&door_knob);
2128 		return (set_errno(EALREADY));
2129 	}
2130 	p->p_unref_thread = 1;
2131 	mutex_exit(&door_knob);
2132 
2133 	(void) door_my_data(1);			/* create info, if necessary */
2134 
2135 	for (;;) {
2136 		mutex_enter(&door_knob);
2137 
2138 		/* Grab a queued request */
2139 		while ((dp = p->p_unref_list) == NULL) {
2140 			if (!cv_wait_sig(&p->p_unref_cv, &door_knob)) {
2141 				/*
2142 				 * Interrupted.
2143 				 * Return so we can finish forkall() or exit().
2144 				 */
2145 				p->p_unref_thread = 0;
2146 				mutex_exit(&door_knob);
2147 				return (set_errno(EINTR));
2148 			}
2149 		}
2150 		p->p_unref_list = dp->door_ulist;
2151 		dp->door_ulist = NULL;
2152 		dp->door_flags |= DOOR_UNREF_ACTIVE;
2153 		mutex_exit(&door_knob);
2154 
2155 		(void) door_upcall(DTOV(dp), &unref_args, NULL, SIZE_MAX, 0);
2156 
2157 		if (unref_args.rbuf != 0) {
2158 			kmem_free(unref_args.rbuf, unref_args.rsize);
2159 			unref_args.rbuf = NULL;
2160 			unref_args.rsize = 0;
2161 		}
2162 
2163 		mutex_enter(&door_knob);
2164 		ASSERT(dp->door_flags & DOOR_UNREF_ACTIVE);
2165 		dp->door_flags &= ~DOOR_UNREF_ACTIVE;
2166 		mutex_exit(&door_knob);
2167 		VN_RELE(DTOV(dp));
2168 	}
2169 }
2170 
2171 
2172 /*
2173  * Deliver queued unrefs to the kernel door server.
2174  */
2175 /* ARGSUSED */
2176 static void
2177 door_unref_kernel(caddr_t arg)
2178 {
2179 	door_node_t	*dp;
2180 	static door_arg_t unref_args = { DOOR_UNREF_DATA, 0, 0, 0, 0, 0 };
2181 	proc_t *p = ttoproc(curthread);
2182 	callb_cpr_t cprinfo;
2183 
2184 	/* should only be one of these */
2185 	mutex_enter(&door_knob);
2186 	if (p->p_unref_thread) {
2187 		mutex_exit(&door_knob);
2188 		return;
2189 	}
2190 	p->p_unref_thread = 1;
2191 	mutex_exit(&door_knob);
2192 
2193 	(void) door_my_data(1);		/* make sure we have a door_data_t */
2194 
2195 	CALLB_CPR_INIT(&cprinfo, &door_knob, callb_generic_cpr, "door_unref");
2196 	for (;;) {
2197 		mutex_enter(&door_knob);
2198 		/* Grab a queued request */
2199 		while ((dp = p->p_unref_list) == NULL) {
2200 			CALLB_CPR_SAFE_BEGIN(&cprinfo);
2201 			cv_wait(&p->p_unref_cv, &door_knob);
2202 			CALLB_CPR_SAFE_END(&cprinfo, &door_knob);
2203 		}
2204 		p->p_unref_list = dp->door_ulist;
2205 		dp->door_ulist = NULL;
2206 		dp->door_flags |= DOOR_UNREF_ACTIVE;
2207 		mutex_exit(&door_knob);
2208 
2209 		(*(dp->door_pc))(dp->door_data, &unref_args, NULL, NULL, NULL);
2210 
2211 		mutex_enter(&door_knob);
2212 		ASSERT(dp->door_flags & DOOR_UNREF_ACTIVE);
2213 		dp->door_flags &= ~DOOR_UNREF_ACTIVE;
2214 		mutex_exit(&door_knob);
2215 		VN_RELE(DTOV(dp));
2216 	}
2217 }
2218 
2219 
2220 /*
2221  * Queue an unref invocation for processing by the door's target process.
2222  * The door may or may not be revoked at this point.
2223  */
2224 void
2225 door_deliver_unref(door_node_t *d)
2226 {
2227 	struct proc *server = d->door_target;
2228 
2229 	ASSERT(MUTEX_HELD(&door_knob));
2230 	ASSERT(d->door_active == 0);
2231 
2232 	if (server == NULL)
2233 		return;
2234 	/*
2235 	 * Create an lwp to deliver unref calls if one isn't already running.
2236 	 *
2237 	 * A separate thread is used to deliver unrefs since the current
2238 	 * thread may be holding resources (e.g. locks) in user land that
2239 	 * may be needed by the unref processing. This would cause a
2240 	 * deadlock.
2241 	 */
2242 	if (d->door_flags & DOOR_UNREF_MULTI) {
2243 		/* multiple unrefs */
2244 		d->door_flags &= ~DOOR_DELAY;
2245 	} else {
2246 		/* Only 1 unref per door */
2247 		d->door_flags &= ~(DOOR_UNREF|DOOR_DELAY);
2248 	}
2249 	mutex_exit(&door_knob);
2250 
2251 	/*
2252 	 * Need to bump the vnode count before putting the door on the
2253 	 * list so it doesn't get prematurely released by door_unref.
2254 	 */
2255 	VN_HOLD(DTOV(d));
2256 
2257 	mutex_enter(&door_knob);
2258 	/* is this door already on the unref list? */
2259 	if (d->door_flags & DOOR_UNREF_MULTI) {
2260 		door_node_t *dp;
2261 		for (dp = server->p_unref_list; dp != NULL;
2262 		    dp = dp->door_ulist) {
2263 			if (d == dp) {
2264 				/* already there, don't need to add another */
2265 				mutex_exit(&door_knob);
2266 				VN_RELE(DTOV(d));
2267 				mutex_enter(&door_knob);
2268 				return;
2269 			}
2270 		}
2271 	}
2272 	ASSERT(d->door_ulist == NULL);
2273 	d->door_ulist = server->p_unref_list;
2274 	server->p_unref_list = d;
2275 	cv_broadcast(&server->p_unref_cv);
2276 }
2277 
2278 /*
2279  * The caller's buffer isn't big enough for all of the data/fds. Allocate
2280  * space in the caller's address space for the results and copy the data
2281  * there.
2282  *
2283  * For EOVERFLOW, we must clean up the server's door descriptors.
2284  */
2285 static int
2286 door_overflow(
2287 	kthread_t	*caller,
2288 	caddr_t		data_ptr,	/* data location */
2289 	size_t		data_size,	/* data size */
2290 	door_desc_t	*desc_ptr,	/* descriptor location */
2291 	uint_t		desc_num)	/* descriptor count */
2292 {
2293 	proc_t *callerp = ttoproc(caller);
2294 	struct as *as = callerp->p_as;
2295 	door_client_t *ct = DOOR_CLIENT(caller->t_door);
2296 	caddr_t	addr;			/* Resulting address in target */
2297 	size_t	rlen;			/* Rounded len */
2298 	size_t	len;
2299 	uint_t	i;
2300 	size_t	ds = desc_num * sizeof (door_desc_t);
2301 
2302 	ASSERT(MUTEX_NOT_HELD(&door_knob));
2303 	ASSERT(DOOR_T_HELD(ct) || ct->d_kernel);
2304 
2305 	/* Do initial overflow check */
2306 	if (!ufcanalloc(callerp, desc_num))
2307 		return (EMFILE);
2308 
2309 	/*
2310 	 * Allocate space for the results in the caller's address space
2311 	 */
2312 	rlen = roundup(data_size + ds, PAGESIZE);
2313 	as_rangelock(as);
2314 	map_addr_proc(&addr, rlen, 0, 1, as->a_userlimit, ttoproc(caller), 0);
2315 	if (addr == NULL ||
2316 	    as_map(as, addr, rlen, segvn_create, zfod_argsp) != 0) {
2317 		/* No virtual memory available, or anon mapping failed */
2318 		as_rangeunlock(as);
2319 		if (!ct->d_kernel && desc_num > 0) {
2320 			int error = door_release_fds(desc_ptr, desc_num);
2321 			if (error)
2322 				return (error);
2323 		}
2324 		return (EOVERFLOW);
2325 	}
2326 	as_rangeunlock(as);
2327 
2328 	if (ct->d_kernel)
2329 		goto out;
2330 
2331 	if (data_size != 0) {
2332 		caddr_t	src = data_ptr;
2333 		caddr_t saddr = addr;
2334 
2335 		/* Copy any data */
2336 		len = data_size;
2337 		while (len != 0) {
2338 			int	amount;
2339 			int	error;
2340 
2341 			amount = len > PAGESIZE ? PAGESIZE : len;
2342 			if ((error = door_copy(as, src, saddr, amount)) != 0) {
2343 				(void) as_unmap(as, addr, rlen);
2344 				return (error);
2345 			}
2346 			saddr += amount;
2347 			src += amount;
2348 			len -= amount;
2349 		}
2350 	}
2351 	/* Copy any fd's */
2352 	if (desc_num != 0) {
2353 		door_desc_t	*didpp, *start;
2354 		struct file	**fpp;
2355 		int		fpp_size;
2356 
2357 		start = didpp = kmem_alloc(ds, KM_SLEEP);
2358 		if (copyin_nowatch(desc_ptr, didpp, ds)) {
2359 			kmem_free(start, ds);
2360 			(void) as_unmap(as, addr, rlen);
2361 			return (EFAULT);
2362 		}
2363 
2364 		fpp_size = desc_num * sizeof (struct file *);
2365 		if (fpp_size > ct->d_fpp_size) {
2366 			/* make more space */
2367 			if (ct->d_fpp_size)
2368 				kmem_free(ct->d_fpp, ct->d_fpp_size);
2369 			ct->d_fpp_size = fpp_size;
2370 			ct->d_fpp = kmem_alloc(ct->d_fpp_size, KM_SLEEP);
2371 		}
2372 		fpp = ct->d_fpp;
2373 
2374 		for (i = 0; i < desc_num; i++) {
2375 			struct file *fp;
2376 			int fd = didpp->d_data.d_desc.d_descriptor;
2377 
2378 			if (!(didpp->d_attributes & DOOR_DESCRIPTOR) ||
2379 			    (fp = getf(fd)) == NULL) {
2380 				/* close translated references */
2381 				door_fp_close(ct->d_fpp, fpp - ct->d_fpp);
2382 				/* close untranslated references */
2383 				door_fd_rele(didpp, desc_num - i, 0);
2384 				kmem_free(start, ds);
2385 				(void) as_unmap(as, addr, rlen);
2386 				return (EINVAL);
2387 			}
2388 			mutex_enter(&fp->f_tlock);
2389 			fp->f_count++;
2390 			mutex_exit(&fp->f_tlock);
2391 
2392 			*fpp = fp;
2393 			releasef(fd);
2394 
2395 			if (didpp->d_attributes & DOOR_RELEASE) {
2396 				/* release passed reference */
2397 				(void) closeandsetf(fd, NULL);
2398 			}
2399 
2400 			fpp++; didpp++;
2401 		}
2402 		kmem_free(start, ds);
2403 	}
2404 
2405 out:
2406 	ct->d_overflow = 1;
2407 	ct->d_args.rbuf = addr;
2408 	ct->d_args.rsize = rlen;
2409 	return (0);
2410 }
2411 
2412 /*
2413  * Transfer arguments from the client to the server.
2414  */
2415 static int
2416 door_args(kthread_t *server, int is_private)
2417 {
2418 	door_server_t *st = DOOR_SERVER(server->t_door);
2419 	door_client_t *ct = DOOR_CLIENT(curthread->t_door);
2420 	uint_t	ndid;
2421 	size_t	dsize;
2422 	int	error;
2423 
2424 	ASSERT(DOOR_T_HELD(st));
2425 	ASSERT(MUTEX_NOT_HELD(&door_knob));
2426 
2427 	ndid = ct->d_args.desc_num;
2428 	if (ndid > door_max_desc)
2429 		return (E2BIG);
2430 
2431 	/*
2432 	 * Get the stack layout, and fail now if it won't fit.
2433 	 */
2434 	error = door_layout(server, ct->d_args.data_size, ndid, is_private);
2435 	if (error != 0)
2436 		return (error);
2437 
2438 	dsize = ndid * sizeof (door_desc_t);
2439 	if (ct->d_args.data_size != 0) {
2440 		if (ct->d_args.data_size <= door_max_arg) {
2441 			/*
2442 			 * Use a two-copy method for small amounts of data
2443 			 *
2444 			 * Allocate a little more than we need for the
2445 			 * args, in the hope that the results will fit
2446 			 * without having to reallocate a buffer
2447 			 */
2448 			ASSERT(ct->d_buf == NULL);
2449 			ct->d_bufsize = roundup(ct->d_args.data_size,
2450 			    DOOR_ROUND);
2451 			ct->d_buf = kmem_alloc(ct->d_bufsize, KM_SLEEP);
2452 			if (copyin_nowatch(ct->d_args.data_ptr,
2453 			    ct->d_buf, ct->d_args.data_size) != 0) {
2454 				kmem_free(ct->d_buf, ct->d_bufsize);
2455 				ct->d_buf = NULL;
2456 				ct->d_bufsize = 0;
2457 				return (EFAULT);
2458 			}
2459 		} else {
2460 			struct as	*as;
2461 			caddr_t		src;
2462 			caddr_t		dest;
2463 			size_t		len = ct->d_args.data_size;
2464 			uintptr_t	base;
2465 
2466 			/*
2467 			 * Use a one-copy method
2468 			 */
2469 			as = ttoproc(server)->p_as;
2470 			src = ct->d_args.data_ptr;
2471 
2472 			dest = st->d_layout.dl_datap;
2473 			base = (uintptr_t)dest;
2474 
2475 			/*
2476 			 * Copy data directly into server.  We proceed
2477 			 * downward from the top of the stack, to mimic
2478 			 * normal stack usage. This allows the guard page
2479 			 * to stop us before we corrupt anything.
2480 			 */
2481 			while (len != 0) {
2482 				uintptr_t start;
2483 				uintptr_t end;
2484 				uintptr_t offset;
2485 				size_t	amount;
2486 
2487 				/*
2488 				 * Locate the next part to copy.
2489 				 */
2490 				end = base + len;
2491 				start = P2ALIGN(end - 1, PAGESIZE);
2492 
2493 				/*
2494 				 * If we are on the final (first) page, fix
2495 				 * up the start position.
2496 				 */
2497 				if (P2ALIGN(base, PAGESIZE) == start)
2498 					start = base;
2499 
2500 				offset = start - base;	/* the copy offset */
2501 				amount = end - start;	/* # bytes to copy */
2502 
2503 				ASSERT(amount > 0 && amount <= len &&
2504 				    amount <= PAGESIZE);
2505 
2506 				error = door_copy(as, src + offset,
2507 				    dest + offset, amount);
2508 				if (error != 0)
2509 					return (error);
2510 				len -= amount;
2511 			}
2512 		}
2513 	}
2514 	/*
2515 	 * Copyin the door args and translate them into files
2516 	 */
2517 	if (ndid != 0) {
2518 		door_desc_t	*didpp;
2519 		door_desc_t	*start;
2520 		struct file	**fpp;
2521 
2522 		start = didpp = kmem_alloc(dsize, KM_SLEEP);
2523 
2524 		if (copyin_nowatch(ct->d_args.desc_ptr, didpp, dsize)) {
2525 			kmem_free(start, dsize);
2526 			return (EFAULT);
2527 		}
2528 		ct->d_fpp_size = ndid * sizeof (struct file *);
2529 		ct->d_fpp = kmem_alloc(ct->d_fpp_size, KM_SLEEP);
2530 		fpp = ct->d_fpp;
2531 		while (ndid--) {
2532 			struct file *fp;
2533 			int fd = didpp->d_data.d_desc.d_descriptor;
2534 
2535 			/* We only understand file descriptors as passed objs */
2536 			if (!(didpp->d_attributes & DOOR_DESCRIPTOR) ||
2537 			    (fp = getf(fd)) == NULL) {
2538 				/* close translated references */
2539 				door_fp_close(ct->d_fpp, fpp - ct->d_fpp);
2540 				/* close untranslated references */
2541 				door_fd_rele(didpp, ndid + 1, 0);
2542 				kmem_free(start, dsize);
2543 				kmem_free(ct->d_fpp, ct->d_fpp_size);
2544 				ct->d_fpp = NULL;
2545 				ct->d_fpp_size = 0;
2546 				return (EINVAL);
2547 			}
2548 			/* Hold the fp */
2549 			mutex_enter(&fp->f_tlock);
2550 			fp->f_count++;
2551 			mutex_exit(&fp->f_tlock);
2552 
2553 			*fpp = fp;
2554 			releasef(fd);
2555 
2556 			if (didpp->d_attributes & DOOR_RELEASE) {
2557 				/* release passed reference */
2558 				(void) closeandsetf(fd, NULL);
2559 			}
2560 
2561 			fpp++; didpp++;
2562 		}
2563 		kmem_free(start, dsize);
2564 	}
2565 	return (0);
2566 }
2567 
2568 /*
2569  * Transfer arguments from a user client to a kernel server.  This copies in
2570  * descriptors and translates them into door handles.  It doesn't touch the
2571  * other data, letting the kernel server deal with that (to avoid needing
2572  * to copy the data twice).
2573  */
2574 static int
2575 door_translate_in(void)
2576 {
2577 	door_client_t *ct = DOOR_CLIENT(curthread->t_door);
2578 	uint_t	ndid;
2579 
2580 	ASSERT(MUTEX_NOT_HELD(&door_knob));
2581 	ndid = ct->d_args.desc_num;
2582 	if (ndid > door_max_desc)
2583 		return (E2BIG);
2584 	/*
2585 	 * Copyin the door args and translate them into door handles.
2586 	 */
2587 	if (ndid != 0) {
2588 		door_desc_t	*didpp;
2589 		door_desc_t	*start;
2590 		size_t		dsize = ndid * sizeof (door_desc_t);
2591 		struct file	*fp;
2592 
2593 		start = didpp = kmem_alloc(dsize, KM_SLEEP);
2594 
2595 		if (copyin_nowatch(ct->d_args.desc_ptr, didpp, dsize)) {
2596 			kmem_free(start, dsize);
2597 			return (EFAULT);
2598 		}
2599 		while (ndid--) {
2600 			vnode_t	*vp;
2601 			int fd = didpp->d_data.d_desc.d_descriptor;
2602 
2603 			/*
2604 			 * We only understand file descriptors as passed objs
2605 			 */
2606 			if ((didpp->d_attributes & DOOR_DESCRIPTOR) &&
2607 			    (fp = getf(fd)) != NULL) {
2608 				didpp->d_data.d_handle = FTODH(fp);
2609 				/* Hold the door */
2610 				door_ki_hold(didpp->d_data.d_handle);
2611 
2612 				releasef(fd);
2613 
2614 				if (didpp->d_attributes & DOOR_RELEASE) {
2615 					/* release passed reference */
2616 					(void) closeandsetf(fd, NULL);
2617 				}
2618 
2619 				if (VOP_REALVP(fp->f_vnode, &vp, NULL))
2620 					vp = fp->f_vnode;
2621 
2622 				/* Set attributes */
2623 				didpp->d_attributes = DOOR_HANDLE |
2624 				    (VTOD(vp)->door_flags & DOOR_ATTR_MASK);
2625 			} else {
2626 				/* close translated references */
2627 				door_fd_close(start, didpp - start);
2628 				/* close untranslated references */
2629 				door_fd_rele(didpp, ndid + 1, 0);
2630 				kmem_free(start, dsize);
2631 				return (EINVAL);
2632 			}
2633 			didpp++;
2634 		}
2635 		ct->d_args.desc_ptr = start;
2636 	}
2637 	return (0);
2638 }
2639 
2640 /*
2641  * Translate door arguments from kernel to user.  This copies the passed
2642  * door handles.  It doesn't touch other data.  It is used by door_upcall,
2643  * and for data returned by a door_call to a kernel server.
2644  */
2645 static int
2646 door_translate_out(void)
2647 {
2648 	door_client_t *ct = DOOR_CLIENT(curthread->t_door);
2649 	uint_t	ndid;
2650 
2651 	ASSERT(MUTEX_NOT_HELD(&door_knob));
2652 	ndid = ct->d_args.desc_num;
2653 	if (ndid > door_max_desc) {
2654 		door_fd_rele(ct->d_args.desc_ptr, ndid, 1);
2655 		return (E2BIG);
2656 	}
2657 	/*
2658 	 * Translate the door args into files
2659 	 */
2660 	if (ndid != 0) {
2661 		door_desc_t	*didpp = ct->d_args.desc_ptr;
2662 		struct file	**fpp;
2663 
2664 		ct->d_fpp_size = ndid * sizeof (struct file *);
2665 		fpp = ct->d_fpp = kmem_alloc(ct->d_fpp_size, KM_SLEEP);
2666 		while (ndid--) {
2667 			struct file *fp = NULL;
2668 			int fd = -1;
2669 
2670 			/*
2671 			 * We understand file descriptors and door
2672 			 * handles as passed objs.
2673 			 */
2674 			if (didpp->d_attributes & DOOR_DESCRIPTOR) {
2675 				fd = didpp->d_data.d_desc.d_descriptor;
2676 				fp = getf(fd);
2677 			} else if (didpp->d_attributes & DOOR_HANDLE)
2678 				fp = DHTOF(didpp->d_data.d_handle);
2679 			if (fp != NULL) {
2680 				/* Hold the fp */
2681 				mutex_enter(&fp->f_tlock);
2682 				fp->f_count++;
2683 				mutex_exit(&fp->f_tlock);
2684 
2685 				*fpp = fp;
2686 				if (didpp->d_attributes & DOOR_DESCRIPTOR)
2687 					releasef(fd);
2688 				if (didpp->d_attributes & DOOR_RELEASE) {
2689 					/* release passed reference */
2690 					if (fd >= 0)
2691 						(void) closeandsetf(fd, NULL);
2692 					else
2693 						(void) closef(fp);
2694 				}
2695 			} else {
2696 				/* close translated references */
2697 				door_fp_close(ct->d_fpp, fpp - ct->d_fpp);
2698 				/* close untranslated references */
2699 				door_fd_rele(didpp, ndid + 1, 1);
2700 				kmem_free(ct->d_fpp, ct->d_fpp_size);
2701 				ct->d_fpp = NULL;
2702 				ct->d_fpp_size = 0;
2703 				return (EINVAL);
2704 			}
2705 			fpp++; didpp++;
2706 		}
2707 	}
2708 	return (0);
2709 }
2710 
2711 /*
2712  * Move the results from the server to the client
2713  */
2714 static int
2715 door_results(kthread_t *caller, caddr_t data_ptr, size_t data_size,
2716 		door_desc_t *desc_ptr, uint_t desc_num)
2717 {
2718 	door_client_t	*ct = DOOR_CLIENT(caller->t_door);
2719 	door_upcall_t	*dup = ct->d_upcall;
2720 	size_t		dsize;
2721 	size_t		rlen;
2722 	size_t		result_size;
2723 
2724 	ASSERT(DOOR_T_HELD(ct));
2725 	ASSERT(MUTEX_NOT_HELD(&door_knob));
2726 
2727 	if (ct->d_noresults)
2728 		return (E2BIG);		/* No results expected */
2729 
2730 	if (desc_num > door_max_desc)
2731 		return (E2BIG);		/* Too many descriptors */
2732 
2733 	dsize = desc_num * sizeof (door_desc_t);
2734 	/*
2735 	 * Check if the results are bigger than the clients buffer
2736 	 * Check if the results are bigger than the client's buffer
2737 	if (dsize)
2738 		rlen = roundup(data_size, sizeof (door_desc_t));
2739 	else
2740 		rlen = data_size;
2741 	if ((result_size = rlen + dsize) == 0)
2742 		return (0);
2743 
2744 	if (dup != NULL) {
2745 		if (desc_num > dup->du_max_descs)
2746 			return (EMFILE);
2747 
2748 		if (data_size > dup->du_max_data)
2749 			return (E2BIG);
2750 
2751 		/*
2752 		 * Handle upcalls
2753 		 */
2754 		if (ct->d_args.rbuf == NULL || ct->d_args.rsize < result_size) {
2755 			/*
2756 			 * If there's no return buffer or the buffer is too
2757 			 * small, allocate a new one.  The old buffer (if it
2758 			 * exists) will be freed by the upcall client.
2759 			 */
2760 			if (result_size > door_max_upcall_reply)
2761 				return (E2BIG);
2762 			ct->d_args.rsize = result_size;
2763 			ct->d_args.rbuf = kmem_alloc(result_size, KM_SLEEP);
2764 		}
2765 		ct->d_args.data_ptr = ct->d_args.rbuf;
2766 		if (data_size != 0 &&
2767 		    copyin_nowatch(data_ptr, ct->d_args.data_ptr,
2768 		    data_size) != 0)
2769 			return (EFAULT);
2770 	} else if (result_size > ct->d_args.rsize) {
2771 		return (door_overflow(caller, data_ptr, data_size,
2772 		    desc_ptr, desc_num));
2773 	} else if (data_size != 0) {
2774 		if (data_size <= door_max_arg) {
2775 			/*
2776 			 * Use a two-copy method for small amounts of data
2777 			 */
2778 			if (ct->d_buf == NULL) {
2779 				ct->d_bufsize = data_size;
2780 				ct->d_buf = kmem_alloc(ct->d_bufsize, KM_SLEEP);
2781 			} else if (ct->d_bufsize < data_size) {
2782 				kmem_free(ct->d_buf, ct->d_bufsize);
2783 				ct->d_bufsize = data_size;
2784 				ct->d_buf = kmem_alloc(ct->d_bufsize, KM_SLEEP);
2785 			}
2786 			if (copyin_nowatch(data_ptr, ct->d_buf, data_size) != 0)
2787 				return (EFAULT);
2788 		} else {
2789 			struct as *as = ttoproc(caller)->p_as;
2790 			caddr_t	dest = ct->d_args.rbuf;
2791 			caddr_t	src = data_ptr;
2792 			size_t	len = data_size;
2793 
2794 			/* Copy data directly into client */
2795 			while (len != 0) {
2796 				uint_t	amount;
2797 				uint_t	max;
2798 				uint_t	off;
2799 				int	error;
2800 
2801 				off = (uintptr_t)dest & PAGEOFFSET;
2802 				if (off)
2803 					max = PAGESIZE - off;
2804 				else
2805 					max = PAGESIZE;
2806 				amount = len > max ? max : len;
2807 				error = door_copy(as, src, dest, amount);
2808 				if (error != 0)
2809 					return (error);
2810 				dest += amount;
2811 				src += amount;
2812 				len -= amount;
2813 			}
2814 		}
2815 	}
2816 
2817 	/*
2818 	 * Copyin the returned descriptors and translate them into file pointers
2819 	 */
2820 	if (desc_num != 0) {
2821 		door_desc_t *start;
2822 		door_desc_t *didpp;
2823 		struct file **fpp;
2824 		size_t	fpp_size;
2825 		uint_t	i;
2826 
2827 		/* First, check if we would overflow client */
2828 		if (!ufcanalloc(ttoproc(caller), desc_num))
2829 			return (EMFILE);
2830 
2831 		start = didpp = kmem_alloc(dsize, KM_SLEEP);
2832 		if (copyin_nowatch(desc_ptr, didpp, dsize)) {
2833 			kmem_free(start, dsize);
2834 			return (EFAULT);
2835 		}
2836 		fpp_size = desc_num * sizeof (struct file *);
2837 		if (fpp_size > ct->d_fpp_size) {
2838 			/* make more space */
2839 			if (ct->d_fpp_size)
2840 				kmem_free(ct->d_fpp, ct->d_fpp_size);
2841 			ct->d_fpp_size = fpp_size;
2842 			ct->d_fpp = kmem_alloc(fpp_size, KM_SLEEP);
2843 		}
2844 		fpp = ct->d_fpp;
2845 
2846 		for (i = 0; i < desc_num; i++) {
2847 			struct file *fp;
2848 			int fd = didpp->d_data.d_desc.d_descriptor;
2849 
2850 			/* Only understand file descriptor results */
2851 			if (!(didpp->d_attributes & DOOR_DESCRIPTOR) ||
2852 			    (fp = getf(fd)) == NULL) {
2853 				/* close translated references */
2854 				door_fp_close(ct->d_fpp, fpp - ct->d_fpp);
2855 				/* close untranslated references */
2856 				door_fd_rele(didpp, desc_num - i, 0);
2857 				kmem_free(start, dsize);
2858 				return (EINVAL);
2859 			}
2860 
2861 			mutex_enter(&fp->f_tlock);
2862 			fp->f_count++;
2863 			mutex_exit(&fp->f_tlock);
2864 
2865 			*fpp = fp;
2866 			releasef(fd);
2867 
2868 			if (didpp->d_attributes & DOOR_RELEASE) {
2869 				/* release passed reference */
2870 				(void) closeandsetf(fd, NULL);
2871 			}
2872 
2873 			fpp++; didpp++;
2874 		}
2875 		kmem_free(start, dsize);
2876 	}
2877 	return (0);
2878 }
2879 
2880 /*
2881  * Close all the descriptors.
2882  */
2883 static void
2884 door_fd_close(door_desc_t *d, uint_t n)
2885 {
2886 	uint_t	i;
2887 
2888 	ASSERT(MUTEX_NOT_HELD(&door_knob));
2889 	for (i = 0; i < n; i++) {
2890 		if (d->d_attributes & DOOR_DESCRIPTOR) {
2891 			(void) closeandsetf(
2892 			    d->d_data.d_desc.d_descriptor, NULL);
2893 		} else if (d->d_attributes & DOOR_HANDLE) {
2894 			door_ki_rele(d->d_data.d_handle);
2895 		}
2896 		d++;
2897 	}
2898 }
2899 
2900 /*
2901  * Close descriptors that have the DOOR_RELEASE attribute set.
2902  */
2903 void
2904 door_fd_rele(door_desc_t *d, uint_t n, int from_kernel)
2905 {
2906 	uint_t	i;
2907 
2908 	ASSERT(MUTEX_NOT_HELD(&door_knob));
2909 	for (i = 0; i < n; i++) {
2910 		if (d->d_attributes & DOOR_RELEASE) {
2911 			if (d->d_attributes & DOOR_DESCRIPTOR) {
2912 				(void) closeandsetf(
2913 				    d->d_data.d_desc.d_descriptor, NULL);
2914 			} else if (from_kernel &&
2915 			    (d->d_attributes & DOOR_HANDLE)) {
2916 				door_ki_rele(d->d_data.d_handle);
2917 			}
2918 		}
2919 		d++;
2920 	}
2921 }
2922 
2923 /*
2924  * Copy descriptors into the kernel so we can release any marked
2925  * DOOR_RELEASE.
2926  */
2927 int
2928 door_release_fds(door_desc_t *desc_ptr, uint_t ndesc)
2929 {
2930 	size_t dsize;
2931 	door_desc_t *didpp;
2932 	uint_t desc_num;
2933 
2934 	ASSERT(MUTEX_NOT_HELD(&door_knob));
2935 	ASSERT(ndesc != 0);
2936 
2937 	desc_num = MIN(ndesc, door_max_desc);
2938 
2939 	dsize = desc_num * sizeof (door_desc_t);
2940 	didpp = kmem_alloc(dsize, KM_SLEEP);
2941 
2942 	while (ndesc > 0) {
2943 		uint_t count = MIN(ndesc, desc_num);
2944 
2945 		if (copyin_nowatch(desc_ptr, didpp,
2946 		    count * sizeof (door_desc_t))) {
2947 			kmem_free(didpp, dsize);
2948 			return (EFAULT);
2949 		}
2950 		door_fd_rele(didpp, count, 0);
2951 
2952 		ndesc -= count;
2953 		desc_ptr += count;
2954 	}
2955 	kmem_free(didpp, dsize);
2956 	return (0);
2957 }
2958 
2959 /*
2960  * Decrement ref count on all the files passed
2961  */
2962 static void
2963 door_fp_close(struct file **fp, uint_t n)
2964 {
2965 	uint_t	i;
2966 
2967 	ASSERT(MUTEX_NOT_HELD(&door_knob));
2968 
2969 	for (i = 0; i < n; i++)
2970 		(void) closef(fp[i]);
2971 }
2972 
2973 /*
2974  * Copy data from 'src' in the current address space to 'dest' in 'as' for
2975  * 'len' bytes.
2976  *
2977  * This is done with a single mapin and a single copy operation.
2978  *
2979  * We really should do more than 1 page at a time to improve
2980  * performance, but for now this is treated as an anomalous condition.
2981  */
2982 static int
2983 door_copy(struct as *as, caddr_t src, caddr_t dest, uint_t len)
2984 {
2985 	caddr_t	kaddr;
2986 	caddr_t	rdest;
2987 	uint_t	off;
2988 	page_t	**pplist;
2989 	page_t	*pp = NULL;
2990 	int	error = 0;
2991 
2992 	ASSERT(len <= PAGESIZE);
2993 	off = (uintptr_t)dest & PAGEOFFSET;	/* offset within the page */
2994 	rdest = (caddr_t)((uintptr_t)dest &
2995 	    (uintptr_t)PAGEMASK);	/* Page boundary */
2996 	ASSERT(off + len <= PAGESIZE);
2997 
2998 	/*
2999 	 * Lock down destination page.
3000 	 */
3001 	if (as_pagelock(as, &pplist, rdest, PAGESIZE, S_WRITE))
3002 		return (E2BIG);
3003 	/*
3004 	 * Check if we have a shadow page list from as_pagelock. If not,
3005 	 * we took the slow path and have to find our page struct the hard
3006 	 * way.
3007 	 */
3008 	if (pplist == NULL) {
3009 		pfn_t	pfnum;
3010 
3011 		/* MMU mapping is already locked down */
3012 		AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
3013 		pfnum = hat_getpfnum(as->a_hat, rdest);
3014 		AS_LOCK_EXIT(as, &as->a_lock);
3015 
3016 		/*
3017 		 * TODO: The pfn step should not be necessary - need
3018 		 * a hat_getpp() function.
3019 		 */
3020 		if (pf_is_memory(pfnum)) {
3021 			pp = page_numtopp_nolock(pfnum);
3022 			ASSERT(pp == NULL || PAGE_LOCKED(pp));
3023 		} else
3024 			pp = NULL;
3025 		if (pp == NULL) {
3026 			as_pageunlock(as, pplist, rdest, PAGESIZE, S_WRITE);
3027 			return (E2BIG);
3028 		}
3029 	} else {
3030 		pp = *pplist;
3031 	}
3032 	/*
3033 	 * Map destination page into kernel address
3034 	 */
3035 	kaddr = (caddr_t)ppmapin(pp, PROT_READ | PROT_WRITE, (caddr_t)-1);
3036 
3037 	/*
3038 	 * Copy from src to dest
3039 	 */
3040 	if (copyin_nowatch(src, kaddr + off, len) != 0)
3041 		error = EFAULT;
3042 	/*
3043 	 * Unmap destination page from kernel
3044 	 */
3045 	ppmapout(kaddr);
3046 	/*
3047 	 * Unlock destination page
3048 	 */
3049 	as_pageunlock(as, pplist, rdest, PAGESIZE, S_WRITE);
3050 	return (error);
3051 }
3052 
3053 /*
3054  * General kernel upcall using doors
3055  *	Returns 0 on success, or an errno value on failure.
3056  *	The caller must have a hold on the door-based vnode, and on any
3057  *	references passed in desc_ptr.  The references are released
3058  *	in the event of an error, and passed without duplication
3059  *	otherwise.  Note that param->rbuf must be 64-bit aligned in
3060  *	a 64-bit kernel, since it may be used to store door descriptors
3061  *	if they are returned by the server.  The caller is responsible
3062  *	for holding a reference to the cred passed in.
3063  */
3064 int
3065 door_upcall(vnode_t *vp, door_arg_t *param, struct cred *cred,
3066     size_t max_data, uint_t max_descs)
3067 {
3068 	/* Locals */
3069 	door_upcall_t	*dup;
3070 	door_node_t	*dp;
3071 	kthread_t	*server_thread;
3072 	int		error = 0;
3073 	klwp_t		*lwp;
3074 	door_client_t	*ct;		/* curthread door_data */
3075 	door_server_t	*st;		/* server thread door_data */
3076 	int		gotresults = 0;
3077 	int		cancel_pending;
3078 
3079 	if (vp->v_type != VDOOR) {
3080 		if (param->desc_num)
3081 			door_fd_rele(param->desc_ptr, param->desc_num, 1);
3082 		return (EINVAL);
3083 	}
3084 
3085 	lwp = ttolwp(curthread);
3086 	ct = door_my_client(1);
3087 	dp = VTOD(vp);	/* Convert to a door_node_t */
3088 
3089 	dup = kmem_zalloc(sizeof (*dup), KM_SLEEP);
3090 	dup->du_cred = (cred != NULL) ? cred : curthread->t_cred;
3091 	dup->du_max_data = max_data;
3092 	dup->du_max_descs = max_descs;
3093 
3094 	mutex_enter(&door_knob);
3095 	if (DOOR_INVALID(dp)) {
3096 		mutex_exit(&door_knob);
3097 		if (param->desc_num)
3098 			door_fd_rele(param->desc_ptr, param->desc_num, 1);
3099 		error = EBADF;
3100 		goto out;
3101 	}
3102 
3103 	if (dp->door_target == &p0) {
3104 		/* Can't do an upcall to a kernel server */
3105 		mutex_exit(&door_knob);
3106 		if (param->desc_num)
3107 			door_fd_rele(param->desc_ptr, param->desc_num, 1);
3108 		error = EINVAL;
3109 		goto out;
3110 	}
3111 
3112 	error = door_check_limits(dp, param, 1);
3113 	if (error != 0) {
3114 		mutex_exit(&door_knob);
3115 		if (param->desc_num)
3116 			door_fd_rele(param->desc_ptr, param->desc_num, 1);
3117 		goto out;
3118 	}
3119 
3120 	/*
3121 	 * Get a server thread from the target domain
3122 	 */
3123 	if ((server_thread = door_get_server(dp)) == NULL) {
3124 		if (DOOR_INVALID(dp))
3125 			error = EBADF;
3126 		else
3127 			error = EAGAIN;
3128 		mutex_exit(&door_knob);
3129 		if (param->desc_num)
3130 			door_fd_rele(param->desc_ptr, param->desc_num, 1);
3131 		goto out;
3132 	}
3133 
3134 	st = DOOR_SERVER(server_thread->t_door);
3135 	ct->d_buf = param->data_ptr;
3136 	ct->d_bufsize = param->data_size;
3137 	ct->d_args = *param;	/* structure assignment */
3138 
3139 	if (ct->d_args.desc_num) {
3140 		/*
3141 		 * Move data from client to server
3142 		 */
3143 		DOOR_T_HOLD(st);
3144 		mutex_exit(&door_knob);
3145 		error = door_translate_out();
3146 		mutex_enter(&door_knob);
3147 		DOOR_T_RELEASE(st);
3148 		if (error) {
3149 			/*
3150 			 * We're not going to resume this thread after all
3151 			 */
3152 			door_release_server(dp, server_thread);
3153 			shuttle_sleep(server_thread);
3154 			mutex_exit(&door_knob);
3155 			goto out;
3156 		}
3157 	}
3158 
3159 	ct->d_upcall = dup;
3160 	if (param->rsize == 0)
3161 		ct->d_noresults = 1;
3162 	else
3163 		ct->d_noresults = 0;
3164 
3165 	dp->door_active++;
3166 
3167 	ct->d_error = DOOR_WAIT;
3168 	st->d_caller = curthread;
3169 	st->d_active = dp;
3170 
3171 	shuttle_resume(server_thread, &door_knob);
3172 
3173 	mutex_enter(&door_knob);
3174 shuttle_return:
3175 	if ((error = ct->d_error) < 0) {	/* DOOR_WAIT or DOOR_EXIT */
3176 		/*
3177 		 * Premature wakeup. Find out why (stop, forkall, sig, exit ...)
3178 		 */
3179 		mutex_exit(&door_knob);		/* May block in ISSIG */
3180 		cancel_pending = 0;
3181 		if (lwp && (ISSIG(curthread, FORREAL) || lwp->lwp_sysabort ||
3182 		    MUSTRETURN(curproc, curthread) ||
3183 		    (cancel_pending = schedctl_cancel_pending()) != 0)) {
3184 			/* Signal, forkall, ... */
3185 			if (cancel_pending)
3186 				schedctl_cancel_eintr();
3187 			lwp->lwp_sysabort = 0;
3188 			mutex_enter(&door_knob);
3189 			error = EINTR;
3190 			/*
3191 			 * If the server has finished processing our call,
3192 			 * or exited (calling door_slam()), then d_error
3193 			 * will have changed.  If the server hasn't finished
3194 			 * yet, d_error will still be DOOR_WAIT, and we
3195 			 * let it know we are not interested in any
3196 			 * results by sending a SIGCANCEL, unless the door
3197 			 * is marked with DOOR_NO_CANCEL.
3198 			 */
3199 			if (ct->d_error == DOOR_WAIT &&
3200 			    st->d_caller == curthread) {
3201 				proc_t	*p = ttoproc(server_thread);
3202 
3203 				st->d_active = NULL;
3204 				st->d_caller = NULL;
3205 				if (!(dp->door_flags & DOOR_NO_CANCEL)) {
3206 					DOOR_T_HOLD(st);
3207 					mutex_exit(&door_knob);
3208 
3209 					mutex_enter(&p->p_lock);
3210 					sigtoproc(p, server_thread, SIGCANCEL);
3211 					mutex_exit(&p->p_lock);
3212 
3213 					mutex_enter(&door_knob);
3214 					DOOR_T_RELEASE(st);
3215 				}
3216 			}
3217 		} else {
3218 			/*
3219 			 * Return from stop(), server exit...
3220 			 *
3221 			 * Note that the server could have done a
3222 			 * door_return while the client was in stop state
3223 			 * (ISSIG), in which case the error condition
3224 			 * is updated by the server.
3225 			 */
3226 			mutex_enter(&door_knob);
3227 			if (ct->d_error == DOOR_WAIT) {
3228 				/* Still waiting for a reply */
3229 				shuttle_swtch(&door_knob);
3230 				mutex_enter(&door_knob);
3231 				if (lwp)
3232 					lwp->lwp_asleep = 0;
3233 				goto	shuttle_return;
3234 			} else if (ct->d_error == DOOR_EXIT) {
3235 				/* Server exit */
3236 				error = EINTR;
3237 			} else {
3238 				/* Server did a door_return during ISSIG */
3239 				error = ct->d_error;
3240 			}
3241 		}
3242 		/*
3243 		 * Can't exit if the server is currently copying
3244 		 * results for me
3245 		 */
3246 		while (DOOR_T_HELD(ct))
3247 			cv_wait(&ct->d_cv, &door_knob);
3248 
3249 		/*
3250 		 * Find out if results were successfully copied.
3251 		 */
3252 		if (ct->d_error == 0)
3253 			gotresults = 1;
3254 	}
3255 	if (lwp) {
3256 		lwp->lwp_asleep = 0;		/* /proc */
3257 		lwp->lwp_sysabort = 0;		/* /proc */
3258 	}
3259 	if (--dp->door_active == 0 && (dp->door_flags & DOOR_DELAY))
3260 		door_deliver_unref(dp);
3261 	mutex_exit(&door_knob);
3262 
3263 	/*
3264 	 * Translate returned doors (if any)
3265 	 */
3266 
3267 	if (ct->d_noresults)
3268 		goto out;
3269 
3270 	if (error) {
3271 		/*
3272 		 * If the server returned results successfully, then we've
3273 		 * been interrupted and may need to clean up.
3274 		 */
3275 		if (gotresults) {
3276 			ASSERT(error == EINTR);
3277 			door_fp_close(ct->d_fpp, ct->d_args.desc_num);
3278 		}
3279 		goto out;
3280 	}
3281 
3282 	if (ct->d_args.desc_num) {
3283 		struct file	**fpp;
3284 		door_desc_t	*didpp;
3285 		vnode_t		*vp;
3286 		uint_t		n = ct->d_args.desc_num;
3287 
3288 		didpp = ct->d_args.desc_ptr = (door_desc_t *)(ct->d_args.rbuf +
3289 		    roundup(ct->d_args.data_size, sizeof (door_desc_t)));
3290 		fpp = ct->d_fpp;
3291 
3292 		while (n--) {
3293 			struct file *fp;
3294 
3295 			fp = *fpp;
3296 			if (VOP_REALVP(fp->f_vnode, &vp, NULL))
3297 				vp = fp->f_vnode;
3298 
3299 			didpp->d_attributes = DOOR_HANDLE |
3300 			    (VTOD(vp)->door_flags & DOOR_ATTR_MASK);
3301 			didpp->d_data.d_handle = FTODH(fp);
3302 
3303 			fpp++; didpp++;
3304 		}
3305 	}
3306 
3307 	/* on return data is in rbuf */
3308 	*param = ct->d_args;		/* structure assignment */
3309 
3310 out:
3311 	kmem_free(dup, sizeof (*dup));
3312 
3313 	if (ct->d_fpp) {
3314 		kmem_free(ct->d_fpp, ct->d_fpp_size);
3315 		ct->d_fpp = NULL;
3316 		ct->d_fpp_size = 0;
3317 	}
3318 
3319 	ct->d_upcall = NULL;
3320 	ct->d_noresults = 0;
3321 	ct->d_buf = NULL;
3322 	ct->d_bufsize = 0;
3323 	return (error);
3324 }
3325 
3326 /*
3327  * Add a door to the per-process list of active doors for which the
3328  * process is a server.
3329  */
3330 static void
3331 door_list_insert(door_node_t *dp)
3332 {
3333 	proc_t *p = dp->door_target;
3334 
3335 	ASSERT(MUTEX_HELD(&door_knob));
3336 	dp->door_list = p->p_door_list;
3337 	p->p_door_list = dp;
3338 }
3339 
3340 /*
3341  * Remove a door from the per-process list of active doors.
3342  */
3343 void
3344 door_list_delete(door_node_t *dp)
3345 {
3346 	door_node_t **pp;
3347 
3348 	ASSERT(MUTEX_HELD(&door_knob));
3349 	/*
3350 	 * Find the door in the list.  If the door belongs to another process,
3351 	 * it's OK to use p_door_list since that process can't exit until all
3352 	 * doors have been taken off the list (see door_exit).
3353 	 */
3354 	pp = &(dp->door_target->p_door_list);
3355 	while (*pp != dp)
3356 		pp = &((*pp)->door_list);
3357 
3358 	/* found it, take it off the list */
3359 	*pp = dp->door_list;
3360 }
3361 
3362 
3363 /*
3364  * External kernel interfaces for doors.  These functions are available
3365  * outside the doorfs module for use in creating and using doors from
3366  * within the kernel.
3367  */
3368 
3369 /*
3370  * door_ki_upcall invokes a user-level door server from the kernel, with
3371  * the credentials associated with curthread.
3372  */
3373 int
3374 door_ki_upcall(door_handle_t dh, door_arg_t *param)
3375 {
3376 	return (door_ki_upcall_limited(dh, param, NULL, SIZE_MAX, UINT_MAX));
3377 }
3378 
3379 /*
3380  * door_ki_upcall_limited invokes a user-level door server from the
3381  * kernel with the given credentials and reply limits.  If the "cred"
3382  * argument is NULL, uses the credentials associated with current
3383  * argument is NULL, the credentials associated with the current
3384  * thread are used.  max_data limits the length of the returned data (the
3385  * number of returned descriptors (the client will get EMFILE if they
3386  * go over).
3387  */
3388 int
3389 door_ki_upcall_limited(door_handle_t dh, door_arg_t *param, struct cred *cred,
3390     size_t max_data, uint_t max_desc)
3391 {
3392 	file_t *fp = DHTOF(dh);
3393 	vnode_t *realvp;
3394 
3395 	if (VOP_REALVP(fp->f_vnode, &realvp, NULL))
3396 		realvp = fp->f_vnode;
3397 	return (door_upcall(realvp, param, cred, max_data, max_desc));
3398 }
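
/*
 * A minimal usage sketch (not part of the original source): a kernel
 * subsystem that needs to call out to a user-level door server could
 * combine door_ki_open(), door_ki_upcall_limited(), and door_ki_rele()
 * roughly as below.  The door path, request buffer, and reply limits
 * are hypothetical; only the interfaces defined in this file and the
 * door_arg_t fields used throughout it are assumed.
 *
 *	door_handle_t	dh;
 *	door_arg_t	da;
 *	char		req[] = "example request";	// hypothetical
 *	char		res[128];			// reply buffer
 *	int		err;
 *
 *	if ((err = door_ki_open("/var/run/example_door", &dh)) != 0)
 *		return (err);
 *
 *	da.data_ptr = req;
 *	da.data_size = sizeof (req);
 *	da.desc_ptr = NULL;
 *	da.desc_num = 0;
 *	da.rbuf = res;
 *	da.rsize = sizeof (res);
 *
 *	// cap the reply at 16K of data and no returned descriptors
 *	err = door_ki_upcall_limited(dh, &da, NULL, 16 * 1024, 0);
 *	if (err == 0) {
 *		// reply data is at da.data_ptr for da.data_size bytes;
 *		// if it did not fit in 'res', doorfs substituted a
 *		// kmem-allocated buffer that the caller must free
 *		if (da.rbuf != res)
 *			kmem_free(da.rbuf, da.rsize);
 *	}
 *	door_ki_rele(dh);
 */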
3399 
3400 /*
3401  * Function call to create a "kernel" door server.  A kernel door
3402  * server provides a way for a user-level process to invoke a function
3403  * in the kernel through a door_call.  From the caller's point of
3404  * view, a kernel door server looks the same as a user-level one
3405  * (except the server pid is 0).  Unlike normal door calls, the
3406  * kernel door function is invoked via a normal function call in the
3407  * same thread and context as the caller.
3408  */
3409 int
3410 door_ki_create(void (*pc_cookie)(), void *data_cookie, uint_t attributes,
3411     door_handle_t *dhp)
3412 {
3413 	int err;
3414 	file_t *fp;
3415 
3416 	/* no DOOR_PRIVATE */
3417 	if ((attributes & ~DOOR_KI_CREATE_MASK) ||
3418 	    (attributes & (DOOR_UNREF | DOOR_UNREF_MULTI)) ==
3419 	    (DOOR_UNREF | DOOR_UNREF_MULTI))
3420 		return (EINVAL);
3421 
3422 	err = door_create_common(pc_cookie, data_cookie, attributes,
3423 	    1, NULL, &fp);
3424 	if (err == 0 && (attributes & (DOOR_UNREF | DOOR_UNREF_MULTI)) &&
3425 	    p0.p_unref_thread == 0) {
3426 		/* need to create unref thread for process 0 */
3427 		(void) thread_create(NULL, 0, door_unref_kernel, NULL, 0, &p0,
3428 		    TS_RUN, minclsyspri);
3429 	}
3430 	if (err == 0) {
3431 		*dhp = FTODH(fp);
3432 	}
3433 	return (err);
3434 }
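
/*
 * A minimal sketch (not part of the original source) of creating a
 * kernel door with door_ki_create().  The service function name and
 * the way the resulting handle is published are hypothetical.  As the
 * unref delivery in door_unref_kernel() above shows, the service
 * function is handed the data cookie and a door_arg_t pointer; for a
 * door_call it runs in the caller's own thread and context, as
 * described in the comment above.
 *
 *	extern void example_kdoor_service();	// matches void (*pc_cookie)()
 *	door_handle_t dh;
 *	int err;
 *
 *	err = door_ki_create(example_kdoor_service, NULL, 0, &dh);
 *	if (err == 0) {
 *		// hand 'dh' to whatever subsystem will use it; drop the
 *		// reference with door_ki_rele(dh) when it is torn down
 *	}
 */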
3435 
3436 void
3437 door_ki_hold(door_handle_t dh)
3438 {
3439 	file_t *fp = DHTOF(dh);
3440 
3441 	mutex_enter(&fp->f_tlock);
3442 	fp->f_count++;
3443 	mutex_exit(&fp->f_tlock);
3444 }
3445 
3446 void
3447 door_ki_rele(door_handle_t dh)
3448 {
3449 	file_t *fp = DHTOF(dh);
3450 
3451 	(void) closef(fp);
3452 }
3453 
3454 int
3455 door_ki_open(char *pathname, door_handle_t *dhp)
3456 {
3457 	file_t *fp;
3458 	vnode_t *vp;
3459 	int err;
3460 
3461 	if ((err = lookupname(pathname, UIO_SYSSPACE, FOLLOW, NULL, &vp)) != 0)
3462 		return (err);
3463 	if (err = VOP_OPEN(&vp, FREAD, kcred, NULL)) {
3464 		VN_RELE(vp);
3465 		return (err);
3466 	}
3467 	if (vp->v_type != VDOOR) {
3468 		VN_RELE(vp);
3469 		return (EINVAL);
3470 	}
3471 	if ((err = falloc(vp, FREAD | FWRITE, &fp, NULL)) != 0) {
3472 		VN_RELE(vp);
3473 		return (err);
3474 	}
3475 	/* falloc returns with f_tlock held on success */
3476 	mutex_exit(&fp->f_tlock);
3477 	*dhp = FTODH(fp);
3478 	return (0);
3479 }
3480 
3481 int
3482 door_ki_info(door_handle_t dh, struct door_info *dip)
3483 {
3484 	file_t *fp = DHTOF(dh);
3485 	vnode_t *vp;
3486 
3487 	if (VOP_REALVP(fp->f_vnode, &vp, NULL))
3488 		vp = fp->f_vnode;
3489 	if (vp->v_type != VDOOR)
3490 		return (EINVAL);
3491 	door_info_common(VTOD(vp), dip, fp);
3492 	return (0);
3493 }
3494 
3495 door_handle_t
3496 door_ki_lookup(int did)
3497 {
3498 	file_t *fp;
3499 	door_handle_t dh;
3500 
3501 	/* is the descriptor really a door? */
3502 	if (door_lookup(did, &fp) == NULL)
3503 		return (NULL);
3504 	/* got the door, put a hold on it and release the fd */
3505 	dh = FTODH(fp);
3506 	door_ki_hold(dh);
3507 	releasef(did);
3508 	return (dh);
3509 }
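
/*
 * A minimal sketch (not part of the original source): translating a
 * door descriptor received from user level into a handle and vetting
 * it before use.  The descriptor 'did' is hypothetical; only
 * door_ki_lookup(), door_ki_info(), and door_ki_rele() from this file
 * and the door_info attribute bits are assumed.
 *
 *	door_handle_t dh;
 *	struct door_info di;
 *
 *	if ((dh = door_ki_lookup(did)) == NULL)
 *		return (EBADF);		// not a door descriptor
 *	if (door_ki_info(dh, &di) != 0 ||
 *	    (di.di_attributes & DOOR_REVOKED)) {
 *		door_ki_rele(dh);
 *		return (EBADF);		// door is already revoked
 *	}
 *	// keep 'dh' (door_ki_lookup() added a hold) and release it
 *	// later with door_ki_rele(dh)
 */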
3510 
3511 int
3512 door_ki_setparam(door_handle_t dh, int type, size_t val)
3513 {
3514 	file_t *fp = DHTOF(dh);
3515 	vnode_t *vp;
3516 
3517 	if (VOP_REALVP(fp->f_vnode, &vp, NULL))
3518 		vp = fp->f_vnode;
3519 	if (vp->v_type != VDOOR)
3520 		return (EINVAL);
3521 	return (door_setparam_common(VTOD(vp), 1, type, val));
3522 }
3523 
3524 int
3525 door_ki_getparam(door_handle_t dh, int type, size_t *out)
3526 {
3527 	file_t *fp = DHTOF(dh);
3528 	vnode_t *vp;
3529 
3530 	if (VOP_REALVP(fp->f_vnode, &vp, NULL))
3531 		vp = fp->f_vnode;
3532 	if (vp->v_type != VDOOR)
3533 		return (EINVAL);
3534 	return (door_getparam_common(VTOD(vp), type, out));
3535 }
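
/*
 * A minimal sketch (not part of the original source): reading a door's
 * data-size limit before issuing an upcall.  The handle 'dh' and the
 * request size 'request_size' are hypothetical; DOOR_PARAM_DATA_MAX is
 * the parameter type documented for door_getparam(3C), and
 * door_ki_setparam() above is the complementary tuning call.
 *
 *	size_t data_max;
 *	int err;
 *
 *	err = door_ki_getparam(dh, DOOR_PARAM_DATA_MAX, &data_max);
 *	if (err == 0 && data_max < request_size)
 *		err = E2BIG;	// request would exceed the server's limit
 *	return (err);
 */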
3536