/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M% %I% %E% SMI"

/*
 * System call I/F to doors (outside of vnodes I/F) and misc support
 * routines
 */
#include <sys/types.h>
#include <sys/systm.h>
#include <sys/door.h>
#include <sys/door_data.h>
#include <sys/proc.h>
#include <sys/thread.h>
#include <sys/class.h>
#include <sys/cred.h>
#include <sys/kmem.h>
#include <sys/cmn_err.h>
#include <sys/stack.h>
#include <sys/debug.h>
#include <sys/cpuvar.h>
#include <sys/file.h>
#include <sys/fcntl.h>
#include <sys/vnode.h>
#include <sys/vfs.h>
#include <sys/vfs_opreg.h>
#include <sys/sobject.h>
#include <sys/schedctl.h>
#include <sys/callb.h>
#include <sys/ucred.h>

#include <sys/mman.h>
#include <sys/sysmacros.h>
#include <sys/vmsystm.h>
#include <vm/as.h>
#include <vm/hat.h>
#include <vm/page.h>
#include <vm/seg.h>
#include <vm/seg_vn.h>
#include <vm/seg_vn.h>

#include <sys/modctl.h>
#include <sys/syscall.h>
#include <sys/pathname.h>
#include <sys/rctl.h>

/*
 * The maximum amount of data (in bytes) that will be transferred using
 * an intermediate kernel buffer.  For sizes greater than this we map
 * in the destination pages and perform a 1-copy transfer.
 */
size_t	door_max_arg = 16 * 1024;

/*
 * Maximum amount of data that will be transferred in a reply to a
 * door_upcall.  Need to guard against a process returning huge amounts
 * of data and getting the kernel stuck in kmem_alloc.
 */
size_t	door_max_upcall_reply = 1024 * 1024;

/*
 * Maximum number of descriptors allowed to be passed in a single
 * door_call or door_return.  We need to allocate kernel memory
 * for all of them at once, so we can't let it scale without limit.
 */
uint_t	door_max_desc = 1024;

/*
 * The maximum time door server thread waits before exiting if there
 * is no request from client.  This is expressed in seconds.
 */
int	door_srv_timeout = 5;

/*
 * Definition of a door handle, used by other kernel subsystems when
 * calling door functions.  This is really a file structure but we
 * want to hide that fact.
 */
struct __door_handle {
	file_t dh_file;
};

#define	DHTOF(dh) ((file_t *)(dh))
#define	FTODH(fp) ((door_handle_t)(fp))

static int doorfs(long, long, long, long, long, long);

static struct sysent door_sysent = {
	6,
	SE_ARGC | SE_NOUNLOAD,
	(int (*)())doorfs,
};

static struct modlsys modlsys = {
	&mod_syscallops, "doors", &door_sysent
};

#ifdef _SYSCALL32_IMPL

static int
doorfs32(int32_t arg1, int32_t arg2, int32_t arg3, int32_t arg4,
    int32_t arg5, int32_t subcode);

static struct sysent door_sysent32 = {
	6,
	SE_ARGC | SE_NOUNLOAD,
	(int (*)())doorfs32,
};

static struct modlsys modlsys32 = {
	&mod_syscallops32,
	"32-bit door syscalls",
	&door_sysent32
};
#endif

static struct modlinkage modlinkage = {
	MODREV_1,
	&modlsys,
#ifdef _SYSCALL32_IMPL
	&modlsys32,
#endif
	NULL
};

dev_t	doordev;

extern	struct vfs door_vfs;
extern	struct vnodeops *door_vnodeops;

int
_init(void)
{
	static const fs_operation_def_t door_vfsops_template[] = {
		NULL, NULL
	};
	extern const fs_operation_def_t door_vnodeops_template[];
	vfsops_t *door_vfsops;
	major_t major;
	int error;

	mutex_init(&door_knob, NULL, MUTEX_DEFAULT, NULL);
	if ((major = getudev()) == (major_t)-1)
		return (ENXIO);
	doordev = makedevice(major, 0);

	/* Create a dummy vfs */
	error = vfs_makefsops(door_vfsops_template, &door_vfsops);
	if (error != 0) {
		cmn_err(CE_WARN, "door init: bad vfs ops");
		return (error);
	}
	VFS_INIT(&door_vfs, door_vfsops, NULL);
	door_vfs.vfs_flag = VFS_RDONLY;
	door_vfs.vfs_dev = doordev;
	vfs_make_fsid(&(door_vfs.vfs_fsid), doordev, 0);

	error = vn_make_ops("doorfs", door_vnodeops_template, &door_vnodeops);
	if (error != 0) {
		vfs_freevfsops(door_vfsops);
		cmn_err(CE_WARN, "door init: bad vnode ops");
		return (error);
	}
	return (mod_install(&modlinkage));
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

/* system call functions */
static int door_call(int, void *);
static int door_return(caddr_t, size_t, door_desc_t *, uint_t, caddr_t, size_t);
static int door_create(void (*pc_cookie)(void *, char *, size_t, door_desc_t *,
    uint_t), void *data_cookie, uint_t);
static int door_revoke(int);
static int door_info(int, struct door_info *);
static int door_ucred(struct ucred_s *);
static int door_bind(int);
static int door_unbind(void);
static int door_unref(void);
static int door_getparam(int, int, size_t *);
static int door_setparam(int, int, size_t);

#define	DOOR_RETURN_OLD	4		/* historic value, for s10 */

/*
 * System call wrapper for all door related system calls
 */
static int
doorfs(long arg1, long arg2, long arg3, long arg4, long arg5, long subcode)
{
	switch (subcode) {
	case DOOR_CALL:
		return (door_call(arg1, (void *)arg2));
	case DOOR_RETURN: {
		door_return_desc_t *drdp = (door_return_desc_t *)arg3;

		if (drdp != NULL) {
			door_return_desc_t drd;
			if (copyin(drdp, &drd, sizeof (drd)))
				return (EFAULT);
			return (door_return((caddr_t)arg1, arg2, drd.desc_ptr,
			    drd.desc_num, (caddr_t)arg4, arg5));
		}
		return (door_return((caddr_t)arg1, arg2, NULL,
		    0, (caddr_t)arg4, arg5));
	}
	case DOOR_RETURN_OLD:
		/*
		 * In order to support the S10 runtime environment, we
		 * still respond to the old syscall subcode for door_return.
		 * We treat it as having no stack limits.  This code should
		 * be removed when such support is no longer needed.
		 */
		return (door_return((caddr_t)arg1, arg2, (door_desc_t *)arg3,
		    arg4, (caddr_t)arg5, 0));
	case DOOR_CREATE:
		return (door_create((void (*)())arg1, (void *)arg2, arg3));
	case DOOR_REVOKE:
		return (door_revoke(arg1));
	case DOOR_INFO:
		return (door_info(arg1, (struct door_info *)arg2));
	case DOOR_BIND:
		return (door_bind(arg1));
	case DOOR_UNBIND:
		return (door_unbind());
	case DOOR_UNREFSYS:
		return (door_unref());
	case DOOR_UCRED:
		return (door_ucred((struct ucred_s *)arg1));
	case DOOR_GETPARAM:
		return (door_getparam(arg1, arg2, (size_t *)arg3));
	case DOOR_SETPARAM:
		return (door_setparam(arg1, arg2, arg3));
	default:
		return (set_errno(EINVAL));
	}
}
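
/*
 * Illustrative note (not part of the original source): the user-level
 * libdoor routines such as door_call(3C) and door_return(3C) are expected
 * to funnel into this single doorfs(2) entry point, passing one of the
 * DOOR_* subcodes above as the final argument; the wrapper simply
 * demultiplexes that subcode onto the corresponding in-kernel handler.
 */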

#ifdef _SYSCALL32_IMPL
/*
 * System call wrapper for all door related system calls from 32-bit programs.
 * Needed at the moment because of the casts - they undo some damage
 * that truss causes (sign-extending the stack pointer) when truss'ing
 * a 32-bit program using doors.
 */
static int
doorfs32(int32_t arg1, int32_t arg2, int32_t arg3,
    int32_t arg4, int32_t arg5, int32_t subcode)
{
	switch (subcode) {
	case DOOR_CALL:
		return (door_call(arg1, (void *)(uintptr_t)(caddr32_t)arg2));
	case DOOR_RETURN: {
		door_return_desc32_t *drdp =
		    (door_return_desc32_t *)(uintptr_t)(caddr32_t)arg3;
		if (drdp != NULL) {
			door_return_desc32_t drd;
			if (copyin(drdp, &drd, sizeof (drd)))
				return (EFAULT);
			return (door_return(
			    (caddr_t)(uintptr_t)(caddr32_t)arg1, arg2,
			    (door_desc_t *)(uintptr_t)drd.desc_ptr,
			    drd.desc_num, (caddr_t)(uintptr_t)(caddr32_t)arg4,
			    (size_t)(uintptr_t)(size32_t)arg5));
		}
		return (door_return((caddr_t)(uintptr_t)(caddr32_t)arg1,
		    arg2, NULL, 0, (caddr_t)(uintptr_t)(caddr32_t)arg4,
		    (size_t)(uintptr_t)(size32_t)arg5));
	}
	case DOOR_RETURN_OLD:
		/*
		 * In order to support the S10 runtime environment, we
		 * still respond to the old syscall subcode for door_return.
		 * We treat it as having no stack limits.  This code should
		 * be removed when such support is no longer needed.
		 */
		return (door_return((caddr_t)(uintptr_t)(caddr32_t)arg1, arg2,
		    (door_desc_t *)(uintptr_t)(caddr32_t)arg3, arg4,
		    (caddr_t)(uintptr_t)(caddr32_t)arg5, 0));
	case DOOR_CREATE:
		return (door_create((void (*)())(uintptr_t)(caddr32_t)arg1,
		    (void *)(uintptr_t)(caddr32_t)arg2, arg3));
	case DOOR_REVOKE:
		return (door_revoke(arg1));
	case DOOR_INFO:
		return (door_info(arg1,
		    (struct door_info *)(uintptr_t)(caddr32_t)arg2));
	case DOOR_BIND:
		return (door_bind(arg1));
	case DOOR_UNBIND:
		return (door_unbind());
	case DOOR_UNREFSYS:
		return (door_unref());
	case DOOR_UCRED:
		return (door_ucred(
		    (struct ucred_s *)(uintptr_t)(caddr32_t)arg1));
	case DOOR_GETPARAM:
		return (door_getparam(arg1, arg2,
		    (size_t *)(uintptr_t)(caddr32_t)arg3));
	case DOOR_SETPARAM:
		return (door_setparam(arg1, arg2, (size_t)(size32_t)arg3));

	default:
		return (set_errno(EINVAL));
	}
}
#endif

void shuttle_resume(kthread_t *, kmutex_t *);
void shuttle_swtch(kmutex_t *);
void shuttle_sleep(kthread_t *);

/*
 * Support routines
 */
static int door_create_common(void (*)(), void *, uint_t, int, int *,
    file_t **);
static int door_overflow(kthread_t *, caddr_t, size_t, door_desc_t *, uint_t);
static int door_args(kthread_t *, int);
static int door_results(kthread_t *, caddr_t, size_t, door_desc_t *, uint_t);
static int door_copy(struct as *, caddr_t, caddr_t, uint_t);
static int door_server_exit(proc_t *, kthread_t *);
static void door_release_server(door_node_t *, kthread_t *);
static kthread_t *door_get_server(door_node_t *);
static door_node_t *door_lookup(int, file_t **);
static int door_translate_in(void);
static int door_translate_out(void);
static void door_fd_rele(door_desc_t *, uint_t, int);
static void door_list_insert(door_node_t *);
static void door_info_common(door_node_t *, door_info_t *, file_t *);
static int door_release_fds(door_desc_t *, uint_t);
static void door_fd_close(door_desc_t *, uint_t);
static void door_fp_close(struct file **, uint_t);

static door_data_t *
door_my_data(int create_if_missing)
{
	door_data_t *ddp;

	ddp = curthread->t_door;
	if (create_if_missing && ddp == NULL)
		ddp = curthread->t_door = kmem_zalloc(sizeof (*ddp), KM_SLEEP);

	return (ddp);
}

static door_server_t *
door_my_server(int create_if_missing)
{
	door_data_t *ddp = door_my_data(create_if_missing);

	return ((ddp != NULL)? DOOR_SERVER(ddp) : NULL);
}

static door_client_t *
door_my_client(int create_if_missing)
{
	door_data_t *ddp = door_my_data(create_if_missing);

	return ((ddp != NULL)? DOOR_CLIENT(ddp) : NULL);
}

/*
 * System call to create a door
 */
int
door_create(void (*pc_cookie)(), void *data_cookie, uint_t attributes)
{
	int fd;
	int err;

	if ((attributes & ~DOOR_CREATE_MASK) ||
	    ((attributes & (DOOR_UNREF | DOOR_UNREF_MULTI)) ==
	    (DOOR_UNREF | DOOR_UNREF_MULTI)))
		return (set_errno(EINVAL));

	if ((err = door_create_common(pc_cookie, data_cookie, attributes, 0,
	    &fd, NULL)) != 0)
		return (set_errno(err));

	f_setfd(fd, FD_CLOEXEC);
	return (fd);
}
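
/*
 * Illustrative sketch (not in the original source): a user-level server
 * would typically reach the door_create() entry point above through
 * door_create(3C), along the lines of
 *
 *	void serv(void *cookie, char *argp, size_t arg_size,
 *	    door_desc_t *dp, uint_t n_desc) { ... door_return(...); }
 *	int fd = door_create(serv, cookie, DOOR_UNREF);
 *
 * The descriptor handed back is marked FD_CLOEXEC by the wrapper above.
 */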

/*
 * Common code for creating user and kernel doors.  If a door was
 * created, stores a file structure pointer in the location pointed
 * to by fpp (if fpp is non-NULL) and returns 0.  Also, if a non-NULL
 * pointer to a file descriptor is passed in as fdp, allocates a file
 * descriptor representing the door.  If a door could not be created,
 * returns an error.
 */
static int
door_create_common(void (*pc_cookie)(), void *data_cookie, uint_t attributes,
    int from_kernel, int *fdp, file_t **fpp)
{
	door_node_t	*dp;
	vnode_t		*vp;
	struct file	*fp;
	static door_id_t index = 0;
	proc_t		*p = (from_kernel)? &p0 : curproc;

	dp = kmem_zalloc(sizeof (door_node_t), KM_SLEEP);

	dp->door_vnode = vn_alloc(KM_SLEEP);
	dp->door_target = p;
	dp->door_data = data_cookie;
	dp->door_pc = pc_cookie;
	dp->door_flags = attributes;
#ifdef _SYSCALL32_IMPL
	if (!from_kernel && get_udatamodel() != DATAMODEL_NATIVE)
		dp->door_data_max = UINT32_MAX;
	else
#endif
		dp->door_data_max = SIZE_MAX;
	dp->door_data_min = 0UL;
	dp->door_desc_max = (attributes & DOOR_REFUSE_DESC)? 0 : INT_MAX;

	vp = DTOV(dp);
	vn_setops(vp, door_vnodeops);
	vp->v_type = VDOOR;
	vp->v_vfsp = &door_vfs;
	vp->v_data = (caddr_t)dp;
	mutex_enter(&door_knob);
	dp->door_index = index++;
	/* add to per-process door list */
	door_list_insert(dp);
	mutex_exit(&door_knob);

	if (falloc(vp, FREAD | FWRITE, &fp, fdp)) {
		/*
		 * If the file table is full, remove the door from the
		 * per-process list, free the door, and return EMFILE.
		 */
		mutex_enter(&door_knob);
		door_list_delete(dp);
		mutex_exit(&door_knob);
		vn_free(vp);
		kmem_free(dp, sizeof (door_node_t));
		return (EMFILE);
	}
	vn_exists(vp);
	if (fdp != NULL)
		setf(*fdp, fp);
	mutex_exit(&fp->f_tlock);

	if (fpp != NULL)
		*fpp = fp;
	return (0);
}

static int
door_check_limits(door_node_t *dp, door_arg_t *da, int upcall)
{
	ASSERT(MUTEX_HELD(&door_knob));

	/* we allow unref upcalls through, despite any minimum */
	if (da->data_size < dp->door_data_min &&
	    !(upcall && da->data_ptr == DOOR_UNREF_DATA))
		return (ENOBUFS);

	if (da->data_size > dp->door_data_max)
		return (ENOBUFS);

	if (da->desc_num > 0 && (dp->door_flags & DOOR_REFUSE_DESC))
		return (ENOTSUP);

	if (da->desc_num > dp->door_desc_max)
		return (ENFILE);

	return (0);
}
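
/*
 * Illustrative note (not part of the original source): the limits checked
 * above are the per-door parameters maintained by door_setparam_common()
 * further down (DOOR_PARAM_DATA_MIN/MAX and DOOR_PARAM_DESC_MAX), so a
 * call whose argument sizes fall outside the server's advertised bounds
 * fails early, before any server thread is committed to the invocation.
 */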

/*
 * Door invocation.
 */
int
door_call(int did, void *args)
{
	/* Locals */
	door_node_t	*dp;
	kthread_t	*server_thread;
	int		error = 0;
	klwp_t		*lwp;
	door_client_t	*ct;		/* curthread door_data */
	door_server_t	*st;		/* server thread door_data */
	door_desc_t	*start = NULL;
	uint_t		ncopied = 0;
	size_t		dsize;
	/* destructor for data returned by a kernel server */
	void		(*destfn)() = NULL;
	void		*destarg;
	model_t		datamodel;
	int		gotresults = 0;

	lwp = ttolwp(curthread);
	datamodel = lwp_getdatamodel(lwp);

	ct = door_my_client(1);

	/*
	 * Get the arguments
	 */
	if (args) {
		if (datamodel == DATAMODEL_NATIVE) {
			if (copyin(args, &ct->d_args, sizeof (door_arg_t)) != 0)
				return (set_errno(EFAULT));
		} else {
			door_arg32_t	da32;

			if (copyin(args, &da32, sizeof (door_arg32_t)) != 0)
				return (set_errno(EFAULT));
			ct->d_args.data_ptr =
			    (char *)(uintptr_t)da32.data_ptr;
			ct->d_args.data_size = da32.data_size;
			ct->d_args.desc_ptr =
			    (door_desc_t *)(uintptr_t)da32.desc_ptr;
			ct->d_args.desc_num = da32.desc_num;
			ct->d_args.rbuf =
			    (char *)(uintptr_t)da32.rbuf;
			ct->d_args.rsize = da32.rsize;
		}
	} else {
		/* No arguments, and no results allowed */
		ct->d_noresults = 1;
		ct->d_args.data_size = 0;
		ct->d_args.desc_num = 0;
		ct->d_args.rsize = 0;
	}

	if ((dp = door_lookup(did, NULL)) == NULL)
		return (set_errno(EBADF));

	mutex_enter(&door_knob);
	if (DOOR_INVALID(dp)) {
		mutex_exit(&door_knob);
		error = EBADF;
		goto out;
	}

	/*
	 * before we do anything, check that we are not overflowing the
	 * required limits.
	 */
	error = door_check_limits(dp, &ct->d_args, 0);
	if (error != 0) {
		mutex_exit(&door_knob);
		goto out;
	}

	/*
	 * Check for in-kernel door server.
	 */
	if (dp->door_target == &p0) {
		caddr_t rbuf = ct->d_args.rbuf;
		size_t rsize = ct->d_args.rsize;

		dp->door_active++;
		ct->d_kernel = 1;
		ct->d_error = DOOR_WAIT;
		mutex_exit(&door_knob);
		/* translate file descriptors to vnodes */
		if (ct->d_args.desc_num) {
			error = door_translate_in();
			if (error)
				goto out;
		}
		/*
		 * Call kernel door server.  Arguments are passed and
		 * returned as a door_arg pointer.  When called, data_ptr
		 * points to user data and desc_ptr points to a kernel list
		 * of door descriptors that have been converted to file
		 * structure pointers.  It's the server function's
		 * responsibility to copyin the data pointed to by data_ptr
		 * (this avoids extra copying in some cases).  On return,
		 * data_ptr points to a user buffer of data, and desc_ptr
		 * points to a kernel list of door descriptors representing
		 * files.  When a reference is passed to a kernel server,
		 * it is the server's responsibility to release the reference
		 * (by calling closef).  When the server includes a
		 * reference in its reply, it is released as part of the
		 * call (the server must duplicate the reference if
		 * it wants to retain a copy).  The destfn, if set to
		 * non-NULL, is a destructor to be called when the returned
		 * kernel data (if any) is no longer needed (has all been
		 * translated and copied to user level).
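		 *
		 * (Illustrative note, not in the original source: given the
		 * invocation below, an in-kernel server procedure and its
		 * optional destructor are presumably of the form
		 *
		 *	void server(void *cookie, door_arg_t *args,
		 *	    void (**destfnp)(), void **destargp, int *errp);
		 *	void destfn(void *cookie, void *destarg);
		 *
		 * with *errp set non-zero on failure.)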
		 */
		(*(dp->door_pc))(dp->door_data, &ct->d_args,
		    &destfn, &destarg, &error);
		mutex_enter(&door_knob);
		/* not implemented yet */
		if (--dp->door_active == 0 && (dp->door_flags & DOOR_DELAY))
			door_deliver_unref(dp);
		mutex_exit(&door_knob);
		if (error)
			goto out;

		/* translate vnodes to files */
		if (ct->d_args.desc_num) {
			error = door_translate_out();
			if (error)
				goto out;
		}
		ct->d_buf = ct->d_args.rbuf;
		ct->d_bufsize = ct->d_args.rsize;
		if (rsize < (ct->d_args.data_size +
		    (ct->d_args.desc_num * sizeof (door_desc_t)))) {
			/* handle overflow */
			error = door_overflow(curthread, ct->d_args.data_ptr,
			    ct->d_args.data_size, ct->d_args.desc_ptr,
			    ct->d_args.desc_num);
			if (error)
				goto out;
			/* door_overflow sets d_args rbuf and rsize */
		} else {
			ct->d_args.rbuf = rbuf;
			ct->d_args.rsize = rsize;
		}
		goto results;
	}

	/*
	 * Get a server thread from the target domain
	 */
	if ((server_thread = door_get_server(dp)) == NULL) {
		if (DOOR_INVALID(dp))
			error = EBADF;
		else
			error = EAGAIN;
		mutex_exit(&door_knob);
		goto out;
	}

	st = DOOR_SERVER(server_thread->t_door);
	if (ct->d_args.desc_num || ct->d_args.data_size) {
		int is_private = (dp->door_flags & DOOR_PRIVATE);
		/*
		 * Move data from client to server
		 */
		DOOR_T_HOLD(st);
		mutex_exit(&door_knob);
		error = door_args(server_thread, is_private);
		mutex_enter(&door_knob);
		DOOR_T_RELEASE(st);
		if (error) {
			/*
			 * We're not going to resume this thread after all
			 */
			door_release_server(dp, server_thread);
			shuttle_sleep(server_thread);
			mutex_exit(&door_knob);
			goto out;
		}
	}

	dp->door_active++;
	ct->d_error = DOOR_WAIT;
	st->d_caller = curthread;
	st->d_active = dp;

	shuttle_resume(server_thread, &door_knob);

	mutex_enter(&door_knob);
shuttle_return:
	if ((error = ct->d_error) < 0) {	/* DOOR_WAIT or DOOR_EXIT */
		/*
		 * Premature wakeup. Find out why (stop, forkall, sig, exit ...)
		 */
		mutex_exit(&door_knob);		/* May block in ISSIG */
		if (ISSIG(curthread, FORREAL) ||
		    lwp->lwp_sysabort || MUSTRETURN(curproc, curthread)) {
			/* Signal, forkall, ... */
			lwp->lwp_sysabort = 0;
			mutex_enter(&door_knob);
			error = EINTR;
			/*
			 * If the server has finished processing our call,
			 * or exited (calling door_slam()), then d_error
			 * will have changed.  If the server hasn't finished
			 * yet, d_error will still be DOOR_WAIT, and we
			 * let it know we are not interested in any
			 * results by sending a SIGCANCEL, unless the door
			 * is marked with DOOR_NO_CANCEL.
			 */
			if (ct->d_error == DOOR_WAIT &&
			    st->d_caller == curthread) {
				proc_t *p = ttoproc(server_thread);

				st->d_active = NULL;
				st->d_caller = NULL;

				if (!(dp->door_flags & DOOR_NO_CANCEL)) {
					DOOR_T_HOLD(st);
					mutex_exit(&door_knob);

					mutex_enter(&p->p_lock);
					sigtoproc(p, server_thread, SIGCANCEL);
					mutex_exit(&p->p_lock);

					mutex_enter(&door_knob);
					DOOR_T_RELEASE(st);
				}
			}
		} else {
			/*
			 * Return from stop(), server exit...
			 *
			 * Note that the server could have done a
			 * door_return while the client was in stop state
			 * (ISSIG), in which case the error condition
			 * is updated by the server.
			 */
			mutex_enter(&door_knob);
			if (ct->d_error == DOOR_WAIT) {
				/* Still waiting for a reply */
				shuttle_swtch(&door_knob);
				mutex_enter(&door_knob);
				lwp->lwp_asleep = 0;
				goto	shuttle_return;
			} else if (ct->d_error == DOOR_EXIT) {
				/* Server exit */
				error = EINTR;
			} else {
				/* Server did a door_return during ISSIG */
				error = ct->d_error;
			}
		}
		/*
		 * Can't exit if the server is currently copying
		 * results for me.
		 */
		while (DOOR_T_HELD(ct))
			cv_wait(&ct->d_cv, &door_knob);

		/*
		 * Find out if results were successfully copied.
		 */
		if (ct->d_error == 0)
			gotresults = 1;
	}
	lwp->lwp_asleep = 0;		/* /proc */
	lwp->lwp_sysabort = 0;		/* /proc */
	if (--dp->door_active == 0 && (dp->door_flags & DOOR_DELAY))
		door_deliver_unref(dp);
	mutex_exit(&door_knob);

results:
	/*
	 * Move the results to userland (if any)
	 */

	if (ct->d_noresults)
		goto out;

	if (error) {
		/*
		 * If server returned results successfully, then we've
		 * been interrupted and may need to clean up.
		 */
		if (gotresults) {
			ASSERT(error == EINTR);
			door_fp_close(ct->d_fpp, ct->d_args.desc_num);
		}
		goto out;
	}

	/*
	 * Copy back data if we haven't caused an overflow (already
	 * handled) and we are using a 2 copy transfer, or we are
	 * returning data from a kernel server.
	 */
	if (ct->d_args.data_size) {
		ct->d_args.data_ptr = ct->d_args.rbuf;
		if (ct->d_kernel || (!ct->d_overflow &&
		    ct->d_args.data_size <= door_max_arg)) {
			if (copyout(ct->d_buf, ct->d_args.rbuf,
			    ct->d_args.data_size)) {
				door_fp_close(ct->d_fpp, ct->d_args.desc_num);
				error = EFAULT;
				goto out;
			}
		}
	}

	/*
	 * stuff returned doors into our proc, copyout the descriptors
	 */
	if (ct->d_args.desc_num) {
		struct file	**fpp;
		door_desc_t	*didpp;
		uint_t		n = ct->d_args.desc_num;

		dsize = n * sizeof (door_desc_t);
		start = didpp = kmem_alloc(dsize, KM_SLEEP);
		fpp = ct->d_fpp;

		while (n--) {
			if (door_insert(*fpp, didpp) == -1) {
				/* Close remaining files */
				door_fp_close(fpp, n + 1);
				error = EMFILE;
				goto out;
			}
			fpp++; didpp++; ncopied++;
		}

		ct->d_args.desc_ptr = (door_desc_t *)(ct->d_args.rbuf +
		    roundup(ct->d_args.data_size, sizeof (door_desc_t)));

		if (copyout(start, ct->d_args.desc_ptr, dsize)) {
			error = EFAULT;
			goto out;
		}
	}

	/*
	 * Return the results
	 */
	if (datamodel == DATAMODEL_NATIVE) {
		if (copyout(&ct->d_args, args, sizeof (door_arg_t)) != 0)
			error = EFAULT;
	} else {
		door_arg32_t	da32;

		da32.data_ptr = (caddr32_t)(uintptr_t)ct->d_args.data_ptr;
		da32.data_size = ct->d_args.data_size;
		da32.desc_ptr = (caddr32_t)(uintptr_t)ct->d_args.desc_ptr;
		da32.desc_num = ct->d_args.desc_num;
		da32.rbuf = (caddr32_t)(uintptr_t)ct->d_args.rbuf;
		da32.rsize = ct->d_args.rsize;
		if (copyout(&da32, args, sizeof (door_arg32_t)) != 0) {
			error = EFAULT;
		}
	}

out:
	ct->d_noresults = 0;

	/* clean up the overflow buffer if an error occurred */
	if (error != 0 && ct->d_overflow) {
		(void) as_unmap(curproc->p_as, ct->d_args.rbuf,
		    ct->d_args.rsize);
	}
	ct->d_overflow = 0;

	/* call destructor */
	if (destfn) {
		ASSERT(ct->d_kernel);
		(*destfn)(dp->door_data, destarg);
		ct->d_buf = NULL;
		ct->d_bufsize = 0;
	}

	if (dp)
		releasef(did);

	if (ct->d_buf) {
		ASSERT(!ct->d_kernel);
		kmem_free(ct->d_buf, ct->d_bufsize);
		ct->d_buf = NULL;
		ct->d_bufsize = 0;
	}
	ct->d_kernel = 0;

	/* clean up the descriptor copyout buffer */
	if (start != NULL) {
		if (error != 0)
			door_fd_close(start, ncopied);
		kmem_free(start, dsize);
	}

	if (ct->d_fpp) {
		kmem_free(ct->d_fpp, ct->d_fpp_size);
		ct->d_fpp = NULL;
		ct->d_fpp_size = 0;
	}

	if (error)
		return (set_errno(error));

	return (0);
}

static int
door_setparam_common(door_node_t *dp, int from_kernel, int type, size_t val)
{
	int error = 0;

	mutex_enter(&door_knob);

	if (DOOR_INVALID(dp)) {
		mutex_exit(&door_knob);
		return (EBADF);
	}

	/*
	 * door_ki_setparam() can only affect kernel doors.
	 * door_setparam() can only affect doors attached to the current
	 * process.
	 */
	if ((from_kernel && dp->door_target != &p0) ||
	    (!from_kernel && dp->door_target != curproc)) {
		mutex_exit(&door_knob);
		return (EPERM);
	}

	switch (type) {
	case DOOR_PARAM_DESC_MAX:
		if (val > INT_MAX)
			error = ERANGE;
		else if ((dp->door_flags & DOOR_REFUSE_DESC) && val != 0)
			error = ENOTSUP;
		else
			dp->door_desc_max = (uint_t)val;
		break;

	case DOOR_PARAM_DATA_MIN:
		if (val > dp->door_data_max)
			error = EINVAL;
		else
			dp->door_data_min = val;
		break;

	case DOOR_PARAM_DATA_MAX:
		if (val < dp->door_data_min)
			error = EINVAL;
		else
			dp->door_data_max = val;
		break;

	default:
		error = EINVAL;
		break;
	}

	mutex_exit(&door_knob);
	return (error);
}

static int
door_getparam_common(door_node_t *dp, int type, size_t *out)
{
	int error = 0;

	mutex_enter(&door_knob);
	switch (type) {
	case DOOR_PARAM_DESC_MAX:
		*out = (size_t)dp->door_desc_max;
		break;
	case DOOR_PARAM_DATA_MIN:
		*out = dp->door_data_min;
		break;
	case DOOR_PARAM_DATA_MAX:
		*out = dp->door_data_max;
		break;
	default:
		error = EINVAL;
		break;
	}
	mutex_exit(&door_knob);
	return (error);
}

int
door_setparam(int did, int type, size_t val)
{
	door_node_t *dp;
	int error = 0;

	if ((dp = door_lookup(did, NULL)) == NULL)
		return (set_errno(EBADF));

	error = door_setparam_common(dp, 0, type, val);

	releasef(did);

	if (error)
		return (set_errno(error));

	return (0);
}

int
door_getparam(int did, int type, size_t *out)
{
	door_node_t *dp;
	size_t val = 0;
	int error = 0;

	if ((dp = door_lookup(did, NULL)) == NULL)
		return (set_errno(EBADF));

	error = door_getparam_common(dp, type, &val);

	releasef(did);

	if (error)
		return (set_errno(error));

	if (get_udatamodel() == DATAMODEL_NATIVE) {
		if (copyout(&val, out, sizeof (val)))
			return (set_errno(EFAULT));
#ifdef _SYSCALL32_IMPL
	} else {
		size32_t val32 = (size32_t)val;

		if (val != val32)
			return (set_errno(EOVERFLOW));

		if (copyout(&val32, out, sizeof (val32)))
			return (set_errno(EFAULT));
#endif /* _SYSCALL32_IMPL */
	}

	return (0);
}

/*
 * A copyout() which proceeds from high addresses to low addresses.  This way,
 * stack guard pages are effective.
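 *
 * (Illustrative note, not in the original source: presumably, writing the
 * highest-addressed page of the target region first means each successive
 * fault lands just below already-valid stack, so automatic stack growth can
 * keep up and a guard page is reached before anything beyond it is written.)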
 */
static int
door_stack_copyout(const void *kaddr, void *uaddr, size_t count)
{
	const char *kbase = (const char *)kaddr;
	uintptr_t ubase = (uintptr_t)uaddr;
	size_t pgsize = PAGESIZE;

	if (count <= pgsize)
		return (copyout(kaddr, uaddr, count));

	while (count > 0) {
		uintptr_t start, end, offset, amount;

		end = ubase + count;
		start = P2ALIGN(end - 1, pgsize);
		if (P2ALIGN(ubase, pgsize) == start)
			start = ubase;

		offset = start - ubase;
		amount = end - start;

		ASSERT(amount > 0 && amount <= count && amount <= pgsize);

		if (copyout(kbase + offset, (void *)start, amount))
			return (1);
		count -= amount;
	}
	return (0);
}

/*
 * Writes the stack layout for door_return() into the door_server_t of the
 * server thread.
 */
static int
door_layout(kthread_t *tp, size_t data_size, uint_t ndesc, int info_needed)
{
	door_server_t *st = DOOR_SERVER(tp->t_door);
	door_layout_t *out = &st->d_layout;
	uintptr_t base_sp = (uintptr_t)st->d_sp;
	size_t ssize = st->d_ssize;
	size_t descsz;
	uintptr_t descp, datap, infop, resultsp, finalsp;
	size_t align = STACK_ALIGN;
	size_t results_sz = sizeof (struct door_results);
	model_t datamodel = lwp_getdatamodel(ttolwp(tp));

	ASSERT(!st->d_layout_done);

#ifndef _STACK_GROWS_DOWNWARD
#error stack does not grow downward, door_layout() must change
#endif

#ifdef _SYSCALL32_IMPL
	if (datamodel != DATAMODEL_NATIVE) {
		align = STACK_ALIGN32;
		results_sz = sizeof (struct door_results32);
	}
#endif

	descsz = ndesc * sizeof (door_desc_t);

	/*
	 * To speed up the overflow checking, we do an initial check
	 * that the passed in data size won't cause us to wrap past
	 * base_sp.  Since door_max_desc limits descsz, we can
	 * safely use it here.  65535 is an arbitrary 'bigger than
	 * we need, small enough to not cause trouble' constant;
	 * the only constraint is that it must be > than:
	 *
	 *	5 * STACK_ALIGN +
	 *	    sizeof (door_info_t) +
	 *	    sizeof (door_results_t) +
	 *	    (max adjustment from door_final_sp())
	 *
	 * After we compute the layout, we can safely do a "did we wrap
	 * around" check, followed by a check against the recorded
	 * stack size.
	 */
	if (data_size >= SIZE_MAX - (size_t)65535UL - descsz)
		return (E2BIG);		/* overflow */

	descp = P2ALIGN(base_sp - descsz, align);
	datap = P2ALIGN(descp - data_size, align);

	if (info_needed)
		infop = P2ALIGN(datap - sizeof (door_info_t), align);
	else
		infop = datap;

	resultsp = P2ALIGN(infop - results_sz, align);
	finalsp = door_final_sp(resultsp, align, datamodel);

	if (finalsp > base_sp)
		return (E2BIG);		/* overflow */

	if (ssize != 0 && (base_sp - finalsp) > ssize)
		return (E2BIG);		/* doesn't fit in stack */

	out->dl_descp = (ndesc != 0)? (caddr_t)descp : 0;
	out->dl_datap = (data_size != 0)? (caddr_t)datap : 0;
	out->dl_infop = info_needed? (caddr_t)infop : 0;
	out->dl_resultsp = (caddr_t)resultsp;
	out->dl_sp = (caddr_t)finalsp;

	st->d_layout_done = 1;
	return (0);
}
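
/*
 * Illustrative sketch (not part of the original source) of the layout
 * door_layout() computes on the server thread's stack, from high to low
 * addresses:
 *
 *	base_sp (d_sp)
 *	  door_desc_t array		(dl_descp, if ndesc != 0)
 *	  argument data			(dl_datap, if data_size != 0)
 *	  door_info_t			(dl_infop, private-pool bootstrap only)
 *	  struct door_results		(dl_resultsp)
 *	dl_sp = door_final_sp(...)	(stack pointer handed to the server)
 *
 * door_server_dispatch() below fills these regions in from the top down.
 */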

static int
door_server_dispatch(door_client_t *ct, door_node_t *dp)
{
	door_server_t *st = DOOR_SERVER(curthread->t_door);
	door_layout_t *layout = &st->d_layout;
	int error = 0;

	int is_private = (dp->door_flags & DOOR_PRIVATE);

	door_pool_t *pool = (is_private)? &dp->door_servers :
	    &curproc->p_server_threads;

	int empty_pool = (pool->dp_threads == NULL);

	caddr_t infop = NULL;
	char *datap = NULL;
	size_t datasize = 0;
	size_t descsize;

	file_t **fpp = ct->d_fpp;
	door_desc_t *start = NULL;
	uint_t ndesc = 0;
	uint_t ncopied = 0;

	if (ct != NULL) {
		datap = ct->d_args.data_ptr;
		datasize = ct->d_args.data_size;
		ndesc = ct->d_args.desc_num;
	}

	descsize = ndesc * sizeof (door_desc_t);

	/*
	 * Reset datap to NULL if we aren't passing any data.  Be careful
	 * to let unref notifications through, though.
	 */
	if (datap == DOOR_UNREF_DATA) {
		if (ct->d_upcall)
			datasize = 0;
		else
			datap = NULL;
	} else if (datasize == 0) {
		datap = NULL;
	}

	/*
	 * Get the stack layout, if it hasn't already been done.
	 */
	if (!st->d_layout_done) {
		error = door_layout(curthread, datasize, ndesc,
		    (is_private && empty_pool));
		if (error != 0)
			goto fail;
	}

	/*
	 * fill out the stack, starting from the top.  Layout was already
	 * filled in by door_args() or door_translate_out().
	 */
	if (layout->dl_descp != NULL) {
		ASSERT(ndesc != 0);
		start = kmem_alloc(descsize, KM_SLEEP);

		while (ndesc > 0) {
			if (door_insert(*fpp, &start[ncopied]) == -1) {
				error = EMFILE;
				goto fail;
			}
			ndesc--;
			ncopied++;
			fpp++;
		}
		if (door_stack_copyout(start, layout->dl_descp, descsize)) {
			error = E2BIG;
			goto fail;
		}
	}
	fpp = NULL;			/* finished processing */

	if (layout->dl_datap != NULL) {
		ASSERT(datasize != 0);
		datap = layout->dl_datap;
		if (ct->d_upcall || datasize <= door_max_arg) {
			if (door_stack_copyout(ct->d_buf, datap, datasize)) {
				error = E2BIG;
				goto fail;
			}
		}
	}

	if (is_private && empty_pool) {
		door_info_t di;

		infop = layout->dl_infop;
		ASSERT(infop != NULL);

		di.di_target = curproc->p_pid;
		di.di_proc = (door_ptr_t)(uintptr_t)dp->door_pc;
		di.di_data = (door_ptr_t)(uintptr_t)dp->door_data;
		di.di_uniquifier = dp->door_index;
		di.di_attributes = (dp->door_flags & DOOR_ATTR_MASK) |
		    DOOR_LOCAL;

		if (copyout(&di, infop, sizeof (di))) {
			error = E2BIG;
			goto fail;
		}
	}

	if (get_udatamodel() == DATAMODEL_NATIVE) {
		struct door_results dr;

		dr.cookie = dp->door_data;
		dr.data_ptr = datap;
		dr.data_size = datasize;
		dr.desc_ptr = (door_desc_t *)layout->dl_descp;
		dr.desc_num = ncopied;
		dr.pc = dp->door_pc;
		dr.nservers = !empty_pool;
		dr.door_info = (door_info_t *)infop;

		if (copyout(&dr, layout->dl_resultsp, sizeof (dr))) {
			error = E2BIG;
			goto fail;
		}
#ifdef _SYSCALL32_IMPL
	} else {
		struct door_results32 dr32;

		dr32.cookie = (caddr32_t)(uintptr_t)dp->door_data;
		dr32.data_ptr = (caddr32_t)(uintptr_t)datap;
		dr32.data_size = (size32_t)datasize;
		dr32.desc_ptr = (caddr32_t)(uintptr_t)layout->dl_descp;
		dr32.desc_num = ncopied;
		dr32.pc = (caddr32_t)(uintptr_t)dp->door_pc;
		dr32.nservers = !empty_pool;
		dr32.door_info = (caddr32_t)(uintptr_t)infop;

		if (copyout(&dr32, layout->dl_resultsp, sizeof (dr32))) {
			error = E2BIG;
			goto fail;
		}
#endif
	}

	error = door_finish_dispatch(layout->dl_sp);
fail:
	if (start != NULL) {
		if (error != 0)
			door_fd_close(start, ncopied);
		kmem_free(start, descsize);
	}
	if (fpp != NULL)
		door_fp_close(fpp, ndesc);

	return (error);
}

/*
 * Return the results (if any) to the caller (if any) and wait for the
 * next invocation on a door.
 */
int
door_return(caddr_t data_ptr, size_t data_size,
    door_desc_t *desc_ptr, uint_t desc_num, caddr_t sp, size_t ssize)
{
	kthread_t	*caller;
	klwp_t		*lwp;
	int		error = 0;
	door_node_t	*dp;
	door_server_t	*st;		/* curthread door_data */
	door_client_t	*ct;		/* caller door_data */
	clock_t		timeleft = 0;
	timeout_id_t	id = NULL;

	if (door_srv_timeout > 0)
		timeleft = door_srv_timeout * (hz/100) * 100;
	else
		timeleft = -1;

	st = door_my_server(1);

	/*
	 * If thread was bound to a door that no longer exists, return
	 * an error.  This can happen if a thread is bound to a door
	 * before the process calls forkall(); in the child, the door
	 * doesn't exist and door_fork() sets the d_invbound flag.
	 */
	if (st->d_invbound)
		return (set_errno(EINVAL));

	st->d_sp = sp;			/* Save base of stack. */
	st->d_ssize = ssize;		/* and its size */

	/*
	 * before we release our stack to the whims of our next caller,
	 * copy in the syscall arguments if we're being traced by /proc.
	 */
	if (curthread->t_post_sys && PTOU(ttoproc(curthread))->u_systrap)
		(void) save_syscall_args();

	/* Make sure the caller hasn't gone away */
	mutex_enter(&door_knob);
	if ((caller = st->d_caller) == NULL || caller->t_door == NULL) {
		if (desc_num != 0) {
			/* close any DOOR_RELEASE descriptors */
			mutex_exit(&door_knob);
			error = door_release_fds(desc_ptr, desc_num);
			if (error)
				return (set_errno(error));
			mutex_enter(&door_knob);
		}
		goto out;
	}
	ct = DOOR_CLIENT(caller->t_door);

	ct->d_args.data_size = data_size;
	ct->d_args.desc_num = desc_num;
	/*
	 * Transfer results, if any, to the client
	 */
	if (data_size != 0 || desc_num != 0) {
		/*
		 * Prevent the client from exiting until we have finished
		 * moving results.
		 */
		DOOR_T_HOLD(ct);
		mutex_exit(&door_knob);
		error = door_results(caller, data_ptr, data_size,
		    desc_ptr, desc_num);
		mutex_enter(&door_knob);
		DOOR_T_RELEASE(ct);
		/*
		 * Pass EOVERFLOW errors back to the client
		 */
		if (error && error != EOVERFLOW) {
			mutex_exit(&door_knob);
			return (set_errno(error));
		}
	}
out:
	/* Put ourselves on the available server thread list */
	door_release_server(st->d_pool, curthread);

	if (timeleft > 0) {
		id = realtime_timeout((void (*)(void *))setrun,
		    curthread, timeleft);
	} else {
		id = NULL;
	}

	/*
	 * Make sure the caller is still waiting to be resumed
	 */
	if (caller) {
		disp_lock_t *tlp;

		thread_lock(caller);
		ct->d_error = error;		/* Return any errors */
		if (caller->t_state == TS_SLEEP &&
		    SOBJ_TYPE(caller->t_sobj_ops) == SOBJ_SHUTTLE) {
			cpu_t *cp = CPU;

			tlp = caller->t_lockp;
			/*
			 * Setting t_disp_queue prevents erroneous preemptions
			 * if this thread is still in execution on another
			 * processor
			 */
			caller->t_disp_queue = cp->cpu_disp;
			CL_ACTIVE(caller);
			/*
			 * We are calling thread_onproc() instead of
			 * THREAD_ONPROC() because the compiler can reorder
			 * the two stores of t_state and t_lockp in
			 * THREAD_ONPROC().
			 */
			thread_onproc(caller, cp);
			disp_lock_exit_high(tlp);

			/* when server returns results to client */
			shuttle_resume(caller, &door_knob);
		} else {
			/* May have been setrun or in stop state */
			thread_unlock(caller);
			shuttle_swtch(&door_knob);
		}
	} else {
		/* no client */
		shuttle_swtch(&door_knob);
	}


	if (id != NULL) {
		timeleft = untimeout(id);
		id = NULL;
	}

	/*
	 * We've sprung to life. Determine if we are part of a door
	 * invocation, or just interrupted
	 */
	lwp = ttolwp(curthread);
	mutex_enter(&door_knob);
	if ((dp = st->d_active) != NULL) {
		/*
		 * Normal door invocation. Return any error condition
		 * encountered while trying to pass args to the server
		 * thread.
		 */
		lwp->lwp_asleep = 0;
		/*
		 * Prevent the caller from leaving us while we
		 * are copying out the arguments from its buffer.
		 */
		ASSERT(st->d_caller != NULL);
		ct = DOOR_CLIENT(st->d_caller->t_door);

		DOOR_T_HOLD(ct);
		mutex_exit(&door_knob);
		error = door_server_dispatch(ct, dp);
		mutex_enter(&door_knob);
		DOOR_T_RELEASE(ct);

		if (error) {
			caller = st->d_caller;
			if (caller)
				ct = DOOR_CLIENT(caller->t_door);
			else
				ct = NULL;

			/*
			 * Recalculate timeout since there was a
			 * door invocation.
			 */
			if (door_srv_timeout > 0) {
				timeleft = door_srv_timeout *
				    (hz/100) * 100;
			} else {
				timeleft = -1;
			}

			goto out;
		}
		mutex_exit(&door_knob);
		return (0);
	} else {
		int empty;

		/*
		 * We are not involved in a door_invocation.
		 * Check for /proc related activity...
		 */
		st->d_caller = NULL;
		empty = door_server_exit(curproc, curthread);
		mutex_exit(&door_knob);

		/*
		 * We return EEXIST, which terminates this thread,
		 * if there was no door invocation for the past
		 * door_srv_timeout seconds.  But if all the
		 * server threads are busy, then this server
		 * thread should not exit.
		 */
		if (door_srv_timeout > 0 && timeleft <= 0 &&
		    empty == 0) {
			return (set_errno(EEXIST));
		}

		if (ISSIG(curthread, FORREAL) ||
		    lwp->lwp_sysabort || MUSTRETURN(curproc, curthread)) {
			lwp->lwp_asleep = 0;
			lwp->lwp_sysabort = 0;
			return (set_errno(EINTR));
		}

		/* Go back and wait for another request */
		lwp->lwp_asleep = 0;
		mutex_enter(&door_knob);
		caller = NULL;
		goto out;
	}
}

/*
 * Revoke any future invocations on this door
 */
int
door_revoke(int did)
{
	door_node_t	*d;
	int		error;

	if ((d = door_lookup(did, NULL)) == NULL)
		return (set_errno(EBADF));

	mutex_enter(&door_knob);
	if (d->door_target != curproc) {
		mutex_exit(&door_knob);
		releasef(did);
		return (set_errno(EPERM));
	}
	d->door_flags |= DOOR_REVOKED;
	if (d->door_flags & DOOR_PRIVATE)
		cv_broadcast(&d->door_servers.dp_cv);
	else
		cv_broadcast(&curproc->p_server_threads.dp_cv);
	mutex_exit(&door_knob);
	releasef(did);
	/* Invalidate the descriptor */
	if ((error = closeandsetf(did, NULL)) != 0)
		return (set_errno(error));
	return (0);
}

int
door_info(int did, struct door_info *d_info)
{
	door_node_t	*dp;
	door_info_t	di;
	door_server_t	*st;
	file_t		*fp = NULL;

	if (did == DOOR_QUERY) {
		/* Get information on door current thread is bound to */
		if ((st = door_my_server(0)) == NULL ||
		    (dp = st->d_pool) == NULL)
			/* Thread isn't bound to a door */
			return (set_errno(EBADF));
	} else if ((dp = door_lookup(did, &fp)) == NULL) {
		/* Not a door */
		return (set_errno(EBADF));
	}

	door_info_common(dp, &di, fp);

	if (did != DOOR_QUERY)
		releasef(did);

	if (copyout(&di, d_info, sizeof (struct door_info)))
		return (set_errno(EFAULT));
	return (0);
}

/*
 * Common code for getting information about a door either via the
 * door_info system call or the door_ki_info kernel call.
 */
void
door_info_common(door_node_t *dp, struct door_info *dip, file_t *fp)
{
	int unref_count;

	bzero(dip, sizeof (door_info_t));

	mutex_enter(&door_knob);
	if (dp->door_target == NULL)
		dip->di_target = -1;
	else
		dip->di_target = dp->door_target->p_pid;

	dip->di_attributes = dp->door_flags & DOOR_ATTR_MASK;
	if (dp->door_target == curproc)
		dip->di_attributes |= DOOR_LOCAL;
	dip->di_proc = (door_ptr_t)(uintptr_t)dp->door_pc;
	dip->di_data = (door_ptr_t)(uintptr_t)dp->door_data;
	dip->di_uniquifier = dp->door_index;
	/*
	 * If this door is in the middle of having an unreferenced
	 * notification delivered, don't count the VN_HOLD by
	 * door_deliver_unref in determining if it is unreferenced.
	 * This handles the case where door_info is called from the
	 * thread delivering the unref notification.
	 */
	if (dp->door_flags & DOOR_UNREF_ACTIVE)
		unref_count = 2;
	else
		unref_count = 1;
	mutex_exit(&door_knob);

	if (fp == NULL) {
		/*
		 * If this thread is bound to the door, then we can just
		 * check the vnode; a ref count of 1 (or 2 if this is
		 * handling an unref notification) means that the hold
		 * from the door_bind is the only reference to the door
		 * (no file descriptor refers to it).
		 */
		if (DTOV(dp)->v_count == unref_count)
			dip->di_attributes |= DOOR_IS_UNREF;
	} else {
		/*
		 * If we're working from a file descriptor or door handle
		 * we need to look at the file structure count.  We don't
		 * need to hold the vnode lock since this is just a snapshot.
		 */
		mutex_enter(&fp->f_tlock);
		if (fp->f_count == 1 && DTOV(dp)->v_count == unref_count)
			dip->di_attributes |= DOOR_IS_UNREF;
		mutex_exit(&fp->f_tlock);
	}
}

/*
 * Return credentials of the door caller (if any) for this invocation
 */
int
door_ucred(struct ucred_s *uch)
{
	kthread_t	*caller;
	door_server_t	*st;
	door_client_t	*ct;
	struct proc	*p;
	struct ucred_s	*res;
	int		err;

	mutex_enter(&door_knob);
	if ((st = door_my_server(0)) == NULL ||
	    (caller = st->d_caller) == NULL) {
		mutex_exit(&door_knob);
		return (set_errno(EINVAL));
	}

	ASSERT(caller->t_door != NULL);
	ct = DOOR_CLIENT(caller->t_door);

	/* Prevent caller from exiting while we examine the cred */
	DOOR_T_HOLD(ct);
	mutex_exit(&door_knob);

	/* Get the credentials of the calling process */
	p = ttoproc(caller);

	res = pgetucred(p);

	mutex_enter(&door_knob);
	DOOR_T_RELEASE(ct);
	mutex_exit(&door_knob);

	err = copyout(res, uch, res->uc_size);

	kmem_free(res, res->uc_size);

	if (err != 0)
		return (set_errno(EFAULT));

	return (0);
}

/*
 * Bind the current lwp to the server thread pool associated with 'did'
 */
int
door_bind(int did)
{
	door_node_t	*dp;
	door_server_t	*st;

	if ((dp = door_lookup(did, NULL)) == NULL) {
		/* Not a door */
		return (set_errno(EBADF));
	}

	/*
	 * Can't bind to a non-private door, and can't bind to a door
	 * served by another process.
	 */
	if ((dp->door_flags & DOOR_PRIVATE) == 0 ||
	    dp->door_target != curproc) {
		releasef(did);
		return (set_errno(EINVAL));
	}

	st = door_my_server(1);
	if (st->d_pool)
		door_unbind_thread(st->d_pool);
	st->d_pool = dp;
	st->d_invbound = 0;
	door_bind_thread(dp);
	releasef(did);

	return (0);
}

/*
 * Unbind the current lwp from its server thread pool
 */
int
door_unbind(void)
{
	door_server_t *st;

	if ((st = door_my_server(0)) == NULL)
		return (set_errno(EBADF));

	if (st->d_invbound) {
		ASSERT(st->d_pool == NULL);
		st->d_invbound = 0;
		return (0);
	}
	if (st->d_pool == NULL)
		return (set_errno(EBADF));
	door_unbind_thread(st->d_pool);
	st->d_pool = NULL;
	return (0);
}

/*
 * Create a descriptor for the associated file and fill in the
 * attributes associated with it.
 *
 * Return 0 for success, -1 otherwise.
 */
int
door_insert(struct file *fp, door_desc_t *dp)
{
	struct vnode *vp;
	int	fd;
	door_attr_t attributes = DOOR_DESCRIPTOR;

	ASSERT(MUTEX_NOT_HELD(&door_knob));
	if ((fd = ufalloc(0)) == -1)
		return (-1);
	setf(fd, fp);
	dp->d_data.d_desc.d_descriptor = fd;

	/* Fill in the attributes */
	if (VOP_REALVP(fp->f_vnode, &vp, NULL))
		vp = fp->f_vnode;
	if (vp && vp->v_type == VDOOR) {
		if (VTOD(vp)->door_target == curproc)
			attributes |= DOOR_LOCAL;
		attributes |= VTOD(vp)->door_flags & DOOR_ATTR_MASK;
		dp->d_data.d_desc.d_id = VTOD(vp)->door_index;
	}
	dp->d_attributes = attributes;
	return (0);
}

/*
 * Return an available thread for this server.  A NULL return value indicates
 * that either:
 *	The door has been revoked, or
 *	a signal was received.
 * The two conditions can be differentiated using DOOR_INVALID(dp).
 */
static kthread_t *
door_get_server(door_node_t *dp)
{
	kthread_t **ktp;
	kthread_t *server_t;
	door_pool_t *pool;
	door_server_t *st;
	int signalled;

	disp_lock_t *tlp;
	cpu_t *cp;

	ASSERT(MUTEX_HELD(&door_knob));

	if (dp->door_flags & DOOR_PRIVATE)
		pool = &dp->door_servers;
	else
		pool = &dp->door_target->p_server_threads;

	for (;;) {
		/*
		 * We search the thread pool, looking for a server thread
		 * ready to take an invocation (i.e. one which is still
		 * sleeping on a shuttle object).  If none are available,
		 * we sleep on the pool's CV, and will be signaled when a
		 * thread is added to the pool.
		 *
		 * This relies on the fact that once a thread in the thread
		 * pool wakes up, it *must* remove and add itself to the pool
		 * before it can receive door calls.
		 */
		if (DOOR_INVALID(dp))
			return (NULL);	/* Target has become invalid */

		for (ktp = &pool->dp_threads;
		    (server_t = *ktp) != NULL;
		    ktp = &st->d_servers) {
			st = DOOR_SERVER(server_t->t_door);

			thread_lock(server_t);
			if (server_t->t_state == TS_SLEEP &&
			    SOBJ_TYPE(server_t->t_sobj_ops) == SOBJ_SHUTTLE)
				break;
			thread_unlock(server_t);
		}
		if (server_t != NULL)
			break;		/* we've got a live one! */

		if (!cv_wait_sig_swap_core(&pool->dp_cv, &door_knob,
		    &signalled)) {
			/*
			 * If we were signaled and the door is still
			 * valid, pass the signal on to another waiter.
			 */
			if (signalled && !DOOR_INVALID(dp))
				cv_signal(&pool->dp_cv);
			return (NULL);	/* Got a signal */
		}
	}

	/*
	 * We've got a thread_lock()ed thread which is still on the
	 * shuttle.  Take it off the list of available server threads
	 * and mark it as ONPROC.  We are committed to resuming this
	 * thread now.
	 */
	tlp = server_t->t_lockp;
	cp = CPU;

	*ktp = st->d_servers;
	st->d_servers = NULL;
	/*
	 * Setting t_disp_queue prevents erroneous preemptions
	 * if this thread is still in execution on another processor
	 */
	server_t->t_disp_queue = cp->cpu_disp;
	CL_ACTIVE(server_t);
	/*
	 * We are calling thread_onproc() instead of
	 * THREAD_ONPROC() because the compiler can reorder
	 * the two stores of t_state and t_lockp in
	 * THREAD_ONPROC().
	 */
	thread_onproc(server_t, cp);
	disp_lock_exit(tlp);
	return (server_t);
}

/*
 * Put a server thread back in the pool.
 */
static void
door_release_server(door_node_t *dp, kthread_t *t)
{
	door_server_t *st = DOOR_SERVER(t->t_door);
	door_pool_t *pool;

	ASSERT(MUTEX_HELD(&door_knob));
	st->d_active = NULL;
	st->d_caller = NULL;
	st->d_layout_done = 0;
	if (dp && (dp->door_flags & DOOR_PRIVATE)) {
		ASSERT(dp->door_target == NULL ||
		    dp->door_target == ttoproc(t));
		pool = &dp->door_servers;
	} else {
		pool = &ttoproc(t)->p_server_threads;
	}

	st->d_servers = pool->dp_threads;
	pool->dp_threads = t;

	/* If someone is waiting for a server thread, wake him up */
	cv_signal(&pool->dp_cv);
}

/*
 * Remove a server thread from the pool if present.
 */
static int
door_server_exit(proc_t *p, kthread_t *t)
{
	door_pool_t *pool;
	kthread_t **next;
	door_server_t *st = DOOR_SERVER(t->t_door);
	int empty = 1;	/* assume door server list is empty */

	ASSERT(MUTEX_HELD(&door_knob));
	if (st->d_pool != NULL) {
		ASSERT(st->d_pool->door_flags & DOOR_PRIVATE);
		pool = &st->d_pool->door_servers;
	} else {
		pool = &p->p_server_threads;
	}

	next = &pool->dp_threads;
	while (*next != NULL) {
		if (*next == t) {
			*next = DOOR_SERVER(t->t_door)->d_servers;
			if (empty && *next != NULL) {
				/* door server list is not empty */
				empty = 0;
			}

			return (empty);
		}
		next = &(DOOR_SERVER((*next)->t_door)->d_servers);

		/* door server list is not empty */
		empty = 0;
	}

	/*
	 * If empty is set to 1, it means that there are no available
	 * door server threads, hence the caller should not let the last
	 * server thread go away.  If empty is 0, then it means that
	 * there is at least one available door server thread in the
	 * pool.
	 */
	return (empty);
}

/*
 * Lookup the door descriptor.  Caller must call releasef when finished
 * with associated door.
 */
static door_node_t *
door_lookup(int did, file_t **fpp)
{
	vnode_t	*vp;
	file_t *fp;

	ASSERT(MUTEX_NOT_HELD(&door_knob));
	if ((fp = getf(did)) == NULL)
		return (NULL);
	/*
	 * Use the underlying vnode (we may be namefs mounted)
	 */
	if (VOP_REALVP(fp->f_vnode, &vp, NULL))
		vp = fp->f_vnode;

	if (vp == NULL || vp->v_type != VDOOR) {
		releasef(did);
		return (NULL);
	}

	if (fpp)
		*fpp = fp;

	return (VTOD(vp));
}

/*
 * The current thread is exiting, so clean up any pending
 * invocation details
 */
void
door_slam(void)
{
	door_node_t *dp;
	door_data_t *dt;
	door_client_t *ct;
	door_server_t *st;

	/*
	 * If we are an active door server, notify our
	 * client that we are exiting and revoke our door.
	 */
	if ((dt = door_my_data(0)) == NULL)
		return;
	ct = DOOR_CLIENT(dt);
	st = DOOR_SERVER(dt);

	mutex_enter(&door_knob);
	for (;;) {
		if (DOOR_T_HELD(ct))
			cv_wait(&ct->d_cv, &door_knob);
		else if (DOOR_T_HELD(st))
			cv_wait(&st->d_cv, &door_knob);
		else
			break;			/* neither flag is set */
	}
	curthread->t_door = NULL;
	if ((dp = st->d_active) != NULL) {
		kthread_t *t = st->d_caller;
		proc_t *p = curproc;

		/* Revoke our door if the process is exiting */
		if (dp->door_target == p && (p->p_flag & SEXITING)) {
			door_list_delete(dp);
			dp->door_target = NULL;
			dp->door_flags |= DOOR_REVOKED;
			if (dp->door_flags & DOOR_PRIVATE)
				cv_broadcast(&dp->door_servers.dp_cv);
			else
				cv_broadcast(&p->p_server_threads.dp_cv);
		}

		if (t != NULL) {
			/*
			 * Let the caller know we are gone
			 */
			DOOR_CLIENT(t->t_door)->d_error = DOOR_EXIT;
			thread_lock(t);
			if (t->t_state == TS_SLEEP &&
			    SOBJ_TYPE(t->t_sobj_ops) == SOBJ_SHUTTLE)
				setrun_locked(t);
			thread_unlock(t);
		}
	}
	mutex_exit(&door_knob);
	if (st->d_pool)
		door_unbind_thread(st->d_pool);	/* Implicit door_unbind */
	kmem_free(dt, sizeof (door_data_t));
}

/*
 * Set DOOR_REVOKED for all doors of the current process.  This is called
 * on exit before all lwp's are being terminated so that door calls will
 * return with an error.
 */
void
door_revoke_all()
{
	door_node_t *dp;
	proc_t *p = ttoproc(curthread);

	mutex_enter(&door_knob);
	for (dp = p->p_door_list; dp != NULL; dp = dp->door_list) {
		ASSERT(dp->door_target == p);
		dp->door_flags |= DOOR_REVOKED;
		if (dp->door_flags & DOOR_PRIVATE)
			cv_broadcast(&dp->door_servers.dp_cv);
	}
	cv_broadcast(&p->p_server_threads.dp_cv);
	mutex_exit(&door_knob);
}

/*
 * The process is exiting, and all doors it created need to be revoked.
 */
void
door_exit(void)
{
	door_node_t *dp;
	proc_t *p = ttoproc(curthread);

	ASSERT(p->p_lwpcnt == 1);
	/*
	 * Walk the list of active doors created by this process and
	 * revoke them all.
	 */
	mutex_enter(&door_knob);
	for (dp = p->p_door_list; dp != NULL; dp = dp->door_list) {
		dp->door_target = NULL;
		dp->door_flags |= DOOR_REVOKED;
		if (dp->door_flags & DOOR_PRIVATE)
			cv_broadcast(&dp->door_servers.dp_cv);
	}
	cv_broadcast(&p->p_server_threads.dp_cv);
	/* Clear the list */
	p->p_door_list = NULL;

	/* Clean up the unref list */
	while ((dp = p->p_unref_list) != NULL) {
		p->p_unref_list = dp->door_ulist;
		dp->door_ulist = NULL;
		mutex_exit(&door_knob);
		VN_RELE(DTOV(dp));
		mutex_enter(&door_knob);
	}
	mutex_exit(&door_knob);
}


/*
 * The process is executing forkall(), and we need to flag threads that
 * are bound to a door in the child.  This will make the child threads
 * return an error to door_return unless they call door_unbind first.
 */
void
door_fork(kthread_t *parent, kthread_t *child)
{
	door_data_t *pt = parent->t_door;
	door_server_t *st = DOOR_SERVER(pt);
	door_data_t *dt;

	ASSERT(MUTEX_NOT_HELD(&door_knob));
	if (pt != NULL && (st->d_pool != NULL || st->d_invbound)) {
		/* parent thread is bound to a door */
		dt = child->t_door =
		    kmem_zalloc(sizeof (door_data_t), KM_SLEEP);
		DOOR_SERVER(dt)->d_invbound = 1;
	}
}

/*
 * Deliver queued unrefs to appropriate door server.
 */
static int
door_unref(void)
{
	door_node_t	*dp;
	static door_arg_t unref_args = { DOOR_UNREF_DATA, 0, 0, 0, 0, 0 };
	proc_t *p = ttoproc(curthread);

	/* make sure there's only one unref thread per process */
	mutex_enter(&door_knob);
	if (p->p_unref_thread) {
		mutex_exit(&door_knob);
		return (set_errno(EALREADY));
	}
	p->p_unref_thread = 1;
	mutex_exit(&door_knob);

	(void) door_my_data(1);		/* create info, if necessary */

	for (;;) {
		mutex_enter(&door_knob);

		/* Grab a queued request */
		while ((dp = p->p_unref_list) == NULL) {
			if (!cv_wait_sig(&p->p_unref_cv, &door_knob)) {
				/*
				 * Interrupted.
				 * Return so we can finish forkall() or exit().
				 */
				p->p_unref_thread = 0;
				mutex_exit(&door_knob);
				return (set_errno(EINTR));
			}
		}
		p->p_unref_list = dp->door_ulist;
		dp->door_ulist = NULL;
		dp->door_flags |= DOOR_UNREF_ACTIVE;
		mutex_exit(&door_knob);

		(void) door_upcall(DTOV(dp), &unref_args);

		mutex_enter(&door_knob);
		ASSERT(dp->door_flags & DOOR_UNREF_ACTIVE);
		dp->door_flags &= ~DOOR_UNREF_ACTIVE;
		mutex_exit(&door_knob);
		VN_RELE(DTOV(dp));
	}
}


/*
 * Deliver queued unrefs to kernel door server.
 */
/* ARGSUSED */
static void
door_unref_kernel(caddr_t arg)
{
	door_node_t	*dp;
	static door_arg_t unref_args = { DOOR_UNREF_DATA, 0, 0, 0, 0, 0 };
	proc_t *p = ttoproc(curthread);
	callb_cpr_t cprinfo;

	/* should only be one of these */
	mutex_enter(&door_knob);
	if (p->p_unref_thread) {
		mutex_exit(&door_knob);
		return;
	}
	p->p_unref_thread = 1;
	mutex_exit(&door_knob);

	(void) door_my_data(1);		/* make sure we have a door_data_t */

	CALLB_CPR_INIT(&cprinfo, &door_knob, callb_generic_cpr, "door_unref");
	for (;;) {
		mutex_enter(&door_knob);
		/* Grab a queued request */
		while ((dp = p->p_unref_list) == NULL) {
			CALLB_CPR_SAFE_BEGIN(&cprinfo);
			cv_wait(&p->p_unref_cv, &door_knob);
			CALLB_CPR_SAFE_END(&cprinfo, &door_knob);
		}
		p->p_unref_list = dp->door_ulist;
		dp->door_ulist = NULL;
		dp->door_flags |= DOOR_UNREF_ACTIVE;
		mutex_exit(&door_knob);

		(*(dp->door_pc))(dp->door_data, &unref_args, NULL, NULL, NULL);

		mutex_enter(&door_knob);
		ASSERT(dp->door_flags & DOOR_UNREF_ACTIVE);
		dp->door_flags &= ~DOOR_UNREF_ACTIVE;
		mutex_exit(&door_knob);
		VN_RELE(DTOV(dp));
	}
}
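
/*
 * Illustrative sketch (editorial addition, not part of the original
 * source): from a user-level server's point of view, the unref
 * notifications queued by the routines above arrive as an ordinary door
 * invocation whose argument pointer equals DOOR_UNREF_DATA.  A
 * hypothetical server procedure might check for it like this:
 *
 *	void
 *	my_servproc(void *cookie, char *argp, size_t arg_size,
 *	    door_desc_t *dp, uint_t n_desc)
 *	{
 *		if (argp == DOOR_UNREF_DATA) {
 *			... last reference has gone away; clean up ...
 *			(void) door_return(NULL, 0, NULL, 0);
 *		}
 *		... normal request handling ...
 *	}
 */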

/*
 * Queue an unref invocation for processing for the current process.
 * The door may or may not be revoked at this point.
 */
void
door_deliver_unref(door_node_t *d)
{
	struct proc *server = d->door_target;

	ASSERT(MUTEX_HELD(&door_knob));
	ASSERT(d->door_active == 0);

	if (server == NULL)
		return;
	/*
	 * Create a lwp to deliver unref calls if one isn't already running.
	 *
	 * A separate thread is used to deliver unrefs since the current
	 * thread may be holding resources (e.g. locks) in user land that
	 * may be needed by the unref processing.  This would cause a
	 * deadlock.
	 */
	if (d->door_flags & DOOR_UNREF_MULTI) {
		/* multiple unrefs */
		d->door_flags &= ~DOOR_DELAY;
	} else {
		/* Only 1 unref per door */
		d->door_flags &= ~(DOOR_UNREF|DOOR_DELAY);
	}
	mutex_exit(&door_knob);

	/*
	 * Need to bump the vnode count before putting the door on the
	 * list so it doesn't get prematurely released by door_unref.
	 */
	VN_HOLD(DTOV(d));

	mutex_enter(&door_knob);
	/* is this door already on the unref list? */
	if (d->door_flags & DOOR_UNREF_MULTI) {
		door_node_t *dp;
		for (dp = server->p_unref_list; dp != NULL;
		    dp = dp->door_ulist) {
			if (d == dp) {
				/* already there, don't need to add another */
				mutex_exit(&door_knob);
				VN_RELE(DTOV(d));
				mutex_enter(&door_knob);
				return;
			}
		}
	}
	ASSERT(d->door_ulist == NULL);
	d->door_ulist = server->p_unref_list;
	server->p_unref_list = d;
	cv_broadcast(&server->p_unref_cv);
}

/*
 * The caller's buffer isn't big enough for all of the data/fd's.  Allocate
 * space in the caller's address space for the results and copy the data
 * there.
 *
 * For EOVERFLOW, we must clean up the server's door descriptors.
 */
static int
door_overflow(
	kthread_t	*caller,
	caddr_t		data_ptr,	/* data location */
	size_t		data_size,	/* data size */
	door_desc_t	*desc_ptr,	/* descriptor location */
	uint_t		desc_num)	/* descriptor count */
{
	proc_t *callerp = ttoproc(caller);
	struct as *as = callerp->p_as;
	door_client_t *ct = DOOR_CLIENT(caller->t_door);
	caddr_t	addr;			/* Resulting address in target */
	size_t	rlen;			/* Rounded len */
	size_t	len;
	uint_t	i;
	size_t	ds = desc_num * sizeof (door_desc_t);

	ASSERT(MUTEX_NOT_HELD(&door_knob));
	ASSERT(DOOR_T_HELD(ct) || ct->d_kernel);

	/* Do initial overflow check */
	if (!ufcanalloc(callerp, desc_num))
		return (EMFILE);

	/*
	 * Allocate space for this stuff in the caller's address space
	 */
	rlen = roundup(data_size + ds, PAGESIZE);
	as_rangelock(as);
	map_addr_proc(&addr, rlen, 0, 1, as->a_userlimit, ttoproc(caller), 0);
	if (addr == NULL ||
	    as_map(as, addr, rlen, segvn_create, zfod_argsp) != 0) {
		/* No virtual memory available, or anon mapping failed */
		as_rangeunlock(as);
		if (!ct->d_kernel && desc_num > 0) {
			int error = door_release_fds(desc_ptr, desc_num);
			if (error)
				return (error);
		}
		return (EOVERFLOW);
	}
	as_rangeunlock(as);

	if (ct->d_kernel)
		goto out;

	if (data_size != 0) {
		caddr_t	src = data_ptr;
		caddr_t	saddr = addr;

		/* Copy any data */
		len = data_size;
		while (len != 0) {
			int	amount;
			int	error;

			amount = len > PAGESIZE ?
			    PAGESIZE : len;
			if ((error = door_copy(as, src, saddr, amount)) != 0) {
				(void) as_unmap(as, addr, rlen);
				return (error);
			}
			saddr += amount;
			src += amount;
			len -= amount;
		}
	}
	/* Copy any fd's */
	if (desc_num != 0) {
		door_desc_t	*didpp, *start;
		struct file	**fpp;
		int		fpp_size;

		start = didpp = kmem_alloc(ds, KM_SLEEP);
		if (copyin(desc_ptr, didpp, ds)) {
			kmem_free(start, ds);
			(void) as_unmap(as, addr, rlen);
			return (EFAULT);
		}

		fpp_size = desc_num * sizeof (struct file *);
		if (fpp_size > ct->d_fpp_size) {
			/* make more space */
			if (ct->d_fpp_size)
				kmem_free(ct->d_fpp, ct->d_fpp_size);
			ct->d_fpp_size = fpp_size;
			ct->d_fpp = kmem_alloc(ct->d_fpp_size, KM_SLEEP);
		}
		fpp = ct->d_fpp;

		for (i = 0; i < desc_num; i++) {
			struct file *fp;
			int fd = didpp->d_data.d_desc.d_descriptor;

			if (!(didpp->d_attributes & DOOR_DESCRIPTOR) ||
			    (fp = getf(fd)) == NULL) {
				/* close translated references */
				door_fp_close(ct->d_fpp, fpp - ct->d_fpp);
				/* close untranslated references */
				door_fd_rele(didpp, desc_num - i, 0);
				kmem_free(start, ds);
				(void) as_unmap(as, addr, rlen);
				return (EINVAL);
			}
			mutex_enter(&fp->f_tlock);
			fp->f_count++;
			mutex_exit(&fp->f_tlock);

			*fpp = fp;
			releasef(fd);

			if (didpp->d_attributes & DOOR_RELEASE) {
				/* release passed reference */
				(void) closeandsetf(fd, NULL);
			}

			fpp++; didpp++;
		}
		kmem_free(start, ds);
	}

out:
	ct->d_overflow = 1;
	ct->d_args.rbuf = addr;
	ct->d_args.rsize = rlen;
	return (0);
}

/*
 * Transfer arguments from the client to the server.
 */
static int
door_args(kthread_t *server, int is_private)
{
	door_server_t *st = DOOR_SERVER(server->t_door);
	door_client_t *ct = DOOR_CLIENT(curthread->t_door);
	uint_t ndid;
	size_t dsize;
	int error;

	ASSERT(DOOR_T_HELD(st));
	ASSERT(MUTEX_NOT_HELD(&door_knob));

	ndid = ct->d_args.desc_num;
	if (ndid > door_max_desc)
		return (E2BIG);

	/*
	 * Get the stack layout, and fail now if it won't fit.
	 */
	error = door_layout(server, ct->d_args.data_size, ndid, is_private);
	if (error != 0)
		return (error);

	dsize = ndid * sizeof (door_desc_t);
	if (ct->d_args.data_size != 0) {
		if (ct->d_args.data_size <= door_max_arg) {
			/*
			 * Use a 2 copy method for small amounts of data
			 *
			 * Allocate a little more than we need for the
			 * args, in the hope that the results will fit
			 * without having to reallocate a buffer
			 */
			ASSERT(ct->d_buf == NULL);
			ct->d_bufsize = roundup(ct->d_args.data_size,
			    DOOR_ROUND);
			ct->d_buf = kmem_alloc(ct->d_bufsize, KM_SLEEP);
			if (copyin(ct->d_args.data_ptr,
			    ct->d_buf, ct->d_args.data_size) != 0) {
				kmem_free(ct->d_buf, ct->d_bufsize);
				ct->d_buf = NULL;
				ct->d_bufsize = 0;
				return (EFAULT);
			}
		} else {
			struct as	*as;
			caddr_t		src;
			caddr_t		dest;
			size_t		len = ct->d_args.data_size;
			uintptr_t	base;

			/*
			 * Use a 1 copy method
			 */
			as = ttoproc(server)->p_as;
			src = ct->d_args.data_ptr;

			dest = st->d_layout.dl_datap;
			base = (uintptr_t)dest;

			/*
			 * Copy data directly into server.  We proceed
			 * downward from the top of the stack, to mimic
			 * normal stack usage.  This allows the guard page
			 * to stop us before we corrupt anything.
			 */
			while (len != 0) {
				uintptr_t start;
				uintptr_t end;
				uintptr_t offset;
				size_t amount;

				/*
				 * Locate the next part to copy.
				 */
				end = base + len;
				start = P2ALIGN(end - 1, PAGESIZE);

				/*
				 * if we are on the final (first) page, fix
				 * up the start position.
				 */
				if (P2ALIGN(base, PAGESIZE) == start)
					start = base;

				offset = start - base;	/* the copy offset */
				amount = end - start;	/* # bytes to copy */

				ASSERT(amount > 0 && amount <= len &&
				    amount <= PAGESIZE);

				error = door_copy(as, src + offset,
				    dest + offset, amount);
				if (error != 0)
					return (error);
				len -= amount;
			}
		}
	}
	/*
	 * Copyin the door args and translate them into files
	 */
	if (ndid != 0) {
		door_desc_t	*didpp;
		door_desc_t	*start;
		struct file	**fpp;

		start = didpp = kmem_alloc(dsize, KM_SLEEP);

		if (copyin(ct->d_args.desc_ptr, didpp, dsize)) {
			kmem_free(start, dsize);
			return (EFAULT);
		}
		ct->d_fpp_size = ndid * sizeof (struct file *);
		ct->d_fpp = kmem_alloc(ct->d_fpp_size, KM_SLEEP);
		fpp = ct->d_fpp;
		while (ndid--) {
			struct file *fp;
			int fd = didpp->d_data.d_desc.d_descriptor;

			/* We only understand file descriptors as passed objs */
			if (!(didpp->d_attributes & DOOR_DESCRIPTOR) ||
			    (fp = getf(fd)) == NULL) {
				/* close translated references */
				door_fp_close(ct->d_fpp, fpp - ct->d_fpp);
				/* close untranslated references */
				door_fd_rele(didpp, ndid + 1, 0);
				kmem_free(start, dsize);
				kmem_free(ct->d_fpp, ct->d_fpp_size);
				ct->d_fpp = NULL;
				ct->d_fpp_size = 0;
				return (EINVAL);
			}
			/* Hold the fp */
			mutex_enter(&fp->f_tlock);
			fp->f_count++;
			mutex_exit(&fp->f_tlock);

			*fpp = fp;
			releasef(fd);

			if (didpp->d_attributes & DOOR_RELEASE) {
				/* release passed reference */
				(void) closeandsetf(fd, NULL);
			}

			fpp++; didpp++;
		}
		kmem_free(start, dsize);
	}
	return (0);
}
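
/*
 * Illustrative sketch (editorial addition, not part of the original
 * source): the door_arg_t consumed by door_args() above is filled in by
 * the user-level client roughly as follows ("req" and "reply" are
 * hypothetical structures):
 *
 *	door_arg_t darg;
 *
 *	darg.data_ptr = (char *)&req;
 *	darg.data_size = sizeof (req);
 *	darg.desc_ptr = NULL;
 *	darg.desc_num = 0;
 *	darg.rbuf = (char *)&reply;
 *	darg.rsize = sizeof (reply);
 *
 *	if (door_call(fd, &darg) < 0)
 *		... errno holds the failure ...
 *
 * If the results do not fit in rbuf, door_overflow() maps a new buffer
 * in the caller's address space; darg.rbuf then points at that mapping,
 * which the client is expected to munmap() when finished.
 */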

/*
 * Transfer arguments from a user client to a kernel server.  This copies in
 * descriptors and translates them into door handles.  It doesn't touch the
 * other data, letting the kernel server deal with that (to avoid needing
 * to copy the data twice).
 */
static int
door_translate_in(void)
{
	door_client_t *ct = DOOR_CLIENT(curthread->t_door);
	uint_t ndid;

	ASSERT(MUTEX_NOT_HELD(&door_knob));
	ndid = ct->d_args.desc_num;
	if (ndid > door_max_desc)
		return (E2BIG);
	/*
	 * Copyin the door args and translate them into door handles.
	 */
	if (ndid != 0) {
		door_desc_t	*didpp;
		door_desc_t	*start;
		size_t		dsize = ndid * sizeof (door_desc_t);
		struct file	*fp;

		start = didpp = kmem_alloc(dsize, KM_SLEEP);

		if (copyin(ct->d_args.desc_ptr, didpp, dsize)) {
			kmem_free(start, dsize);
			return (EFAULT);
		}
		while (ndid--) {
			vnode_t	*vp;
			int fd = didpp->d_data.d_desc.d_descriptor;

			/*
			 * We only understand file descriptors as passed objs
			 */
			if ((didpp->d_attributes & DOOR_DESCRIPTOR) &&
			    (fp = getf(fd)) != NULL) {
				didpp->d_data.d_handle = FTODH(fp);
				/* Hold the door */
				door_ki_hold(didpp->d_data.d_handle);

				releasef(fd);

				if (didpp->d_attributes & DOOR_RELEASE) {
					/* release passed reference */
					(void) closeandsetf(fd, NULL);
				}

				if (VOP_REALVP(fp->f_vnode, &vp, NULL))
					vp = fp->f_vnode;

				/* Set attributes */
				didpp->d_attributes = DOOR_HANDLE |
				    (VTOD(vp)->door_flags & DOOR_ATTR_MASK);
			} else {
				/* close translated references */
				door_fd_close(start, didpp - start);
				/* close untranslated references */
				door_fd_rele(didpp, ndid + 1, 0);
				kmem_free(start, dsize);
				return (EINVAL);
			}
			didpp++;
		}
		ct->d_args.desc_ptr = start;
	}
	return (0);
}

/*
 * Translate door arguments from kernel to user.  This copies the passed
 * door handles.  It doesn't touch other data.  It is used by door_upcall,
 * and for data returned by a door_call to a kernel server.
 */
static int
door_translate_out(void)
{
	door_client_t *ct = DOOR_CLIENT(curthread->t_door);
	uint_t ndid;

	ASSERT(MUTEX_NOT_HELD(&door_knob));
	ndid = ct->d_args.desc_num;
	if (ndid > door_max_desc) {
		door_fd_rele(ct->d_args.desc_ptr, ndid, 1);
		return (E2BIG);
	}
	/*
	 * Translate the door args into files
	 */
	if (ndid != 0) {
		door_desc_t	*didpp = ct->d_args.desc_ptr;
		struct file	**fpp;

		ct->d_fpp_size = ndid * sizeof (struct file *);
		fpp = ct->d_fpp = kmem_alloc(ct->d_fpp_size, KM_SLEEP);
		while (ndid--) {
			struct file *fp = NULL;
			int fd = -1;

			/*
			 * We understand file descriptors and door
			 * handles as passed objs.
			 */
			if (didpp->d_attributes & DOOR_DESCRIPTOR) {
				fd = didpp->d_data.d_desc.d_descriptor;
				fp = getf(fd);
			} else if (didpp->d_attributes & DOOR_HANDLE)
				fp = DHTOF(didpp->d_data.d_handle);
			if (fp != NULL) {
				/* Hold the fp */
				mutex_enter(&fp->f_tlock);
				fp->f_count++;
				mutex_exit(&fp->f_tlock);

				*fpp = fp;
				if (didpp->d_attributes & DOOR_DESCRIPTOR)
					releasef(fd);
				if (didpp->d_attributes & DOOR_RELEASE) {
					/* release passed reference */
					if (fd >= 0)
						(void) closeandsetf(fd, NULL);
					else
						(void) closef(fp);
				}
			} else {
				/* close translated references */
				door_fp_close(ct->d_fpp, fpp - ct->d_fpp);
				/* close untranslated references */
				door_fd_rele(didpp, ndid + 1, 1);
				kmem_free(ct->d_fpp, ct->d_fpp_size);
				ct->d_fpp = NULL;
				ct->d_fpp_size = 0;
				return (EINVAL);
			}
			fpp++; didpp++;
		}
	}
	return (0);
}

/*
 * Move the results from the server to the client
 */
static int
door_results(kthread_t *caller, caddr_t data_ptr, size_t data_size,
    door_desc_t *desc_ptr, uint_t desc_num)
{
	door_client_t	*ct = DOOR_CLIENT(caller->t_door);
	size_t		dsize;
	size_t		rlen;
	size_t		result_size;

	ASSERT(DOOR_T_HELD(ct));
	ASSERT(MUTEX_NOT_HELD(&door_knob));

	if (ct->d_noresults)
		return (E2BIG);		/* No results expected */

	if (desc_num > door_max_desc)
		return (E2BIG);		/* Too many descriptors */

	dsize = desc_num * sizeof (door_desc_t);
	/*
	 * Check if the results are bigger than the client's buffer
	 */
	if (dsize)
		rlen = roundup(data_size, sizeof (door_desc_t));
	else
		rlen = data_size;
	if ((result_size = rlen + dsize) == 0)
		return (0);

	if (ct->d_upcall) {
		/*
		 * Handle upcalls
		 */
		if (ct->d_args.rbuf == NULL || ct->d_args.rsize < result_size) {
			/*
			 * If there's no return buffer or the buffer is too
			 * small, allocate a new one.  The old buffer (if it
			 * exists) will be freed by the upcall client.
			 */
			if (result_size > door_max_upcall_reply)
				return (E2BIG);
			ct->d_args.rsize = result_size;
			ct->d_args.rbuf = kmem_alloc(result_size, KM_SLEEP);
		}
		ct->d_args.data_ptr = ct->d_args.rbuf;
		if (data_size != 0 &&
		    copyin(data_ptr, ct->d_args.data_ptr, data_size) != 0)
			return (EFAULT);
	} else if (result_size > ct->d_args.rsize) {
		return (door_overflow(caller, data_ptr, data_size,
		    desc_ptr, desc_num));
	} else if (data_size != 0) {
		if (data_size <= door_max_arg) {
			/*
			 * Use a 2 copy method for small amounts of data
			 */
			if (ct->d_buf == NULL) {
				ct->d_bufsize = data_size;
				ct->d_buf = kmem_alloc(ct->d_bufsize, KM_SLEEP);
			} else if (ct->d_bufsize < data_size) {
				kmem_free(ct->d_buf, ct->d_bufsize);
				ct->d_bufsize = data_size;
				ct->d_buf = kmem_alloc(ct->d_bufsize, KM_SLEEP);
			}
			if (copyin(data_ptr, ct->d_buf, data_size) != 0)
				return (EFAULT);
		} else {
			struct as *as = ttoproc(caller)->p_as;
			caddr_t	dest = ct->d_args.rbuf;
			caddr_t	src = data_ptr;
			size_t	len = data_size;

			/* Copy data directly into client */
			while (len != 0) {
				uint_t	amount;
				uint_t	max;
				uint_t	off;
				int	error;

				off = (uintptr_t)dest & PAGEOFFSET;
				if (off)
					max = PAGESIZE - off;
				else
					max = PAGESIZE;
				amount = len > max ? max : len;
				error = door_copy(as, src, dest, amount);
				if (error != 0)
					return (error);
				dest += amount;
				src += amount;
				len -= amount;
			}
		}
	}

	/*
	 * Copyin the returned door ids and translate them into door_node_t
	 */
	if (desc_num != 0) {
		door_desc_t *start;
		door_desc_t *didpp;
		struct file **fpp;
		size_t	fpp_size;
		uint_t	i;

		/* First, check if we would overflow client */
		if (!ufcanalloc(ttoproc(caller), desc_num))
			return (EMFILE);

		start = didpp = kmem_alloc(dsize, KM_SLEEP);
		if (copyin(desc_ptr, didpp, dsize)) {
			kmem_free(start, dsize);
			return (EFAULT);
		}
		fpp_size = desc_num * sizeof (struct file *);
		if (fpp_size > ct->d_fpp_size) {
			/* make more space */
			if (ct->d_fpp_size)
				kmem_free(ct->d_fpp, ct->d_fpp_size);
			ct->d_fpp_size = fpp_size;
			ct->d_fpp = kmem_alloc(fpp_size, KM_SLEEP);
		}
		fpp = ct->d_fpp;

		for (i = 0; i < desc_num; i++) {
			struct file *fp;
			int fd = didpp->d_data.d_desc.d_descriptor;

			/* Only understand file descriptor results */
			if (!(didpp->d_attributes & DOOR_DESCRIPTOR) ||
			    (fp = getf(fd)) == NULL) {
				/* close translated references */
				door_fp_close(ct->d_fpp, fpp - ct->d_fpp);
				/* close untranslated references */
				door_fd_rele(didpp, desc_num - i, 0);
				kmem_free(start, dsize);
				return (EINVAL);
			}

			mutex_enter(&fp->f_tlock);
			fp->f_count++;
			mutex_exit(&fp->f_tlock);

			*fpp = fp;
			releasef(fd);

			if (didpp->d_attributes & DOOR_RELEASE) {
				/* release passed reference */
				(void) closeandsetf(fd, NULL);
			}

			fpp++; didpp++;
		}
		kmem_free(start, dsize);
	}
	return (0);
}

/*
 * Close all the descriptors.
 */
static void
door_fd_close(door_desc_t *d, uint_t n)
{
	uint_t	i;

	ASSERT(MUTEX_NOT_HELD(&door_knob));
	for (i = 0; i < n; i++) {
		if (d->d_attributes & DOOR_DESCRIPTOR) {
			(void) closeandsetf(
			    d->d_data.d_desc.d_descriptor, NULL);
		} else if (d->d_attributes & DOOR_HANDLE) {
			door_ki_rele(d->d_data.d_handle);
		}
		d++;
	}
}

/*
 * Close descriptors that have the DOOR_RELEASE attribute set.
 */
void
door_fd_rele(door_desc_t *d, uint_t n, int from_kernel)
{
	uint_t	i;

	ASSERT(MUTEX_NOT_HELD(&door_knob));
	for (i = 0; i < n; i++) {
		if (d->d_attributes & DOOR_RELEASE) {
			if (d->d_attributes & DOOR_DESCRIPTOR) {
				(void) closeandsetf(
				    d->d_data.d_desc.d_descriptor, NULL);
			} else if (from_kernel &&
			    (d->d_attributes & DOOR_HANDLE)) {
				door_ki_rele(d->d_data.d_handle);
			}
		}
		d++;
	}
}

/*
 * Copy descriptors into the kernel so we can release any marked
 * DOOR_RELEASE.
 */
int
door_release_fds(door_desc_t *desc_ptr, uint_t ndesc)
{
	size_t		dsize;
	door_desc_t	*didpp;
	uint_t		desc_num;

	ASSERT(MUTEX_NOT_HELD(&door_knob));
	ASSERT(ndesc != 0);

	desc_num = MIN(ndesc, door_max_desc);

	dsize = desc_num * sizeof (door_desc_t);
	didpp = kmem_alloc(dsize, KM_SLEEP);

	while (ndesc > 0) {
		uint_t count = MIN(ndesc, desc_num);

		if (copyin(desc_ptr, didpp, count * sizeof (door_desc_t))) {
			kmem_free(didpp, dsize);
			return (EFAULT);
		}
		door_fd_rele(didpp, count, 0);

		ndesc -= count;
		desc_ptr += count;
	}
	kmem_free(didpp, dsize);
	return (0);
}

/*
 * Decrement ref count on all the files passed
 */
static void
door_fp_close(struct file **fp, uint_t n)
{
	uint_t	i;

	ASSERT(MUTEX_NOT_HELD(&door_knob));

	for (i = 0; i < n; i++)
		(void) closef(fp[i]);
}

/*
 * Copy data from 'src' in current address space to 'dest' in 'as' for 'len'
 * bytes.
 *
 * Performs this using 1 mapin and 1 copy operation.
 *
 * We really should do more than 1 page at a time to improve
 * performance, but for now this is treated as an anomalous condition.
 */
static int
door_copy(struct as *as, caddr_t src, caddr_t dest, uint_t len)
{
	caddr_t	kaddr;
	caddr_t	rdest;
	uint_t	off;
	page_t	**pplist;
	page_t	*pp = NULL;
	int	error = 0;

	ASSERT(len <= PAGESIZE);
	off = (uintptr_t)dest & PAGEOFFSET;	/* offset within the page */
	rdest = (caddr_t)((uintptr_t)dest &
	    (uintptr_t)PAGEMASK);		/* Page boundary */
	ASSERT(off + len <= PAGESIZE);

	/*
	 * Lock down destination page.
	 */
	if (as_pagelock(as, &pplist, rdest, PAGESIZE, S_WRITE))
		return (E2BIG);
	/*
	 * Check if we have a shadow page list from as_pagelock.  If not,
	 * we took the slow path and have to find our page struct the hard
	 * way.
	 */
	if (pplist == NULL) {
		pfn_t	pfnum;

		/* MMU mapping is already locked down */
		AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
		pfnum = hat_getpfnum(as->a_hat, rdest);
		AS_LOCK_EXIT(as, &as->a_lock);

		/*
		 * TODO: The pfn step should not be necessary - need
		 * a hat_getpp() function.
		 */
		if (pf_is_memory(pfnum)) {
			pp = page_numtopp_nolock(pfnum);
			ASSERT(pp == NULL || PAGE_LOCKED(pp));
		} else
			pp = NULL;
		if (pp == NULL) {
			as_pageunlock(as, pplist, rdest, PAGESIZE, S_WRITE);
			return (E2BIG);
		}
	} else {
		pp = *pplist;
	}
	/*
	 * Map destination page into kernel address
	 */
	kaddr = (caddr_t)ppmapin(pp, PROT_READ | PROT_WRITE, (caddr_t)-1);

	/*
	 * Copy from src to dest
	 */
	if (copyin(src, kaddr + off, len) != 0)
		error = EFAULT;
	/*
	 * Unmap destination page from kernel
	 */
	ppmapout(kaddr);
	/*
	 * Unlock destination page
	 */
	as_pageunlock(as, pplist, rdest, PAGESIZE, S_WRITE);
	return (error);
}

/*
 * General kernel upcall using doors
 * Returns 0 on success, errno for failures.
 * Caller must have a hold on the door based vnode, and on any
 * references passed in desc_ptr.  The references are released
 * in the event of an error, and passed without duplication
 * otherwise.  Note that param->rbuf must be 64-bit aligned in
 * a 64-bit kernel, since it may be used to store door descriptors
 * if they are returned by the server.
 */
int
door_upcall(vnode_t *vp, door_arg_t *param)
{
	/* Locals */
	door_node_t	*dp;
	kthread_t	*server_thread;
	int		error = 0;
	klwp_t		*lwp;
	door_client_t	*ct;		/* curthread door_data */
	door_server_t	*st;		/* server thread door_data */
	int		gotresults = 0;

	if (vp->v_type != VDOOR) {
		if (param->desc_num)
			door_fd_rele(param->desc_ptr, param->desc_num, 1);
		return (EINVAL);
	}

	lwp = ttolwp(curthread);
	ct = door_my_client(1);
	dp = VTOD(vp);			/* Convert to a door_node_t */

	mutex_enter(&door_knob);
	if (DOOR_INVALID(dp)) {
		mutex_exit(&door_knob);
		if (param->desc_num)
			door_fd_rele(param->desc_ptr, param->desc_num, 1);
		error = EBADF;
		goto out;
	}

	if (dp->door_target == &p0) {
		/* Can't do an upcall to a kernel server */
		mutex_exit(&door_knob);
		if (param->desc_num)
			door_fd_rele(param->desc_ptr, param->desc_num, 1);
		error = EINVAL;
		goto out;
	}

	error = door_check_limits(dp, param, 1);
	if (error != 0) {
		mutex_exit(&door_knob);
		if (param->desc_num)
			door_fd_rele(param->desc_ptr, param->desc_num, 1);
		goto out;
	}

	/*
	 * Get a server thread from the target domain
	 */
	if ((server_thread = door_get_server(dp)) == NULL) {
		if (DOOR_INVALID(dp))
			error = EBADF;
		else
			error = EAGAIN;
		mutex_exit(&door_knob);
		if (param->desc_num)
			door_fd_rele(param->desc_ptr, param->desc_num, 1);
		goto out;
	}

	st = DOOR_SERVER(server_thread->t_door);
	ct->d_buf = param->data_ptr;
	ct->d_bufsize = param->data_size;
	ct->d_args = *param;		/* structure assignment */

	if (ct->d_args.desc_num) {
		/*
		 * Move data from client to server
		 */
		DOOR_T_HOLD(st);
		mutex_exit(&door_knob);
		error = door_translate_out();
		mutex_enter(&door_knob);
		DOOR_T_RELEASE(st);
		if (error) {
			/*
			 * We're not going to resume this thread after all
			 */
			door_release_server(dp, server_thread);
			shuttle_sleep(server_thread);
			mutex_exit(&door_knob);
			goto out;
		}
	}

	ct->d_upcall = 1;
	if (param->rsize == 0)
		ct->d_noresults = 1;
	else
		ct->d_noresults = 0;

	dp->door_active++;

	ct->d_error = DOOR_WAIT;
	st->d_caller = curthread;
	st->d_active = dp;

	shuttle_resume(server_thread, &door_knob);

	mutex_enter(&door_knob);
shuttle_return:
	if ((error = ct->d_error) < 0) {	/* DOOR_WAIT or DOOR_EXIT */
		/*
		 * Premature wakeup. Find out why (stop, forkall, sig, exit ...)
		 */
		mutex_exit(&door_knob);		/* May block in ISSIG */
		if (lwp && (ISSIG(curthread, FORREAL) ||
		    lwp->lwp_sysabort || MUSTRETURN(curproc, curthread))) {
			/* Signal, forkall, ... */
			lwp->lwp_sysabort = 0;
			mutex_enter(&door_knob);
			error = EINTR;
			/*
			 * If the server has finished processing our call,
			 * or exited (calling door_slam()), then d_error
			 * will have changed.  If the server hasn't finished
			 * yet, d_error will still be DOOR_WAIT, and we
			 * let it know we are not interested in any
			 * results by sending a SIGCANCEL, unless the door
			 * is marked with DOOR_NO_CANCEL.
			 */
			if (ct->d_error == DOOR_WAIT &&
			    st->d_caller == curthread) {
				proc_t	*p = ttoproc(server_thread);

				st->d_active = NULL;
				st->d_caller = NULL;
				if (!(dp->door_flags & DOOR_NO_CANCEL)) {
					DOOR_T_HOLD(st);
					mutex_exit(&door_knob);

					mutex_enter(&p->p_lock);
					sigtoproc(p, server_thread, SIGCANCEL);
					mutex_exit(&p->p_lock);

					mutex_enter(&door_knob);
					DOOR_T_RELEASE(st);
				}
			}
		} else {
			/*
			 * Return from stop(), server exit...
			 *
			 * Note that the server could have done a
			 * door_return while the client was in stop state
			 * (ISSIG), in which case the error condition
			 * is updated by the server.
			 */
			mutex_enter(&door_knob);
			if (ct->d_error == DOOR_WAIT) {
				/* Still waiting for a reply */
				shuttle_swtch(&door_knob);
				mutex_enter(&door_knob);
				if (lwp)
					lwp->lwp_asleep = 0;
				goto shuttle_return;
			} else if (ct->d_error == DOOR_EXIT) {
				/* Server exit */
				error = EINTR;
			} else {
				/* Server did a door_return during ISSIG */
				error = ct->d_error;
			}
		}
		/*
		 * Can't exit if the server is currently copying
		 * results for me
		 */
		while (DOOR_T_HELD(ct))
			cv_wait(&ct->d_cv, &door_knob);

		/*
		 * Find out if results were successfully copied.
		 */
		if (ct->d_error == 0)
			gotresults = 1;
	}
	if (lwp) {
		lwp->lwp_asleep = 0;		/* /proc */
		lwp->lwp_sysabort = 0;		/* /proc */
	}
	if (--dp->door_active == 0 && (dp->door_flags & DOOR_DELAY))
		door_deliver_unref(dp);
	mutex_exit(&door_knob);

	/*
	 * Translate returned doors (if any)
	 */

	if (ct->d_noresults)
		goto out;

	if (error) {
		/*
		 * If the server returned results successfully, then we've
		 * been interrupted and may need to clean up.
		 */
		if (gotresults) {
			ASSERT(error == EINTR);
			door_fp_close(ct->d_fpp, ct->d_args.desc_num);
		}
		goto out;
	}

	if (ct->d_args.desc_num) {
		struct file	**fpp;
		door_desc_t	*didpp;
		vnode_t		*vp;
		uint_t		n = ct->d_args.desc_num;

		didpp = ct->d_args.desc_ptr = (door_desc_t *)(ct->d_args.rbuf +
		    roundup(ct->d_args.data_size, sizeof (door_desc_t)));
		fpp = ct->d_fpp;

		while (n--) {
			struct file *fp;

			fp = *fpp;
			if (VOP_REALVP(fp->f_vnode, &vp, NULL))
				vp = fp->f_vnode;

			didpp->d_attributes = DOOR_HANDLE |
			    (VTOD(vp)->door_flags & DOOR_ATTR_MASK);
			didpp->d_data.d_handle = FTODH(fp);

			fpp++; didpp++;
		}
	}

	/* on return data is in rbuf */
	*param = ct->d_args;		/* structure assignment */

out:
	if (ct->d_fpp) {
		kmem_free(ct->d_fpp, ct->d_fpp_size);
		ct->d_fpp = NULL;
		ct->d_fpp_size = 0;
	}

	ct->d_upcall = 0;
	ct->d_noresults = 0;
	ct->d_buf = NULL;
	ct->d_bufsize = 0;
	return (error);
}

/*
 * Add a door to the per-process list of active doors for which the
 * process is a server.
 */
static void
door_list_insert(door_node_t *dp)
{
	proc_t *p = dp->door_target;

	ASSERT(MUTEX_HELD(&door_knob));
	dp->door_list = p->p_door_list;
	p->p_door_list = dp;
}

/*
 * Remove a door from the per-process list of active doors.
 */
void
door_list_delete(door_node_t *dp)
{
	door_node_t	**pp;

	ASSERT(MUTEX_HELD(&door_knob));
	/*
	 * Find the door in the list.  If the door belongs to another process,
	 * it's OK to use p_door_list since that process can't exit until all
	 * doors have been taken off the list (see door_exit).
	 */
	pp = &(dp->door_target->p_door_list);
	while (*pp != dp)
		pp = &((*pp)->door_list);

	/* found it, take it off the list */
	*pp = dp->door_list;
}


/*
 * External kernel interfaces for doors.  These functions are available
 * outside the doorfs module for use in creating and using doors from
 * within the kernel.
 */

/*
 * door_ki_upcall invokes a user-level door server from the kernel.
 */
int
door_ki_upcall(door_handle_t dh, door_arg_t *param)
{
	file_t	*fp = DHTOF(dh);
	vnode_t	*realvp;

	if (VOP_REALVP(fp->f_vnode, &realvp, NULL))
		realvp = fp->f_vnode;
	return (door_upcall(realvp, param));
}
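
/*
 * Illustrative sketch (editorial addition, not part of the original
 * source): a kernel consumer typically pairs door_ki_upcall() with
 * door_ki_open()/door_ki_rele() from further below, e.g. for a door
 * advertised at a hypothetical "/var/run/mysvc_door":
 *
 *	door_handle_t dh;
 *	door_arg_t darg;
 *	int err;
 *
 *	if ((err = door_ki_open("/var/run/mysvc_door", &dh)) != 0)
 *		return (err);
 *
 *	darg.data_ptr = (char *)&req;
 *	darg.data_size = sizeof (req);
 *	darg.desc_ptr = NULL;
 *	darg.desc_num = 0;
 *	darg.rbuf = (char *)&reply;	(64-bit aligned, per the
 *	darg.rsize = sizeof (reply);	 door_upcall() comment above)
 *
 *	err = door_ki_upcall(dh, &darg);
 *	if (err == 0 && darg.rbuf != (char *)&reply)
 *		kmem_free(darg.rbuf, darg.rsize);   (reply was reallocated)
 *	door_ki_rele(dh);
 */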

/*
 * Function call to create a "kernel" door server.  A kernel door
 * server provides a way for a user-level process to invoke a function
 * in the kernel through a door_call.  From the caller's point of
 * view, a kernel door server looks the same as a user-level one
 * (except the server pid is 0).  Unlike normal door calls, the
 * kernel door function is invoked via a normal function call in the
 * same thread and context as the caller.
 */
int
door_ki_create(void (*pc_cookie)(), void *data_cookie, uint_t attributes,
    door_handle_t *dhp)
{
	int err;
	file_t *fp;

	/* no DOOR_PRIVATE */
	if ((attributes & ~DOOR_KI_CREATE_MASK) ||
	    (attributes & (DOOR_UNREF | DOOR_UNREF_MULTI)) ==
	    (DOOR_UNREF | DOOR_UNREF_MULTI))
		return (EINVAL);

	err = door_create_common(pc_cookie, data_cookie, attributes,
	    1, NULL, &fp);
	if (err == 0 && (attributes & (DOOR_UNREF | DOOR_UNREF_MULTI)) &&
	    p0.p_unref_thread == 0) {
		/* need to create unref thread for process 0 */
		(void) thread_create(NULL, 0, door_unref_kernel, NULL, 0, &p0,
		    TS_RUN, minclsyspri);
	}
	if (err == 0) {
		*dhp = FTODH(fp);
	}
	return (err);
}

void
door_ki_hold(door_handle_t dh)
{
	file_t	*fp = DHTOF(dh);

	mutex_enter(&fp->f_tlock);
	fp->f_count++;
	mutex_exit(&fp->f_tlock);
}

void
door_ki_rele(door_handle_t dh)
{
	file_t	*fp = DHTOF(dh);

	(void) closef(fp);
}

int
door_ki_open(char *pathname, door_handle_t *dhp)
{
	file_t	*fp;
	vnode_t	*vp;
	int	err;

	if ((err = lookupname(pathname, UIO_SYSSPACE, FOLLOW, NULL, &vp)) != 0)
		return (err);
	if (err = VOP_OPEN(&vp, FREAD, kcred, NULL)) {
		VN_RELE(vp);
		return (err);
	}
	if (vp->v_type != VDOOR) {
		VN_RELE(vp);
		return (EINVAL);
	}
	if ((err = falloc(vp, FREAD | FWRITE, &fp, NULL)) != 0) {
		VN_RELE(vp);
		return (err);
	}
	/* falloc returns with f_tlock held on success */
	mutex_exit(&fp->f_tlock);
	*dhp = FTODH(fp);
	return (0);
}

int
door_ki_info(door_handle_t dh, struct door_info *dip)
{
	file_t	*fp = DHTOF(dh);
	vnode_t	*vp;

	if (VOP_REALVP(fp->f_vnode, &vp, NULL))
		vp = fp->f_vnode;
	if (vp->v_type != VDOOR)
		return (EINVAL);
	door_info_common(VTOD(vp), dip, fp);
	return (0);
}

door_handle_t
door_ki_lookup(int did)
{
	file_t		*fp;
	door_handle_t	dh;

	/* is the descriptor really a door? */
	if (door_lookup(did, &fp) == NULL)
		return (NULL);
	/* got the door, put a hold on it and release the fd */
	dh = FTODH(fp);
	door_ki_hold(dh);
	releasef(did);
	return (dh);
}

int
door_ki_setparam(door_handle_t dh, int type, size_t val)
{
	file_t	*fp = DHTOF(dh);
	vnode_t	*vp;

	if (VOP_REALVP(fp->f_vnode, &vp, NULL))
		vp = fp->f_vnode;
	if (vp->v_type != VDOOR)
		return (EINVAL);
	return (door_setparam_common(VTOD(vp), 1, type, val));
}

int
door_ki_getparam(door_handle_t dh, int type, size_t *out)
{
	file_t	*fp = DHTOF(dh);
	vnode_t	*vp;

	if (VOP_REALVP(fp->f_vnode, &vp, NULL))
		vp = fp->f_vnode;
	if (vp->v_type != VDOOR)
		return (EINVAL);
	return (door_getparam_common(VTOD(vp), type, out));
}
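
/*
 * Illustrative sketch (editorial addition, not part of the original
 * source): creating a kernel door server with door_ki_create() above.
 * "my_kdoor_proc" and "my_cookie" are hypothetical; as with the unref
 * handler earlier in this file, the kernel service function is invoked
 * directly in the caller's thread with the data cookie and a door_arg_t.
 *
 *	static void my_kdoor_proc();	(kernel door service function)
 *	static door_handle_t my_dh;
 *
 *	int
 *	my_kdoor_init(void)
 *	{
 *		int err;
 *
 *		err = door_ki_create(my_kdoor_proc, my_cookie, 0, &my_dh);
 *		if (err != 0)
 *			return (err);
 *		... hand my_dh to the subsystem that will invoke it ...
 *		return (0);
 *	}
 *
 * The handle is released with door_ki_rele(my_dh) when the subsystem is
 * torn down.
 */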