1 /* $NetBSD: uipc_usrreq.c,v 1.194 2019/07/29 09:42:17 maxv Exp $ */ 2 3 /*- 4 * Copyright (c) 1998, 2000, 2004, 2008, 2009 The NetBSD Foundation, Inc. 5 * All rights reserved. 6 * 7 * This code is derived from software contributed to The NetBSD Foundation 8 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility, 9 * NASA Ames Research Center, and by Andrew Doran. 10 * 11 * Redistribution and use in source and binary forms, with or without 12 * modification, are permitted provided that the following conditions 13 * are met: 14 * 1. Redistributions of source code must retain the above copyright 15 * notice, this list of conditions and the following disclaimer. 16 * 2. Redistributions in binary form must reproduce the above copyright 17 * notice, this list of conditions and the following disclaimer in the 18 * documentation and/or other materials provided with the distribution. 19 * 20 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 21 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 22 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 23 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 30 * POSSIBILITY OF SUCH DAMAGE. 31 */ 32 33 /* 34 * Copyright (c) 1982, 1986, 1989, 1991, 1993 35 * The Regents of the University of California. All rights reserved. 36 * 37 * Redistribution and use in source and binary forms, with or without 38 * modification, are permitted provided that the following conditions 39 * are met: 40 * 1. Redistributions of source code must retain the above copyright 41 * notice, this list of conditions and the following disclaimer. 42 * 2. Redistributions in binary form must reproduce the above copyright 43 * notice, this list of conditions and the following disclaimer in the 44 * documentation and/or other materials provided with the distribution. 45 * 3. Neither the name of the University nor the names of its contributors 46 * may be used to endorse or promote products derived from this software 47 * without specific prior written permission. 48 * 49 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 50 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 51 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 52 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 53 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 54 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 55 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 56 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 57 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 58 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 59 * SUCH DAMAGE. 60 * 61 * @(#)uipc_usrreq.c 8.9 (Berkeley) 5/14/95 62 */ 63 64 /* 65 * Copyright (c) 1997 Christopher G. Demetriou. All rights reserved. 
66 * 67 * Redistribution and use in source and binary forms, with or without 68 * modification, are permitted provided that the following conditions 69 * are met: 70 * 1. Redistributions of source code must retain the above copyright 71 * notice, this list of conditions and the following disclaimer. 72 * 2. Redistributions in binary form must reproduce the above copyright 73 * notice, this list of conditions and the following disclaimer in the 74 * documentation and/or other materials provided with the distribution. 75 * 3. All advertising materials mentioning features or use of this software 76 * must display the following acknowledgement: 77 * This product includes software developed by the University of 78 * California, Berkeley and its contributors. 79 * 4. Neither the name of the University nor the names of its contributors 80 * may be used to endorse or promote products derived from this software 81 * without specific prior written permission. 82 * 83 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 84 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 85 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 86 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 87 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 88 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 89 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 90 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 91 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 92 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 93 * SUCH DAMAGE. 94 * 95 * @(#)uipc_usrreq.c 8.9 (Berkeley) 5/14/95 96 */ 97 98 #include <sys/cdefs.h> 99 __KERNEL_RCSID(0, "$NetBSD: uipc_usrreq.c,v 1.194 2019/07/29 09:42:17 maxv Exp $"); 100 101 #ifdef _KERNEL_OPT 102 #include "opt_compat_netbsd.h" 103 #endif 104 105 #include <sys/param.h> 106 #include <sys/systm.h> 107 #include <sys/proc.h> 108 #include <sys/filedesc.h> 109 #include <sys/domain.h> 110 #include <sys/protosw.h> 111 #include <sys/socket.h> 112 #include <sys/socketvar.h> 113 #include <sys/unpcb.h> 114 #include <sys/un.h> 115 #include <sys/namei.h> 116 #include <sys/vnode.h> 117 #include <sys/file.h> 118 #include <sys/stat.h> 119 #include <sys/mbuf.h> 120 #include <sys/kauth.h> 121 #include <sys/kmem.h> 122 #include <sys/atomic.h> 123 #include <sys/uidinfo.h> 124 #include <sys/kernel.h> 125 #include <sys/kthread.h> 126 #include <sys/compat_stub.h> 127 128 #include <compat/sys/socket.h> 129 #include <compat/net/route_70.h> 130 131 /* 132 * Unix communications domain. 133 * 134 * TODO: 135 * RDM 136 * rethink name space problems 137 * need a proper out-of-band 138 * 139 * Notes on locking: 140 * 141 * The generic rules noted in uipc_socket2.c apply. In addition: 142 * 143 * o We have a global lock, uipc_lock. 144 * 145 * o All datagram sockets are locked by uipc_lock. 146 * 147 * o For stream socketpairs, the two endpoints are created sharing the same 148 * independent lock. Sockets presented to PRU_CONNECT2 must already have 149 * matching locks. 150 * 151 * o Stream sockets created via socket() start life with their own 152 * independent lock. 153 * 154 * o Stream connections to a named endpoint are slightly more complicated. 155 * Sockets that have called listen() have their lock pointer mutated to 156 * the global uipc_lock. 
When establishing a connection, the connecting 157 * socket also has its lock mutated to uipc_lock, which matches the head 158 * (listening socket). We create a new socket for accept() to return, and 159 * that also shares the head's lock. Until the connection is completely 160 * done on both ends, all three sockets are locked by uipc_lock. Once the 161 * connection is complete, the association with the head's lock is broken. 162 * The connecting socket and the socket returned from accept() have their 163 * lock pointers mutated away from uipc_lock, and back to the connecting 164 * socket's original, independent lock. The head continues to be locked 165 * by uipc_lock. 166 * 167 * o If uipc_lock is determined to be a significant source of contention, 168 * it could easily be hashed out. It is difficult to simply make it an 169 * independent lock because of visibility / garbage collection issues: 170 * if a socket has been associated with a lock at any point, that lock 171 * must remain valid until the socket is no longer visible in the system. 172 * The lock must not be freed or otherwise destroyed until any sockets 173 * that had referenced it have also been destroyed. 174 */ 175 const struct sockaddr_un sun_noname = { 176 .sun_len = offsetof(struct sockaddr_un, sun_path), 177 .sun_family = AF_LOCAL, 178 }; 179 ino_t unp_ino; /* prototype for fake inode numbers */ 180 181 static struct mbuf * unp_addsockcred(struct lwp *, struct mbuf *); 182 static void unp_discard_later(file_t *); 183 static void unp_discard_now(file_t *); 184 static void unp_disconnect1(struct unpcb *); 185 static bool unp_drop(struct unpcb *, int); 186 static int unp_internalize(struct mbuf **); 187 static void unp_mark(file_t *); 188 static void unp_scan(struct mbuf *, void (*)(file_t *), int); 189 static void unp_shutdown1(struct unpcb *); 190 static void unp_thread(void *); 191 static void unp_thread_kick(void); 192 193 static kmutex_t *uipc_lock; 194 195 static kcondvar_t unp_thread_cv; 196 static lwp_t *unp_thread_lwp; 197 static SLIST_HEAD(,file) unp_thread_discard; 198 static int unp_defer; 199 200 /* Compat interface */ 201 202 struct mbuf * stub_compat_70_unp_addsockcred(lwp_t *, struct mbuf *); 203 204 struct mbuf * stub_compat_70_unp_addsockcred(struct lwp *lwp, 205 struct mbuf *control) 206 { 207 208 /* just copy our initial argument */ 209 return control; 210 } 211 212 bool compat70_ocreds_valid = false; 213 214 /* 215 * Initialize Unix protocols. 216 */ 217 void 218 uipc_init(void) 219 { 220 int error; 221 222 uipc_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE); 223 cv_init(&unp_thread_cv, "unpgc"); 224 225 error = kthread_create(PRI_NONE, KTHREAD_MPSAFE, NULL, unp_thread, 226 NULL, &unp_thread_lwp, "unpgc"); 227 if (error != 0) 228 panic("uipc_init %d", error); 229 } 230 231 static void 232 unp_connid(struct lwp *l, struct unpcb *unp, int flags) 233 { 234 unp->unp_connid.unp_pid = l->l_proc->p_pid; 235 unp->unp_connid.unp_euid = kauth_cred_geteuid(l->l_cred); 236 unp->unp_connid.unp_egid = kauth_cred_getegid(l->l_cred); 237 unp->unp_flags |= flags; 238 } 239 240 /* 241 * A connection succeeded: disassociate both endpoints from the head's 242 * lock, and make them share their own lock. There is a race here: for 243 * a very brief time one endpoint will be locked by a different lock 244 * than the other end. However, since the current thread holds the old 245 * lock (the listening socket's lock, the head) access can still only be 246 * made to one side of the connection. 
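 *
 * For a named stream connection, the lock each socket is subject to
 * therefore evolves roughly as follows (an illustrative summary of the
 * rules above, not additional machinery):
 *
 *	server socket()		socket's own lock
 *	server listen()		uipc_lock
 *	client socket()		socket's own lock
 *	client connect()	uipc_lock; the original lock is parked in
 *				unp_streamlock
 *	connection completes	this routine hands the parked lock to both
 *				endpoints; the listener stays on uipc_lock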
247 */ 248 static void 249 unp_setpeerlocks(struct socket *so, struct socket *so2) 250 { 251 struct unpcb *unp; 252 kmutex_t *lock; 253 254 KASSERT(solocked2(so, so2)); 255 256 /* 257 * Bail out if either end of the socket is not yet fully 258 * connected or accepted. We only break the lock association 259 * with the head when the pair of sockets stand completely 260 * on their own. 261 */ 262 KASSERT(so->so_head == NULL); 263 if (so2->so_head != NULL) 264 return; 265 266 /* 267 * Drop references to old lock. A third reference (from the 268 * queue head) must be held as we still hold its lock. Bonus: 269 * we don't need to worry about garbage collecting the lock. 270 */ 271 lock = so->so_lock; 272 KASSERT(lock == uipc_lock); 273 mutex_obj_free(lock); 274 mutex_obj_free(lock); 275 276 /* 277 * Grab stream lock from the initiator and share between the two 278 * endpoints. Issue memory barrier to ensure all modifications 279 * become globally visible before the lock change. so2 is 280 * assumed not to have a stream lock, because it was created 281 * purely for the server side to accept this connection and 282 * started out life using the domain-wide lock. 283 */ 284 unp = sotounpcb(so); 285 KASSERT(unp->unp_streamlock != NULL); 286 KASSERT(sotounpcb(so2)->unp_streamlock == NULL); 287 lock = unp->unp_streamlock; 288 unp->unp_streamlock = NULL; 289 mutex_obj_hold(lock); 290 membar_exit(); 291 /* 292 * possible race if lock is not held - see comment in 293 * uipc_usrreq(PRU_ACCEPT). 294 */ 295 KASSERT(mutex_owned(lock)); 296 solockreset(so, lock); 297 solockreset(so2, lock); 298 } 299 300 /* 301 * Reset a socket's lock back to the domain-wide lock. 302 */ 303 static void 304 unp_resetlock(struct socket *so) 305 { 306 kmutex_t *olock, *nlock; 307 struct unpcb *unp; 308 309 KASSERT(solocked(so)); 310 311 olock = so->so_lock; 312 nlock = uipc_lock; 313 if (olock == nlock) 314 return; 315 unp = sotounpcb(so); 316 KASSERT(unp->unp_streamlock == NULL); 317 unp->unp_streamlock = olock; 318 mutex_obj_hold(nlock); 319 mutex_enter(nlock); 320 solockreset(so, nlock); 321 mutex_exit(olock); 322 } 323 324 static void 325 unp_free(struct unpcb *unp) 326 { 327 if (unp->unp_addr) 328 free(unp->unp_addr, M_SONAME); 329 if (unp->unp_streamlock != NULL) 330 mutex_obj_free(unp->unp_streamlock); 331 kmem_free(unp, sizeof(*unp)); 332 } 333 334 static int 335 unp_output(struct mbuf *m, struct mbuf *control, struct unpcb *unp) 336 { 337 struct socket *so2; 338 const struct sockaddr_un *sun; 339 340 /* XXX: server side closed the socket */ 341 if (unp->unp_conn == NULL) 342 return ECONNREFUSED; 343 so2 = unp->unp_conn->unp_socket; 344 345 KASSERT(solocked(so2)); 346 347 if (unp->unp_addr) 348 sun = unp->unp_addr; 349 else 350 sun = &sun_noname; 351 if (unp->unp_conn->unp_flags & UNP_WANTCRED) 352 control = unp_addsockcred(curlwp, control); 353 if (unp->unp_conn->unp_flags & UNP_OWANTCRED) 354 MODULE_HOOK_CALL(uipc_unp_70_hook, (curlwp, control), 355 stub_compat_70_unp_addsockcred(curlwp, control), control); 356 if (sbappendaddr(&so2->so_rcv, (const struct sockaddr *)sun, m, 357 control) == 0) { 358 unp_dispose(control); 359 m_freem(control); 360 m_freem(m); 361 /* Don't call soroverflow because we're returning this 362 * error directly to the sender. 
*/ 363 so2->so_rcv.sb_overflowed++; 364 return ENOBUFS; 365 } else { 366 sorwakeup(so2); 367 return 0; 368 } 369 } 370 371 static void 372 unp_setaddr(struct socket *so, struct sockaddr *nam, bool peeraddr) 373 { 374 const struct sockaddr_un *sun = NULL; 375 struct unpcb *unp; 376 377 KASSERT(solocked(so)); 378 unp = sotounpcb(so); 379 380 if (peeraddr) { 381 if (unp->unp_conn && unp->unp_conn->unp_addr) 382 sun = unp->unp_conn->unp_addr; 383 } else { 384 if (unp->unp_addr) 385 sun = unp->unp_addr; 386 } 387 if (sun == NULL) 388 sun = &sun_noname; 389 390 memcpy(nam, sun, sun->sun_len); 391 } 392 393 static int 394 unp_rcvd(struct socket *so, int flags, struct lwp *l) 395 { 396 struct unpcb *unp = sotounpcb(so); 397 struct socket *so2; 398 u_int newhiwat; 399 400 KASSERT(solocked(so)); 401 KASSERT(unp != NULL); 402 403 switch (so->so_type) { 404 405 case SOCK_DGRAM: 406 panic("uipc 1"); 407 /*NOTREACHED*/ 408 409 case SOCK_SEQPACKET: /* FALLTHROUGH */ 410 case SOCK_STREAM: 411 #define rcv (&so->so_rcv) 412 #define snd (&so2->so_snd) 413 if (unp->unp_conn == 0) 414 break; 415 so2 = unp->unp_conn->unp_socket; 416 KASSERT(solocked2(so, so2)); 417 /* 418 * Adjust backpressure on sender 419 * and wakeup any waiting to write. 420 */ 421 snd->sb_mbmax += unp->unp_mbcnt - rcv->sb_mbcnt; 422 unp->unp_mbcnt = rcv->sb_mbcnt; 423 newhiwat = snd->sb_hiwat + unp->unp_cc - rcv->sb_cc; 424 (void)chgsbsize(so2->so_uidinfo, 425 &snd->sb_hiwat, newhiwat, RLIM_INFINITY); 426 unp->unp_cc = rcv->sb_cc; 427 sowwakeup(so2); 428 #undef snd 429 #undef rcv 430 break; 431 432 default: 433 panic("uipc 2"); 434 } 435 436 return 0; 437 } 438 439 static int 440 unp_recvoob(struct socket *so, struct mbuf *m, int flags) 441 { 442 KASSERT(solocked(so)); 443 444 return EOPNOTSUPP; 445 } 446 447 static int 448 unp_send(struct socket *so, struct mbuf *m, struct sockaddr *nam, 449 struct mbuf *control, struct lwp *l) 450 { 451 struct unpcb *unp = sotounpcb(so); 452 int error = 0; 453 u_int newhiwat; 454 struct socket *so2; 455 456 KASSERT(solocked(so)); 457 KASSERT(unp != NULL); 458 KASSERT(m != NULL); 459 460 /* 461 * Note: unp_internalize() rejects any control message 462 * other than SCM_RIGHTS, and only allows one. This 463 * has the side-effect of preventing a caller from 464 * forging SCM_CREDS. 465 */ 466 if (control) { 467 sounlock(so); 468 error = unp_internalize(&control); 469 solock(so); 470 if (error != 0) { 471 m_freem(control); 472 m_freem(m); 473 return error; 474 } 475 } 476 477 switch (so->so_type) { 478 479 case SOCK_DGRAM: { 480 KASSERT(so->so_lock == uipc_lock); 481 if (nam) { 482 if ((so->so_state & SS_ISCONNECTED) != 0) 483 error = EISCONN; 484 else { 485 /* 486 * Note: once connected, the 487 * socket's lock must not be 488 * dropped until we have sent 489 * the message and disconnected. 490 * This is necessary to prevent 491 * intervening control ops, like 492 * another connection. 
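 *
 * From userland, this implicit connect/send/disconnect sequence is
 * what a plain sendto(2) with a destination address on an otherwise
 * unconnected datagram socket exercises; a minimal, hypothetical
 * sketch (the path name is illustrative only):
 *
 *	struct sockaddr_un sun = { .sun_family = AF_LOCAL };
 *
 *	strlcpy(sun.sun_path, "/tmp/log.sock", sizeof(sun.sun_path));
 *	sun.sun_len = SUN_LEN(&sun);
 *	sendto(s, buf, len, 0, (struct sockaddr *)&sun, sizeof(sun));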
493 */ 494 error = unp_connect(so, nam, l); 495 } 496 } else { 497 if ((so->so_state & SS_ISCONNECTED) == 0) 498 error = ENOTCONN; 499 } 500 if (error) { 501 unp_dispose(control); 502 m_freem(control); 503 m_freem(m); 504 return error; 505 } 506 error = unp_output(m, control, unp); 507 if (nam) 508 unp_disconnect1(unp); 509 break; 510 } 511 512 case SOCK_SEQPACKET: /* FALLTHROUGH */ 513 case SOCK_STREAM: 514 #define rcv (&so2->so_rcv) 515 #define snd (&so->so_snd) 516 if (unp->unp_conn == NULL) { 517 error = ENOTCONN; 518 break; 519 } 520 so2 = unp->unp_conn->unp_socket; 521 KASSERT(solocked2(so, so2)); 522 if (unp->unp_conn->unp_flags & UNP_WANTCRED) { 523 /* 524 * Credentials are passed only once on 525 * SOCK_STREAM and SOCK_SEQPACKET. 526 */ 527 unp->unp_conn->unp_flags &= ~UNP_WANTCRED; 528 control = unp_addsockcred(l, control); 529 } 530 if (unp->unp_conn->unp_flags & UNP_OWANTCRED) { 531 /* 532 * Credentials are passed only once on 533 * SOCK_STREAM and SOCK_SEQPACKET. 534 */ 535 unp->unp_conn->unp_flags &= ~UNP_OWANTCRED; 536 MODULE_HOOK_CALL(uipc_unp_70_hook, (curlwp, control), 537 stub_compat_70_unp_addsockcred(curlwp, control), 538 control); 539 } 540 /* 541 * Send to paired receive port, and then reduce 542 * send buffer hiwater marks to maintain backpressure. 543 * Wake up readers. 544 */ 545 if (control) { 546 if (sbappendcontrol(rcv, m, control) != 0) 547 control = NULL; 548 } else { 549 switch(so->so_type) { 550 case SOCK_SEQPACKET: 551 sbappendrecord(rcv, m); 552 break; 553 case SOCK_STREAM: 554 sbappend(rcv, m); 555 break; 556 default: 557 panic("uipc_usrreq"); 558 break; 559 } 560 } 561 snd->sb_mbmax -= 562 rcv->sb_mbcnt - unp->unp_conn->unp_mbcnt; 563 unp->unp_conn->unp_mbcnt = rcv->sb_mbcnt; 564 newhiwat = snd->sb_hiwat - 565 (rcv->sb_cc - unp->unp_conn->unp_cc); 566 (void)chgsbsize(so->so_uidinfo, 567 &snd->sb_hiwat, newhiwat, RLIM_INFINITY); 568 unp->unp_conn->unp_cc = rcv->sb_cc; 569 sorwakeup(so2); 570 #undef snd 571 #undef rcv 572 if (control != NULL) { 573 unp_dispose(control); 574 m_freem(control); 575 } 576 break; 577 578 default: 579 panic("uipc 4"); 580 } 581 582 return error; 583 } 584 585 static int 586 unp_sendoob(struct socket *so, struct mbuf *m, struct mbuf * control) 587 { 588 KASSERT(solocked(so)); 589 590 m_freem(m); 591 m_freem(control); 592 593 return EOPNOTSUPP; 594 } 595 596 /* 597 * Unix domain socket option processing. 
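 *
 * For reference, these options are exercised from userland roughly as
 * follows (a hypothetical sketch; LOCAL_* options use option level 0,
 * and struct unpcbid is assumed to come from <sys/un.h>):
 *
 *	int on = 1;
 *	struct unpcbid id;
 *	socklen_t len = sizeof(id);
 *
 *	setsockopt(s, 0, LOCAL_CREDS, &on, sizeof(on));
 *	getsockopt(s, 0, LOCAL_PEEREID, &id, &len);
 *	printf("peer pid %d euid %d\n", (int)id.unp_pid, (int)id.unp_euid);
 *
 * With LOCAL_CREDS set, each subsequently received message carries an
 * SCM_CREDS control message holding the sender's struct sockcred (see
 * unp_addsockcred() below).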
598 */ 599 int 600 uipc_ctloutput(int op, struct socket *so, struct sockopt *sopt) 601 { 602 struct unpcb *unp = sotounpcb(so); 603 int optval = 0, error = 0; 604 605 KASSERT(solocked(so)); 606 607 if (sopt->sopt_level != 0) { 608 error = ENOPROTOOPT; 609 } else switch (op) { 610 611 case PRCO_SETOPT: 612 switch (sopt->sopt_name) { 613 case LOCAL_OCREDS: 614 if (!compat70_ocreds_valid) { 615 error = ENOPROTOOPT; 616 break; 617 } 618 /* FALLTHROUGH */ 619 case LOCAL_CREDS: 620 case LOCAL_CONNWAIT: 621 error = sockopt_getint(sopt, &optval); 622 if (error) 623 break; 624 switch (sopt->sopt_name) { 625 #define OPTSET(bit) \ 626 if (optval) \ 627 unp->unp_flags |= (bit); \ 628 else \ 629 unp->unp_flags &= ~(bit); 630 631 case LOCAL_CREDS: 632 OPTSET(UNP_WANTCRED); 633 break; 634 case LOCAL_CONNWAIT: 635 OPTSET(UNP_CONNWAIT); 636 break; 637 case LOCAL_OCREDS: 638 OPTSET(UNP_OWANTCRED); 639 break; 640 } 641 break; 642 #undef OPTSET 643 644 default: 645 error = ENOPROTOOPT; 646 break; 647 } 648 break; 649 650 case PRCO_GETOPT: 651 sounlock(so); 652 switch (sopt->sopt_name) { 653 case LOCAL_PEEREID: 654 if (unp->unp_flags & UNP_EIDSVALID) { 655 error = sockopt_set(sopt, &unp->unp_connid, 656 sizeof(unp->unp_connid)); 657 } else { 658 error = EINVAL; 659 } 660 break; 661 case LOCAL_CREDS: 662 #define OPTBIT(bit) (unp->unp_flags & (bit) ? 1 : 0) 663 664 optval = OPTBIT(UNP_WANTCRED); 665 error = sockopt_setint(sopt, optval); 666 break; 667 case LOCAL_OCREDS: 668 if (compat70_ocreds_valid) { 669 optval = OPTBIT(UNP_OWANTCRED); 670 error = sockopt_setint(sopt, optval); 671 break; 672 } 673 #undef OPTBIT 674 /* FALLTHROUGH */ 675 default: 676 error = ENOPROTOOPT; 677 break; 678 } 679 solock(so); 680 break; 681 } 682 return (error); 683 } 684 685 /* 686 * Both send and receive buffers are allocated PIPSIZ bytes of buffering 687 * for stream sockets, although the total for sender and receiver is 688 * actually only PIPSIZ. 689 * Datagram sockets really use the sendspace as the maximum datagram size, 690 * and don't really want to reserve the sendspace. Their recvspace should 691 * be large enough for at least one max-size datagram plus address. 
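 *
 * These defaults are exported via unp_sysctl_create() at the end of
 * this file and can be inspected or tuned at run time, presumably
 * along the lines of:
 *
 *	sysctl net.local.stream.sendspace
 *	sysctl -w net.local.dgram.recvspace=32768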
692 */ 693 #ifndef PIPSIZ 694 #define PIPSIZ 8192 695 #endif 696 u_long unpst_sendspace = PIPSIZ; 697 u_long unpst_recvspace = PIPSIZ; 698 u_long unpdg_sendspace = 2*1024; /* really max datagram size */ 699 u_long unpdg_recvspace = 16*1024; 700 701 u_int unp_rights; /* files in flight */ 702 u_int unp_rights_ratio = 2; /* limit, fraction of maxfiles */ 703 704 static int 705 unp_attach(struct socket *so, int proto) 706 { 707 struct unpcb *unp = sotounpcb(so); 708 u_long sndspc, rcvspc; 709 int error; 710 711 KASSERT(unp == NULL); 712 713 switch (so->so_type) { 714 case SOCK_SEQPACKET: 715 /* FALLTHROUGH */ 716 case SOCK_STREAM: 717 if (so->so_lock == NULL) { 718 so->so_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE); 719 solock(so); 720 } 721 sndspc = unpst_sendspace; 722 rcvspc = unpst_recvspace; 723 break; 724 725 case SOCK_DGRAM: 726 if (so->so_lock == NULL) { 727 mutex_obj_hold(uipc_lock); 728 so->so_lock = uipc_lock; 729 solock(so); 730 } 731 sndspc = unpdg_sendspace; 732 rcvspc = unpdg_recvspace; 733 break; 734 735 default: 736 panic("unp_attach"); 737 } 738 739 if (so->so_snd.sb_hiwat == 0 || so->so_rcv.sb_hiwat == 0) { 740 error = soreserve(so, sndspc, rcvspc); 741 if (error) { 742 return error; 743 } 744 } 745 746 unp = kmem_zalloc(sizeof(*unp), KM_SLEEP); 747 nanotime(&unp->unp_ctime); 748 unp->unp_socket = so; 749 so->so_pcb = unp; 750 751 KASSERT(solocked(so)); 752 return 0; 753 } 754 755 static void 756 unp_detach(struct socket *so) 757 { 758 struct unpcb *unp; 759 vnode_t *vp; 760 761 unp = sotounpcb(so); 762 KASSERT(unp != NULL); 763 KASSERT(solocked(so)); 764 retry: 765 if ((vp = unp->unp_vnode) != NULL) { 766 sounlock(so); 767 /* Acquire v_interlock to protect against unp_connect(). */ 768 /* XXXAD racy */ 769 mutex_enter(vp->v_interlock); 770 vp->v_socket = NULL; 771 mutex_exit(vp->v_interlock); 772 vrele(vp); 773 solock(so); 774 unp->unp_vnode = NULL; 775 } 776 if (unp->unp_conn) 777 unp_disconnect1(unp); 778 while (unp->unp_refs) { 779 KASSERT(solocked2(so, unp->unp_refs->unp_socket)); 780 if (unp_drop(unp->unp_refs, ECONNRESET)) { 781 solock(so); 782 goto retry; 783 } 784 } 785 soisdisconnected(so); 786 so->so_pcb = NULL; 787 if (unp_rights) { 788 /* 789 * Normally the receive buffer is flushed later, in sofree, 790 * but if our receive buffer holds references to files that 791 * are now garbage, we will enqueue those file references to 792 * the garbage collector and kick it into action. 793 */ 794 sorflush(so); 795 unp_free(unp); 796 unp_thread_kick(); 797 } else 798 unp_free(unp); 799 } 800 801 static int 802 unp_accept(struct socket *so, struct sockaddr *nam) 803 { 804 struct unpcb *unp = sotounpcb(so); 805 struct socket *so2; 806 807 KASSERT(solocked(so)); 808 KASSERT(nam != NULL); 809 810 /* XXX code review required to determine if unp can ever be NULL */ 811 if (unp == NULL) 812 return EINVAL; 813 814 KASSERT(so->so_lock == uipc_lock); 815 /* 816 * Mark the initiating STREAM socket as connected *ONLY* 817 * after it's been accepted. This prevents a client from 818 * overrunning a server and receiving ECONNREFUSED. 819 */ 820 if (unp->unp_conn == NULL) { 821 /* 822 * This will use the empty socket and will not 823 * allocate. 
824 */
825 unp_setaddr(so, nam, true);
826 return 0;
827 }
828 so2 = unp->unp_conn->unp_socket;
829 if (so2->so_state & SS_ISCONNECTING) {
830 KASSERT(solocked2(so, so->so_head));
831 KASSERT(solocked2(so2, so->so_head));
832 soisconnected(so2);
833 }
834 /*
835 * If the connection is fully established, break the
836 * association with uipc_lock and give the connected
837 * pair a separate lock to share.
838 * There is a race here: sotounpcb(so2)->unp_streamlock
839 * is not locked, so when changing so2->so_lock
840 * another thread can grab it while so->so_lock is still
841 * pointing to the (locked) uipc_lock.
842 * This should be harmless, except that this makes
843 * solocked2() and solocked() unreliable.
844 * Another problem is that unp_setaddr() expects
845 * the socket locked. Grabbing sotounpcb(so2)->unp_streamlock
846 * fixes both issues.
847 */
848 mutex_enter(sotounpcb(so2)->unp_streamlock);
849 unp_setpeerlocks(so2, so);
850 /*
851 * Only now return peer's address, as we may need to
852 * block in order to allocate memory.
853 *
854 * XXX Minor race: connection can be broken while
855 * lock is dropped in unp_setaddr(). We will return
856 * error == 0 and sun_noname as the peer address.
857 */
858 unp_setaddr(so, nam, true);
859 /* so_lock now points to unp_streamlock */
860 mutex_exit(so2->so_lock);
861 return 0;
862 }
863 
864 static int
865 unp_ioctl(struct socket *so, u_long cmd, void *nam, struct ifnet *ifp)
866 {
867 return EOPNOTSUPP;
868 }
869 
870 static int
871 unp_stat(struct socket *so, struct stat *ub)
872 {
873 struct unpcb *unp;
874 struct socket *so2;
875 
876 KASSERT(solocked(so));
877 
878 unp = sotounpcb(so);
879 if (unp == NULL)
880 return EINVAL;
881 
882 ub->st_blksize = so->so_snd.sb_hiwat;
883 switch (so->so_type) {
884 case SOCK_SEQPACKET: /* FALLTHROUGH */
885 case SOCK_STREAM:
886 if (unp->unp_conn == 0)
887 break;
888 
889 so2 = unp->unp_conn->unp_socket;
890 KASSERT(solocked2(so, so2));
891 ub->st_blksize += so2->so_rcv.sb_cc;
892 break;
893 default:
894 break;
895 }
896 ub->st_dev = NODEV;
897 if (unp->unp_ino == 0)
898 unp->unp_ino = unp_ino++;
899 ub->st_atimespec = ub->st_mtimespec = ub->st_ctimespec = unp->unp_ctime;
900 ub->st_ino = unp->unp_ino;
901 return (0);
902 }
903 
904 static int
905 unp_peeraddr(struct socket *so, struct sockaddr *nam)
906 {
907 KASSERT(solocked(so));
908 KASSERT(sotounpcb(so) != NULL);
909 KASSERT(nam != NULL);
910 
911 unp_setaddr(so, nam, true);
912 return 0;
913 }
914 
915 static int
916 unp_sockaddr(struct socket *so, struct sockaddr *nam)
917 {
918 KASSERT(solocked(so));
919 KASSERT(sotounpcb(so) != NULL);
920 KASSERT(nam != NULL);
921 
922 unp_setaddr(so, nam, false);
923 return 0;
924 }
925 
926 /*
927 * we only need to perform this allocation until syscalls other than
928 * bind are adjusted to use sockaddr_big.
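 *
 * The buffer handed to us here is whatever userland passed to bind(2),
 * e.g. (a hypothetical sketch; the path is illustrative only):
 *
 *	struct sockaddr_un sun;
 *
 *	memset(&sun, 0, sizeof(sun));
 *	sun.sun_family = AF_LOCAL;
 *	strlcpy(sun.sun_path, "/var/run/example.sock", sizeof(sun.sun_path));
 *	sun.sun_len = SUN_LEN(&sun);
 *	bind(s, (struct sockaddr *)&sun, sizeof(sun));
 *
 * makeun_sb() copies that buffer and guarantees that sun_path is NUL
 * terminated before it is used for the namei() lookup.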
929 */ 930 static struct sockaddr_un * 931 makeun_sb(struct sockaddr *nam, size_t *addrlen) 932 { 933 struct sockaddr_un *sun; 934 935 *addrlen = nam->sa_len + 1; 936 sun = malloc(*addrlen, M_SONAME, M_WAITOK); 937 memcpy(sun, nam, nam->sa_len); 938 *(((char *)sun) + nam->sa_len) = '\0'; 939 return sun; 940 } 941 942 static int 943 unp_bind(struct socket *so, struct sockaddr *nam, struct lwp *l) 944 { 945 struct sockaddr_un *sun; 946 struct unpcb *unp; 947 vnode_t *vp; 948 struct vattr vattr; 949 size_t addrlen; 950 int error; 951 struct pathbuf *pb; 952 struct nameidata nd; 953 proc_t *p; 954 955 unp = sotounpcb(so); 956 957 KASSERT(solocked(so)); 958 KASSERT(unp != NULL); 959 KASSERT(nam != NULL); 960 961 if (unp->unp_vnode != NULL) 962 return (EINVAL); 963 if ((unp->unp_flags & UNP_BUSY) != 0) { 964 /* 965 * EALREADY may not be strictly accurate, but since this 966 * is a major application error it's hardly a big deal. 967 */ 968 return (EALREADY); 969 } 970 unp->unp_flags |= UNP_BUSY; 971 sounlock(so); 972 973 p = l->l_proc; 974 sun = makeun_sb(nam, &addrlen); 975 976 pb = pathbuf_create(sun->sun_path); 977 if (pb == NULL) { 978 error = ENOMEM; 979 goto bad; 980 } 981 NDINIT(&nd, CREATE, FOLLOW | LOCKPARENT | TRYEMULROOT, pb); 982 983 /* SHOULD BE ABLE TO ADOPT EXISTING AND wakeup() ALA FIFO's */ 984 if ((error = namei(&nd)) != 0) { 985 pathbuf_destroy(pb); 986 goto bad; 987 } 988 vp = nd.ni_vp; 989 if (vp != NULL) { 990 VOP_ABORTOP(nd.ni_dvp, &nd.ni_cnd); 991 if (nd.ni_dvp == vp) 992 vrele(nd.ni_dvp); 993 else 994 vput(nd.ni_dvp); 995 vrele(vp); 996 pathbuf_destroy(pb); 997 error = EADDRINUSE; 998 goto bad; 999 } 1000 vattr_null(&vattr); 1001 vattr.va_type = VSOCK; 1002 vattr.va_mode = ACCESSPERMS & ~(p->p_cwdi->cwdi_cmask); 1003 error = VOP_CREATE(nd.ni_dvp, &nd.ni_vp, &nd.ni_cnd, &vattr); 1004 if (error) { 1005 vput(nd.ni_dvp); 1006 pathbuf_destroy(pb); 1007 goto bad; 1008 } 1009 vp = nd.ni_vp; 1010 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 1011 solock(so); 1012 vp->v_socket = unp->unp_socket; 1013 unp->unp_vnode = vp; 1014 unp->unp_addrlen = addrlen; 1015 unp->unp_addr = sun; 1016 VOP_UNLOCK(vp); 1017 vput(nd.ni_dvp); 1018 unp->unp_flags &= ~UNP_BUSY; 1019 pathbuf_destroy(pb); 1020 return (0); 1021 1022 bad: 1023 free(sun, M_SONAME); 1024 solock(so); 1025 unp->unp_flags &= ~UNP_BUSY; 1026 return (error); 1027 } 1028 1029 static int 1030 unp_listen(struct socket *so, struct lwp *l) 1031 { 1032 struct unpcb *unp = sotounpcb(so); 1033 1034 KASSERT(solocked(so)); 1035 KASSERT(unp != NULL); 1036 1037 /* 1038 * If the socket can accept a connection, it must be 1039 * locked by uipc_lock. 
1040 */ 1041 unp_resetlock(so); 1042 if (unp->unp_vnode == NULL) 1043 return EINVAL; 1044 1045 unp_connid(l, unp, UNP_EIDSBIND); 1046 return 0; 1047 } 1048 1049 static int 1050 unp_disconnect(struct socket *so) 1051 { 1052 KASSERT(solocked(so)); 1053 KASSERT(sotounpcb(so) != NULL); 1054 1055 unp_disconnect1(sotounpcb(so)); 1056 return 0; 1057 } 1058 1059 static int 1060 unp_shutdown(struct socket *so) 1061 { 1062 KASSERT(solocked(so)); 1063 KASSERT(sotounpcb(so) != NULL); 1064 1065 socantsendmore(so); 1066 unp_shutdown1(sotounpcb(so)); 1067 return 0; 1068 } 1069 1070 static int 1071 unp_abort(struct socket *so) 1072 { 1073 KASSERT(solocked(so)); 1074 KASSERT(sotounpcb(so) != NULL); 1075 1076 (void)unp_drop(sotounpcb(so), ECONNABORTED); 1077 KASSERT(so->so_head == NULL); 1078 KASSERT(so->so_pcb != NULL); 1079 unp_detach(so); 1080 return 0; 1081 } 1082 1083 static int 1084 unp_connect1(struct socket *so, struct socket *so2, struct lwp *l) 1085 { 1086 struct unpcb *unp = sotounpcb(so); 1087 struct unpcb *unp2; 1088 1089 if (so2->so_type != so->so_type) 1090 return EPROTOTYPE; 1091 1092 /* 1093 * All three sockets involved must be locked by same lock: 1094 * 1095 * local endpoint (so) 1096 * remote endpoint (so2) 1097 * queue head (so2->so_head, only if PR_CONNREQUIRED) 1098 */ 1099 KASSERT(solocked2(so, so2)); 1100 KASSERT(so->so_head == NULL); 1101 if (so2->so_head != NULL) { 1102 KASSERT(so2->so_lock == uipc_lock); 1103 KASSERT(solocked2(so2, so2->so_head)); 1104 } 1105 1106 unp2 = sotounpcb(so2); 1107 unp->unp_conn = unp2; 1108 1109 switch (so->so_type) { 1110 1111 case SOCK_DGRAM: 1112 unp->unp_nextref = unp2->unp_refs; 1113 unp2->unp_refs = unp; 1114 soisconnected(so); 1115 break; 1116 1117 case SOCK_SEQPACKET: /* FALLTHROUGH */ 1118 case SOCK_STREAM: 1119 1120 /* 1121 * SOCK_SEQPACKET and SOCK_STREAM cases are handled by callers 1122 * which are unp_connect() or unp_connect2(). 1123 */ 1124 1125 break; 1126 1127 default: 1128 panic("unp_connect1"); 1129 } 1130 1131 return 0; 1132 } 1133 1134 int 1135 unp_connect(struct socket *so, struct sockaddr *nam, struct lwp *l) 1136 { 1137 struct sockaddr_un *sun; 1138 vnode_t *vp; 1139 struct socket *so2, *so3; 1140 struct unpcb *unp, *unp2, *unp3; 1141 size_t addrlen; 1142 int error; 1143 struct pathbuf *pb; 1144 struct nameidata nd; 1145 1146 unp = sotounpcb(so); 1147 if ((unp->unp_flags & UNP_BUSY) != 0) { 1148 /* 1149 * EALREADY may not be strictly accurate, but since this 1150 * is a major application error it's hardly a big deal. 1151 */ 1152 return (EALREADY); 1153 } 1154 unp->unp_flags |= UNP_BUSY; 1155 sounlock(so); 1156 1157 sun = makeun_sb(nam, &addrlen); 1158 pb = pathbuf_create(sun->sun_path); 1159 if (pb == NULL) { 1160 error = ENOMEM; 1161 goto bad2; 1162 } 1163 1164 NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | TRYEMULROOT, pb); 1165 1166 if ((error = namei(&nd)) != 0) { 1167 pathbuf_destroy(pb); 1168 goto bad2; 1169 } 1170 vp = nd.ni_vp; 1171 pathbuf_destroy(pb); 1172 if (vp->v_type != VSOCK) { 1173 error = ENOTSOCK; 1174 goto bad; 1175 } 1176 if ((error = VOP_ACCESS(vp, VWRITE, l->l_cred)) != 0) 1177 goto bad; 1178 /* Acquire v_interlock to protect against unp_detach(). 
*/ 1179 mutex_enter(vp->v_interlock); 1180 so2 = vp->v_socket; 1181 if (so2 == NULL) { 1182 mutex_exit(vp->v_interlock); 1183 error = ECONNREFUSED; 1184 goto bad; 1185 } 1186 if (so->so_type != so2->so_type) { 1187 mutex_exit(vp->v_interlock); 1188 error = EPROTOTYPE; 1189 goto bad; 1190 } 1191 solock(so); 1192 unp_resetlock(so); 1193 mutex_exit(vp->v_interlock); 1194 if ((so->so_proto->pr_flags & PR_CONNREQUIRED) != 0) { 1195 /* 1196 * This may seem somewhat fragile but is OK: if we can 1197 * see SO_ACCEPTCONN set on the endpoint, then it must 1198 * be locked by the domain-wide uipc_lock. 1199 */ 1200 KASSERT((so2->so_options & SO_ACCEPTCONN) == 0 || 1201 so2->so_lock == uipc_lock); 1202 if ((so2->so_options & SO_ACCEPTCONN) == 0 || 1203 (so3 = sonewconn(so2, false)) == NULL) { 1204 error = ECONNREFUSED; 1205 sounlock(so); 1206 goto bad; 1207 } 1208 unp2 = sotounpcb(so2); 1209 unp3 = sotounpcb(so3); 1210 if (unp2->unp_addr) { 1211 unp3->unp_addr = malloc(unp2->unp_addrlen, 1212 M_SONAME, M_WAITOK); 1213 memcpy(unp3->unp_addr, unp2->unp_addr, 1214 unp2->unp_addrlen); 1215 unp3->unp_addrlen = unp2->unp_addrlen; 1216 } 1217 unp3->unp_flags = unp2->unp_flags; 1218 so2 = so3; 1219 /* 1220 * The connector's (client's) credentials are copied from its 1221 * process structure at the time of connect() (which is now). 1222 */ 1223 unp_connid(l, unp3, UNP_EIDSVALID); 1224 /* 1225 * The receiver's (server's) credentials are copied from the 1226 * unp_peercred member of socket on which the former called 1227 * listen(); unp_listen() cached that process's credentials 1228 * at that time so we can use them now. 1229 */ 1230 if (unp2->unp_flags & UNP_EIDSBIND) { 1231 memcpy(&unp->unp_connid, &unp2->unp_connid, 1232 sizeof(unp->unp_connid)); 1233 unp->unp_flags |= UNP_EIDSVALID; 1234 } 1235 } 1236 error = unp_connect1(so, so2, l); 1237 if (error) { 1238 sounlock(so); 1239 goto bad; 1240 } 1241 unp2 = sotounpcb(so2); 1242 switch (so->so_type) { 1243 1244 /* 1245 * SOCK_DGRAM and default cases are handled in prior call to 1246 * unp_connect1(), do not add a default case without fixing 1247 * unp_connect1(). 1248 */ 1249 1250 case SOCK_SEQPACKET: /* FALLTHROUGH */ 1251 case SOCK_STREAM: 1252 unp2->unp_conn = unp; 1253 if ((unp->unp_flags | unp2->unp_flags) & UNP_CONNWAIT) 1254 soisconnecting(so); 1255 else 1256 soisconnected(so); 1257 soisconnected(so2); 1258 /* 1259 * If the connection is fully established, break the 1260 * association with uipc_lock and give the connected 1261 * pair a separate lock to share. 1262 */ 1263 KASSERT(so2->so_head != NULL); 1264 unp_setpeerlocks(so, so2); 1265 break; 1266 1267 } 1268 sounlock(so); 1269 bad: 1270 vput(vp); 1271 bad2: 1272 free(sun, M_SONAME); 1273 solock(so); 1274 unp->unp_flags &= ~UNP_BUSY; 1275 return (error); 1276 } 1277 1278 int 1279 unp_connect2(struct socket *so, struct socket *so2) 1280 { 1281 struct unpcb *unp = sotounpcb(so); 1282 struct unpcb *unp2; 1283 int error = 0; 1284 1285 KASSERT(solocked2(so, so2)); 1286 1287 error = unp_connect1(so, so2, curlwp); 1288 if (error) 1289 return error; 1290 1291 unp2 = sotounpcb(so2); 1292 switch (so->so_type) { 1293 1294 /* 1295 * SOCK_DGRAM and default cases are handled in prior call to 1296 * unp_connect1(), do not add a default case without fixing 1297 * unp_connect1(). 
1298 */ 1299 1300 case SOCK_SEQPACKET: /* FALLTHROUGH */ 1301 case SOCK_STREAM: 1302 unp2->unp_conn = unp; 1303 soisconnected(so); 1304 soisconnected(so2); 1305 break; 1306 1307 } 1308 return error; 1309 } 1310 1311 static void 1312 unp_disconnect1(struct unpcb *unp) 1313 { 1314 struct unpcb *unp2 = unp->unp_conn; 1315 struct socket *so; 1316 1317 if (unp2 == 0) 1318 return; 1319 unp->unp_conn = 0; 1320 so = unp->unp_socket; 1321 switch (so->so_type) { 1322 case SOCK_DGRAM: 1323 if (unp2->unp_refs == unp) 1324 unp2->unp_refs = unp->unp_nextref; 1325 else { 1326 unp2 = unp2->unp_refs; 1327 for (;;) { 1328 KASSERT(solocked2(so, unp2->unp_socket)); 1329 if (unp2 == 0) 1330 panic("unp_disconnect1"); 1331 if (unp2->unp_nextref == unp) 1332 break; 1333 unp2 = unp2->unp_nextref; 1334 } 1335 unp2->unp_nextref = unp->unp_nextref; 1336 } 1337 unp->unp_nextref = 0; 1338 so->so_state &= ~SS_ISCONNECTED; 1339 break; 1340 1341 case SOCK_SEQPACKET: /* FALLTHROUGH */ 1342 case SOCK_STREAM: 1343 KASSERT(solocked2(so, unp2->unp_socket)); 1344 soisdisconnected(so); 1345 unp2->unp_conn = 0; 1346 soisdisconnected(unp2->unp_socket); 1347 break; 1348 } 1349 } 1350 1351 static void 1352 unp_shutdown1(struct unpcb *unp) 1353 { 1354 struct socket *so; 1355 1356 switch(unp->unp_socket->so_type) { 1357 case SOCK_SEQPACKET: /* FALLTHROUGH */ 1358 case SOCK_STREAM: 1359 if (unp->unp_conn && (so = unp->unp_conn->unp_socket)) 1360 socantrcvmore(so); 1361 break; 1362 default: 1363 break; 1364 } 1365 } 1366 1367 static bool 1368 unp_drop(struct unpcb *unp, int errno) 1369 { 1370 struct socket *so = unp->unp_socket; 1371 1372 KASSERT(solocked(so)); 1373 1374 so->so_error = errno; 1375 unp_disconnect1(unp); 1376 if (so->so_head) { 1377 so->so_pcb = NULL; 1378 /* sofree() drops the socket lock */ 1379 sofree(so); 1380 unp_free(unp); 1381 return true; 1382 } 1383 return false; 1384 } 1385 1386 #ifdef notdef 1387 unp_drain(void) 1388 { 1389 1390 } 1391 #endif 1392 1393 int 1394 unp_externalize(struct mbuf *rights, struct lwp *l, int flags) 1395 { 1396 struct cmsghdr * const cm = mtod(rights, struct cmsghdr *); 1397 struct proc * const p = l->l_proc; 1398 file_t **rp; 1399 int error = 0; 1400 1401 const size_t nfds = (cm->cmsg_len - CMSG_ALIGN(sizeof(*cm))) / 1402 sizeof(file_t *); 1403 if (nfds == 0) 1404 goto noop; 1405 1406 int * const fdp = kmem_alloc(nfds * sizeof(int), KM_SLEEP); 1407 rw_enter(&p->p_cwdi->cwdi_lock, RW_READER); 1408 1409 /* Make sure the recipient should be able to see the files.. */ 1410 rp = (file_t **)CMSG_DATA(cm); 1411 for (size_t i = 0; i < nfds; i++) { 1412 file_t * const fp = *rp++; 1413 if (fp == NULL) { 1414 error = EINVAL; 1415 goto out; 1416 } 1417 /* 1418 * If we are in a chroot'ed directory, and 1419 * someone wants to pass us a directory, make 1420 * sure it's inside the subtree we're allowed 1421 * to access. 1422 */ 1423 if (p->p_cwdi->cwdi_rdir != NULL && fp->f_type == DTYPE_VNODE) { 1424 vnode_t *vp = fp->f_vnode; 1425 if ((vp->v_type == VDIR) && 1426 !vn_isunder(vp, p->p_cwdi->cwdi_rdir, l)) { 1427 error = EPERM; 1428 goto out; 1429 } 1430 } 1431 } 1432 1433 restart: 1434 /* 1435 * First loop -- allocate file descriptor table slots for the 1436 * new files. 1437 */ 1438 for (size_t i = 0; i < nfds; i++) { 1439 if ((error = fd_alloc(p, 0, &fdp[i])) != 0) { 1440 /* 1441 * Back out what we've done so far. 
1442 */ 1443 while (i-- > 0) { 1444 fd_abort(p, NULL, fdp[i]); 1445 } 1446 if (error == ENOSPC) { 1447 fd_tryexpand(p); 1448 error = 0; 1449 goto restart; 1450 } 1451 /* 1452 * This is the error that has historically 1453 * been returned, and some callers may 1454 * expect it. 1455 */ 1456 error = EMSGSIZE; 1457 goto out; 1458 } 1459 } 1460 1461 /* 1462 * Now that adding them has succeeded, update all of the 1463 * file passing state and affix the descriptors. 1464 */ 1465 rp = (file_t **)CMSG_DATA(cm); 1466 int *ofdp = (int *)CMSG_DATA(cm); 1467 for (size_t i = 0; i < nfds; i++) { 1468 file_t * const fp = *rp++; 1469 const int fd = fdp[i]; 1470 atomic_dec_uint(&unp_rights); 1471 fd_set_exclose(l, fd, (flags & O_CLOEXEC) != 0); 1472 fd_affix(p, fp, fd); 1473 /* 1474 * Done with this file pointer, replace it with a fd; 1475 */ 1476 *ofdp++ = fd; 1477 mutex_enter(&fp->f_lock); 1478 fp->f_msgcount--; 1479 mutex_exit(&fp->f_lock); 1480 /* 1481 * Note that fd_affix() adds a reference to the file. 1482 * The file may already have been closed by another 1483 * LWP in the process, so we must drop the reference 1484 * added by unp_internalize() with closef(). 1485 */ 1486 closef(fp); 1487 } 1488 1489 /* 1490 * Adjust length, in case of transition from large file_t 1491 * pointers to ints. 1492 */ 1493 if (sizeof(file_t *) != sizeof(int)) { 1494 cm->cmsg_len = CMSG_LEN(nfds * sizeof(int)); 1495 rights->m_len = CMSG_SPACE(nfds * sizeof(int)); 1496 } 1497 out: 1498 if (__predict_false(error != 0)) { 1499 file_t **const fpp = (file_t **)CMSG_DATA(cm); 1500 for (size_t i = 0; i < nfds; i++) 1501 unp_discard_now(fpp[i]); 1502 /* 1503 * Truncate the array so that nobody will try to interpret 1504 * what is now garbage in it. 1505 */ 1506 cm->cmsg_len = CMSG_LEN(0); 1507 rights->m_len = CMSG_SPACE(0); 1508 } 1509 rw_exit(&p->p_cwdi->cwdi_lock); 1510 kmem_free(fdp, nfds * sizeof(int)); 1511 1512 noop: 1513 /* 1514 * Don't disclose kernel memory in the alignment space. 1515 */ 1516 KASSERT(cm->cmsg_len <= rights->m_len); 1517 memset(&mtod(rights, char *)[cm->cmsg_len], 0, rights->m_len - 1518 cm->cmsg_len); 1519 return error; 1520 } 1521 1522 static int 1523 unp_internalize(struct mbuf **controlp) 1524 { 1525 filedesc_t *fdescp = curlwp->l_fd; 1526 struct mbuf *control = *controlp; 1527 struct cmsghdr *newcm, *cm = mtod(control, struct cmsghdr *); 1528 file_t **rp, **files; 1529 file_t *fp; 1530 int i, fd, *fdp; 1531 int nfds, error; 1532 u_int maxmsg; 1533 1534 error = 0; 1535 newcm = NULL; 1536 1537 /* Sanity check the control message header. */ 1538 if (cm->cmsg_type != SCM_RIGHTS || cm->cmsg_level != SOL_SOCKET || 1539 cm->cmsg_len > control->m_len || 1540 cm->cmsg_len < CMSG_ALIGN(sizeof(*cm))) 1541 return (EINVAL); 1542 1543 /* 1544 * Verify that the file descriptors are valid, and acquire 1545 * a reference to each. 1546 */ 1547 nfds = (cm->cmsg_len - CMSG_ALIGN(sizeof(*cm))) / sizeof(int); 1548 fdp = (int *)CMSG_DATA(cm); 1549 maxmsg = maxfiles / unp_rights_ratio; 1550 for (i = 0; i < nfds; i++) { 1551 fd = *fdp++; 1552 if (atomic_inc_uint_nv(&unp_rights) > maxmsg) { 1553 atomic_dec_uint(&unp_rights); 1554 nfds = i; 1555 error = EAGAIN; 1556 goto out; 1557 } 1558 if ((fp = fd_getfile(fd)) == NULL 1559 || fp->f_type == DTYPE_KQUEUE) { 1560 if (fp) 1561 fd_putfile(fd); 1562 atomic_dec_uint(&unp_rights); 1563 nfds = i; 1564 error = EBADF; 1565 goto out; 1566 } 1567 } 1568 1569 /* Allocate new space and copy header into it. 
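 *
 * For reference, the SCM_RIGHTS message being rewritten here is
 * normally built in userland with the CMSG_* macros, roughly as in
 * this hypothetical sketch (fd_to_pass is the descriptor being sent):
 *
 *	union {
 *		struct cmsghdr hdr;
 *		char buf[CMSG_SPACE(sizeof(int))];
 *	} u;
 *	char one = 'x';
 *	struct iovec iov = { .iov_base = &one, .iov_len = 1 };
 *	struct msghdr msg;
 *
 *	memset(&msg, 0, sizeof(msg));
 *	msg.msg_iov = &iov;
 *	msg.msg_iovlen = 1;
 *	msg.msg_control = u.buf;
 *	msg.msg_controllen = sizeof(u.buf);
 *	u.hdr.cmsg_level = SOL_SOCKET;
 *	u.hdr.cmsg_type = SCM_RIGHTS;
 *	u.hdr.cmsg_len = CMSG_LEN(sizeof(int));
 *	memcpy(CMSG_DATA(&u.hdr), &fd_to_pass, sizeof(int));
 *	sendmsg(s, &msg, 0);
 *
 * On the receiving side, recvmsg(2) sees the same cmsghdr with the
 * file_t pointers already replaced by freshly allocated descriptors
 * (see unp_externalize() above).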
 */
1570 newcm = malloc(CMSG_SPACE(nfds * sizeof(file_t *)), M_MBUF, M_WAITOK);
1571 if (newcm == NULL) {
1572 error = E2BIG;
1573 goto out;
1574 }
1575 memcpy(newcm, cm, sizeof(struct cmsghdr));
1576 memset(newcm + 1, 0, CMSG_LEN(0) - sizeof(struct cmsghdr));
1577 files = (file_t **)CMSG_DATA(newcm);
1578 
1579 /*
1580 * Transform the file descriptors into file_t pointers, in
1581 * reverse order so that if pointers are bigger than ints, the
1582 * int won't get overwritten until we're done. No need to lock, as we have
1583 * already validated the descriptors with fd_getfile().
1584 */
1585 fdp = (int *)CMSG_DATA(cm) + nfds;
1586 rp = files + nfds;
1587 for (i = 0; i < nfds; i++) {
1588 fp = fdescp->fd_dt->dt_ff[*--fdp]->ff_file;
1589 KASSERT(fp != NULL);
1590 mutex_enter(&fp->f_lock);
1591 *--rp = fp;
1592 fp->f_count++;
1593 fp->f_msgcount++;
1594 mutex_exit(&fp->f_lock);
1595 }
1596 
1597 out:
1598 /* Release descriptor references. */
1599 fdp = (int *)CMSG_DATA(cm);
1600 for (i = 0; i < nfds; i++) {
1601 fd_putfile(*fdp++);
1602 if (error != 0) {
1603 atomic_dec_uint(&unp_rights);
1604 }
1605 }
1606 
1607 if (error == 0) {
1608 if (control->m_flags & M_EXT) {
1609 m_freem(control);
1610 *controlp = control = m_get(M_WAIT, MT_CONTROL);
1611 }
1612 MEXTADD(control, newcm, CMSG_SPACE(nfds * sizeof(file_t *)),
1613 M_MBUF, NULL, NULL);
1614 cm = newcm;
1615 /*
1616 * Adjust message & mbuf to note amount of space
1617 * actually used.
1618 */
1619 cm->cmsg_len = CMSG_LEN(nfds * sizeof(file_t *));
1620 control->m_len = CMSG_SPACE(nfds * sizeof(file_t *));
1621 }
1622 
1623 return error;
1624 }
1625 
1626 struct mbuf *
1627 unp_addsockcred(struct lwp *l, struct mbuf *control)
1628 {
1629 struct sockcred *sc;
1630 struct mbuf *m;
1631 void *p;
1632 
1633 m = sbcreatecontrol1(&p, SOCKCREDSIZE(kauth_cred_ngroups(l->l_cred)),
1634 SCM_CREDS, SOL_SOCKET, M_WAITOK);
1635 if (m == NULL)
1636 return control;
1637 
1638 sc = p;
1639 sc->sc_pid = l->l_proc->p_pid;
1640 sc->sc_uid = kauth_cred_getuid(l->l_cred);
1641 sc->sc_euid = kauth_cred_geteuid(l->l_cred);
1642 sc->sc_gid = kauth_cred_getgid(l->l_cred);
1643 sc->sc_egid = kauth_cred_getegid(l->l_cred);
1644 sc->sc_ngroups = kauth_cred_ngroups(l->l_cred);
1645 
1646 for (int i = 0; i < sc->sc_ngroups; i++)
1647 sc->sc_groups[i] = kauth_cred_group(l->l_cred, i);
1648 
1649 return m_add(control, m);
1650 }
1651 
1652 /*
1653 * Do a mark-sweep GC of files in the system, to free up any which are
1654 * caught in flight to an about-to-be-closed socket. Additionally,
1655 * process deferred file closures.
1656 */
1657 static void
1658 unp_gc(file_t *dp)
1659 {
1660 extern struct domain unixdomain;
1661 file_t *fp, *np;
1662 struct socket *so, *so1;
1663 u_int i, oflags, rflags;
1664 bool didwork;
1665 
1666 KASSERT(curlwp == unp_thread_lwp);
1667 KASSERT(mutex_owned(&filelist_lock));
1668 
1669 /*
1670 * First, process deferred file closures.
1671 */
1672 while (!SLIST_EMPTY(&unp_thread_discard)) {
1673 fp = SLIST_FIRST(&unp_thread_discard);
1674 KASSERT(fp->f_unpcount > 0);
1675 KASSERT(fp->f_count > 0);
1676 KASSERT(fp->f_msgcount > 0);
1677 KASSERT(fp->f_count >= fp->f_unpcount);
1678 KASSERT(fp->f_count >= fp->f_msgcount);
1679 KASSERT(fp->f_msgcount >= fp->f_unpcount);
1680 SLIST_REMOVE_HEAD(&unp_thread_discard, f_unplist);
1681 i = fp->f_unpcount;
1682 fp->f_unpcount = 0;
1683 mutex_exit(&filelist_lock);
1684 for (; i != 0; i--) {
1685 unp_discard_now(fp);
1686 }
1687 mutex_enter(&filelist_lock);
1688 }
1689 
1690 /*
1691 * Clear mark bits.
Ensure that we don't consider new files
1692 * entering the file table during this loop (they will not have
1693 * FSCAN set).
1694 */
1695 unp_defer = 0;
1696 LIST_FOREACH(fp, &filehead, f_list) {
1697 for (oflags = fp->f_flag;; oflags = rflags) {
1698 rflags = atomic_cas_uint(&fp->f_flag, oflags,
1699 (oflags | FSCAN) & ~(FMARK|FDEFER));
1700 if (__predict_true(oflags == rflags)) {
1701 break;
1702 }
1703 }
1704 }
1705 
1706 /*
1707 * Iterate over the set of sockets, marking ones believed (based on
1708 * refcount) to be referenced from a process, and marking for rescan
1709 * sockets which are queued on a socket. Rescan continues descending
1710 * and searching for sockets referenced by sockets (FDEFER), until
1711 * there are no more socket->socket references to be discovered.
1712 */
1713 do {
1714 didwork = false;
1715 for (fp = LIST_FIRST(&filehead); fp != NULL; fp = np) {
1716 KASSERT(mutex_owned(&filelist_lock));
1717 np = LIST_NEXT(fp, f_list);
1718 mutex_enter(&fp->f_lock);
1719 if ((fp->f_flag & FDEFER) != 0) {
1720 atomic_and_uint(&fp->f_flag, ~FDEFER);
1721 unp_defer--;
1722 if (fp->f_count == 0) {
1723 /*
1724 * XXX: closef() doesn't pay attention
1725 * to FDEFER
1726 */
1727 mutex_exit(&fp->f_lock);
1728 continue;
1729 }
1730 } else {
1731 if (fp->f_count == 0 ||
1732 (fp->f_flag & FMARK) != 0 ||
1733 fp->f_count == fp->f_msgcount ||
1734 fp->f_unpcount != 0) {
1735 mutex_exit(&fp->f_lock);
1736 continue;
1737 }
1738 }
1739 atomic_or_uint(&fp->f_flag, FMARK);
1740 
1741 if (fp->f_type != DTYPE_SOCKET ||
1742 (so = fp->f_socket) == NULL ||
1743 so->so_proto->pr_domain != &unixdomain ||
1744 (so->so_proto->pr_flags & PR_RIGHTS) == 0) {
1745 mutex_exit(&fp->f_lock);
1746 continue;
1747 }
1748 
1749 /* Gain file ref, mark our position, and unlock. */
1750 didwork = true;
1751 LIST_INSERT_AFTER(fp, dp, f_list);
1752 fp->f_count++;
1753 mutex_exit(&fp->f_lock);
1754 mutex_exit(&filelist_lock);
1755 
1756 /*
1757 * Mark files referenced from sockets queued on the
1758 * accept queue as well.
1759 */
1760 solock(so);
1761 unp_scan(so->so_rcv.sb_mb, unp_mark, 0);
1762 if ((so->so_options & SO_ACCEPTCONN) != 0) {
1763 TAILQ_FOREACH(so1, &so->so_q0, so_qe) {
1764 unp_scan(so1->so_rcv.sb_mb, unp_mark, 0);
1765 }
1766 TAILQ_FOREACH(so1, &so->so_q, so_qe) {
1767 unp_scan(so1->so_rcv.sb_mb, unp_mark, 0);
1768 }
1769 }
1770 sounlock(so);
1771 
1772 /* Re-lock and restart from where we left off. */
1773 closef(fp);
1774 mutex_enter(&filelist_lock);
1775 np = LIST_NEXT(dp, f_list);
1776 LIST_REMOVE(dp, f_list);
1777 }
1778 /*
1779 * Bail early if we did nothing in the loop above. Could
1780 * happen because of concurrent activity causing unp_defer
1781 * to get out of sync.
1782 */
1783 } while (unp_defer != 0 && didwork);
1784 
1785 /*
1786 * Sweep pass.
1787 *
1788 * We grab an extra reference to each of the files that are
1789 * not otherwise accessible and then free the rights that are
1790 * stored in messages on them.
1791 */
1792 for (fp = LIST_FIRST(&filehead); fp != NULL; fp = np) {
1793 KASSERT(mutex_owned(&filelist_lock));
1794 np = LIST_NEXT(fp, f_list);
1795 mutex_enter(&fp->f_lock);
1796 
1797 /*
1798 * Ignore non-sockets.
1799 * Ignore dead sockets, or sockets with pending close.
1800 * Ignore sockets obviously referenced elsewhere.
1801 * Ignore sockets marked as referenced by our scan.
1802 * Ignore new sockets that did not exist during the scan.
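 *
 * The classic garbage this pass reclaims is a reference cycle that is
 * reachable only through an in-flight SCM_RIGHTS message, e.g. after
 * this hypothetical userland sequence:
 *
 *	int fd[2];
 *
 *	socketpair(AF_LOCAL, SOCK_STREAM, 0, fd);
 *	(send fd[0] over fd[1] as SCM_RIGHTS)
 *	close(fd[0]);
 *	close(fd[1]);
 *
 * After the close()s the only reference to fd[0]'s file is the queued
 * message, so f_count == f_msgcount and FMARK was never set; the
 * sorflush() below frees that message and with it the last reference.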
1803 */ 1804 if (fp->f_type != DTYPE_SOCKET || 1805 fp->f_count == 0 || fp->f_unpcount != 0 || 1806 fp->f_count != fp->f_msgcount || 1807 (fp->f_flag & (FMARK | FSCAN)) != FSCAN) { 1808 mutex_exit(&fp->f_lock); 1809 continue; 1810 } 1811 1812 /* Gain file ref, mark our position, and unlock. */ 1813 LIST_INSERT_AFTER(fp, dp, f_list); 1814 fp->f_count++; 1815 mutex_exit(&fp->f_lock); 1816 mutex_exit(&filelist_lock); 1817 1818 /* 1819 * Flush all data from the socket's receive buffer. 1820 * This will cause files referenced only by the 1821 * socket to be queued for close. 1822 */ 1823 so = fp->f_socket; 1824 solock(so); 1825 sorflush(so); 1826 sounlock(so); 1827 1828 /* Re-lock and restart from where we left off. */ 1829 closef(fp); 1830 mutex_enter(&filelist_lock); 1831 np = LIST_NEXT(dp, f_list); 1832 LIST_REMOVE(dp, f_list); 1833 } 1834 } 1835 1836 /* 1837 * Garbage collector thread. While SCM_RIGHTS messages are in transit, 1838 * wake once per second to garbage collect. Run continually while we 1839 * have deferred closes to process. 1840 */ 1841 static void 1842 unp_thread(void *cookie) 1843 { 1844 file_t *dp; 1845 1846 /* Allocate a dummy file for our scans. */ 1847 if ((dp = fgetdummy()) == NULL) { 1848 panic("unp_thread"); 1849 } 1850 1851 mutex_enter(&filelist_lock); 1852 for (;;) { 1853 KASSERT(mutex_owned(&filelist_lock)); 1854 if (SLIST_EMPTY(&unp_thread_discard)) { 1855 if (unp_rights != 0) { 1856 (void)cv_timedwait(&unp_thread_cv, 1857 &filelist_lock, hz); 1858 } else { 1859 cv_wait(&unp_thread_cv, &filelist_lock); 1860 } 1861 } 1862 unp_gc(dp); 1863 } 1864 /* NOTREACHED */ 1865 } 1866 1867 /* 1868 * Kick the garbage collector into action if there is something for 1869 * it to process. 1870 */ 1871 static void 1872 unp_thread_kick(void) 1873 { 1874 1875 if (!SLIST_EMPTY(&unp_thread_discard) || unp_rights != 0) { 1876 mutex_enter(&filelist_lock); 1877 cv_signal(&unp_thread_cv); 1878 mutex_exit(&filelist_lock); 1879 } 1880 } 1881 1882 void 1883 unp_dispose(struct mbuf *m) 1884 { 1885 1886 if (m) 1887 unp_scan(m, unp_discard_later, 1); 1888 } 1889 1890 void 1891 unp_scan(struct mbuf *m0, void (*op)(file_t *), int discard) 1892 { 1893 struct mbuf *m; 1894 file_t **rp, *fp; 1895 struct cmsghdr *cm; 1896 int i, qfds; 1897 1898 while (m0) { 1899 for (m = m0; m; m = m->m_next) { 1900 if (m->m_type != MT_CONTROL || 1901 m->m_len < sizeof(*cm)) { 1902 continue; 1903 } 1904 cm = mtod(m, struct cmsghdr *); 1905 if (cm->cmsg_level != SOL_SOCKET || 1906 cm->cmsg_type != SCM_RIGHTS) 1907 continue; 1908 qfds = (cm->cmsg_len - CMSG_ALIGN(sizeof(*cm))) 1909 / sizeof(file_t *); 1910 rp = (file_t **)CMSG_DATA(cm); 1911 for (i = 0; i < qfds; i++) { 1912 fp = *rp; 1913 if (discard) { 1914 *rp = 0; 1915 } 1916 (*op)(fp); 1917 rp++; 1918 } 1919 } 1920 m0 = m0->m_nextpkt; 1921 } 1922 } 1923 1924 void 1925 unp_mark(file_t *fp) 1926 { 1927 1928 if (fp == NULL) 1929 return; 1930 1931 /* If we're already deferred, don't screw up the defer count */ 1932 mutex_enter(&fp->f_lock); 1933 if (fp->f_flag & (FMARK | FDEFER)) { 1934 mutex_exit(&fp->f_lock); 1935 return; 1936 } 1937 1938 /* 1939 * Minimize the number of deferrals... Sockets are the only type of 1940 * file which can hold references to another file, so just mark 1941 * other files, and defer unmarked sockets for the next pass. 
1942 */ 1943 if (fp->f_type == DTYPE_SOCKET) { 1944 unp_defer++; 1945 KASSERT(fp->f_count != 0); 1946 atomic_or_uint(&fp->f_flag, FDEFER); 1947 } else { 1948 atomic_or_uint(&fp->f_flag, FMARK); 1949 } 1950 mutex_exit(&fp->f_lock); 1951 } 1952 1953 static void 1954 unp_discard_now(file_t *fp) 1955 { 1956 1957 if (fp == NULL) 1958 return; 1959 1960 KASSERT(fp->f_count > 0); 1961 KASSERT(fp->f_msgcount > 0); 1962 1963 mutex_enter(&fp->f_lock); 1964 fp->f_msgcount--; 1965 mutex_exit(&fp->f_lock); 1966 atomic_dec_uint(&unp_rights); 1967 (void)closef(fp); 1968 } 1969 1970 static void 1971 unp_discard_later(file_t *fp) 1972 { 1973 1974 if (fp == NULL) 1975 return; 1976 1977 KASSERT(fp->f_count > 0); 1978 KASSERT(fp->f_msgcount > 0); 1979 1980 mutex_enter(&filelist_lock); 1981 if (fp->f_unpcount++ == 0) { 1982 SLIST_INSERT_HEAD(&unp_thread_discard, fp, f_unplist); 1983 } 1984 mutex_exit(&filelist_lock); 1985 } 1986 1987 void 1988 unp_sysctl_create(struct sysctllog **clog) 1989 { 1990 sysctl_createv(clog, 0, NULL, NULL, 1991 CTLFLAG_PERMANENT|CTLFLAG_READWRITE, 1992 CTLTYPE_LONG, "sendspace", 1993 SYSCTL_DESCR("Default stream send space"), 1994 NULL, 0, &unpst_sendspace, 0, 1995 CTL_NET, PF_LOCAL, SOCK_STREAM, CTL_CREATE, CTL_EOL); 1996 sysctl_createv(clog, 0, NULL, NULL, 1997 CTLFLAG_PERMANENT|CTLFLAG_READWRITE, 1998 CTLTYPE_LONG, "recvspace", 1999 SYSCTL_DESCR("Default stream recv space"), 2000 NULL, 0, &unpst_recvspace, 0, 2001 CTL_NET, PF_LOCAL, SOCK_STREAM, CTL_CREATE, CTL_EOL); 2002 sysctl_createv(clog, 0, NULL, NULL, 2003 CTLFLAG_PERMANENT|CTLFLAG_READWRITE, 2004 CTLTYPE_LONG, "sendspace", 2005 SYSCTL_DESCR("Default datagram send space"), 2006 NULL, 0, &unpdg_sendspace, 0, 2007 CTL_NET, PF_LOCAL, SOCK_DGRAM, CTL_CREATE, CTL_EOL); 2008 sysctl_createv(clog, 0, NULL, NULL, 2009 CTLFLAG_PERMANENT|CTLFLAG_READWRITE, 2010 CTLTYPE_LONG, "recvspace", 2011 SYSCTL_DESCR("Default datagram recv space"), 2012 NULL, 0, &unpdg_recvspace, 0, 2013 CTL_NET, PF_LOCAL, SOCK_DGRAM, CTL_CREATE, CTL_EOL); 2014 sysctl_createv(clog, 0, NULL, NULL, 2015 CTLFLAG_PERMANENT|CTLFLAG_READONLY, 2016 CTLTYPE_INT, "inflight", 2017 SYSCTL_DESCR("File descriptors in flight"), 2018 NULL, 0, &unp_rights, 0, 2019 CTL_NET, PF_LOCAL, CTL_CREATE, CTL_EOL); 2020 sysctl_createv(clog, 0, NULL, NULL, 2021 CTLFLAG_PERMANENT|CTLFLAG_READONLY, 2022 CTLTYPE_INT, "deferred", 2023 SYSCTL_DESCR("File descriptors deferred for close"), 2024 NULL, 0, &unp_defer, 0, 2025 CTL_NET, PF_LOCAL, CTL_CREATE, CTL_EOL); 2026 } 2027 2028 const struct pr_usrreqs unp_usrreqs = { 2029 .pr_attach = unp_attach, 2030 .pr_detach = unp_detach, 2031 .pr_accept = unp_accept, 2032 .pr_bind = unp_bind, 2033 .pr_listen = unp_listen, 2034 .pr_connect = unp_connect, 2035 .pr_connect2 = unp_connect2, 2036 .pr_disconnect = unp_disconnect, 2037 .pr_shutdown = unp_shutdown, 2038 .pr_abort = unp_abort, 2039 .pr_ioctl = unp_ioctl, 2040 .pr_stat = unp_stat, 2041 .pr_peeraddr = unp_peeraddr, 2042 .pr_sockaddr = unp_sockaddr, 2043 .pr_rcvd = unp_rcvd, 2044 .pr_recvoob = unp_recvoob, 2045 .pr_send = unp_send, 2046 .pr_sendoob = unp_sendoob, 2047 }; 2048
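
/*
 * The read-only "inflight" and "deferred" nodes created above give a
 * quick view of SCM_RIGHTS activity; they can presumably be read with
 * something like:
 *
 *	sysctl net.local.inflight net.local.deferred
 */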