/*
 * Copyright (c) 2005 Jeffrey M. Hsu.  All rights reserved.
 * Copyright (c) 1982, 1986, 1988, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)uipc_socket2.c	8.1 (Berkeley) 6/10/93
 * $FreeBSD: src/sys/kern/uipc_socket2.c,v 1.55.2.17 2002/08/31 19:04:55 dwmalone Exp $
 */

#include "opt_param.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/domain.h>
#include <sys/file.h>			/* for maxfiles */
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/protosw.h>
#include <sys/resourcevar.h>
#include <sys/stat.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/socketops.h>
#include <sys/signalvar.h>
#include <sys/sysctl.h>
#include <sys/event.h>

#include <sys/thread2.h>
#include <sys/msgport2.h>
#include <sys/socketvar2.h>

#include <net/netisr2.h>

#ifndef KTR_SOWAKEUP
#define KTR_SOWAKEUP	KTR_ALL
#endif
KTR_INFO_MASTER(sowakeup);
KTR_INFO(KTR_SOWAKEUP, sowakeup, nconn_start, 0, "newconn sorwakeup start");
KTR_INFO(KTR_SOWAKEUP, sowakeup, nconn_end, 1, "newconn sorwakeup end");
KTR_INFO(KTR_SOWAKEUP, sowakeup, nconn_wakeupstart, 2, "newconn wakeup start");
KTR_INFO(KTR_SOWAKEUP, sowakeup, nconn_wakeupend, 3, "newconn wakeup end");
#define logsowakeup(name)	KTR_LOG(sowakeup_ ## name)

int maxsockets;

/*
 * Primitive routines for operating on sockets and socket buffers.
 */

u_long	sb_max = SB_MAX;
u_long	sb_max_adj =
    SB_MAX * MCLBYTES / (MSIZE + MCLBYTES);	/* adjusted sb_max */

static u_long sb_efficiency = 8;	/* parameter for ssb_reserve() */

/************************************************************************
 *			signalsockbuf procedures			*
 ************************************************************************/
/*
 * Wait for data to arrive at/drain from a socket buffer.
 *
 * NOTE: Caller must generally hold the ssb_lock (client side lock) since
 *	 WAIT/WAKEUP only works for one client at a time.
 *
 * NOTE: Caller always retries whatever operation it was waiting on.
 */
int
ssb_wait(struct signalsockbuf *ssb)
{
        uint32_t flags;
        int pflags;
        int error;

        pflags = (ssb->ssb_flags & SSB_NOINTR) ? 0 : PCATCH;

        for (;;) {
                flags = ssb->ssb_flags;
                cpu_ccfence();

                /*
                 * WAKEUP and WAIT interlock each other.  We can catch the
                 * race by checking to see if WAKEUP has already been set,
                 * and only setting WAIT if WAKEUP is clear.
                 */
                if (flags & SSB_WAKEUP) {
                        if (atomic_cmpset_int(&ssb->ssb_flags, flags,
                                              flags & ~SSB_WAKEUP)) {
                                error = 0;
                                break;
                        }
                        continue;
                }

                /*
                 * Only set WAIT if WAKEUP is clear.
                 */
                tsleep_interlock(&ssb->ssb_cc, pflags);
                if (atomic_cmpset_int(&ssb->ssb_flags, flags,
                                      flags | SSB_WAIT)) {
                        error = tsleep(&ssb->ssb_cc, pflags | PINTERLOCKED,
                                       "sbwait", ssb->ssb_timeo);
                        break;
                }
        }
        return (error);
}

/*
 * Lock a sockbuf which was found to be already locked; sleep until the
 * lock can be obtained, returning any error from tsleep (e.g. EINTR).
 */
int
_ssb_lock(struct signalsockbuf *ssb)
{
        uint32_t flags;
        int pflags;
        int error;

        pflags = (ssb->ssb_flags & SSB_NOINTR) ? 0 : PCATCH;

        for (;;) {
                flags = ssb->ssb_flags;
                cpu_ccfence();
                if (flags & SSB_LOCK) {
                        tsleep_interlock(&ssb->ssb_flags, pflags);
                        if (atomic_cmpset_int(&ssb->ssb_flags, flags,
                                              flags | SSB_WANT)) {
                                error = tsleep(&ssb->ssb_flags,
                                               pflags | PINTERLOCKED,
                                               "sblock", 0);
                                if (error)
                                        break;
                        }
                } else {
                        if (atomic_cmpset_int(&ssb->ssb_flags, flags,
                                              flags | SSB_LOCK)) {
                                lwkt_gettoken(&ssb->ssb_token);
                                error = 0;
                                break;
                        }
                }
        }
        return (error);
}
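/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * the typical consumer pattern for the two primitives above.  A reader
 * takes the client-side lock once, then loops on ssb_wait() and re-tests
 * its predicate after every wakeup, since ssb_wait() only guarantees that
 * something changed.  The ssb_lock()/ssb_unlock() signatures and the
 * example function itself are assumptions for illustration only.
 */
#if 0
static int
example_wait_for_data(struct socket *so)
{
        struct signalsockbuf *ssb = &so->so_rcv;
        int error;

        if ((error = ssb_lock(ssb, M_WAITOK)) != 0)
                return (error);
        while (ssb->ssb_cc == 0 && (so->so_state & SS_CANTRCVMORE) == 0) {
                /* retry the predicate after every wakeup */
                if ((error = ssb_wait(ssb)) != 0)
                        break;
        }
        ssb_unlock(ssb);
        return (error);
}
#endif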
/*
 * Fill in an external-format (``xsockbuf'') structure from a
 * signalsockbuf, much as sotoxsocket() below does for sockets.  Note
 * that the xsockbuf structure, since it is always embedded in a socket,
 * does not include a self pointer nor a length.  We make this entry
 * point public in case some other mechanism needs it.
 */
void
ssbtoxsockbuf(struct signalsockbuf *ssb, struct xsockbuf *xsb)
{
        xsb->sb_cc = ssb->ssb_cc;
        xsb->sb_hiwat = ssb->ssb_hiwat;
        xsb->sb_mbcnt = ssb->ssb_mbcnt;
        xsb->sb_mbmax = ssb->ssb_mbmax;
        xsb->sb_lowat = ssb->ssb_lowat;
        xsb->sb_flags = ssb->ssb_flags;
        xsb->sb_timeo = ssb->ssb_timeo;
}

/************************************************************************
 *	Procedures which manipulate socket state flags, wakeups, etc.	*
 ************************************************************************
 *
 * Normal sequence from the active (originating) side is that
 * soisconnecting() is called during processing of connect() call, resulting
 * in an eventual call to soisconnected() if/when the connection is
 * established.  When the connection is torn down soisdisconnecting() is
 * called during processing of disconnect() call, and soisdisconnected() is
 * called when the connection to the peer is totally severed.
 *
 * The semantics of these routines are such that connectionless protocols
 * can call soisconnected() and soisdisconnected() only, bypassing the
 * in-progress calls when setting up a ``connection'' takes no time.
 *
 * From the passive side, a socket is created with two queues of sockets:
 * so_incomp for connections in progress and so_comp for connections
 * already made and awaiting user acceptance.  As a protocol is preparing
 * incoming connections, it creates a socket structure queued on so_incomp
 * by calling sonewconn().  When the connection is established,
 * soisconnected() is called, and transfers the socket structure to so_comp,
 * making it available to accept().
 *
 * If a socket is closed with sockets on either so_incomp or so_comp, these
 * sockets are dropped.
 *
 * If higher level protocols are implemented in the kernel, the wakeups
 * done here will sometimes cause software-interrupt process scheduling.
 */

void
soisconnecting(struct socket *so)
{
        soclrstate(so, SS_ISCONNECTED | SS_ISDISCONNECTING);
        sosetstate(so, SS_ISCONNECTING);
}

void
soisconnected(struct socket *so)
{
        struct socket *head;

        while ((head = so->so_head) != NULL) {
                lwkt_getpooltoken(head);
                if (so->so_head == head)
                        break;
                lwkt_relpooltoken(head);
        }

        soclrstate(so, SS_ISCONNECTING | SS_ISDISCONNECTING | SS_ISCONFIRMING);
        sosetstate(so, SS_ISCONNECTED);
        if (head && (so->so_state & SS_INCOMP)) {
                if ((so->so_options & SO_ACCEPTFILTER) != 0) {
                        so->so_upcall =
                            head->so_accf->so_accept_filter->accf_callback;
                        so->so_upcallarg = head->so_accf->so_accept_filter_arg;
                        atomic_set_int(&so->so_rcv.ssb_flags, SSB_UPCALL);
                        so->so_options &= ~SO_ACCEPTFILTER;
                        so->so_upcall(so, so->so_upcallarg, 0);
                        lwkt_relpooltoken(head);
                        return;
                }

                /*
                 * Listen sockets are not per-cpu.
                 */
                KKASSERT((so->so_state & (SS_COMP | SS_INCOMP)) == SS_INCOMP);
                TAILQ_REMOVE(&head->so_incomp, so, so_list);
                head->so_incqlen--;
                TAILQ_INSERT_TAIL(&head->so_comp, so, so_list);
                head->so_qlen++;
                sosetstate(so, SS_COMP);
                soclrstate(so, SS_INCOMP);

                /*
                 * XXX head may be on a different protocol thread.
                 *     sorwakeup()->sowakeup() is hacked atm.
                 */
                sorwakeup(head);
                wakeup_one(&head->so_timeo);
        } else {
                wakeup(&so->so_timeo);
                sorwakeup(so);
                sowwakeup(so);
        }
        if (head)
                lwkt_relpooltoken(head);
}

void
soisdisconnecting(struct socket *so)
{
        soclrstate(so, SS_ISCONNECTING);
        sosetstate(so, SS_ISDISCONNECTING | SS_CANTRCVMORE | SS_CANTSENDMORE);
        wakeup((caddr_t)&so->so_timeo);
        sowwakeup(so);
        sorwakeup(so);
}

void
soisdisconnected(struct socket *so)
{
        soclrstate(so, SS_ISCONNECTING | SS_ISCONNECTED | SS_ISDISCONNECTING);
        sosetstate(so, SS_CANTRCVMORE | SS_CANTSENDMORE | SS_ISDISCONNECTED);
        wakeup((caddr_t)&so->so_timeo);
        sbdrop(&so->so_snd.sb, so->so_snd.ssb_cc);
        sowwakeup(so);
        sorwakeup(so);
}

void
soisreconnecting(struct socket *so)
{
        soclrstate(so, SS_ISDISCONNECTING | SS_ISDISCONNECTED |
                       SS_CANTRCVMORE | SS_CANTSENDMORE);
        sosetstate(so, SS_ISCONNECTING);
}

void
soisreconnected(struct socket *so)
{
        soclrstate(so, SS_ISDISCONNECTED | SS_CANTRCVMORE | SS_CANTSENDMORE);
        soisconnected(so);
}
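/*
 * Illustrative sketch (editor's addition): the userland view of the two
 * queues described above.  listen(2) marks the socket SO_ACCEPTCONN and
 * sets so_qlimit from the backlog; each accept(2) then dequeues one
 * fully-connected socket from so_comp.  Port number and backlog below
 * are arbitrary example values.
 */
#if 0
        /* userland view */
        struct sockaddr_in sin;
        int lfd, fd;

        lfd = socket(AF_INET, SOCK_STREAM, 0);
        bzero(&sin, sizeof(sin));
        sin.sin_len = sizeof(sin);
        sin.sin_family = AF_INET;
        sin.sin_port = htons(8080);
        sin.sin_addr.s_addr = htonl(INADDR_ANY);
        bind(lfd, (struct sockaddr *)&sin, sizeof(sin));
        listen(lfd, 128);                       /* backlog bounds so_qlimit */
        fd = accept(lfd, NULL, NULL);           /* dequeues from so_comp */
#endif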
/*
 * Set or change the message port a socket receives commands on.
 *
 * XXX
 */
void
sosetport(struct socket *so, lwkt_port_t port)
{
        so->so_port = port;
}

/*
 * When an attempt at a new connection is noted on a socket
 * which accepts connections, sonewconn is called.  If the
 * connection is possible (subject to space constraints, etc.)
 * then we allocate a new structure, properly linked into the
 * data structure of the original socket, and return this.
 * Connstatus may be 0, or SS_ISCONFIRMING, or SS_ISCONNECTED.
 *
 * The new socket is returned with one ref and so_pcb assigned.
 * The reference is implied by so_pcb.
 */
struct socket *
sonewconn_faddr(struct socket *head, int connstatus,
		const struct sockaddr *faddr)
{
        struct socket *so;
        struct socket *sp;
        struct pru_attach_info ai;

        if (head->so_qlen > 3 * head->so_qlimit / 2)
                return (NULL);
        so = soalloc(1, head->so_proto);
        if (so == NULL)
                return (NULL);

        /*
         * Set the port prior to attaching the inpcb to the current
         * cpu's protocol thread (which should be the current thread
         * but might not be in all cases).  This serializes any pcb ops
         * which occur to our cpu allowing us to complete the attachment
         * without racing anything.
         */
        if (head->so_proto->pr_flags & PR_SYNC_PORT)
                sosetport(so, &netisr_sync_port);
        else
                sosetport(so, netisr_cpuport(mycpuid));
        if ((head->so_options & SO_ACCEPTFILTER) != 0)
                connstatus = 0;
        so->so_head = head;
        so->so_type = head->so_type;
        so->so_options = head->so_options & ~SO_ACCEPTCONN;
        so->so_linger = head->so_linger;

        /*
         * NOTE: Clearing NOFDREF implies referencing the so with
         *	 soreference().
         */
        so->so_state = head->so_state | SS_NOFDREF | SS_ASSERTINPROG;
        so->so_cred = crhold(head->so_cred);
        ai.sb_rlimit = NULL;
        ai.p_ucred = NULL;
        ai.fd_rdir = NULL;		/* jail code cruft XXX JH */

        /*
         * Reserve space and call pru_attach.  We can direct-call the
         * function since we're already in the protocol thread.
         */
        if (soreserve(so, head->so_snd.ssb_hiwat,
                      head->so_rcv.ssb_hiwat, NULL) ||
            so_pru_attach_direct(so, 0, &ai)) {
                so->so_head = NULL;
                soclrstate(so, SS_ASSERTINPROG);
                sofree(so);		/* remove implied pcb ref */
                return (NULL);
        }
        KKASSERT(((so->so_proto->pr_flags & PR_ASYNC_RCVD) == 0 &&
                  so->so_refs == 2) ||	/* attach + our base ref */
                 ((so->so_proto->pr_flags & PR_ASYNC_RCVD) &&
                  so->so_refs == 3));	/* + async rcvd ref */
        sofree(so);
        KKASSERT(so->so_port != NULL);
        so->so_rcv.ssb_lowat = head->so_rcv.ssb_lowat;
        so->so_snd.ssb_lowat = head->so_snd.ssb_lowat;
        so->so_rcv.ssb_timeo = head->so_rcv.ssb_timeo;
        so->so_snd.ssb_timeo = head->so_snd.ssb_timeo;

        if (head->so_rcv.ssb_flags & SSB_AUTOLOWAT)
                so->so_rcv.ssb_flags |= SSB_AUTOLOWAT;
        else
                so->so_rcv.ssb_flags &= ~SSB_AUTOLOWAT;

        if (head->so_snd.ssb_flags & SSB_AUTOLOWAT)
                so->so_snd.ssb_flags |= SSB_AUTOLOWAT;
        else
                so->so_snd.ssb_flags &= ~SSB_AUTOLOWAT;

        if (head->so_rcv.ssb_flags & SSB_AUTOSIZE)
                so->so_rcv.ssb_flags |= SSB_AUTOSIZE;
        else
                so->so_rcv.ssb_flags &= ~SSB_AUTOSIZE;

        if (head->so_snd.ssb_flags & SSB_AUTOSIZE)
                so->so_snd.ssb_flags |= SSB_AUTOSIZE;
        else
                so->so_snd.ssb_flags &= ~SSB_AUTOSIZE;

        /*
         * Save the faddr, if the information is provided and
         * the protocol can perform the saving operation.
         */
        if (faddr != NULL && so->so_proto->pr_usrreqs->pru_savefaddr != NULL)
                so->so_proto->pr_usrreqs->pru_savefaddr(so, faddr);

        lwkt_getpooltoken(head);
        if (connstatus) {
                KKASSERT((so->so_state & (SS_INCOMP | SS_COMP)) == 0);
                TAILQ_INSERT_TAIL(&head->so_comp, so, so_list);
                sosetstate(so, SS_COMP);
                head->so_qlen++;
        } else {
                if (head->so_incqlen > head->so_qlimit) {
                        sp = TAILQ_FIRST(&head->so_incomp);
                        KKASSERT((sp->so_state & (SS_INCOMP | SS_COMP)) ==
                                 SS_INCOMP);
                        TAILQ_REMOVE(&head->so_incomp, sp, so_list);
                        head->so_incqlen--;
                        soclrstate(sp, SS_INCOMP);
                        soabort_async(sp, TRUE);
                }
                KKASSERT((so->so_state & (SS_INCOMP | SS_COMP)) == 0);
                TAILQ_INSERT_TAIL(&head->so_incomp, so, so_list);
                sosetstate(so, SS_INCOMP);
                head->so_incqlen++;
        }
        lwkt_relpooltoken(head);
        if (connstatus) {
                /*
                 * XXX head may be on a different protocol thread.
                 *     sorwakeup()->sowakeup() is hacked atm.
                 */
                logsowakeup(nconn_start);
                sorwakeup(head);
                logsowakeup(nconn_end);

                logsowakeup(nconn_wakeupstart);
                wakeup((caddr_t)&head->so_timeo);
                logsowakeup(nconn_wakeupend);

                sosetstate(so, connstatus);
        }
        soclrstate(so, SS_ASSERTINPROG);
        return (so);
}

struct socket *
sonewconn(struct socket *head, int connstatus)
{
        return sonewconn_faddr(head, connstatus, NULL);
}
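/*
 * Illustrative sketch (editor's addition): how a protocol typically
 * drives the entry points above.  A TCP-style implementation creates
 * the embryonic socket on SYN arrival with connstatus 0 (queueing it
 * on so_incomp) and promotes it via soisconnected() once the handshake
 * completes.  The function below is hypothetical; only sonewconn() and
 * soisconnected() are real.
 */
#if 0
static void
example_passive_open(struct socket *head)
{
        struct socket *so;

        so = sonewconn(head, 0);	/* queued on head->so_incomp */
        if (so == NULL)
                return;			/* listen queue full or no memory */
        /* ... protocol handshake completes ... */
        soisconnected(so);		/* moves so to head->so_comp */
}
#endif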
/*
 * Socantsendmore indicates that no more data will be sent on the
 * socket; it would normally be applied to a socket by the protocol
 * code (in the PRU_SHUTDOWN case) when the user informs the system
 * that no more data is to be sent.  Socantrcvmore indicates that no
 * more data will be received, and will normally be applied to the
 * socket by a protocol when it detects that the peer will send no
 * more data.  Data queued for reading in the socket may yet be read.
 */
void
socantsendmore(struct socket *so)
{
        sosetstate(so, SS_CANTSENDMORE);
        sowwakeup(so);
}

void
socantrcvmore(struct socket *so)
{
        sosetstate(so, SS_CANTRCVMORE);
        sorwakeup(so);
}
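/*
 * Illustrative sketch (editor's addition): the userland trigger for
 * socantsendmore().  shutdown(2) with SHUT_WR is delivered to the
 * protocol as PRU_SHUTDOWN, whose handler ends up calling
 * socantsendmore(); the peer's protocol eventually calls
 * socantrcvmore() when it sees the resulting FIN (in the TCP case).
 */
#if 0
        /* userland view, assuming a connected TCP socket fd */
        shutdown(fd, SHUT_WR);	/* -> PRU_SHUTDOWN -> socantsendmore() */
#endif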
/*
 * Wakeup processes waiting on a socket buffer.  Do asynchronous notification
 * via SIGIO if the socket has the SS_ASYNC flag set.
 *
 * For users waiting on send/recv try to avoid unnecessary context switch
 * thrashing.  Particularly for senders of large buffers (needs to be
 * extended to sel and aio? XXX)
 *
 * WARNING!  Can be called on a foreign socket from the wrong protocol
 *	     thread.  E.g. it is called on the 'head' listen socket when
 *	     a new connection comes in.
 */
void
sowakeup(struct socket *so, struct signalsockbuf *ssb)
{
        struct kqinfo *kqinfo = &ssb->ssb_kq;
        uint32_t flags;

        /*
         * Atomically check the flags.  When no special features are being
         * used, WAIT is clear, and WAKEUP is already set, we can simply
         * return.  The upcoming synchronous waiter will not block.
         */
        flags = atomic_fetchadd_int(&ssb->ssb_flags, 0);
        if ((flags & SSB_NOTIFY_MASK) == 0) {
                if (flags & SSB_WAKEUP)
                        return;
        }

        /*
         * Check conditions, set the WAKEUP flag, and clear and signal if
         * the WAIT flag is found to be set.  This interlocks against the
         * client side.
         */
        for (;;) {
                long space;

                flags = ssb->ssb_flags;
                cpu_ccfence();
                if (ssb->ssb_flags & SSB_PREALLOC)
                        space = ssb_space_prealloc(ssb);
                else
                        space = ssb_space(ssb);

                if ((ssb == &so->so_snd && space >= ssb->ssb_lowat) ||
                    (ssb == &so->so_rcv && ssb->ssb_cc >= ssb->ssb_lowat) ||
                    (ssb == &so->so_snd && (so->so_state & SS_CANTSENDMORE)) ||
                    (ssb == &so->so_rcv && (so->so_state & SS_CANTRCVMORE))
                ) {
                        if (atomic_cmpset_int(&ssb->ssb_flags, flags,
                                        (flags | SSB_WAKEUP) & ~SSB_WAIT)) {
                                if (flags & SSB_WAIT)
                                        wakeup(&ssb->ssb_cc);
                                break;
                        }
                } else {
                        break;
                }
        }

        /*
         * Misc other events
         */
        if ((so->so_state & SS_ASYNC) && so->so_sigio != NULL)
                pgsigio(so->so_sigio, SIGIO, 0);
        if (ssb->ssb_flags & SSB_UPCALL)
                (*so->so_upcall)(so, so->so_upcallarg, M_NOWAIT);
        KNOTE(&kqinfo->ki_note, 0);

        /*
         * This is a bit of a hack.  Multiple threads can wind up scanning
         * ki_mlist concurrently due to the fact that this function can be
         * called on a foreign socket, so we can't afford to block here.
         *
         * We need the pool token for (so) (likely the listen socket if
         * SSB_MEVENT is set) because the predicate function may have
         * to access the accept queue.
         */
        if (ssb->ssb_flags & SSB_MEVENT) {
                struct netmsg_so_notify *msg, *nmsg;

                lwkt_getpooltoken(so);
                TAILQ_FOREACH_MUTABLE(msg, &kqinfo->ki_mlist, nm_list, nmsg) {
                        if (msg->nm_predicate(msg)) {
                                TAILQ_REMOVE(&kqinfo->ki_mlist, msg, nm_list);
                                lwkt_replymsg(&msg->base.lmsg,
                                              msg->base.lmsg.ms_error);
                        }
                }
                if (TAILQ_EMPTY(&ssb->ssb_kq.ki_mlist))
                        atomic_clear_int(&ssb->ssb_flags, SSB_MEVENT);
                lwkt_relpooltoken(so);
        }
}
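/*
 * Illustrative sketch (editor's addition): the userland side of the
 * SIGIO path taken above.  A process arms SS_ASYNC/so_sigio with
 * fcntl(2); sowakeup() then posts SIGIO via pgsigio() whenever the
 * buffer becomes ready.  The kevent() path (the KNOTE above) needs no
 * arming beyond registering a filter.
 */
#if 0
        /* userland view, assuming a socket fd */
        fcntl(fd, F_SETOWN, getpid());		/* direct SIGIO here */
        fcntl(fd, F_SETFL, fcntl(fd, F_GETFL, 0) | O_ASYNC);
        /* SIGIO is now delivered when sowakeup() fires for fd */
#endif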
/*
 * Socket buffer (struct signalsockbuf) utility routines.
 *
 * Each socket contains two socket buffers: one for sending data and
 * one for receiving data.  Each buffer contains a queue of mbufs,
 * information about the number of mbufs and amount of data in the
 * queue, and other fields allowing kevent()/select()/poll() statements
 * and notification on data availability to be implemented.
 *
 * Data stored in a socket buffer is maintained as a list of records.
 * Each record is a list of mbufs chained together with the m_next
 * field.  Records are chained together with the m_nextpkt field.  The upper
 * level routine soreceive() expects the following conventions to be
 * observed when placing information in the receive buffer:
 *
 * 1. If the protocol requires each message be preceded by the sender's
 *    name, then a record containing that name must be present before
 *    any associated data (mbuf's must be of type MT_SONAME).
 * 2. If the protocol supports the exchange of ``access rights'' (really
 *    just additional data associated with the message), and there are
 *    ``rights'' to be received, then a record containing this data
 *    should be present (mbuf's must be of type MT_RIGHTS).
 * 3. If a name or rights record exists, then it must be followed by
 *    a data record, perhaps of zero length.
 *
 * Before using a new socket structure it is first necessary to reserve
 * buffer space to the socket, by calling ssb_reserve().  This should commit
 * some of the available buffer space in the system buffer pool for the
 * socket (currently, it does nothing but enforce limits).  The space
 * should be released by calling ssb_release() when the socket is destroyed.
 */
int
soreserve(struct socket *so, u_long sndcc, u_long rcvcc, struct rlimit *rl)
{
        if (so->so_snd.ssb_lowat == 0)
                atomic_set_int(&so->so_snd.ssb_flags, SSB_AUTOLOWAT);
        if (ssb_reserve(&so->so_snd, sndcc, so, rl) == 0)
                goto bad;
        if (ssb_reserve(&so->so_rcv, rcvcc, so, rl) == 0)
                goto bad2;
        if (so->so_rcv.ssb_lowat == 0)
                so->so_rcv.ssb_lowat = 1;
        if (so->so_snd.ssb_lowat == 0)
                so->so_snd.ssb_lowat = MCLBYTES;
        if (so->so_snd.ssb_lowat > so->so_snd.ssb_hiwat)
                so->so_snd.ssb_lowat = so->so_snd.ssb_hiwat;
        return (0);
bad2:
        ssb_release(&so->so_snd, so);
bad:
        return (ENOBUFS);
}

static int
sysctl_handle_sb_max(SYSCTL_HANDLER_ARGS)
{
        int error = 0;
        u_long old_sb_max = sb_max;

        error = SYSCTL_OUT(req, arg1, sizeof(int));
        if (error || !req->newptr)
                return (error);
        error = SYSCTL_IN(req, arg1, sizeof(int));
        if (error)
                return (error);
        if (sb_max < MSIZE + MCLBYTES) {
                sb_max = old_sb_max;
                return (EINVAL);
        }
        sb_max_adj = (u_quad_t)sb_max * MCLBYTES / (MSIZE + MCLBYTES);
        return (0);
}

/*
 * Allot mbufs to a signalsockbuf.
 *
 * Attempt to scale mbmax so that mbcnt doesn't become limiting
 * if buffering efficiency is near the normal case.
 *
 * sb_max only applies to user-sockets (where rl != NULL).  It does
 * not apply to kernel sockets or kernel-controlled sockets.  Note
 * that NFS overrides the sockbuf limits created when nfsd creates
 * a socket.
 */
int
ssb_reserve(struct signalsockbuf *ssb, u_long cc, struct socket *so,
	    struct rlimit *rl)
{
        /*
         * rl will only be NULL when we're in an interrupt (e.g. in
         * tcp_input) or when called from netgraph (i.e. ngd_attach).
         */
        if (rl && cc > sb_max_adj)
                cc = sb_max_adj;
        if (!chgsbsize(so->so_cred->cr_uidinfo, &ssb->ssb_hiwat, cc,
                       rl ? rl->rlim_cur : RLIM_INFINITY)) {
                return (0);
        }
        if (rl)
                ssb->ssb_mbmax = min(cc * sb_efficiency, sb_max);
        else
                ssb->ssb_mbmax = cc * sb_efficiency;

        /*
         * AUTOLOWAT is set on send buffers and prevents large writes
         * from generating a huge number of context switches.
         */
        if (ssb->ssb_flags & SSB_AUTOLOWAT) {
                ssb->ssb_lowat = ssb->ssb_hiwat / 4;
                if (ssb->ssb_lowat < MCLBYTES)
                        ssb->ssb_lowat = MCLBYTES;
        }
        if (ssb->ssb_lowat > ssb->ssb_hiwat)
                ssb->ssb_lowat = ssb->ssb_hiwat;
        return (1);
}

/*
 * Free mbufs held by a socket, and reserved mbuf space.
 */
void
ssb_release(struct signalsockbuf *ssb, struct socket *so)
{
        sbflush(&ssb->sb);
        (void)chgsbsize(so->so_cred->cr_uidinfo, &ssb->ssb_hiwat, 0,
                        RLIM_INFINITY);
        ssb->ssb_mbmax = 0;
}
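/*
 * Illustrative sketch (editor's addition): the reservation path above
 * as seen from userland.  setsockopt(SO_SNDBUF/SO_RCVBUF) reaches
 * ssb_reserve() with rl != NULL, so the request is silently clamped to
 * sb_max_adj (derived from kern.ipc.maxsockbuf) and charged against the
 * process's RLIMIT_SBSIZE via chgsbsize().
 */
#if 0
        /* userland view, assuming a socket fd */
        int sz = 1024 * 1024;
        socklen_t len = sizeof(sz);

        setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &sz, sizeof(sz));
        getsockopt(fd, SOL_SOCKET, SO_RCVBUF, &sz, &len);
        /* sz now reflects the (possibly clamped) reservation */
#endif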
/*
 * Some routines that return EOPNOTSUPP for entry points that are not
 * supported by a protocol.  Fill in as needed.
 */
void
pr_generic_notsupp(netmsg_t msg)
{
        lwkt_replymsg(&msg->lmsg, EOPNOTSUPP);
}

int
pru_sosend_notsupp(struct socket *so, struct sockaddr *addr, struct uio *uio,
		   struct mbuf *top, struct mbuf *control, int flags,
		   struct thread *td)
{
        if (top)
                m_freem(top);
        if (control)
                m_freem(control);
        return (EOPNOTSUPP);
}

int
pru_soreceive_notsupp(struct socket *so, struct sockaddr **paddr,
		      struct uio *uio, struct sockbuf *sio,
		      struct mbuf **controlp, int *flagsp)
{
        return (EOPNOTSUPP);
}

/*
 * This isn't really a ``null'' operation, but it's the default one
 * and doesn't do anything destructive.
 */
void
pru_sense_null(netmsg_t msg)
{
        msg->sense.nm_stat->st_blksize = msg->base.nm_so->so_snd.ssb_hiwat;
        lwkt_replymsg(&msg->lmsg, 0);
}

/*
 * Make a copy of a sockaddr in a malloced buffer of type M_SONAME.  Callers
 * of this routine assume that it always succeeds, so we have to use a
 * blockable allocation even though we might be called from a critical thread.
 */
struct sockaddr *
dup_sockaddr(const struct sockaddr *sa)
{
        struct sockaddr *sa2;

        sa2 = kmalloc(sa->sa_len, M_SONAME, M_INTWAIT);
        bcopy(sa, sa2, sa->sa_len);
        return (sa2);
}

/*
 * Create an external-format (``xsocket'') structure using the information
 * in the kernel-format socket structure pointed to by so.  This is done
 * to reduce the spew of irrelevant information over this interface,
 * to isolate user code from changes in the kernel structure, and
 * potentially to provide information-hiding if we decide that
 * some of this information should be hidden from users.
 */
void
sotoxsocket(struct socket *so, struct xsocket *xso)
{
        xso->xso_len = sizeof *xso;
        xso->xso_so = so;
        xso->so_type = so->so_type;
        xso->so_options = so->so_options;
        xso->so_linger = so->so_linger;
        xso->so_state = so->so_state;
        xso->so_pcb = so->so_pcb;
        xso->xso_protocol = so->so_proto->pr_protocol;
        xso->xso_family = so->so_proto->pr_domain->dom_family;
        xso->so_qlen = so->so_qlen;
        xso->so_incqlen = so->so_incqlen;
        xso->so_qlimit = so->so_qlimit;
        xso->so_timeo = so->so_timeo;
        xso->so_error = so->so_error;
        xso->so_pgid = so->so_sigio ? so->so_sigio->sio_pgid : 0;
        xso->so_oobmark = so->so_oobmark;
        ssbtoxsockbuf(&so->so_snd, &xso->so_snd);
        ssbtoxsockbuf(&so->so_rcv, &xso->so_rcv);
        xso->so_uid = so->so_cred->cr_uid;
}

/*
 * Here is the definition of some of the basic objects in the kern.ipc
 * branch of the MIB.
 */
SYSCTL_NODE(_kern, KERN_IPC, ipc, CTLFLAG_RW, 0, "IPC");
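/*
 * Illustrative sketch (editor's addition): userland access to the
 * kern.ipc MIB branch defined above, e.g. reading the maxsockbuf limit
 * published by the SYSCTL_OID below via sysctlbyname(3).  The OID is
 * exported with format "I", so an int read is appropriate.
 */
#if 0
        /* userland view */
        int maxsockbuf;
        size_t len = sizeof(maxsockbuf);

        if (sysctlbyname("kern.ipc.maxsockbuf", &maxsockbuf, &len,
                         NULL, 0) == 0)
                printf("kern.ipc.maxsockbuf = %d\n", maxsockbuf);
#endif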
/*
 * This takes the place of kern.maxsockbuf, which moved to kern.ipc.
 *
 * NOTE! sb_max only applies to user-created socket buffers.
 */
static int dummy;
SYSCTL_INT(_kern, KERN_DUMMY, dummy, CTLFLAG_RW, &dummy, 0, "");
SYSCTL_OID(_kern_ipc, KIPC_MAXSOCKBUF, maxsockbuf, CTLTYPE_INT|CTLFLAG_RW,
    &sb_max, 0, sysctl_handle_sb_max, "I", "Maximum socket buffer size");
SYSCTL_INT(_kern_ipc, OID_AUTO, maxsockets, CTLFLAG_RD,
    &maxsockets, 0, "Maximum number of sockets available");
SYSCTL_INT(_kern_ipc, KIPC_SOCKBUF_WASTE, sockbuf_waste_factor, CTLFLAG_RW,
    &sb_efficiency, 0,
    "Socket buffer limit scaler");

/*
 * Initialize maxsockets
 */
static void
init_maxsockets(void *ignored)
{
        TUNABLE_INT_FETCH("kern.ipc.maxsockets", &maxsockets);
        maxsockets = imax(maxsockets, imax(maxfiles, nmbclusters));
}
SYSINIT(param, SI_BOOT1_TUNABLES, SI_ORDER_ANY,
	init_maxsockets, NULL);
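/*
 * Illustrative note (editor's addition): because init_maxsockets() runs
 * at SI_BOOT1_TUNABLES, kern.ipc.maxsockets can only be raised through
 * a loader tunable, not at runtime (the sysctl above is CTLFLAG_RD).
 * For example, in /boot/loader.conf:
 *
 *	kern.ipc.maxsockets="262144"
 *
 * The value is still floored by maxfiles and nmbclusters at boot.
 */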