/*
 * Copyright (c) 2005 Jeffrey M. Hsu.  All rights reserved.
 * Copyright (c) 1982, 1986, 1988, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_socket2.c	8.1 (Berkeley) 6/10/93
 * $FreeBSD: src/sys/kern/uipc_socket2.c,v 1.55.2.17 2002/08/31 19:04:55 dwmalone Exp $
 */

#include "opt_param.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/domain.h>
#include <sys/file.h>			/* for maxfiles */
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/protosw.h>
#include <sys/resourcevar.h>
#include <sys/stat.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/socketops.h>
#include <sys/signalvar.h>
#include <sys/sysctl.h>
#include <sys/event.h>

#include <sys/thread2.h>
#include <sys/msgport2.h>
#include <sys/socketvar2.h>

#include <net/netisr2.h>

#ifndef KTR_SOWAKEUP
#define KTR_SOWAKEUP	KTR_ALL
#endif
KTR_INFO_MASTER(sowakeup);
KTR_INFO(KTR_SOWAKEUP, sowakeup, nconn_start, 0, "newconn sorwakeup start");
KTR_INFO(KTR_SOWAKEUP, sowakeup, nconn_end, 1, "newconn sorwakeup end");
KTR_INFO(KTR_SOWAKEUP, sowakeup, nconn_wakeupstart, 2, "newconn wakeup start");
KTR_INFO(KTR_SOWAKEUP, sowakeup, nconn_wakeupend, 3, "newconn wakeup end");
#define logsowakeup(name)	KTR_LOG(sowakeup_ ## name)

int maxsockets;

/*
 * Primitive routines for operating on sockets and socket buffers.
 */

u_long	sb_max = SB_MAX;
u_long	sb_max_adj =
    SB_MAX * MCLBYTES / (MSIZE + MCLBYTES);	/* adjusted sb_max */

static	u_long sb_efficiency = 8;	/* parameter for sbreserve() */
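
/*
 * Illustrative arithmetic (not compiled): sb_max_adj scales sb_max down
 * by the best-case ratio of payload to total mbuf-plus-cluster storage,
 * so that a buffer charged sb_max bytes of wired memory can still hold
 * sb_max_adj bytes of data.  Assuming the common values MSIZE = 256 and
 * MCLBYTES = 2048 (platform-dependent; these figures are assumptions):
 *
 *	sb_max_adj = sb_max * 2048 / (256 + 2048)
 *		   = sb_max * 8 / 9		(roughly 89% of sb_max)
 *
 * sysctl_handle_sb_max() later in this file recomputes the value
 * whenever kern.ipc.maxsockbuf is changed.
 */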
/************************************************************************
 *			  signalsockbuf procedures			*
 ************************************************************************/

/*
 * Wait for data to arrive at/drain from a socket buffer.
 *
 * NOTE: Caller must generally hold the ssb_lock (client side lock) since
 *	 WAIT/WAKEUP only works for one client at a time.
 *
 * NOTE: Caller always retries whatever operation it was waiting on.
 */
int
ssb_wait(struct signalsockbuf *ssb)
{
        uint32_t flags;
        int pflags;
        int error;

        pflags = (ssb->ssb_flags & SSB_NOINTR) ? 0 : PCATCH;

        for (;;) {
                flags = ssb->ssb_flags;
                cpu_ccfence();

                /*
                 * WAKEUP and WAIT interlock each other.  We can catch the
                 * race by checking to see if WAKEUP has already been set,
                 * and only setting WAIT if WAKEUP is clear.
                 */
                if (flags & SSB_WAKEUP) {
                        if (atomic_cmpset_int(&ssb->ssb_flags, flags,
                                              flags & ~SSB_WAKEUP)) {
                                error = 0;
                                break;
                        }
                        continue;
                }

                /*
                 * Only set WAIT if WAKEUP is clear.
                 */
                tsleep_interlock(&ssb->ssb_cc, pflags);
                if (atomic_cmpset_int(&ssb->ssb_flags, flags,
                                      flags | SSB_WAIT)) {
                        error = tsleep(&ssb->ssb_cc, pflags | PINTERLOCKED,
                                       "sbwait", ssb->ssb_timeo);
                        break;
                }
        }
        return (error);
}

/*
 * Lock a sockbuf already known to be locked by another thread; sleep
 * until the lock can be acquired and return any error from the sleep
 * (e.g. EINTR).
 */
int
_ssb_lock(struct signalsockbuf *ssb)
{
        uint32_t flags;
        int pflags;
        int error;

        pflags = (ssb->ssb_flags & SSB_NOINTR) ? 0 : PCATCH;

        for (;;) {
                flags = ssb->ssb_flags;
                cpu_ccfence();
                if (flags & SSB_LOCK) {
                        tsleep_interlock(&ssb->ssb_flags, pflags);
                        if (atomic_cmpset_int(&ssb->ssb_flags, flags,
                                              flags | SSB_WANT)) {
                                error = tsleep(&ssb->ssb_flags,
                                               pflags | PINTERLOCKED,
                                               "sblock", 0);
                                if (error)
                                        break;
                        }
                } else {
                        if (atomic_cmpset_int(&ssb->ssb_flags, flags,
                                              flags | SSB_LOCK)) {
                                lwkt_gettoken(&ssb->ssb_token);
                                error = 0;
                                break;
                        }
                }
        }
        return (error);
}
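
/*
 * Illustrative sketch (not compiled): the intended calling pattern for
 * the two routines above.  A client takes the client-side lock, then
 * loops, re-testing its condition after every ssb_wait(), because a
 * wakeup only means "something changed", not "your condition now holds".
 * ssb_lock()/ssb_unlock() and the M_WAITOK wait-flag convention are
 * assumed from the socketvar headers; the condition shown is a
 * hypothetical example.
 *
 *	if ((error = ssb_lock(ssb, M_WAITOK)) != 0)
 *		return (error);
 *	while (ssb->ssb_cc == 0) {		// hypothetical condition
 *		error = ssb_wait(ssb);		// 0, or EINTR under PCATCH
 *		if (error)
 *			break;			// caller retries the op
 *	}
 *	ssb_unlock(ssb);
 */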
/*
 * Fill in an external-format (``xsockbuf'') structure from a
 * signalsockbuf.  Note that the xsockbuf structure, since it is always
 * embedded in a socket, does not include a self pointer nor a length.
 * We make this entry point public in case some other mechanism needs it.
 */
void
ssbtoxsockbuf(struct signalsockbuf *ssb, struct xsockbuf *xsb)
{
        xsb->sb_cc = ssb->ssb_cc;
        xsb->sb_hiwat = ssb->ssb_hiwat;
        xsb->sb_mbcnt = ssb->ssb_mbcnt;
        xsb->sb_mbmax = ssb->ssb_mbmax;
        xsb->sb_lowat = ssb->ssb_lowat;
        xsb->sb_flags = ssb->ssb_flags;
        xsb->sb_timeo = ssb->ssb_timeo;
}

/************************************************************************
 *	Procedures which manipulate socket state flags, wakeups, etc.	*
 ************************************************************************
 *
 * Normal sequence from the active (originating) side is that
 * soisconnecting() is called during processing of connect() call, resulting
 * in an eventual call to soisconnected() if/when the connection is
 * established.  When the connection is torn down soisdisconnecting() is
 * called during processing of disconnect() call, and soisdisconnected() is
 * called when the connection to the peer is totally severed.
 *
 * The semantics of these routines are such that connectionless protocols
 * can call soisconnected() and soisdisconnected() only, bypassing the
 * in-progress calls when setting up a ``connection'' takes no time.
 *
 * From the passive side, a socket is created with two queues of sockets:
 * so_incomp for connections in progress and so_comp for connections
 * already made and awaiting user acceptance.  As a protocol is preparing
 * incoming connections, it creates a socket structure queued on so_incomp
 * by calling sonewconn().  When the connection is established,
 * soisconnected() is called, and transfers the socket structure to so_comp,
 * making it available to accept().
 *
 * If a socket is closed with sockets on either so_incomp or so_comp, these
 * sockets are dropped.
 *
 * If higher level protocols are implemented in the kernel, the wakeups
 * done here will sometimes cause software-interrupt process scheduling.
 */

void
soisconnecting(struct socket *so)
{
        soclrstate(so, SS_ISCONNECTED | SS_ISDISCONNECTING);
        sosetstate(so, SS_ISCONNECTING);
}

void
soisconnected(struct socket *so)
{
        struct socket *head;

        while ((head = so->so_head) != NULL) {
                lwkt_getpooltoken(head);
                if (so->so_head == head)
                        break;
                lwkt_relpooltoken(head);
        }

        soclrstate(so, SS_ISCONNECTING | SS_ISDISCONNECTING | SS_ISCONFIRMING);
        sosetstate(so, SS_ISCONNECTED);
        if (head && (so->so_state & SS_INCOMP)) {
                if ((so->so_options & SO_ACCEPTFILTER) != 0) {
                        so->so_upcall =
                            head->so_accf->so_accept_filter->accf_callback;
                        so->so_upcallarg = head->so_accf->so_accept_filter_arg;
                        atomic_set_int(&so->so_rcv.ssb_flags, SSB_UPCALL);
                        so->so_options &= ~SO_ACCEPTFILTER;
                        so->so_upcall(so, so->so_upcallarg, 0);
                        lwkt_relpooltoken(head);
                        return;
                }

                /*
                 * Listen sockets are not per-cpu.
                 */
                TAILQ_REMOVE(&head->so_incomp, so, so_list);
                head->so_incqlen--;
                TAILQ_INSERT_TAIL(&head->so_comp, so, so_list);
                head->so_qlen++;
                sosetstate(so, SS_COMP);
                soclrstate(so, SS_INCOMP);

                /*
                 * XXX head may be on a different protocol thread.
                 *     sorwakeup()->sowakeup() is hacked atm.
                 */
                sorwakeup(head);
                wakeup_one(&head->so_timeo);
        } else {
                wakeup(&so->so_timeo);
                sorwakeup(so);
                sowwakeup(so);
        }
        if (head)
                lwkt_relpooltoken(head);
}

void
soisdisconnecting(struct socket *so)
{
        soclrstate(so, SS_ISCONNECTING);
        sosetstate(so, SS_ISDISCONNECTING | SS_CANTRCVMORE | SS_CANTSENDMORE);
        wakeup((caddr_t)&so->so_timeo);
        sowwakeup(so);
        sorwakeup(so);
}

void
soisdisconnected(struct socket *so)
{
        soclrstate(so, SS_ISCONNECTING | SS_ISCONNECTED | SS_ISDISCONNECTING);
        sosetstate(so, SS_CANTRCVMORE | SS_CANTSENDMORE | SS_ISDISCONNECTED);
        wakeup((caddr_t)&so->so_timeo);
        sbdrop(&so->so_snd.sb, so->so_snd.ssb_cc);
        sowwakeup(so);
        sorwakeup(so);
}

void
soisreconnecting(struct socket *so)
{
        soclrstate(so, SS_ISDISCONNECTING | SS_ISDISCONNECTED |
                       SS_CANTRCVMORE | SS_CANTSENDMORE);
        sosetstate(so, SS_ISCONNECTING);
}

void
soisreconnected(struct socket *so)
{
        soclrstate(so, SS_ISDISCONNECTED | SS_CANTRCVMORE | SS_CANTSENDMORE);
        soisconnected(so);
}

/*
 * Set or change the message port a socket receives commands on.
 *
 * XXX
 */
void
sosetport(struct socket *so, lwkt_port_t port)
{
        so->so_port = port;
}
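
/*
 * Illustrative sketch (not compiled): how a connection-oriented protocol
 * is expected to drive the state routines above, per the banner comment.
 * The trigger events shown are TCP-flavored examples, not requirements.
 *
 *	soisconnecting(so);	// connect() issued, handshake started
 *	...
 *	soisconnected(so);	// handshake done; if so sits on a listen
 *				// socket's so_incomp queue this also moves
 *				// it to so_comp for accept()
 *	...
 *	soisdisconnecting(so);	// orderly teardown begun
 *	soisdisconnected(so);	// connection to the peer fully severed
 *
 * A connectionless protocol may skip straight to soisconnected() /
 * soisdisconnected(), as noted above.
 */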
/*
 * When an attempt at a new connection is noted on a socket which accepts
 * connections, sonewconn() is called.  If the connection is possible
 * (subject to space constraints, etc.) then we allocate a new structure,
 * properly linked into the data structure of the original socket, and
 * return this.  Connstatus may be 0, or SS_ISCONFIRMING, or
 * SS_ISCONNECTED.
 *
 * The new socket is returned with one ref and so_pcb assigned.
 * The reference is implied by so_pcb.
 */
struct socket *
sonewconn_faddr(struct socket *head, int connstatus,
    const struct sockaddr *faddr)
{
        struct socket *so;
        struct socket *sp;
        struct pru_attach_info ai;

        if (head->so_qlen > 3 * head->so_qlimit / 2)
                return (NULL);
        so = soalloc(1, head->so_proto);
        if (so == NULL)
                return (NULL);

        /*
         * Set the port prior to attaching the inpcb to the current
         * cpu's protocol thread (which should be the current thread
         * but might not be in all cases).  This serializes any pcb ops
         * which occur to our cpu allowing us to complete the attachment
         * without racing anything.
         */
        if (head->so_proto->pr_flags & PR_SYNC_PORT)
                sosetport(so, &netisr_sync_port);
        else
                sosetport(so, netisr_cpuport(mycpuid));
        if ((head->so_options & SO_ACCEPTFILTER) != 0)
                connstatus = 0;
        so->so_head = head;
        so->so_type = head->so_type;
        so->so_options = head->so_options & ~SO_ACCEPTCONN;
        so->so_linger = head->so_linger;

        /*
         * NOTE: Clearing NOFDREF implies referencing the so with
         *	 soreference().
         */
        so->so_state = head->so_state | SS_NOFDREF | SS_ASSERTINPROG;
        so->so_cred = crhold(head->so_cred);
        ai.sb_rlimit = NULL;
        ai.p_ucred = NULL;
        ai.fd_rdir = NULL;		/* jail code cruft XXX JH */

        /*
         * Reserve space and call pru_attach.  We can direct-call the
         * function since we're already in the protocol thread.
         */
        if (soreserve(so, head->so_snd.ssb_hiwat,
                      head->so_rcv.ssb_hiwat, NULL) ||
            so_pru_attach_direct(so, 0, &ai)) {
                so->so_head = NULL;
                soclrstate(so, SS_ASSERTINPROG);
                sofree(so);		/* remove implied pcb ref */
                return (NULL);
        }
        KKASSERT(((so->so_proto->pr_flags & PR_ASYNC_RCVD) == 0 &&
                  so->so_refs == 2) ||	/* attach + our base ref */
                 ((so->so_proto->pr_flags & PR_ASYNC_RCVD) &&
                  so->so_refs == 3));	/* + async rcvd ref */
        sofree(so);
        KKASSERT(so->so_port != NULL);
        so->so_rcv.ssb_lowat = head->so_rcv.ssb_lowat;
        so->so_snd.ssb_lowat = head->so_snd.ssb_lowat;
        so->so_rcv.ssb_timeo = head->so_rcv.ssb_timeo;
        so->so_snd.ssb_timeo = head->so_snd.ssb_timeo;

        if (head->so_rcv.ssb_flags & SSB_AUTOLOWAT)
                so->so_rcv.ssb_flags |= SSB_AUTOLOWAT;
        else
                so->so_rcv.ssb_flags &= ~SSB_AUTOLOWAT;

        if (head->so_snd.ssb_flags & SSB_AUTOLOWAT)
                so->so_snd.ssb_flags |= SSB_AUTOLOWAT;
        else
                so->so_snd.ssb_flags &= ~SSB_AUTOLOWAT;

        if (head->so_rcv.ssb_flags & SSB_AUTOSIZE)
                so->so_rcv.ssb_flags |= SSB_AUTOSIZE;
        else
                so->so_rcv.ssb_flags &= ~SSB_AUTOSIZE;

        if (head->so_snd.ssb_flags & SSB_AUTOSIZE)
                so->so_snd.ssb_flags |= SSB_AUTOSIZE;
        else
                so->so_snd.ssb_flags &= ~SSB_AUTOSIZE;

        /*
         * Save the faddr, if the information is provided and
         * the protocol can perform the saving operation.
         */
        if (faddr != NULL && so->so_proto->pr_usrreqs->pru_savefaddr != NULL)
                so->so_proto->pr_usrreqs->pru_savefaddr(so, faddr);

        lwkt_getpooltoken(head);
        if (connstatus) {
                TAILQ_INSERT_TAIL(&head->so_comp, so, so_list);
                sosetstate(so, SS_COMP);
                head->so_qlen++;
        } else {
                if (head->so_incqlen > head->so_qlimit) {
                        sp = TAILQ_FIRST(&head->so_incomp);
                        TAILQ_REMOVE(&head->so_incomp, sp, so_list);
                        head->so_incqlen--;
                        soclrstate(sp, SS_INCOMP);
                        soabort_async(sp, TRUE);
                }
                TAILQ_INSERT_TAIL(&head->so_incomp, so, so_list);
                sosetstate(so, SS_INCOMP);
                head->so_incqlen++;
        }
        lwkt_relpooltoken(head);
        if (connstatus) {
                /*
                 * XXX head may be on a different protocol thread.
                 *     sorwakeup()->sowakeup() is hacked atm.
                 */
                logsowakeup(nconn_start);
                sorwakeup(head);
                logsowakeup(nconn_end);

                logsowakeup(nconn_wakeupstart);
                wakeup((caddr_t)&head->so_timeo);
                logsowakeup(nconn_wakeupend);

                sosetstate(so, connstatus);
        }
        soclrstate(so, SS_ASSERTINPROG);
        return (so);
}

struct socket *
sonewconn(struct socket *head, int connstatus)
{
        return sonewconn_faddr(head, connstatus, NULL);
}
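
/*
 * Illustrative sketch (not compiled): typical protocol-side use of
 * sonewconn().  The surrounding logic is a generic example, not any
 * particular protocol's code.  A protocol that completes its handshake
 * later (e.g. TCP) passes connstatus == 0 so the new socket is queued
 * on so_incomp and calls soisconnected() afterward; passing
 * SS_ISCONNECTED queues it directly on so_comp for accept().
 *
 *	struct socket *so;
 *
 *	so = sonewconn(head, 0);  // embryonic, queued on so_incomp
 *	if (so == NULL)
 *		return;		  // queue limit hit or allocation failed
 *	...			  // run the handshake
 *	soisconnected(so);	  // moves it to so_comp
 */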
/*
 * socantsendmore() indicates that no more data will be sent on the
 * socket; it is normally applied to a socket by the protocol code when
 * the user informs the system that no more data is to be sent (e.g. the
 * PRU_SHUTDOWN case).  socantrcvmore() indicates that no more data will
 * be received, and will normally be applied to the socket by a protocol
 * when it detects that the peer will send no more data.  Data queued
 * for reading in the socket may yet be read.
 */
void
socantsendmore(struct socket *so)
{
        sosetstate(so, SS_CANTSENDMORE);
        sowwakeup(so);
}

void
socantrcvmore(struct socket *so)
{
        sosetstate(so, SS_CANTRCVMORE);
        sorwakeup(so);
}
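
/*
 * Illustrative sketch (not compiled): where the two calls above land in
 * a typical stream protocol's lifecycle, per the comment above.  The
 * trigger events are generic examples.
 *
 *	// user called shutdown(2) on the send side -> PRU_SHUTDOWN path
 *	socantsendmore(so);	// further writes fail; writers woken
 *
 *	// peer signalled end-of-stream (e.g. TCP FIN received)
 *	socantrcvmore(so);	// readers drain queued data, then see EOF
 */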
/*
 * Wakeup processes waiting on a socket buffer.  Do asynchronous notification
 * via SIGIO if the socket has the SS_ASYNC flag set.
 *
 * For users waiting on send/recv try to avoid unnecessary context switch
 * thrashing.  Particularly for senders of large buffers (needs to be
 * extended to sel and aio? XXX)
 *
 * WARNING! Can be called on a foreign socket from the wrong protocol
 *	    thread, e.g. it is called on the 'head' listen socket when
 *	    a new connection comes in.
 */
void
sowakeup(struct socket *so, struct signalsockbuf *ssb)
{
        struct kqinfo *kqinfo = &ssb->ssb_kq;
        uint32_t flags;

        /*
         * Atomically check the flags.  When no special features are being
         * used, WAIT is clear, and WAKEUP is already set, we can simply
         * return.  The upcoming synchronous waiter will not block.
         */
        flags = atomic_fetchadd_int(&ssb->ssb_flags, 0);
        if ((flags & SSB_NOTIFY_MASK) == 0) {
                if (flags & SSB_WAKEUP)
                        return;
        }

        /*
         * Check conditions, set the WAKEUP flag, and clear and signal if
         * the WAIT flag is found to be set.  This interlocks against the
         * client side.
         */
        for (;;) {
                long space;

                flags = ssb->ssb_flags;
                cpu_ccfence();
                if (ssb->ssb_flags & SSB_PREALLOC)
                        space = ssb_space_prealloc(ssb);
                else
                        space = ssb_space(ssb);

                if ((ssb == &so->so_snd && space >= ssb->ssb_lowat) ||
                    (ssb == &so->so_rcv && ssb->ssb_cc >= ssb->ssb_lowat) ||
                    (ssb == &so->so_snd && (so->so_state & SS_CANTSENDMORE)) ||
                    (ssb == &so->so_rcv && (so->so_state & SS_CANTRCVMORE))
                ) {
                        if (atomic_cmpset_int(&ssb->ssb_flags, flags,
                                          (flags | SSB_WAKEUP) & ~SSB_WAIT)) {
                                if (flags & SSB_WAIT)
                                        wakeup(&ssb->ssb_cc);
                                break;
                        }
                } else {
                        break;
                }
        }

        /*
         * Misc other events
         */
        if ((so->so_state & SS_ASYNC) && so->so_sigio != NULL)
                pgsigio(so->so_sigio, SIGIO, 0);
        if (ssb->ssb_flags & SSB_UPCALL)
                (*so->so_upcall)(so, so->so_upcallarg, M_NOWAIT);
        KNOTE(&kqinfo->ki_note, 0);

        /*
         * This is a bit of a hack.  Multiple threads can wind up scanning
         * ki_mlist concurrently due to the fact that this function can be
         * called on a foreign socket, so we can't afford to block here.
         *
         * We need the pool token for (so) (likely the listen socket if
         * SSB_MEVENT is set) because the predicate function may have
         * to access the accept queue.
         */
        if (ssb->ssb_flags & SSB_MEVENT) {
                struct netmsg_so_notify *msg, *nmsg;

                lwkt_getpooltoken(so);
                TAILQ_FOREACH_MUTABLE(msg, &kqinfo->ki_mlist, nm_list, nmsg) {
                        if (msg->nm_predicate(msg)) {
                                TAILQ_REMOVE(&kqinfo->ki_mlist, msg, nm_list);
                                lwkt_replymsg(&msg->base.lmsg,
                                              msg->base.lmsg.ms_error);
                        }
                }
                if (TAILQ_EMPTY(&ssb->ssb_kq.ki_mlist))
                        atomic_clear_int(&ssb->ssb_flags, SSB_MEVENT);
                lwkt_relpooltoken(so);
        }
}

/*
 * Socket buffer (struct signalsockbuf) utility routines.
 *
 * Each socket contains two socket buffers: one for sending data and
 * one for receiving data.  Each buffer contains a queue of mbufs,
 * information about the number of mbufs and amount of data in the
 * queue, and other fields allowing kevent()/select()/poll() support
 * and notification on data availability to be implemented.
 *
 * Data stored in a socket buffer is maintained as a list of records.
 * Each record is a list of mbufs chained together with the m_next
 * field.  Records are chained together with the m_nextpkt field.  The upper
 * level routine soreceive() expects the following conventions to be
 * observed when placing information in the receive buffer:
 *
 * 1. If the protocol requires each message be preceded by the sender's
 *    name, then a record containing that name must be present before
 *    any associated data (mbufs must be of type MT_SONAME).
 * 2. If the protocol supports the exchange of ``access rights'' (really
 *    just additional data associated with the message), and there are
 *    ``rights'' to be received, then a record containing this data
 *    should be present (mbufs must be of type MT_RIGHTS).
 * 3. If a name or rights record exists, then it must be followed by
 *    a data record, perhaps of zero length.
 */
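
/*
 * Illustrative sketch (not compiled): what one queued datagram looks
 * like under the conventions above, and how a protocol's input path
 * would typically append it.  ssb_appendaddr() is assumed to be the
 * usual wrapper that builds the MT_SONAME record in front of the data;
 * the variable names are hypothetical.
 *
 *	so_rcv:	[MT_SONAME] --m_next--> [MT_DATA ...]	(record 1)
 *	             |
 *	         m_nextpkt
 *	             v
 *	        [MT_SONAME] --m_next--> [MT_DATA ...]	(record 2)
 *
 *	if (ssb_appendaddr(&so->so_rcv, (struct sockaddr *)&from_addr,
 *			   m, NULL) == 0)
 *		m_freem(m);		// no room, drop the datagram
 *	else
 *		sorwakeup(so);		// notify readers
 */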
/*
 * Before using a new socket structure it is first necessary to reserve
 * buffer space for the socket, by calling soreserve().  This should commit
 * some of the available buffer space in the system buffer pool for the
 * socket (currently, it does nothing but enforce limits).  The space
 * should be released by calling ssb_release() when the socket is destroyed.
 */
int
soreserve(struct socket *so, u_long sndcc, u_long rcvcc, struct rlimit *rl)
{
        if (so->so_snd.ssb_lowat == 0)
                atomic_set_int(&so->so_snd.ssb_flags, SSB_AUTOLOWAT);
        if (ssb_reserve(&so->so_snd, sndcc, so, rl) == 0)
                goto bad;
        if (ssb_reserve(&so->so_rcv, rcvcc, so, rl) == 0)
                goto bad2;
        if (so->so_rcv.ssb_lowat == 0)
                so->so_rcv.ssb_lowat = 1;
        if (so->so_snd.ssb_lowat == 0)
                so->so_snd.ssb_lowat = MCLBYTES;
        if (so->so_snd.ssb_lowat > so->so_snd.ssb_hiwat)
                so->so_snd.ssb_lowat = so->so_snd.ssb_hiwat;
        return (0);
bad2:
        ssb_release(&so->so_snd, so);
bad:
        return (ENOBUFS);
}

static int
sysctl_handle_sb_max(SYSCTL_HANDLER_ARGS)
{
        int error = 0;
        u_long old_sb_max = sb_max;

        error = SYSCTL_OUT(req, arg1, sizeof(int));
        if (error || !req->newptr)
                return (error);
        error = SYSCTL_IN(req, arg1, sizeof(int));
        if (error)
                return (error);
        if (sb_max < MSIZE + MCLBYTES) {
                sb_max = old_sb_max;
                return (EINVAL);
        }
        sb_max_adj = (u_quad_t)sb_max * MCLBYTES / (MSIZE + MCLBYTES);
        return (0);
}

/*
 * Allot mbufs to a signalsockbuf.
 *
 * Attempt to scale mbmax so that mbcnt doesn't become limiting
 * if buffering efficiency is near the normal case.
 *
 * sb_max only applies to user-sockets (where rl != NULL).  It does
 * not apply to kernel sockets or kernel-controlled sockets.  Note
 * that NFS overrides the sockbuf limits created when nfsd creates
 * a socket.
 */
int
ssb_reserve(struct signalsockbuf *ssb, u_long cc, struct socket *so,
	    struct rlimit *rl)
{
        /*
         * rl will only be NULL when we're in an interrupt (e.g. in
         * tcp_input) or when called from netgraph (i.e. ngd_attach).
         */
        if (rl && cc > sb_max_adj)
                cc = sb_max_adj;
        if (!chgsbsize(so->so_cred->cr_uidinfo, &ssb->ssb_hiwat, cc,
                       rl ? rl->rlim_cur : RLIM_INFINITY)) {
                return (0);
        }
        if (rl)
                ssb->ssb_mbmax = min(cc * sb_efficiency, sb_max);
        else
                ssb->ssb_mbmax = cc * sb_efficiency;

        /*
         * AUTOLOWAT is set on send buffers and prevents large writes
         * from generating a huge number of context switches.
         */
        if (ssb->ssb_flags & SSB_AUTOLOWAT) {
                ssb->ssb_lowat = ssb->ssb_hiwat / 4;
                if (ssb->ssb_lowat < MCLBYTES)
                        ssb->ssb_lowat = MCLBYTES;
        }
        if (ssb->ssb_lowat > ssb->ssb_hiwat)
                ssb->ssb_lowat = ssb->ssb_hiwat;
        return (1);
}

/*
 * Free mbufs held by a socket, and reserved mbuf space.
 */
void
ssb_release(struct signalsockbuf *ssb, struct socket *so)
{
        sbflush(&ssb->sb);
        (void)chgsbsize(so->so_cred->cr_uidinfo, &ssb->ssb_hiwat, 0,
                        RLIM_INFINITY);
        ssb->ssb_mbmax = 0;
}
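
/*
 * Illustrative sketch (not compiled): a protocol attach routine is the
 * usual caller of soreserve(), as sonewconn_faddr() above demonstrates.
 * The space constants here are hypothetical; real protocols use their
 * own defaults and pass the rlimit from the attach info so user sockets
 * are charged against the sbsize resource limit via chgsbsize().
 *
 *	#define EXPROTO_SENDSPACE	(32 * 1024)	// hypothetical
 *	#define EXPROTO_RECVSPACE	(64 * 1024)	// hypothetical
 *
 *	error = soreserve(so, EXPROTO_SENDSPACE, EXPROTO_RECVSPACE,
 *			  ai->sb_rlimit);
 *	if (error)
 *		return (error);		// ENOBUFS: limits exceeded
 */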
/*
 * Some routines that return EOPNOTSUPP for entry points that are not
 * supported by a protocol.  Fill in as needed.
 */
void
pr_generic_notsupp(netmsg_t msg)
{
        lwkt_replymsg(&msg->lmsg, EOPNOTSUPP);
}

int
pru_sosend_notsupp(struct socket *so, struct sockaddr *addr, struct uio *uio,
		   struct mbuf *top, struct mbuf *control, int flags,
		   struct thread *td)
{
        if (top)
                m_freem(top);
        if (control)
                m_freem(control);
        return (EOPNOTSUPP);
}

int
pru_soreceive_notsupp(struct socket *so, struct sockaddr **paddr,
		      struct uio *uio, struct sockbuf *sio,
		      struct mbuf **controlp, int *flagsp)
{
        return (EOPNOTSUPP);
}

/*
 * This isn't really a ``null'' operation, but it's the default one
 * and doesn't do anything destructive.
 */
void
pru_sense_null(netmsg_t msg)
{
        msg->sense.nm_stat->st_blksize = msg->base.nm_so->so_snd.ssb_hiwat;
        lwkt_replymsg(&msg->lmsg, 0);
}

/*
 * Make a copy of a sockaddr in a malloced buffer of type M_SONAME.  Callers
 * of this routine assume that it always succeeds, so we have to use a
 * blockable allocation even though we might be called from a critical
 * context.
 */
struct sockaddr *
dup_sockaddr(const struct sockaddr *sa)
{
        struct sockaddr *sa2;

        sa2 = kmalloc(sa->sa_len, M_SONAME, M_INTWAIT);
        bcopy(sa, sa2, sa->sa_len);
        return (sa2);
}

/*
 * Create an external-format (``xsocket'') structure using the information
 * in the kernel-format socket structure pointed to by so.  This is done
 * to reduce the spew of irrelevant information over this interface,
 * to isolate user code from changes in the kernel structure, and
 * potentially to provide information-hiding if we decide that
 * some of this information should be hidden from users.
 */
void
sotoxsocket(struct socket *so, struct xsocket *xso)
{
        xso->xso_len = sizeof *xso;
        xso->xso_so = so;
        xso->so_type = so->so_type;
        xso->so_options = so->so_options;
        xso->so_linger = so->so_linger;
        xso->so_state = so->so_state;
        xso->so_pcb = so->so_pcb;
        xso->xso_protocol = so->so_proto->pr_protocol;
        xso->xso_family = so->so_proto->pr_domain->dom_family;
        xso->so_qlen = so->so_qlen;
        xso->so_incqlen = so->so_incqlen;
        xso->so_qlimit = so->so_qlimit;
        xso->so_timeo = so->so_timeo;
        xso->so_error = so->so_error;
        xso->so_pgid = so->so_sigio ? so->so_sigio->sio_pgid : 0;
        xso->so_oobmark = so->so_oobmark;
        ssbtoxsockbuf(&so->so_snd, &xso->so_snd);
        ssbtoxsockbuf(&so->so_rcv, &xso->so_rcv);
        xso->so_uid = so->so_cred->cr_uid;
}
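
/*
 * Illustrative sketch (not compiled): the usual dup_sockaddr() pattern
 * in a protocol's accept/receive path.  The M_INTWAIT allocation means
 * the caller never has to check for NULL; the copy is eventually freed
 * with the matching M_SONAME malloc type.  Variable names are
 * hypothetical.
 *
 *	*nam = dup_sockaddr((struct sockaddr *)&peer_sin);
 *	...
 *	kfree(*nam, M_SONAME);		// whoever consumes it frees it
 */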
/*
 * Here is the definition of some of the basic objects in the kern.ipc
 * branch of the MIB.
 */
SYSCTL_NODE(_kern, KERN_IPC, ipc, CTLFLAG_RW, 0, "IPC");

/*
 * This takes the place of kern.maxsockbuf, which moved to kern.ipc.
 *
 * NOTE! sb_max only applies to user-created socket buffers.
 */
static int dummy;
SYSCTL_INT(_kern, KERN_DUMMY, dummy, CTLFLAG_RW, &dummy, 0, "");
SYSCTL_OID(_kern_ipc, KIPC_MAXSOCKBUF, maxsockbuf, CTLTYPE_INT|CTLFLAG_RW,
    &sb_max, 0, sysctl_handle_sb_max, "I", "Maximum socket buffer size");
SYSCTL_INT(_kern_ipc, OID_AUTO, maxsockets, CTLFLAG_RD,
    &maxsockets, 0, "Maximum number of sockets available");
SYSCTL_INT(_kern_ipc, KIPC_SOCKBUF_WASTE, sockbuf_waste_factor, CTLFLAG_RW,
    &sb_efficiency, 0,
    "Socket buffer limit scaler");

/*
 * Initialize maxsockets
 */
static void
init_maxsockets(void *ignored)
{
        TUNABLE_INT_FETCH("kern.ipc.maxsockets", &maxsockets);
        maxsockets = imax(maxsockets, imax(maxfiles, nmbclusters));
}
SYSINIT(param, SI_BOOT1_TUNABLES, SI_ORDER_ANY,
    init_maxsockets, NULL);
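
/*
 * Illustrative usage (from userland, not part of this file): the knobs
 * defined above are tuned the usual loader-tunable/sysctl way.  The
 * values shown are arbitrary examples, not recommendations.
 *
 *	# /boot/loader.conf -- picked up by init_maxsockets() at boot
 *	kern.ipc.maxsockets="131072"
 *
 *	# at runtime; setting maxsockbuf also recomputes sb_max_adj
 *	sysctl kern.ipc.maxsockbuf=2097152
 *	sysctl kern.ipc.sockbuf_waste_factor=8
 */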