/*
 * Copyright (c) 2005 Jeffrey M. Hsu.  All rights reserved.
 * Copyright (c) 1982, 1986, 1988, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_socket2.c	8.1 (Berkeley) 6/10/93
 * $FreeBSD: src/sys/kern/uipc_socket2.c,v 1.55.2.17 2002/08/31 19:04:55 dwmalone Exp $
 * $DragonFly: src/sys/kern/uipc_socket2.c,v 1.26 2006/12/23 23:47:54 swildner Exp $
 */

#include "opt_param.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/domain.h>
#include <sys/file.h>		/* for maxfiles */
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/protosw.h>
#include <sys/resourcevar.h>
#include <sys/stat.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/signalvar.h>
#include <sys/sysctl.h>
#include <sys/aio.h>		/* for aio_swake proto */
#include <sys/event.h>

#include <sys/thread2.h>
#include <sys/msgport2.h>

int	maxsockets;

/*
 * Primitive routines for operating on sockets and socket buffers
 */

u_long	sb_max = SB_MAX;
u_long	sb_max_adj =
    SB_MAX * MCLBYTES / (MSIZE + MCLBYTES);	/* adjusted sb_max */

static u_long sb_efficiency = 8;	/* parameter for sbreserve() */

/*
 * Procedures to manipulate state flags of socket and do appropriate
 * wakeups.  The normal sequence from the active (originating) side is
 * that soisconnecting() is called during processing of the connect()
 * call, resulting in an eventual call to soisconnected() if/when the
 * connection is established.  When the connection is torn down,
 * soisdisconnecting() is called during processing of the disconnect()
 * call, and soisdisconnected() is called when the connection to the peer
 * is totally severed.  The semantics of these routines are such that
 * connectionless protocols can call soisconnected() and
 * soisdisconnected() only, bypassing the in-progress calls when setting
 * up a ``connection'' takes no time.
 *
 * From the passive side, a socket is created with two queues of sockets:
 * so_incomp for connections in progress and so_comp for connections
 * already made and awaiting user acceptance.  As a protocol is preparing
 * incoming connections, it creates a socket structure queued on so_incomp
 * by calling sonewconn().  When the connection is established,
 * soisconnected() is called, which transfers the socket structure to
 * so_comp, making it available to accept().
 *
 * If a socket is closed with sockets on either so_incomp or so_comp,
 * these sockets are dropped.
 *
 * If higher level protocols are implemented in the kernel, the wakeups
 * done here will sometimes cause software-interrupt process scheduling.
 */
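/*
 * A minimal, non-compiled sketch of the active-side sequence described
 * above, as a connection-oriented protocol might drive it; the call
 * sites and surrounding protocol state are hypothetical.
 */
#if 0
	/* user called connect(2); the protocol starts its handshake: */
	soisconnecting(so);
	...
	/* handshake completed; wakes anyone blocked in connect(2): */
	soisconnected(so);
	...
	/* user initiated a disconnect; teardown begins: */
	soisdisconnecting(so);
	...
	/* the connection to the peer is totally severed: */
	soisdisconnected(so);
#endif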
void
soisconnecting(struct socket *so)
{

	so->so_state &= ~(SS_ISCONNECTED|SS_ISDISCONNECTING);
	so->so_state |= SS_ISCONNECTING;
}

void
soisconnected(struct socket *so)
{
	struct socket *head = so->so_head;

	so->so_state &= ~(SS_ISCONNECTING|SS_ISDISCONNECTING|SS_ISCONFIRMING);
	so->so_state |= SS_ISCONNECTED;
	if (head && (so->so_state & SS_INCOMP)) {
		if ((so->so_options & SO_ACCEPTFILTER) != 0) {
			so->so_upcall = head->so_accf->so_accept_filter->accf_callback;
			so->so_upcallarg = head->so_accf->so_accept_filter_arg;
			so->so_rcv.sb_flags |= SB_UPCALL;
			so->so_options &= ~SO_ACCEPTFILTER;
			so->so_upcall(so, so->so_upcallarg, 0);
			return;
		}
		TAILQ_REMOVE(&head->so_incomp, so, so_list);
		head->so_incqlen--;
		so->so_state &= ~SS_INCOMP;
		TAILQ_INSERT_TAIL(&head->so_comp, so, so_list);
		head->so_qlen++;
		so->so_state |= SS_COMP;
		sorwakeup(head);
		wakeup_one(&head->so_timeo);
	} else {
		wakeup(&so->so_timeo);
		sorwakeup(so);
		sowwakeup(so);
	}
}

void
soisdisconnecting(struct socket *so)
{

	so->so_state &= ~SS_ISCONNECTING;
	so->so_state |= (SS_ISDISCONNECTING|SS_CANTRCVMORE|SS_CANTSENDMORE);
	wakeup((caddr_t)&so->so_timeo);
	sowwakeup(so);
	sorwakeup(so);
}

void
soisdisconnected(struct socket *so)
{

	so->so_state &= ~(SS_ISCONNECTING|SS_ISCONNECTED|SS_ISDISCONNECTING);
	so->so_state |= (SS_CANTRCVMORE|SS_CANTSENDMORE|SS_ISDISCONNECTED);
	wakeup((caddr_t)&so->so_timeo);
	sbdrop(&so->so_snd, so->so_snd.sb_cc);
	sowwakeup(so);
	sorwakeup(so);
}

/*
 * When an attempt at a new connection is noted on a socket
 * which accepts connections, sonewconn() is called.  If the
 * connection is possible (subject to space constraints, etc.)
 * then we allocate a new structure, properly linked into the
 * data structure of the original socket, and return this.
 * connstatus may be 0, SS_ISCONFIRMING, or SS_ISCONNECTED.
 */
struct socket *
sonewconn(struct socket *head, int connstatus)
{
	struct socket *so;
	struct pru_attach_info ai;

	if (head->so_qlen > 3 * head->so_qlimit / 2)
		return ((struct socket *)0);
	so = soalloc(1);
	if (so == NULL)
		return (NULL);
	if ((head->so_options & SO_ACCEPTFILTER) != 0)
		connstatus = 0;
	so->so_head = head;
	so->so_type = head->so_type;
	so->so_options = head->so_options &~ SO_ACCEPTCONN;
	so->so_linger = head->so_linger;
	so->so_state = head->so_state | SS_NOFDREF;
	so->so_proto = head->so_proto;
	so->so_timeo = head->so_timeo;
	so->so_cred = crhold(head->so_cred);
	ai.sb_rlimit = NULL;
	ai.p_ucred = NULL;
	ai.fd_rdir = NULL;		/* jail code cruft XXX JH */
	if (soreserve(so, head->so_snd.sb_hiwat, head->so_rcv.sb_hiwat, NULL) ||
	    /* Directly call function since we're already at protocol level. */
	    (*so->so_proto->pr_usrreqs->pru_attach)(so, 0, &ai)) {
		sodealloc(so);
		return ((struct socket *)0);
	}

	if (connstatus) {
		TAILQ_INSERT_TAIL(&head->so_comp, so, so_list);
		so->so_state |= SS_COMP;
		head->so_qlen++;
	} else {
		if (head->so_incqlen > head->so_qlimit) {
			struct socket *sp;
			sp = TAILQ_FIRST(&head->so_incomp);
			(void) soabort(sp);
		}
		TAILQ_INSERT_TAIL(&head->so_incomp, so, so_list);
		so->so_state |= SS_INCOMP;
		head->so_incqlen++;
	}
	if (connstatus) {
		sorwakeup(head);
		wakeup((caddr_t)&head->so_timeo);
		so->so_state |= connstatus;
	}
	return (so);
}
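/*
 * A hedged sketch of the passive-side flow, loosely modeled on how a
 * TCP-like protocol might use sonewconn(); the handshake steps and the
 * surrounding input path are illustrative only.
 */
#if 0
	/* a connection request arrived on listening socket 'head': */
	so = sonewconn(head, 0);
	if (so == NULL)
		return;			/* queue overflow or no resources */
	/* ... handshake proceeds; once it completes: */
	soisconnected(so);		/* moves so from so_incomp to
					 * so_comp, where accept(2) finds it */
#endif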
/*
 * socantsendmore() indicates that no more data will be sent on the
 * socket; it is normally applied to the socket by the protocol code
 * (in the PRU_SHUTDOWN case) when the user indicates that no more data
 * is to be sent.  socantrcvmore() indicates that no more data will be
 * received, and will normally be applied to the socket by a protocol
 * when it detects that the peer will send no more data.  Data queued
 * for reading in the socket may yet be read.
 */

void
socantsendmore(struct socket *so)
{

	so->so_state |= SS_CANTSENDMORE;
	sowwakeup(so);
}

void
socantrcvmore(struct socket *so)
{

	so->so_state |= SS_CANTRCVMORE;
	sorwakeup(so);
}

/*
 * Wait for data to arrive at/drain from a socket buffer.
 */
int
sbwait(struct sockbuf *sb)
{

	sb->sb_flags |= SB_WAIT;
	return (tsleep((caddr_t)&sb->sb_cc,
		       ((sb->sb_flags & SB_NOINTR) ? 0 : PCATCH),
		       "sbwait",
		       sb->sb_timeo));
}

/*
 * Lock a sockbuf already known to be locked; this is the contended path
 * behind the sblock() macro.  Sleep until the lock is released, take it,
 * and return any error from tsleep() (e.g. EINTR).
 */
int
sb_lock(struct sockbuf *sb)
{
	int error;

	while (sb->sb_flags & SB_LOCK) {
		sb->sb_flags |= SB_WANT;
		error = tsleep((caddr_t)&sb->sb_flags,
			       ((sb->sb_flags & SB_NOINTR) ? 0 : PCATCH),
			       "sblock", 0);
		if (error)
			return (error);
	}
	sb->sb_flags |= SB_LOCK;
	return (0);
}
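/*
 * Typical use of the sockbuf lock together with sbwait(), sketched in the
 * style of soreceive()-like code.  This assumes the sblock()/sbunlock()
 * helpers from <sys/socketvar.h>; illustrative, not compiled.
 */
#if 0
	error = sblock(&so->so_rcv, M_WAITOK);	/* may fall into sb_lock() */
	if (error)
		return (error);
	while (so->so_rcv.sb_cc == 0) {		/* nothing to read yet */
		error = sbwait(&so->so_rcv);	/* sleep until sowakeup() */
		if (error)
			break;
	}
	sbunlock(&so->so_rcv);
#endif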
/*
 * Wakeup processes waiting on a socket buffer.  Do asynchronous notification
 * via SIGIO if the socket has the SS_ASYNC flag set.
 */
void
sowakeup(struct socket *so, struct sockbuf *sb)
{
	struct selinfo *selinfo = &sb->sb_sel;

	selwakeup(selinfo);
	sb->sb_flags &= ~SB_SEL;
	if (sb->sb_flags & SB_WAIT) {
		sb->sb_flags &= ~SB_WAIT;
		wakeup((caddr_t)&sb->sb_cc);
	}
	if ((so->so_state & SS_ASYNC) && so->so_sigio != NULL)
		pgsigio(so->so_sigio, SIGIO, 0);
	if (sb->sb_flags & SB_UPCALL)
		(*so->so_upcall)(so, so->so_upcallarg, MB_DONTWAIT);
	if (sb->sb_flags & SB_AIO)
		aio_swake(so, sb);
	KNOTE(&selinfo->si_note, 0);
	if (sb->sb_flags & SB_MEVENT) {
		struct netmsg_so_notify *msg, *nmsg;

		TAILQ_FOREACH_MUTABLE(msg, &selinfo->si_mlist, nm_list, nmsg) {
			if (msg->nm_predicate((struct netmsg *)msg)) {
				TAILQ_REMOVE(&selinfo->si_mlist, msg, nm_list);
				lwkt_replymsg(&msg->nm_lmsg,
					      msg->nm_lmsg.ms_error);
			}
		}
		if (TAILQ_EMPTY(&sb->sb_sel.si_mlist))
			sb->sb_flags &= ~SB_MEVENT;
	}
}

/*
 * Socket buffer (struct sockbuf) utility routines.
 *
 * Each socket contains two socket buffers: one for sending data and
 * one for receiving data.  Each buffer contains a queue of mbufs,
 * information about the number of mbufs and amount of data in the
 * queue, and other fields allowing select() statements and notification
 * on data availability to be implemented.
 *
 * Data stored in a socket buffer is maintained as a list of records.
 * Each record is a list of mbufs chained together with the m_next
 * field.  Records are chained together with the m_nextpkt field.  The upper
 * level routine soreceive() expects the following conventions to be
 * observed when placing information in the receive buffer:
 *
 * 1. If the protocol requires each message be preceded by the sender's
 *    name, then a record containing that name must be present before
 *    any associated data (mbufs must be of type MT_SONAME).
 * 2. If the protocol supports the exchange of ``access rights'' (really
 *    just additional data associated with the message), and there are
 *    ``rights'' to be received, then a record containing this data
 *    should be present (mbufs must be of type MT_RIGHTS).
 * 3. If a name or rights record exists, then it must be followed by
 *    a data record, perhaps of zero length.
 *
 * Before using a new socket structure it is first necessary to reserve
 * buffer space for the socket by calling sbreserve().  This should commit
 * some of the available buffer space in the system buffer pool for the
 * socket (currently, it does nothing but enforce limits).  The space
 * should be released by calling sbrelease() when the socket is destroyed.
 */
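/*
 * Illustration of the record layout described above: records hang off
 * sb_mb via m_nextpkt, and the mbufs within one record are linked via
 * m_next.  A sketch (for exposition only) of visiting every mbuf:
 */
#if 0
	struct mbuf *rec, *m;

	for (rec = sb->sb_mb; rec != NULL; rec = rec->m_nextpkt) {
		/* rec may begin with an MT_SONAME or MT_RIGHTS mbuf */
		for (m = rec; m != NULL; m = m->m_next)
			kprintf("type %d len %d\n", m->m_type, m->m_len);
	}
#endif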
int
soreserve(struct socket *so, u_long sndcc, u_long rcvcc, struct rlimit *rl)
{
	if (sbreserve(&so->so_snd, sndcc, so, rl) == 0)
		goto bad;
	if (sbreserve(&so->so_rcv, rcvcc, so, rl) == 0)
		goto bad2;
	if (so->so_rcv.sb_lowat == 0)
		so->so_rcv.sb_lowat = 1;
	if (so->so_snd.sb_lowat == 0)
		so->so_snd.sb_lowat = MCLBYTES;
	if (so->so_snd.sb_lowat > so->so_snd.sb_hiwat)
		so->so_snd.sb_lowat = so->so_snd.sb_hiwat;
	return (0);
bad2:
	sbrelease(&so->so_snd, so);
bad:
	return (ENOBUFS);
}

static int
sysctl_handle_sb_max(SYSCTL_HANDLER_ARGS)
{
	int error = 0;
	u_long old_sb_max = sb_max;

	error = SYSCTL_OUT(req, arg1, sizeof(int));
	if (error || !req->newptr)
		return (error);
	error = SYSCTL_IN(req, arg1, sizeof(int));
	if (error)
		return (error);
	if (sb_max < MSIZE + MCLBYTES) {
		sb_max = old_sb_max;
		return (EINVAL);
	}
	sb_max_adj = (u_quad_t)sb_max * MCLBYTES / (MSIZE + MCLBYTES);
	return (0);
}

/*
 * Allot mbufs to a sockbuf.
 * Attempt to scale mbmax so that mbcnt doesn't become limiting
 * if buffering efficiency is near the normal case.
 */
int
sbreserve(struct sockbuf *sb, u_long cc, struct socket *so, struct rlimit *rl)
{
	/*
	 * rl will only be NULL when we're in an interrupt (e.g. in tcp_input)
	 * or when called from netgraph (i.e. ngd_attach).
	 */
	if (cc > sb_max_adj)
		return (0);
	if (!chgsbsize(so->so_cred->cr_uidinfo, &sb->sb_hiwat, cc,
		       rl ? rl->rlim_cur : RLIM_INFINITY)) {
		return (0);
	}
	sb->sb_mbmax = min(cc * sb_efficiency, sb_max);
	if (sb->sb_lowat > sb->sb_hiwat)
		sb->sb_lowat = sb->sb_hiwat;
	return (1);
}

/*
 * Free mbufs held by a socket, and reserved mbuf space.
 */
void
sbrelease(struct sockbuf *sb, struct socket *so)
{

	sbflush(sb);
	(void)chgsbsize(so->so_cred->cr_uidinfo, &sb->sb_hiwat, 0,
			RLIM_INFINITY);
	sb->sb_mbmax = 0;
}

/*
 * Routines to add and remove data from an mbuf queue.
 *
 * The routines sbappend() or sbappendrecord() are normally called to
 * append new mbufs to a socket buffer, after checking that adequate
 * space is available, comparing the function sbspace() with the amount
 * of data to be added.  sbappendrecord() differs from sbappend() in
 * that data supplied is treated as the beginning of a new record.
 * To place a sender's address, optional access rights, and data in a
 * socket receive buffer, sbappendaddr() should be used.  To place
 * access rights and data in a socket receive buffer, sbappendcontrol()
 * should be used.  In either case, the new data begins a new record.
 * Note that unlike sbappend() and sbappendrecord(), these routines check
 * for the caller that there will be enough space to store the data.
 * Each fails if there is not enough space, or if it cannot find mbufs
 * to store additional information in.
 *
 * Reliable protocols may use the socket send buffer to hold data
 * awaiting acknowledgement.  Such data is normally copied from the
 * socket send buffer with m_copy for output to a peer, and then removed
 * from the socket buffer with sbdrop() or sbdroprecord() when the data
 * is acknowledged by the peer.
 */
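/*
 * The usual caller-side pattern for the append routines described above,
 * sketched for a hypothetical stream protocol: check sbspace() first,
 * then append and wake the reader.  Not compiled; 'so' and 'm' are
 * assumed locals.
 */
#if 0
	if (sbspace(&so->so_rcv) < m->m_pkthdr.len) {
		m_freem(m);		/* no buffer space: drop the data */
	} else {
		sbappend(&so->so_rcv, m);
		sorwakeup(so);		/* notify sleepers and selectors */
	}
#endif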
/*
 * Append mbuf chain m to the last record in the socket buffer sb.
 * The additional space associated with the mbuf chain is recorded in sb.
 * Empty mbufs are discarded and mbufs are compacted where possible.
 */
void
sbappend(struct sockbuf *sb, struct mbuf *m)
{
	struct mbuf *n;

	if (m) {
		n = sb->sb_mb;
		if (n) {
			while (n->m_nextpkt)
				n = n->m_nextpkt;
			do {
				if (n->m_flags & M_EOR) {
					/* XXXXXX!!!! */
					sbappendrecord(sb, m);
					return;
				}
			} while (n->m_next && (n = n->m_next));
		}
		sbcompress(sb, m, n);
	}
}

/*
 * sbappendstream() is an optimized form of sbappend() for protocols
 * such as TCP that only have one record in the socket buffer, are
 * not PR_ATOMIC, and do not allow MT_CONTROL data.  A protocol that uses
 * sbappendstream() must use sbappendstream() exclusively.
 */
void
sbappendstream(struct sockbuf *sb, struct mbuf *m)
{
	KKASSERT(m->m_nextpkt == NULL);
	sbcompress(sb, m, sb->sb_lastmbuf);
}

#ifdef SOCKBUF_DEBUG

void
_sbcheck(struct sockbuf *sb)
{
	struct mbuf *m;
	struct mbuf *n = NULL;
	u_long len = 0, mbcnt = 0;

	for (m = sb->sb_mb; m; m = n) {
		n = m->m_nextpkt;
		if (n == NULL && sb->sb_lastrecord != m) {
			kprintf("sockbuf %p mismatched lastrecord %p vs %p\n",
				sb, sb->sb_lastrecord, m);
			panic("sbcheck1");
		}
		for (; m; m = m->m_next) {
			len += m->m_len;
			mbcnt += MSIZE;
			if (m->m_flags & M_EXT) /*XXX*/ /* pretty sure this is bogus */
				mbcnt += m->m_ext.ext_size;
			if (n == NULL && m->m_next == NULL) {
				if (sb->sb_lastmbuf != m) {
					kprintf("sockbuf %p mismatched lastmbuf %p vs %p\n",
						sb, sb->sb_lastmbuf, m);
					panic("sbcheck2");
				}
			}
		}
	}
	if (sb->sb_mb == NULL) {
		if (sb->sb_lastrecord != NULL) {
			kprintf("sockbuf %p is empty, lastrecord not NULL: %p\n",
				sb, sb->sb_lastrecord);
			panic("sbcheck3");
		}
		if (sb->sb_lastmbuf != NULL) {
			kprintf("sockbuf %p is empty, lastmbuf not NULL: %p\n",
				sb, sb->sb_lastmbuf);
			panic("sbcheck4");
		}
	}
	if (len != sb->sb_cc || mbcnt != sb->sb_mbcnt) {
		kprintf("sockbuf %p cc %ld != %ld || mbcnt %ld != %ld\n",
			sb, len, sb->sb_cc, mbcnt, sb->sb_mbcnt);
		panic("sbcheck5");
	}
}

#endif

/*
 * Same as sbappend(), except the mbuf chain begins a new record.
 */
void
sbappendrecord(struct sockbuf *sb, struct mbuf *m0)
{
	struct mbuf *firstmbuf;
	struct mbuf *secondmbuf;

	if (m0 == NULL)
		return;

	sbcheck(sb);

	/*
	 * Break the first mbuf off from the rest of the mbuf chain.
	 */
	firstmbuf = m0;
	secondmbuf = m0->m_next;
	m0->m_next = NULL;

	/*
	 * Insert the first mbuf of the m0 mbuf chain as the last record of
	 * the sockbuf.  Note this permits zero length records!  Keep the
	 * sockbuf state consistent.
	 */
	if (sb->sb_mb == NULL)
		sb->sb_mb = firstmbuf;
	else
		sb->sb_lastrecord->m_nextpkt = firstmbuf;
	sb->sb_lastrecord = firstmbuf;	/* update hint for new last record */
	sb->sb_lastmbuf = firstmbuf;	/* update hint for new last mbuf */

	if ((firstmbuf->m_flags & M_EOR) && (secondmbuf != NULL)) {
		/* propagate the EOR flag */
		firstmbuf->m_flags &= ~M_EOR;
		secondmbuf->m_flags |= M_EOR;
	}

	/*
	 * The succeeding call to sbcompress() omits accounting for
	 * the first mbuf, so do it here.
	 */
	sballoc(sb, firstmbuf);

	/* Compact the rest of the mbuf chain in after the first mbuf. */
	sbcompress(sb, secondmbuf, firstmbuf);
}
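/*
 * Sketch of record-oriented use: a protocol marks the end of a record on
 * the chain it queues, and sbappendrecord()/sbcompress() push M_EOR down
 * to the record's final mbuf.  The condition below is hypothetical.
 */
#if 0
	if (last_fragment_of_record)
		m->m_flags |= M_EOR;
	sbappendrecord(&so->so_rcv, m);		/* begins a new record */
	sorwakeup(so);
#endif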
#if 0
/*
 * As above except that OOB data is inserted at the beginning of the sockbuf,
 * but after any other OOB data.
 */
void
sbinsertoob(struct sockbuf *sb, struct mbuf *m0)
{
	struct mbuf *m;
	struct mbuf **mp;

	if (m0 == NULL)
		return;
	for (mp = &sb->sb_mb; *mp; mp = &((*mp)->m_nextpkt)) {
		m = *mp;
again:
		switch (m->m_type) {

		case MT_OOBDATA:
			continue;		/* WANT next train */

		case MT_CONTROL:
			m = m->m_next;
			if (m)
				goto again;	/* inspect THIS train further */
		}
		break;
	}
	/*
	 * Put the first mbuf on the queue.
	 * Note this permits zero length records.
	 */
	sballoc(sb, m0);
	m0->m_nextpkt = *mp;
	*mp = m0;
	if (m0->m_nextpkt == NULL)
		sb->sb_lastrecord = m0;

	m = m0->m_next;
	m0->m_next = NULL;
	if (m && (m0->m_flags & M_EOR)) {
		m0->m_flags &= ~M_EOR;
		m->m_flags |= M_EOR;
	}
	sbcompress(sb, m, m0);
}
#endif

/*
 * Append address and data, and optionally, control (ancillary) data
 * to the receive queue of a socket.  If present,
 * m0 must include a packet header with total length.
 * Returns 0 if no space in sockbuf or insufficient mbufs.
 */
int
sbappendaddr(struct sockbuf *sb, const struct sockaddr *asa, struct mbuf *m0,
	     struct mbuf *control)
{
	struct mbuf *m, *n;
	int space = asa->sa_len;

	if (m0 && (m0->m_flags & M_PKTHDR) == 0)
		panic("sbappendaddr");
	sbcheck(sb);

	if (m0)
		space += m0->m_pkthdr.len;
	for (n = control; n; n = n->m_next) {
		space += n->m_len;
		if (n->m_next == NULL)	/* keep pointer to last control buf */
			break;
	}
	if (space > sbspace(sb))
		return (0);
	if (asa->sa_len > MLEN)
		return (0);
	MGET(m, MB_DONTWAIT, MT_SONAME);
	if (m == NULL)
		return (0);
	KKASSERT(m->m_nextpkt == NULL);
	m->m_len = asa->sa_len;
	bcopy(asa, mtod(m, caddr_t), asa->sa_len);
	if (n)
		n->m_next = m0;		/* concatenate data to control */
	else
		control = m0;
	m->m_next = control;
	for (n = m; n; n = n->m_next)
		sballoc(sb, n);

	if (sb->sb_mb == NULL)
		sb->sb_mb = m;
	else
		sb->sb_lastrecord->m_nextpkt = m;
	sb->sb_lastrecord = m;
	while (m->m_next)
		m = m->m_next;
	sb->sb_lastmbuf = m;

	return (1);
}

/*
 * Append control information followed by data.
 * control must be non-null.
 */
int
sbappendcontrol(struct sockbuf *sb, struct mbuf *m0, struct mbuf *control)
{
	struct mbuf *n;
	u_int length, cmbcnt, m0mbcnt;

	KASSERT(control != NULL, ("sbappendcontrol"));
	KKASSERT(control->m_nextpkt == NULL);
	sbcheck(sb);

	length = m_countm(control, &n, &cmbcnt) + m_countm(m0, NULL, &m0mbcnt);
	if (length > sbspace(sb))
		return (0);

	n->m_next = m0;			/* concatenate data to control */

	if (sb->sb_mb == NULL)
		sb->sb_mb = control;
	else
		sb->sb_lastrecord->m_nextpkt = control;
	sb->sb_lastrecord = control;
	sb->sb_lastmbuf = m0;

	sb->sb_cc += length;
	sb->sb_mbcnt += cmbcnt + m0mbcnt;

	return (1);
}
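/*
 * How a datagram protocol typically delivers a packet along with its
 * source address, in the style of udp_input(); 'from' and 'opts' are
 * illustrative locals, not part of this file.
 */
#if 0
	struct sockaddr_in from;	/* filled in from the packet */
	struct mbuf *opts = NULL;	/* optional control data */

	if (sbappendaddr(&so->so_rcv, (struct sockaddr *)&from, m, opts)) {
		sorwakeup(so);
	} else {
		m_freem(m);		/* no space or no mbufs: drop */
		if (opts)
			m_freem(opts);
	}
#endif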
/*
 * Compress mbuf chain m into the socket buffer sb following mbuf tailm.
 * If tailm is null, the buffer is presumed empty.  Also, as a side-effect,
 * increment the sockbuf counts for each mbuf in the chain.
 */
void
sbcompress(struct sockbuf *sb, struct mbuf *m, struct mbuf *tailm)
{
	int eor = 0;
	struct mbuf *free_chain = NULL;

	sbcheck(sb);
	while (m) {
		struct mbuf *o;

		eor |= m->m_flags & M_EOR;
		/*
		 * Disregard empty mbufs as long as we don't encounter
		 * an end-of-record or there is a trailing mbuf of
		 * the same type to propagate the EOR flag to.
		 *
		 * Defer the m_free() call because it can block and break
		 * the atomicity of the sockbuf.
		 */
		if (m->m_len == 0 &&
		    (eor == 0 ||
		     (((o = m->m_next) || (o = tailm)) &&
		      o->m_type == m->m_type))) {
			o = m->m_next;
			m->m_next = free_chain;
			free_chain = m;
			m = o;
			continue;
		}

		/* See if we can coalesce with preceding mbuf. */
		if (tailm && !(tailm->m_flags & M_EOR) && M_WRITABLE(tailm) &&
		    m->m_len <= MCLBYTES / 4 &&	/* XXX: Don't copy too much */
		    m->m_len <= M_TRAILINGSPACE(tailm) &&
		    tailm->m_type == m->m_type) {
			bcopy(mtod(m, caddr_t),
			      mtod(tailm, caddr_t) + tailm->m_len,
			      (unsigned)m->m_len);
			tailm->m_len += m->m_len;
			sb->sb_cc += m->m_len;	/* update sb counter */
			o = m->m_next;
			m->m_next = free_chain;
			free_chain = m;
			m = o;
			continue;
		}

		/* Insert whole mbuf. */
		if (tailm == NULL) {
			KASSERT(sb->sb_mb == NULL,
				("sbcompress: sb_mb not NULL"));
			sb->sb_mb = m;		/* only mbuf in sockbuf */
			sb->sb_lastrecord = m;	/* new last record */
		} else {
			tailm->m_next = m;	/* tack m on following tailm */
		}
		sb->sb_lastmbuf = m;	/* update last mbuf hint */

		tailm = m;		/* just inserted mbuf becomes the new tail */
		m = m->m_next;		/* advance to next mbuf */
		tailm->m_next = NULL;	/* split inserted mbuf off from chain */

		/* update sb counters for just added mbuf */
		sballoc(sb, tailm);

		/* clear EOR on intermediate mbufs */
		tailm->m_flags &= ~M_EOR;
	}

	/*
	 * Propagate EOR to the last mbuf.
	 */
	if (eor) {
		if (tailm)
			tailm->m_flags |= eor;
		else
			kprintf("semi-panic: sbcompress");
	}

	/*
	 * Clean up any deferred frees.
	 */
	while (free_chain)
		free_chain = m_free(free_chain);

	sbcheck(sb);
}

/*
 * Free all mbufs in a sockbuf.
 * Check that all resources are reclaimed.
 */
void
sbflush(struct sockbuf *sb)
{

	if (sb->sb_flags & SB_LOCK)
		panic("sbflush: locked");
	while (sb->sb_mbcnt) {
		/*
		 * Don't call sbdrop(sb, 0) if the leading mbuf is non-empty:
		 * we would loop forever.  Panic instead.
		 */
		if (!sb->sb_cc && (sb->sb_mb == NULL || sb->sb_mb->m_len))
			break;
		sbdrop(sb, (int)sb->sb_cc);
	}
	KASSERT(!(sb->sb_cc || sb->sb_mb || sb->sb_mbcnt || sb->sb_lastmbuf),
		("sbflush: cc %ld || mb %p || mbcnt %ld || lastmbuf %p",
		 sb->sb_cc, sb->sb_mb, sb->sb_mbcnt, sb->sb_lastmbuf));
}

/*
 * Drop data from (the front of) a sockbuf.
 */
void
sbdrop(struct sockbuf *sb, int len)
{
	struct mbuf *m;
	struct mbuf *free_chain = NULL;

	sbcheck(sb);
	crit_enter();

	/*
	 * Remove mbufs from multiple records until the count is exhausted.
	 */
	m = sb->sb_mb;
	while (m && len > 0) {
		if (m->m_len > len) {
			m->m_len -= len;
			m->m_data += len;
			sb->sb_cc -= len;
			break;
		}
		len -= m->m_len;
		m = sbunlinkmbuf(sb, m, &free_chain);
		if (m == NULL && len)
			m = sb->sb_mb;
	}

	/*
	 * Remove any trailing 0-length mbufs in the current record.  If
	 * the last record for which data was removed is now empty, m will be
	 * NULL.
	 */
	while (m && m->m_len == 0) {
		m = sbunlinkmbuf(sb, m, &free_chain);
	}
	crit_exit();
	if (free_chain)
		m_freem(free_chain);
	sbcheck(sb);
}
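/*
 * The classic reliable-protocol use of sbdrop() mentioned earlier: once
 * the peer acknowledges some bytes, discard them from the front of the
 * send buffer.  Sketch only; 'acked' is a hypothetical local.
 */
#if 0
	sbdrop(&so->so_snd, acked);
	sowwakeup(so);		/* writers may have buffer space again */
#endif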
/*
 * Drop a record off the front of a sockbuf and move the next record
 * to the front.
 *
 * Must be called while holding a critical section.
 */
void
sbdroprecord(struct sockbuf *sb)
{
	struct mbuf *m;
	struct mbuf *n;

	sbcheck(sb);
	m = sb->sb_mb;
	if (m) {
		if ((sb->sb_mb = m->m_nextpkt) == NULL) {
			sb->sb_lastrecord = NULL;
			sb->sb_lastmbuf = NULL;
		}
		m->m_nextpkt = NULL;
		for (n = m; n; n = n->m_next)
			sbfree(sb, n);
		m_freem(m);
		sbcheck(sb);
	}
}

/*
 * Drop the first mbuf off the sockbuf and move the next mbuf to the front.
 * Currently only the head mbuf of the sockbuf may be dropped this way.
 *
 * The next mbuf in the same record as the mbuf being removed is returned
 * or NULL if the record is exhausted.  Note that other records may remain
 * in the sockbuf when NULL is returned.
 *
 * Must be called while holding a critical section.
 */
struct mbuf *
sbunlinkmbuf(struct sockbuf *sb, struct mbuf *m, struct mbuf **free_chain)
{
	struct mbuf *n;

	KKASSERT(sb->sb_mb == m);
	sbfree(sb, m);
	n = m->m_next;
	if (n) {
		sb->sb_mb = n;
		if (sb->sb_lastrecord == m)
			sb->sb_lastrecord = n;
		KKASSERT(sb->sb_lastmbuf != m);
		n->m_nextpkt = m->m_nextpkt;
	} else {
		sb->sb_mb = m->m_nextpkt;
		if (sb->sb_lastrecord == m) {
			KKASSERT(sb->sb_mb == NULL);
			sb->sb_lastrecord = NULL;
		}
		if (sb->sb_mb == NULL)
			sb->sb_lastmbuf = NULL;
	}
	m->m_nextpkt = NULL;
	if (free_chain) {
		m->m_next = *free_chain;
		*free_chain = m;
	} else {
		m->m_next = NULL;
	}
	return (n);
}

/*
 * Create a "control" mbuf containing the specified data
 * with the specified type for presentation on a socket buffer.
 */
struct mbuf *
sbcreatecontrol(caddr_t p, int size, int type, int level)
{
	struct cmsghdr *cp;
	struct mbuf *m;

	if (CMSG_SPACE((u_int)size) > MCLBYTES)
		return (NULL);
	m = m_getl(CMSG_SPACE((u_int)size), MB_DONTWAIT, MT_CONTROL, 0, NULL);
	if (m == NULL)
		return (NULL);
	m->m_len = CMSG_SPACE(size);
	cp = mtod(m, struct cmsghdr *);
	if (p != NULL)
		memcpy(CMSG_DATA(cp), p, size);
	cp->cmsg_len = CMSG_LEN(size);
	cp->cmsg_level = level;
	cp->cmsg_type = type;
	return (m);
}
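/*
 * Example use of sbcreatecontrol(), patterned loosely after SO_TIMESTAMP
 * handling in the BSD stacks; sketch only, 'src' and 'm' are assumed
 * locals of a hypothetical input routine.
 */
#if 0
	struct timeval tv;
	struct mbuf *control;

	microtime(&tv);
	control = sbcreatecontrol((caddr_t)&tv, sizeof(tv),
				  SCM_TIMESTAMP, SOL_SOCKET);
	if (control != NULL)
		sbappendaddr(&so->so_rcv, src, m, control);
#endif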
/*
 * Some routines that return EOPNOTSUPP for entry points that are not
 * supported by a protocol.  Fill in as needed.
 */
int
pru_accept_notsupp(struct socket *so, struct sockaddr **nam)
{
	return EOPNOTSUPP;
}

int
pru_connect_notsupp(struct socket *so, struct sockaddr *nam, struct thread *td)
{
	return EOPNOTSUPP;
}

int
pru_connect2_notsupp(struct socket *so1, struct socket *so2)
{
	return EOPNOTSUPP;
}

int
pru_control_notsupp(struct socket *so, u_long cmd, caddr_t data,
		    struct ifnet *ifp, struct thread *td)
{
	return EOPNOTSUPP;
}

int
pru_listen_notsupp(struct socket *so, struct thread *td)
{
	return EOPNOTSUPP;
}

int
pru_rcvd_notsupp(struct socket *so, int flags)
{
	return EOPNOTSUPP;
}

int
pru_rcvoob_notsupp(struct socket *so, struct mbuf *m, int flags)
{
	return EOPNOTSUPP;
}

/*
 * This isn't really a ``null'' operation, but it's the default one
 * and doesn't do anything destructive.
 */
int
pru_sense_null(struct socket *so, struct stat *sb)
{
	sb->st_blksize = so->so_snd.sb_hiwat;
	return 0;
}
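/*
 * These stubs fill slots in a protocol's pr_usrreqs vector.  A hedged
 * sketch of such wiring for a hypothetical protocol, using designated
 * initializers for clarity (field names per <sys/protosw.h>):
 */
#if 0
	static struct pr_usrreqs foo_usrreqs = {
		.pru_accept	= pru_accept_notsupp,	/* no accept(2) */
		.pru_connect2	= pru_connect2_notsupp,	/* no socketpair(2) */
		.pru_listen	= pru_listen_notsupp,	/* no listen(2) */
		.pru_rcvoob	= pru_rcvoob_notsupp,	/* no OOB data */
		.pru_sense	= pru_sense_null,	/* default fstat(2) */
		/* ... the remaining entries point at real handlers ... */
	};
#endif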
/*
 * Make a copy of a sockaddr in a malloced buffer of type M_SONAME.  Callers
 * of this routine assume that it always succeeds, so we have to use a
 * blockable allocation even though we might be called from a critical thread.
 */
struct sockaddr *
dup_sockaddr(const struct sockaddr *sa)
{
	struct sockaddr *sa2;

	sa2 = kmalloc(sa->sa_len, M_SONAME, M_INTWAIT);
	bcopy(sa, sa2, sa->sa_len);
	return (sa2);
}

/*
 * Create an external-format (``xsocket'') structure using the information
 * in the kernel-format socket structure pointed to by so.  This is done
 * to reduce the spew of irrelevant information over this interface,
 * to isolate user code from changes in the kernel structure, and
 * potentially to provide information-hiding if we decide that
 * some of this information should be hidden from users.
 */
void
sotoxsocket(struct socket *so, struct xsocket *xso)
{
	xso->xso_len = sizeof *xso;
	xso->xso_so = so;
	xso->so_type = so->so_type;
	xso->so_options = so->so_options;
	xso->so_linger = so->so_linger;
	xso->so_state = so->so_state;
	xso->so_pcb = so->so_pcb;
	xso->xso_protocol = so->so_proto->pr_protocol;
	xso->xso_family = so->so_proto->pr_domain->dom_family;
	xso->so_qlen = so->so_qlen;
	xso->so_incqlen = so->so_incqlen;
	xso->so_qlimit = so->so_qlimit;
	xso->so_timeo = so->so_timeo;
	xso->so_error = so->so_error;
	xso->so_pgid = so->so_sigio ? so->so_sigio->sio_pgid : 0;
	xso->so_oobmark = so->so_oobmark;
	sbtoxsockbuf(&so->so_snd, &xso->so_snd);
	sbtoxsockbuf(&so->so_rcv, &xso->so_rcv);
	xso->so_uid = so->so_cred->cr_uid;
}

/*
 * This does the same for sockbufs.  Note that the xsockbuf structure,
 * since it is always embedded in a socket, does not include a self
 * pointer nor a length.  We make this entry point public in case
 * some other mechanism needs it.
 */
void
sbtoxsockbuf(struct sockbuf *sb, struct xsockbuf *xsb)
{
	xsb->sb_cc = sb->sb_cc;
	xsb->sb_hiwat = sb->sb_hiwat;
	xsb->sb_mbcnt = sb->sb_mbcnt;
	xsb->sb_mbmax = sb->sb_mbmax;
	xsb->sb_lowat = sb->sb_lowat;
	xsb->sb_flags = sb->sb_flags;
	xsb->sb_timeo = sb->sb_timeo;
}

/*
 * Here is the definition of some of the basic objects in the kern.ipc
 * branch of the MIB.
 */
SYSCTL_NODE(_kern, KERN_IPC, ipc, CTLFLAG_RW, 0, "IPC");

/* This takes the place of kern.maxsockbuf, which moved to kern.ipc. */
static int dummy;
SYSCTL_INT(_kern, KERN_DUMMY, dummy, CTLFLAG_RW, &dummy, 0, "");
SYSCTL_OID(_kern_ipc, KIPC_MAXSOCKBUF, maxsockbuf, CTLTYPE_INT|CTLFLAG_RW,
    &sb_max, 0, sysctl_handle_sb_max, "I", "Maximum socket buffer size");
SYSCTL_INT(_kern_ipc, OID_AUTO, maxsockets, CTLFLAG_RD,
    &maxsockets, 0, "Maximum number of sockets available");
SYSCTL_INT(_kern_ipc, KIPC_SOCKBUF_WASTE, sockbuf_waste_factor, CTLFLAG_RW,
    &sb_efficiency, 0, "");

/*
 * Initialise maxsockets.
 */
static void
init_maxsockets(void *ignored)
{
	TUNABLE_INT_FETCH("kern.ipc.maxsockets", &maxsockets);
	maxsockets = imax(maxsockets, imax(maxfiles, nmbclusters));
}
SYSINIT(param, SI_SUB_TUNABLES, SI_ORDER_ANY, init_maxsockets, NULL);
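/*
 * For reference, a userland sketch of reading one of these MIB entries
 * with sysctlbyname(3); illustrative only and not part of the kernel.
 */
#if 0
	int maxsockbuf;
	size_t len = sizeof(maxsockbuf);

	if (sysctlbyname("kern.ipc.maxsockbuf", &maxsockbuf, &len,
			 NULL, 0) == 0)
		printf("kern.ipc.maxsockbuf: %d\n", maxsockbuf);
#endif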