/*	$NetBSD: uipc_mbuf.c,v 1.69 2003/06/23 11:02:06 martin Exp $	*/

/*-
 * Copyright (c) 1999, 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1982, 1986, 1988, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.
 * IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_mbuf.c	8.4 (Berkeley) 2/14/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uipc_mbuf.c,v 1.69 2003/06/23 11:02:06 martin Exp $");

#include "opt_mbuftrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#define MBTYPES
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/syslog.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/pool.h>
#include <sys/socket.h>
#include <sys/sysctl.h>

#include <net/if.h>

#include <uvm/uvm.h>

struct pool mbpool;		/* mbuf pool */
struct pool mclpool;		/* mbuf cluster pool */

struct pool_cache mbpool_cache;
struct pool_cache mclpool_cache;

struct mbstat mbstat;
int	max_linkhdr;
int	max_protohdr;
int	max_hdr;
int	max_datalen;

static int mb_ctor(void *, void *, int);

void	*mclpool_alloc(struct pool *, int);
void	mclpool_release(struct pool *, void *);

struct pool_allocator mclpool_allocator = {
	mclpool_alloc, mclpool_release, 0,
};

static struct mbuf *m_copym0 __P((struct mbuf *, int, int, int, int));

const char mclpool_warnmsg[] =
    "WARNING: mclpool limit reached; increase NMBCLUSTERS";

MALLOC_DEFINE(M_MBUF, "mbuf", "mbuf");

#ifdef MBUFTRACE
struct mownerhead mowners = LIST_HEAD_INITIALIZER(mowners);
struct mowner unknown_mowners[] = {
	{ "unknown", "free" },
	{ "unknown", "data" },
	{ "unknown", "header" },
	{ "unknown", "soname" },
	{ "unknown", "soopts" },
	{ "unknown", "ftable" },
	{ "unknown", "control" },
	{ "unknown", "oobdata" },
};
struct mowner revoked_mowner = { "revoked", "" };
#endif

/*
 * Initialize the mbuf allocator.
 */
void
mbinit(void)
{

	KASSERT(sizeof(struct _m_ext) <= MHLEN);
	KASSERT(sizeof(struct mbuf) == MSIZE);

	pool_init(&mbpool, msize, 0, 0, 0, "mbpl", NULL);
	pool_init(&mclpool, mclbytes, 0, 0, 0, "mclpl", &mclpool_allocator);

	pool_set_drain_hook(&mbpool, m_reclaim, NULL);
	pool_set_drain_hook(&mclpool, m_reclaim, NULL);

	pool_cache_init(&mbpool_cache, &mbpool, mb_ctor, NULL, NULL);
	pool_cache_init(&mclpool_cache, &mclpool, NULL, NULL, NULL);

	/*
	 * Set the hard limit on the mclpool to the number of
	 * mbuf clusters the kernel is to support.  Log the limit
	 * reached message at most once a minute.
	 */
	pool_sethardlimit(&mclpool, nmbclusters, mclpool_warnmsg, 60);

	/*
	 * Set a low water mark for both mbufs and clusters.  This should
	 * help ensure that they can be allocated in a memory starvation
	 * situation.  This is important for e.g. diskless systems which
	 * must allocate mbufs in order for the pagedaemon to clean pages.
	 */
	pool_setlowat(&mbpool, mblowat);
	pool_setlowat(&mclpool, mcllowat);

#ifdef MBUFTRACE
	{
		/*
		 * Attach the unknown mowners.
		 */
		int i;
		MOWNER_ATTACH(&revoked_mowner);
		for (i = sizeof(unknown_mowners)/sizeof(unknown_mowners[0]);
		     i-- > 0; )
			MOWNER_ATTACH(&unknown_mowners[i]);
	}
#endif
}
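/*
 * Example (illustrative sketch): under MBUFTRACE, a subsystem that wants
 * per-owner accounting declares and attaches its own mowner once at
 * initialization, then claims the mbufs it allocates.  The "xx"/"rx"
 * owner below is a placeholder, not a real subsystem:
 *
 *	static struct mowner xx_rx_mowner = { "xx", "rx" };
 *	...
 *	MOWNER_ATTACH(&xx_rx_mowner);
 *	...
 *	MGETHDR(m, M_DONTWAIT, MT_DATA);
 *	if (m != NULL)
 *		MCLAIM(m, &xx_rx_mowner);
 */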
int
sysctl_dombuf(int *name, u_int namelen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen)
{
	int error, newval;

	/* All sysctl names at this level are terminal. */
	if (namelen != 1)
		return (ENOTDIR);		/* overloaded */

	switch (name[0]) {
	case MBUF_MSIZE:
		return (sysctl_rdint(oldp, oldlenp, newp, msize));
	case MBUF_MCLBYTES:
		return (sysctl_rdint(oldp, oldlenp, newp, mclbytes));
	case MBUF_NMBCLUSTERS:
		/*
		 * If we have direct-mapped pool pages, we can adjust this
		 * number on the fly.  If not, we're limited by the size
		 * of mb_map, and cannot change this value.
		 *
		 * Note: we only allow the value to be increased, never
		 * decreased.
		 */
		if (mb_map == NULL) {
			newval = nmbclusters;
			error = sysctl_int(oldp, oldlenp, newp, newlen,
			    &newval);
			if (error != 0)
				return (error);
			if (newp != NULL) {
				if (newval >= nmbclusters) {
					nmbclusters = newval;
					pool_sethardlimit(&mclpool,
					    nmbclusters, mclpool_warnmsg, 60);
				} else
					error = EINVAL;
			}
			return (error);
		} else
			return (sysctl_rdint(oldp, oldlenp, newp,
			    nmbclusters));
	case MBUF_MBLOWAT:
	case MBUF_MCLLOWAT:
		/* New value must be >= 0. */
		newval = (name[0] == MBUF_MBLOWAT) ? mblowat : mcllowat;
		error = sysctl_int(oldp, oldlenp, newp, newlen, &newval);
		if (error != 0)
			return (error);
		if (newp != NULL) {
			if (newval >= 0) {
				if (name[0] == MBUF_MBLOWAT) {
					mblowat = newval;
					pool_setlowat(&mbpool, newval);
				} else {
					mcllowat = newval;
					pool_setlowat(&mclpool, newval);
				}
			} else
				error = EINVAL;
		}
		return (error);
	case MBUF_STATS:
		return (sysctl_rdstruct(oldp, oldlenp, newp,
		    &mbstat, sizeof(mbstat)));
#ifdef MBUFTRACE
	case MBUF_MOWNERS: {
		struct mowner *mo;
		size_t len = 0;
		if (newp != NULL)
			return (EPERM);
		error = 0;
		LIST_FOREACH(mo, &mowners, mo_link) {
			if (oldp != NULL) {
				if (*oldlenp - len < sizeof(*mo)) {
					error = ENOMEM;
					break;
				}
				error = copyout(mo, (caddr_t)oldp + len,
				    sizeof(*mo));
				if (error)
					break;
			}
			len += sizeof(*mo);
		}
		*oldlenp = len;
		return (error);
	}
#endif
	default:
		return (EOPNOTSUPP);
	}
	/* NOTREACHED */
}
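/*
 * Example (illustrative sketch): the names handled above live under
 * CTL_KERN.KERN_MBUF, so a userland monitor might fetch the mbuf
 * statistics roughly like this (error handling trimmed):
 *
 *	int mib[3] = { CTL_KERN, KERN_MBUF, MBUF_STATS };
 *	struct mbstat mbs;
 *	size_t len = sizeof(mbs);
 *
 *	if (sysctl(mib, 3, &mbs, &len, NULL, 0) == -1)
 *		err(1, "sysctl kern.mbuf.stats");
 */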
void *
mclpool_alloc(struct pool *pp, int flags)
{
	boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;

	return ((void *)uvm_km_alloc_poolpage1(mb_map, NULL, waitok));
}

void
mclpool_release(struct pool *pp, void *v)
{

	uvm_km_free_poolpage1(mb_map, (vaddr_t)v);
}

/*ARGSUSED*/
static int
mb_ctor(void *arg, void *object, int flags)
{
	struct mbuf *m = object;

#ifdef POOL_VTOPHYS
	m->m_paddr = POOL_VTOPHYS(m);
#else
	m->m_paddr = M_PADDR_INVALID;
#endif
	return (0);
}

void
m_reclaim(void *arg, int flags)
{
	struct domain *dp;
	struct protosw *pr;
	struct ifnet *ifp;
	int s = splvm();

	for (dp = domains; dp; dp = dp->dom_next)
		for (pr = dp->dom_protosw;
		     pr < dp->dom_protoswNPROTOSW; pr++)
			if (pr->pr_drain)
				(*pr->pr_drain)();
	for (ifp = TAILQ_FIRST(&ifnet); ifp; ifp = TAILQ_NEXT(ifp, if_list))
		if (ifp->if_drain)
			(*ifp->if_drain)(ifp);
	splx(s);
	mbstat.m_drain++;
}

/*
 * Space allocation routines.
 * These are also available as macros
 * for critical paths.
 */
struct mbuf *
m_get(int nowait, int type)
{
	struct mbuf *m;

	MGET(m, nowait, type);
	return (m);
}

struct mbuf *
m_gethdr(int nowait, int type)
{
	struct mbuf *m;

	MGETHDR(m, nowait, type);
	return (m);
}

struct mbuf *
m_getclr(int nowait, int type)
{
	struct mbuf *m;

	MGET(m, nowait, type);
	if (m == 0)
		return (0);
	memset(mtod(m, caddr_t), 0, MLEN);
	return (m);
}

void
m_clget(struct mbuf *m, int nowait)
{

	MCLGET(m, nowait);
}

struct mbuf *
m_free(struct mbuf *m)
{
	struct mbuf *n;

	MFREE(m, n);
	return (n);
}

void
m_freem(struct mbuf *m)
{
	struct mbuf *n;

	if (m == NULL)
		return;
	do {
		MFREE(m, n);
		m = n;
	} while (m);
}

#ifdef MBUFTRACE
void
m_claim(struct mbuf *m, struct mowner *mo)
{

	for (; m != NULL; m = m->m_next)
		MCLAIM(m, mo);
}
#endif

/*
 * Mbuffer utility routines.
 */

/*
 * Lesser-used path for M_PREPEND:
 * allocate new mbuf to prepend to chain,
 * copy junk along.
 */
struct mbuf *
m_prepend(struct mbuf *m, int len, int how)
{
	struct mbuf *mn;

	MGET(mn, how, m->m_type);
	if (mn == (struct mbuf *)NULL) {
		m_freem(m);
		return ((struct mbuf *)NULL);
	}
	if (m->m_flags & M_PKTHDR) {
		M_COPY_PKTHDR(mn, m);
		m->m_flags &= ~M_PKTHDR;
	} else {
		MCLAIM(mn, m->m_owner);
	}
	mn->m_next = m;
	m = mn;
	if (len < MHLEN)
		MH_ALIGN(m, len);
	m->m_len = len;
	return (m);
}
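/*
 * Example (illustrative sketch): callers normally reach m_prepend()
 * through the M_PREPEND() macro, which uses leading space in the first
 * mbuf when available and falls back to this routine otherwise.  On
 * failure the chain has already been freed and the pointer is NULL
 * ("struct hdr_xx" stands in for a real protocol header):
 *
 *	M_PREPEND(m, sizeof(struct hdr_xx), M_DONTWAIT);
 *	if (m == NULL)
 *		return (ENOBUFS);
 */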
/*
 * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
 * continuing for "len" bytes.  If len is M_COPYALL, copy to end of mbuf.
 * The wait parameter is a choice of M_WAIT/M_DONTWAIT from caller.
 */
int MCFail;

struct mbuf *
m_copym(struct mbuf *m, int off0, int len, int wait)
{

	return m_copym0(m, off0, len, wait, 0);	/* shallow copy on M_EXT */
}

struct mbuf *
m_dup(struct mbuf *m, int off0, int len, int wait)
{

	return m_copym0(m, off0, len, wait, 1);	/* deep copy */
}

static struct mbuf *
m_copym0(struct mbuf *m, int off0, int len, int wait, int deep)
{
	struct mbuf *n, **np;
	int off = off0;
	struct mbuf *top;
	int copyhdr = 0;

	if (off < 0 || len < 0)
		panic("m_copym: off %d, len %d", off, len);
	if (off == 0 && m->m_flags & M_PKTHDR)
		copyhdr = 1;
	while (off > 0) {
		if (m == 0)
			panic("m_copym: m == 0");
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	np = &top;
	top = 0;
	while (len > 0) {
		if (m == 0) {
			if (len != M_COPYALL)
				panic("m_copym: m == 0 and not COPYALL");
			break;
		}
		MGET(n, wait, m->m_type);
		*np = n;
		if (n == 0)
			goto nospace;
		MCLAIM(n, m->m_owner);
		if (copyhdr) {
			M_COPY_PKTHDR(n, m);
			if (len == M_COPYALL)
				n->m_pkthdr.len -= off0;
			else
				n->m_pkthdr.len = len;
			copyhdr = 0;
		}
		n->m_len = min(len, m->m_len - off);
		if (m->m_flags & M_EXT) {
			if (!deep) {
				n->m_data = m->m_data + off;
				n->m_ext = m->m_ext;
				MCLADDREFERENCE(m, n);
			} else {
				/*
				 * we are unsure about the way m was allocated.
				 * copy into multiple MCLBYTES cluster mbufs.
				 */
				MCLGET(n, wait);
				n->m_len = 0;
				n->m_len = M_TRAILINGSPACE(n);
				n->m_len = min(n->m_len, len);
				n->m_len = min(n->m_len, m->m_len - off);
				memcpy(mtod(n, caddr_t),
				    mtod(m, caddr_t) + off,
				    (unsigned)n->m_len);
			}
		} else
			memcpy(mtod(n, caddr_t), mtod(m, caddr_t) + off,
			    (unsigned)n->m_len);
		if (len != M_COPYALL)
			len -= n->m_len;
		off += n->m_len;
#ifdef DIAGNOSTIC
		if (off > m->m_len)
			panic("m_copym0 overrun");
#endif
		if (off == m->m_len) {
			m = m->m_next;
			off = 0;
		}
		np = &n->m_next;
	}
	if (top == 0)
		MCFail++;
	return (top);
nospace:
	m_freem(top);
	MCFail++;
	return (0);
}

/*
 * Copy an entire packet, including header (which must be present).
 * An optimization of the common case `m_copym(m, 0, M_COPYALL, how)'.
 */
struct mbuf *
m_copypacket(struct mbuf *m, int how)
{
	struct mbuf *top, *n, *o;

	MGET(n, how, m->m_type);
	top = n;
	if (!n)
		goto nospace;

	MCLAIM(n, m->m_owner);
	M_COPY_PKTHDR(n, m);
	n->m_len = m->m_len;
	if (m->m_flags & M_EXT) {
		n->m_data = m->m_data;
		n->m_ext = m->m_ext;
		MCLADDREFERENCE(m, n);
	} else {
		memcpy(mtod(n, char *), mtod(m, char *), n->m_len);
	}

	m = m->m_next;
	while (m) {
		MGET(o, how, m->m_type);
		if (!o)
			goto nospace;

		MCLAIM(o, m->m_owner);
		n->m_next = o;
		n = n->m_next;

		n->m_len = m->m_len;
		if (m->m_flags & M_EXT) {
			n->m_data = m->m_data;
			n->m_ext = m->m_ext;
			MCLADDREFERENCE(m, n);
		} else {
			memcpy(mtod(n, char *), mtod(m, char *), n->m_len);
		}

		m = m->m_next;
	}
	return top;
nospace:
	m_freem(top);
	MCFail++;
	return 0;
}
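/*
 * Example (illustrative sketch): the distinction between the copy
 * flavours above matters once the copy is written to.  m_copym() shares
 * cluster storage via MCLADDREFERENCE(), so modifying an M_EXT mbuf of
 * the copy would also modify the original; m_dup() produces private
 * storage that is safe to change:
 *
 *	n = m_copym(m, 0, M_COPYALL, M_DONTWAIT);	read-only use
 *	n = m_dup(m, 0, M_COPYALL, M_DONTWAIT);		may be modified
 */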
/*
 * Copy data from an mbuf chain starting "off" bytes from the beginning,
 * continuing for "len" bytes, into the indicated buffer.
 */
void
m_copydata(struct mbuf *m, int off, int len, caddr_t cp)
{
	unsigned count;

	if (off < 0 || len < 0)
		panic("m_copydata");
	while (off > 0) {
		if (m == 0)
			panic("m_copydata");
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	while (len > 0) {
		if (m == 0)
			panic("m_copydata");
		count = min(m->m_len - off, len);
		memcpy(cp, mtod(m, caddr_t) + off, count);
		len -= count;
		cp += count;
		off = 0;
		m = m->m_next;
	}
}

/*
 * Concatenate mbuf chain n to m.
 * Both chains must be of the same type (e.g. MT_DATA).
 * Any m_pkthdr is not updated.
 */
void
m_cat(struct mbuf *m, struct mbuf *n)
{

	while (m->m_next)
		m = m->m_next;
	while (n) {
		if (m->m_flags & M_EXT ||
		    m->m_data + m->m_len + n->m_len >= &m->m_dat[MLEN]) {
			/* just join the two chains */
			m->m_next = n;
			return;
		}
		/* splat the data from one into the other */
		memcpy(mtod(m, caddr_t) + m->m_len, mtod(n, caddr_t),
		    (u_int)n->m_len);
		m->m_len += n->m_len;
		n = m_free(n);
	}
}

void
m_adj(struct mbuf *mp, int req_len)
{
	int len = req_len;
	struct mbuf *m;
	int count;

	if ((m = mp) == NULL)
		return;
	if (len >= 0) {
		/*
		 * Trim from head.
		 */
		while (m != NULL && len > 0) {
			if (m->m_len <= len) {
				len -= m->m_len;
				m->m_len = 0;
				m = m->m_next;
			} else {
				m->m_len -= len;
				m->m_data += len;
				len = 0;
			}
		}
		m = mp;
		if (mp->m_flags & M_PKTHDR)
			m->m_pkthdr.len -= (req_len - len);
	} else {
		/*
		 * Trim from tail.  Scan the mbuf chain,
		 * calculating its length and finding the last mbuf.
		 * If the adjustment only affects this mbuf, then just
		 * adjust and return.  Otherwise, rescan and truncate
		 * after the remaining size.
		 */
		len = -len;
		count = 0;
		for (;;) {
			count += m->m_len;
			if (m->m_next == (struct mbuf *)0)
				break;
			m = m->m_next;
		}
		if (m->m_len >= len) {
			m->m_len -= len;
			if (mp->m_flags & M_PKTHDR)
				mp->m_pkthdr.len -= len;
			return;
		}
		count -= len;
		if (count < 0)
			count = 0;
		/*
		 * Correct length for chain is "count".
		 * Find the mbuf with last data, adjust its length,
		 * and toss data from remaining mbufs on chain.
		 */
		m = mp;
		if (m->m_flags & M_PKTHDR)
			m->m_pkthdr.len = count;
		for (; m; m = m->m_next) {
			if (m->m_len >= count) {
				m->m_len = count;
				break;
			}
			count -= m->m_len;
		}
		while (m->m_next)
			(m = m->m_next)->m_len = 0;
	}
}
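/*
 * Example (illustrative sketch): m_adj() is the usual way to strip an
 * already-parsed header from the front of a packet, or padding from the
 * tail; "hdrlen" and "padlen" are placeholder lengths:
 *
 *	m_adj(m, hdrlen);	trim hdrlen bytes from the head
 *	m_adj(m, -padlen);	trim padlen bytes from the tail
 */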
/*
 * Rearrange an mbuf chain so that len bytes are contiguous
 * and in the data area of an mbuf (so that mtod and dtom
 * will work for a structure of size len).  Returns the resulting
 * mbuf chain on success, frees it and returns null on failure.
 * If there is room, it will add up to max_protohdr-len extra bytes to the
 * contiguous region in an attempt to avoid being called next time.
 */
int MPFail;

struct mbuf *
m_pullup(struct mbuf *n, int len)
{
	struct mbuf *m;
	int count;
	int space;

	/*
	 * If first mbuf has no cluster, and has room for len bytes
	 * without shifting current data, pullup into it,
	 * otherwise allocate a new mbuf to prepend to the chain.
	 */
	if ((n->m_flags & M_EXT) == 0 &&
	    n->m_data + len < &n->m_dat[MLEN] && n->m_next) {
		if (n->m_len >= len)
			return (n);
		m = n;
		n = n->m_next;
		len -= m->m_len;
	} else {
		if (len > MHLEN)
			goto bad;
		MGET(m, M_DONTWAIT, n->m_type);
		if (m == 0)
			goto bad;
		MCLAIM(m, n->m_owner);
		m->m_len = 0;
		if (n->m_flags & M_PKTHDR) {
			M_COPY_PKTHDR(m, n);
			n->m_flags &= ~M_PKTHDR;
		}
	}
	space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
	do {
		count = min(min(max(len, max_protohdr), space), n->m_len);
		memcpy(mtod(m, caddr_t) + m->m_len, mtod(n, caddr_t),
		    (unsigned)count);
		len -= count;
		m->m_len += count;
		n->m_len -= count;
		space -= count;
		if (n->m_len)
			n->m_data += count;
		else
			n = m_free(n);
	} while (len > 0 && n);
	if (len > 0) {
		(void) m_free(m);
		goto bad;
	}
	m->m_next = n;
	return (m);
bad:
	m_freem(n);
	MPFail++;
	return (0);
}

/*
 * Like m_pullup(), except a new mbuf is always allocated, and we allow
 * the amount of empty space before the data in the new mbuf to be specified
 * (in the event that the caller expects to prepend later).
 */
int MSFail;

struct mbuf *
m_copyup(struct mbuf *n, int len, int dstoff)
{
	struct mbuf *m;
	int count, space;

	if (len > (MHLEN - dstoff))
		goto bad;
	MGET(m, M_DONTWAIT, n->m_type);
	if (m == NULL)
		goto bad;
	MCLAIM(m, n->m_owner);
	m->m_len = 0;
	if (n->m_flags & M_PKTHDR) {
		M_COPY_PKTHDR(m, n);
		n->m_flags &= ~M_PKTHDR;
	}
	m->m_data += dstoff;
	space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
	do {
		count = min(min(max(len, max_protohdr), space), n->m_len);
		memcpy(mtod(m, caddr_t) + m->m_len, mtod(n, caddr_t),
		    (unsigned)count);
		len -= count;
		m->m_len += count;
		n->m_len -= count;
		space -= count;
		if (n->m_len)
			n->m_data += count;
		else
			n = m_free(n);
	} while (len > 0 && n);
	if (len > 0) {
		(void) m_free(m);
		goto bad;
	}
	m->m_next = n;
	return (m);
bad:
	m_freem(n);
	MSFail++;
	return (NULL);
}
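/*
 * Example (illustrative sketch): a protocol input routine typically
 * uses m_pullup() (above) to make its header contiguous before casting
 * the data pointer; note that on failure the chain has already been
 * freed:
 *
 *	if (m->m_len < sizeof(struct ip) &&
 *	    (m = m_pullup(m, sizeof(struct ip))) == NULL)
 *		return;
 *	ip = mtod(m, struct ip *);
 */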
/*
 * Partition an mbuf chain in two pieces, returning the tail --
 * all but the first len0 bytes.  In case of failure, it returns NULL and
 * attempts to restore the chain to its original state.
 */
struct mbuf *
m_split(struct mbuf *m0, int len0, int wait)
{
	struct mbuf *m, *n;
	unsigned len = len0, remain, len_save;

	for (m = m0; m && len > m->m_len; m = m->m_next)
		len -= m->m_len;
	if (m == 0)
		return (0);
	remain = m->m_len - len;
	if (m0->m_flags & M_PKTHDR) {
		MGETHDR(n, wait, m0->m_type);
		if (n == 0)
			return (0);
		MCLAIM(n, m0->m_owner);
		n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
		n->m_pkthdr.len = m0->m_pkthdr.len - len0;
		len_save = m0->m_pkthdr.len;
		m0->m_pkthdr.len = len0;
		if (m->m_flags & M_EXT)
			goto extpacket;
		if (remain > MHLEN) {
			/* m can't be the lead packet */
			MH_ALIGN(n, 0);
			n->m_next = m_split(m, len, wait);
			if (n->m_next == 0) {
				(void) m_free(n);
				m0->m_pkthdr.len = len_save;
				return (0);
			} else
				return (n);
		} else
			MH_ALIGN(n, remain);
	} else if (remain == 0) {
		n = m->m_next;
		m->m_next = 0;
		return (n);
	} else {
		MGET(n, wait, m->m_type);
		if (n == 0)
			return (0);
		MCLAIM(n, m->m_owner);
		M_ALIGN(n, remain);
	}
extpacket:
	if (m->m_flags & M_EXT) {
		n->m_ext = m->m_ext;
		MCLADDREFERENCE(m, n);
		n->m_data = m->m_data + len;
	} else {
		memcpy(mtod(n, caddr_t), mtod(m, caddr_t) + len, remain);
	}
	n->m_len = remain;
	m->m_len = len;
	n->m_next = m->m_next;
	m->m_next = 0;
	return (n);
}

/*
 * Routine to copy from device local memory into mbufs.
 */
struct mbuf *
m_devget(char *buf, int totlen, int off0, struct ifnet *ifp,
    void (*copy)(const void *from, void *to, size_t len))
{
	struct mbuf *m;
	struct mbuf *top = 0, **mp = &top;
	int off = off0, len;
	char *cp;
	char *epkt;

	cp = buf;
	epkt = cp + totlen;
	if (off) {
		/*
		 * If 'off' is non-zero, packet is trailer-encapsulated,
		 * so we have to skip the type and length fields.
		 */
		cp += off + 2 * sizeof(u_int16_t);
		totlen -= 2 * sizeof(u_int16_t);
	}
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == 0)
		return (0);
	m->m_pkthdr.rcvif = ifp;
	m->m_pkthdr.len = totlen;
	m->m_len = MHLEN;

	while (totlen > 0) {
		if (top) {
			MGET(m, M_DONTWAIT, MT_DATA);
			if (m == 0) {
				m_freem(top);
				return (0);
			}
			m->m_len = MLEN;
		}
		len = min(totlen, epkt - cp);
		if (len >= MINCLSIZE) {
			MCLGET(m, M_DONTWAIT);
			if ((m->m_flags & M_EXT) == 0) {
				m_free(m);
				m_freem(top);
				return (0);
			}
			m->m_len = len = min(len, MCLBYTES);
		} else {
			/*
			 * Place initial small packet/header at end of mbuf.
			 */
			if (len < m->m_len) {
				if (top == 0 && len + max_linkhdr <= m->m_len)
					m->m_data += max_linkhdr;
				m->m_len = len;
			} else
				len = m->m_len;
		}
		if (copy)
			copy(cp, mtod(m, caddr_t), (size_t)len);
		else
			memcpy(mtod(m, caddr_t), cp, (size_t)len);
		cp += len;
		*mp = m;
		mp = &m->m_next;
		totlen -= len;
		if (cp == epkt)
			cp = buf;
	}
	return (top);
}
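/*
 * Example (illustrative sketch): a driver that cannot DMA directly into
 * mbufs typically copies a received frame out of board memory in one
 * call; passing a NULL copy function selects memcpy().  "sc_rxbuf" and
 * "framelen" are placeholders for driver state:
 *
 *	m = m_devget(sc_rxbuf, framelen, 0, ifp, NULL);
 *	if (m == NULL) {
 *		ifp->if_ierrors++;
 *		return;
 *	}
 */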
/*
 * Copy data from a buffer back into the indicated mbuf chain,
 * starting "off" bytes from the beginning, extending the mbuf
 * chain if necessary.
 */
void
m_copyback(struct mbuf *m0, int off, int len, caddr_t cp)
{
	int mlen;
	struct mbuf *m = m0, *n;
	int totlen = 0;

	if (m0 == 0)
		return;
	while (off > (mlen = m->m_len)) {
		off -= mlen;
		totlen += mlen;
		if (m->m_next == 0) {
			n = m_getclr(M_DONTWAIT, m->m_type);
			if (n == 0)
				goto out;
			n->m_len = min(MLEN, len + off);
			m->m_next = n;
		}
		m = m->m_next;
	}
	while (len > 0) {
		mlen = min(m->m_len - off, len);
		memcpy(mtod(m, caddr_t) + off, cp, (unsigned)mlen);
		cp += mlen;
		len -= mlen;
		mlen += off;
		off = 0;
		totlen += mlen;
		if (len == 0)
			break;
		if (m->m_next == 0) {
			n = m_get(M_DONTWAIT, m->m_type);
			if (n == 0)
				break;
			n->m_len = min(MLEN, len);
			m->m_next = n;
		}
		m = m->m_next;
	}
out:	if (((m = m0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen))
		m->m_pkthdr.len = totlen;
}

/*
 * Apply function f to the data in an mbuf chain starting "off" bytes from the
 * beginning, continuing for "len" bytes.
 */
int
m_apply(struct mbuf *m, int off, int len,
    int (*f)(void *, caddr_t, unsigned int), void *arg)
{
	unsigned int count;
	int rval;

	KASSERT(len >= 0);
	KASSERT(off >= 0);

	while (off > 0) {
		KASSERT(m != NULL);
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	while (len > 0) {
		KASSERT(m != NULL);
		count = min(m->m_len - off, len);

		rval = (*f)(arg, mtod(m, caddr_t) + off, count);
		if (rval)
			return (rval);

		len -= count;
		off = 0;
		m = m->m_next;
	}

	return (0);
}

/*
 * Return a pointer to mbuf/offset of location in mbuf chain.
 */
struct mbuf *
m_getptr(struct mbuf *m, int loc, int *off)
{

	while (loc >= 0) {
		/* Normal end of search */
		if (m->m_len > loc) {
			*off = loc;
			return (m);
		} else {
			loc -= m->m_len;

			if (m->m_next == NULL) {
				if (loc == 0) {
					/* Point at the end of valid data */
					*off = m->m_len;
					return (m);
				} else
					return (NULL);
			} else
				m = m->m_next;
		}
	}

	return (NULL);
}
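/*
 * Example (illustrative sketch): m_apply() lets a caller walk a region
 * of the chain without forcing it contiguous.  A byte-sum over "len"
 * bytes could be driven like this ("sum_bytes" is a hypothetical
 * callback matching f's signature):
 *
 *	static int
 *	sum_bytes(void *arg, caddr_t data, unsigned int len)
 *	{
 *		u_int32_t *sump = arg;
 *
 *		while (len-- > 0)
 *			*sump += (unsigned char)*data++;
 *		return (0);
 *	}
 *
 *	u_int32_t sum = 0;
 *	error = m_apply(m, off, len, sum_bytes, &sum);
 */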