/*	$NetBSD: uipc_mbuf.c,v 1.52 2001/01/14 02:06:22 thorpej Exp $	*/

/*-
 * Copyright (c) 1999 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1982, 1986, 1988, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_mbuf.c	8.4 (Berkeley) 2/14/95
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/map.h>
#define MBTYPES
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/syslog.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/pool.h>
#include <sys/socket.h>
#include <net/if.h>

#include <uvm/uvm_extern.h>

#include <sys/sysctl.h>

struct pool mbpool;		/* mbuf pool */
struct pool mclpool;		/* mbuf cluster pool */

struct mbstat mbstat;
int	max_linkhdr;
int	max_protohdr;
int	max_hdr;
int	max_datalen;

void	*mclpool_alloc __P((unsigned long, int, int));
void	mclpool_release __P((void *, unsigned long, int));
static struct mbuf *m_copym0 __P((struct mbuf *, int, int, int, int));

const char *mclpool_warnmsg =
    "WARNING: mclpool limit reached; increase NMBCLUSTERS";

/*
 * Initialize the mbuf allocator.
 */
void
mbinit()
{

	pool_init(&mbpool, msize, 0, 0, 0, "mbpl", 0, NULL, NULL, 0);
	pool_init(&mclpool, mclbytes, 0, 0, 0, "mclpl", 0, mclpool_alloc,
	    mclpool_release, 0);

	/*
	 * Set the hard limit on the mclpool to the number of
	 * mbuf clusters the kernel is to support.  Log the limit
	 * reached message at most once a minute.
	 */
	pool_sethardlimit(&mclpool, nmbclusters, mclpool_warnmsg, 60);

	/*
	 * Set a low water mark for both mbufs and clusters.  This should
	 * help ensure that they can be allocated in a memory starvation
	 * situation.  This is important for e.g. diskless systems which
	 * must allocate mbufs in order for the pagedaemon to clean pages.
	 */
	pool_setlowat(&mbpool, mblowat);
	pool_setlowat(&mclpool, mcllowat);
}
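
/*
 * Illustrative example (editor's sketch, not part of the original file):
 * the tunables above are exported through the kern.mbuf sysctl node and
 * handled by sysctl_dombuf() below.  Assuming the standard CTL_KERN and
 * KERN_MBUF MIB names, a userland reader would look roughly like:
 *
 *	int mib[3] = { CTL_KERN, KERN_MBUF, MBUF_NMBCLUSTERS };
 *	int nclusters;
 *	size_t len = sizeof(nclusters);
 *
 *	if (sysctl(mib, 3, &nclusters, &len, NULL, 0) == -1)
 *		err(1, "sysctl");
 */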

int
sysctl_dombuf(name, namelen, oldp, oldlenp, newp, newlen)
	int *name;
	u_int namelen;
	void *oldp;
	size_t *oldlenp;
	void *newp;
	size_t newlen;
{
	int error, newval;

	/* All sysctl names at this level are terminal. */
	if (namelen != 1)
		return (ENOTDIR);		/* overloaded */

	switch (name[0]) {
	case MBUF_MSIZE:
		return (sysctl_rdint(oldp, oldlenp, newp, msize));
	case MBUF_MCLBYTES:
		return (sysctl_rdint(oldp, oldlenp, newp, mclbytes));
	case MBUF_NMBCLUSTERS:
		/*
		 * If we have direct-mapped pool pages, we can adjust this
		 * number on the fly.  If not, we're limited by the size
		 * of mb_map, and cannot change this value.
		 *
		 * Note: we only allow the value to be increased, never
		 * decreased.
		 */
		if (mb_map == NULL) {
			newval = nmbclusters;
			error = sysctl_int(oldp, oldlenp, newp, newlen,
			    &newval);
			if (error != 0)
				return (error);
			if (newp != NULL) {
				if (newval >= nmbclusters) {
					nmbclusters = newval;
					pool_sethardlimit(&mclpool,
					    nmbclusters, mclpool_warnmsg, 60);
				} else
					error = EINVAL;
			}
			return (error);
		} else
			return (sysctl_rdint(oldp, oldlenp, newp,
			    nmbclusters));
	case MBUF_MBLOWAT:
	case MBUF_MCLLOWAT:
		/* New value must be >= 0. */
		newval = (name[0] == MBUF_MBLOWAT) ? mblowat : mcllowat;
		error = sysctl_int(oldp, oldlenp, newp, newlen, &newval);
		if (error != 0)
			return (error);
		if (newp != NULL) {
			if (newval >= 0) {
				if (name[0] == MBUF_MBLOWAT) {
					mblowat = newval;
					pool_setlowat(&mbpool, newval);
				} else {
					mcllowat = newval;
					pool_setlowat(&mclpool, newval);
				}
			} else
				error = EINVAL;
		}
		return (error);
	default:
		return (EOPNOTSUPP);
	}
	/* NOTREACHED */
}

void *
mclpool_alloc(sz, flags, mtype)
	unsigned long sz;
	int flags;
	int mtype;
{
	boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;

	return ((void *)uvm_km_alloc_poolpage1(mb_map, uvmexp.mb_object,
	    waitok));
}

void
mclpool_release(v, sz, mtype)
	void *v;
	unsigned long sz;
	int mtype;
{

	uvm_km_free_poolpage1(mb_map, (vaddr_t)v);
}

/*
 * When MGET fails, ask protocols to free space when short of memory,
 * then re-attempt to allocate an mbuf.
 */
struct mbuf *
m_retry(i, t)
	int i, t;
{
	struct mbuf *m;

	m_reclaim(i);
#define m_retry(i, t)	(struct mbuf *)0
	MGET(m, i, t);
#undef m_retry
	if (m != NULL)
		mbstat.m_wait++;
	else
		mbstat.m_drops++;
	return (m);
}

/*
 * As above; retry an MGETHDR.
 */
struct mbuf *
m_retryhdr(i, t)
	int i, t;
{
	struct mbuf *m;

	m_reclaim(i);
#define m_retryhdr(i, t)	(struct mbuf *)0
	MGETHDR(m, i, t);
#undef m_retryhdr
	if (m != NULL)
		mbstat.m_wait++;
	else
		mbstat.m_drops++;
	return (m);
}
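
/*
 * Note (editor's sketch, not part of the original file): MGET() in
 * <sys/mbuf.h> falls back on m_retry() when its pool allocation fails,
 * expanding to something along the lines of
 *
 *	(m) = pool_get(&mbpool, ...);
 *	if ((m) == NULL)
 *		(m) = m_retry((how), (type));
 *
 * The temporary #define inside m_retry() above therefore makes the
 * retried MGET() fail outright instead of recursing into m_retry()
 * a second time.
 */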

void
m_reclaim(how)
	int how;
{
	struct domain *dp;
	struct protosw *pr;
	struct ifnet *ifp;
	int s = splvm();

	for (dp = domains; dp; dp = dp->dom_next)
		for (pr = dp->dom_protosw;
		     pr < dp->dom_protoswNPROTOSW; pr++)
			if (pr->pr_drain)
				(*pr->pr_drain)();
	for (ifp = TAILQ_FIRST(&ifnet); ifp; ifp = TAILQ_NEXT(ifp, if_list))
		if (ifp->if_drain)
			(*ifp->if_drain)(ifp);
	splx(s);
	mbstat.m_drain++;
}

/*
 * Space allocation routines.
 * These are also available as macros
 * for critical paths.
 */
struct mbuf *
m_get(nowait, type)
	int nowait, type;
{
	struct mbuf *m;

	MGET(m, nowait, type);
	return (m);
}

struct mbuf *
m_gethdr(nowait, type)
	int nowait, type;
{
	struct mbuf *m;

	MGETHDR(m, nowait, type);
	return (m);
}

struct mbuf *
m_getclr(nowait, type)
	int nowait, type;
{
	struct mbuf *m;

	MGET(m, nowait, type);
	if (m == 0)
		return (0);
	memset(mtod(m, caddr_t), 0, MLEN);
	return (m);
}

struct mbuf *
m_free(m)
	struct mbuf *m;
{
	struct mbuf *n;

	MFREE(m, n);
	return (n);
}

void
m_freem(m)
	struct mbuf *m;
{
	struct mbuf *n;

	if (m == NULL)
		return;
	do {
		MFREE(m, n);
		m = n;
	} while (m);
}

/*
 * Mbuffer utility routines.
 */

/*
 * Lesser-used path for M_PREPEND:
 * allocate new mbuf to prepend to chain,
 * copy junk along.
 */
struct mbuf *
m_prepend(m, len, how)
	struct mbuf *m;
	int len, how;
{
	struct mbuf *mn;

	MGET(mn, how, m->m_type);
	if (mn == (struct mbuf *)NULL) {
		m_freem(m);
		return ((struct mbuf *)NULL);
	}
	if (m->m_flags & M_PKTHDR) {
		M_COPY_PKTHDR(mn, m);
		m->m_flags &= ~M_PKTHDR;
	}
	mn->m_next = m;
	m = mn;
	if (len < MHLEN)
		MH_ALIGN(m, len);
	m->m_len = len;
	return (m);
}
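
/*
 * Illustrative example (editor's sketch, not part of the original file):
 * callers normally use the M_PREPEND() macro, which falls back on
 * m_prepend() only when the first mbuf has no leading space.  Prepending
 * an IP header before output looks roughly like:
 *
 *	M_PREPEND(m, sizeof(struct ip), M_DONTWAIT);
 *	if (m == NULL)
 *		return (ENOBUFS);
 *	ip = mtod(m, struct ip *);
 */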

/*
 * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
 * continuing for "len" bytes.  If len is M_COPYALL, copy to the end of
 * the mbuf chain.
 * The wait parameter is a choice of M_WAIT/M_DONTWAIT from caller.
 */
int MCFail;

struct mbuf *
m_copym(m, off0, len, wait)
	struct mbuf *m;
	int off0, wait;
	int len;
{
	return m_copym0(m, off0, len, wait, 0);	/* shallow copy on M_EXT */
}

struct mbuf *
m_dup(m, off0, len, wait)
	struct mbuf *m;
	int off0, wait;
	int len;
{
	return m_copym0(m, off0, len, wait, 1);	/* deep copy */
}

static struct mbuf *
m_copym0(m, off0, len, wait, deep)
	struct mbuf *m;
	int off0, wait;
	int len;
	int deep;	/* deep copy */
{
	struct mbuf *n, **np;
	int off = off0;
	struct mbuf *top;
	int copyhdr = 0;

	if (off < 0 || len < 0)
		panic("m_copym: off %d, len %d", off, len);
	if (off == 0 && m->m_flags & M_PKTHDR)
		copyhdr = 1;
	while (off > 0) {
		if (m == 0)
			panic("m_copym: m == 0");
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	np = &top;
	top = 0;
	while (len > 0) {
		if (m == 0) {
			if (len != M_COPYALL)
				panic("m_copym: m == 0 and not COPYALL");
			break;
		}
		MGET(n, wait, m->m_type);
		*np = n;
		if (n == 0)
			goto nospace;
		if (copyhdr) {
			M_COPY_PKTHDR(n, m);
			if (len == M_COPYALL)
				n->m_pkthdr.len -= off0;
			else
				n->m_pkthdr.len = len;
			copyhdr = 0;
		}
		n->m_len = min(len, m->m_len - off);
		if (m->m_flags & M_EXT) {
			if (!deep) {
				n->m_data = m->m_data + off;
				n->m_ext = m->m_ext;
				MCLADDREFERENCE(m, n);
			} else {
				/*
				 * we are unsure about the way m was allocated.
				 * copy into multiple MCLBYTES cluster mbufs.
				 */
				MCLGET(n, wait);
				n->m_len = 0;
				n->m_len = M_TRAILINGSPACE(n);
				n->m_len = min(n->m_len, len);
				n->m_len = min(n->m_len, m->m_len - off);
				memcpy(mtod(n, caddr_t),
				    mtod(m, caddr_t) + off,
				    (unsigned)n->m_len);
			}
		} else
			memcpy(mtod(n, caddr_t), mtod(m, caddr_t) + off,
			    (unsigned)n->m_len);
		if (len != M_COPYALL)
			len -= n->m_len;
		off += n->m_len;
#ifdef DIAGNOSTIC
		if (off > m->m_len)
			panic("m_copym0 overrun");
#endif
		if (off == m->m_len) {
			m = m->m_next;
			off = 0;
		}
		np = &n->m_next;
	}
	if (top == 0)
		MCFail++;
	return (top);
nospace:
	m_freem(top);
	MCFail++;
	return (0);
}

/*
 * Copy an entire packet, including header (which must be present).
 * An optimization of the common case `m_copym(m, 0, M_COPYALL, how)'.
 */
struct mbuf *
m_copypacket(m, how)
	struct mbuf *m;
	int how;
{
	struct mbuf *top, *n, *o;

	MGET(n, how, m->m_type);
	top = n;
	if (!n)
		goto nospace;

	M_COPY_PKTHDR(n, m);
	n->m_len = m->m_len;
	if (m->m_flags & M_EXT) {
		n->m_data = m->m_data;
		n->m_ext = m->m_ext;
		MCLADDREFERENCE(m, n);
	} else {
		memcpy(mtod(n, char *), mtod(m, char *), n->m_len);
	}

	m = m->m_next;
	while (m) {
		MGET(o, how, m->m_type);
		if (!o)
			goto nospace;

		n->m_next = o;
		n = n->m_next;

		n->m_len = m->m_len;
		if (m->m_flags & M_EXT) {
			n->m_data = m->m_data;
			n->m_ext = m->m_ext;
			MCLADDREFERENCE(m, n);
		} else {
			memcpy(mtod(n, char *), mtod(m, char *), n->m_len);
		}

		m = m->m_next;
	}
	return top;
nospace:
	m_freem(top);
	MCFail++;
	return 0;
}

/*
 * Copy data from an mbuf chain starting "off" bytes from the beginning,
 * continuing for "len" bytes, into the indicated buffer.
 */
void
m_copydata(m, off, len, cp)
	struct mbuf *m;
	int off;
	int len;
	caddr_t cp;
{
	unsigned count;

	if (off < 0 || len < 0)
		panic("m_copydata");
	while (off > 0) {
		if (m == 0)
			panic("m_copydata");
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	while (len > 0) {
		if (m == 0)
			panic("m_copydata");
		count = min(m->m_len - off, len);
		memcpy(cp, mtod(m, caddr_t) + off, count);
		len -= count;
		cp += count;
		off = 0;
		m = m->m_next;
	}
}
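
/*
 * Illustrative example (editor's sketch, not part of the original file):
 * m_copydata() is the safe way to read a header that may be scattered
 * across several mbufs, e.g. peeking at an IP header without assuming
 * it is contiguous:
 *
 *	struct ip iphdr;
 *
 *	m_copydata(m, 0, sizeof(iphdr), (caddr_t)&iphdr);
 */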

/*
 * Concatenate mbuf chain n to m.
 * Both chains must be of the same type (e.g. MT_DATA).
 * Any m_pkthdr is not updated.
 */
void
m_cat(m, n)
	struct mbuf *m, *n;
{
	while (m->m_next)
		m = m->m_next;
	while (n) {
		if (m->m_flags & M_EXT ||
		    m->m_data + m->m_len + n->m_len >= &m->m_dat[MLEN]) {
			/* just join the two chains */
			m->m_next = n;
			return;
		}
		/* splat the data from one into the other */
		memcpy(mtod(m, caddr_t) + m->m_len, mtod(n, caddr_t),
		    (u_int)n->m_len);
		m->m_len += n->m_len;
		n = m_free(n);
	}
}

void
m_adj(mp, req_len)
	struct mbuf *mp;
	int req_len;
{
	int len = req_len;
	struct mbuf *m;
	int count;

	if ((m = mp) == NULL)
		return;
	if (len >= 0) {
		/*
		 * Trim from head.
		 */
		while (m != NULL && len > 0) {
			if (m->m_len <= len) {
				len -= m->m_len;
				m->m_len = 0;
				m = m->m_next;
			} else {
				m->m_len -= len;
				m->m_data += len;
				len = 0;
			}
		}
		m = mp;
		if (mp->m_flags & M_PKTHDR)
			m->m_pkthdr.len -= (req_len - len);
	} else {
		/*
		 * Trim from tail.  Scan the mbuf chain,
		 * calculating its length and finding the last mbuf.
		 * If the adjustment only affects this mbuf, then just
		 * adjust and return.  Otherwise, rescan and truncate
		 * after the remaining size.
		 */
		len = -len;
		count = 0;
		for (;;) {
			count += m->m_len;
			if (m->m_next == (struct mbuf *)0)
				break;
			m = m->m_next;
		}
		if (m->m_len >= len) {
			m->m_len -= len;
			if (mp->m_flags & M_PKTHDR)
				mp->m_pkthdr.len -= len;
			return;
		}
		count -= len;
		if (count < 0)
			count = 0;
		/*
		 * Correct length for chain is "count".
		 * Find the mbuf with last data, adjust its length,
		 * and toss data from remaining mbufs on chain.
		 */
		m = mp;
		if (m->m_flags & M_PKTHDR)
			m->m_pkthdr.len = count;
		for (; m; m = m->m_next) {
			if (m->m_len >= count) {
				m->m_len = count;
				break;
			}
			count -= m->m_len;
		}
		while (m->m_next)
			(m = m->m_next)->m_len = 0;
	}
}

/*
 * Rearrange an mbuf chain so that len bytes are contiguous
 * and in the data area of an mbuf (so that mtod and dtom
 * will work for a structure of size len).  Returns the resulting
 * mbuf chain on success, frees it and returns null on failure.
 * If there is room, it will add up to max_protohdr-len extra bytes to the
 * contiguous region in an attempt to avoid being called next time.
 */
int MPFail;

struct mbuf *
m_pullup(n, len)
	struct mbuf *n;
	int len;
{
	struct mbuf *m;
	int count;
	int space;

	/*
	 * If first mbuf has no cluster, and has room for len bytes
	 * without shifting current data, pullup into it,
	 * otherwise allocate a new mbuf to prepend to the chain.
	 */
	if ((n->m_flags & M_EXT) == 0 &&
	    n->m_data + len < &n->m_dat[MLEN] && n->m_next) {
		if (n->m_len >= len)
			return (n);
		m = n;
		n = n->m_next;
		len -= m->m_len;
	} else {
		if (len > MHLEN)
			goto bad;
		MGET(m, M_DONTWAIT, n->m_type);
		if (m == 0)
			goto bad;
		m->m_len = 0;
		if (n->m_flags & M_PKTHDR) {
			M_COPY_PKTHDR(m, n);
			n->m_flags &= ~M_PKTHDR;
		}
	}
	space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
	do {
		count = min(min(max(len, max_protohdr), space), n->m_len);
		memcpy(mtod(m, caddr_t) + m->m_len, mtod(n, caddr_t),
		    (unsigned)count);
		len -= count;
		m->m_len += count;
		n->m_len -= count;
		space -= count;
		if (n->m_len)
			n->m_data += count;
		else
			n = m_free(n);
	} while (len > 0 && n);
	if (len > 0) {
		(void) m_free(m);
		goto bad;
	}
	m->m_next = n;
	return (m);
bad:
	m_freem(n);
	MPFail++;
	return (0);
}
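
/*
 * Illustrative example (editor's sketch, not part of the original file):
 * the classic m_pullup() call in a protocol input routine, ensuring that
 * mtod() may be used on the first sizeof(struct ip) bytes:
 *
 *	if (m->m_len < sizeof(struct ip) &&
 *	    (m = m_pullup(m, sizeof(struct ip))) == NULL)
 *		return;
 *	ip = mtod(m, struct ip *);
 *
 * On failure m_pullup() has already freed the chain, so the caller
 * simply returns.
 */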

/*
 * Partition an mbuf chain in two pieces, returning the tail --
 * all but the first len0 bytes.  In case of failure, it returns NULL and
 * attempts to restore the chain to its original state.
 */
struct mbuf *
m_split(m0, len0, wait)
	struct mbuf *m0;
	int len0, wait;
{
	struct mbuf *m, *n;
	unsigned len = len0, remain, len_save;

	for (m = m0; m && len > m->m_len; m = m->m_next)
		len -= m->m_len;
	if (m == 0)
		return (0);
	remain = m->m_len - len;
	if (m0->m_flags & M_PKTHDR) {
		MGETHDR(n, wait, m0->m_type);
		if (n == 0)
			return (0);
		n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
		n->m_pkthdr.len = m0->m_pkthdr.len - len0;
		len_save = m0->m_pkthdr.len;
		m0->m_pkthdr.len = len0;
		if (m->m_flags & M_EXT)
			goto extpacket;
		if (remain > MHLEN) {
			/* m can't be the lead packet */
			MH_ALIGN(n, 0);
			n->m_next = m_split(m, len, wait);
			if (n->m_next == 0) {
				(void) m_free(n);
				m0->m_pkthdr.len = len_save;
				return (0);
			} else
				return (n);
		} else
			MH_ALIGN(n, remain);
	} else if (remain == 0) {
		n = m->m_next;
		m->m_next = 0;
		return (n);
	} else {
		MGET(n, wait, m->m_type);
		if (n == 0)
			return (0);
		M_ALIGN(n, remain);
	}
extpacket:
	if (m->m_flags & M_EXT) {
		n->m_ext = m->m_ext;
		MCLADDREFERENCE(m, n);
		n->m_data = m->m_data + len;
	} else {
		memcpy(mtod(n, caddr_t), mtod(m, caddr_t) + len, remain);
	}
	n->m_len = remain;
	m->m_len = len;
	n->m_next = m->m_next;
	m->m_next = 0;
	return (n);
}

/*
 * Routine to copy from device local memory into mbufs.
 */
struct mbuf *
m_devget(buf, totlen, off0, ifp, copy)
	char *buf;
	int totlen, off0;
	struct ifnet *ifp;
	void (*copy) __P((const void *from, void *to, size_t len));
{
	struct mbuf *m;
	struct mbuf *top = 0, **mp = &top;
	int off = off0, len;
	char *cp;
	char *epkt;

	cp = buf;
	epkt = cp + totlen;
	if (off) {
		/*
		 * If 'off' is non-zero, packet is trailer-encapsulated,
		 * so we have to skip the type and length fields.
		 */
		cp += off + 2 * sizeof(u_int16_t);
		totlen -= 2 * sizeof(u_int16_t);
	}
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == 0)
		return (0);
	m->m_pkthdr.rcvif = ifp;
	m->m_pkthdr.len = totlen;
	m->m_len = MHLEN;

	while (totlen > 0) {
		if (top) {
			MGET(m, M_DONTWAIT, MT_DATA);
			if (m == 0) {
				m_freem(top);
				return (0);
			}
			m->m_len = MLEN;
		}
		len = min(totlen, epkt - cp);
		if (len >= MINCLSIZE) {
			MCLGET(m, M_DONTWAIT);
			if ((m->m_flags & M_EXT) == 0) {
				m_free(m);
				m_freem(top);
				return (0);
			}
			m->m_len = len = min(len, MCLBYTES);
		} else {
			/*
			 * Place initial small packet/header at end of mbuf.
			 */
			if (len < m->m_len) {
				if (top == 0 && len + max_linkhdr <= m->m_len)
					m->m_data += max_linkhdr;
				m->m_len = len;
			} else
				len = m->m_len;
		}
		if (copy)
			copy(cp, mtod(m, caddr_t), (size_t)len);
		else
			memcpy(mtod(m, caddr_t), cp, (size_t)len);
		cp += len;
		*mp = m;
		mp = &m->m_next;
		totlen -= len;
		if (cp == epkt)
			cp = buf;
	}
	return (top);
}
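
/*
 * Illustrative example (editor's sketch, not part of the original file):
 * a typical driver receive path copies a frame out of board memory with
 * m_devget(), which also sets m_pkthdr.rcvif, and then hands the chain
 * to the interface's input routine:
 *
 *	m = m_devget(buf, pktlen, 0, ifp, NULL);
 *	if (m == NULL) {
 *		ifp->if_ierrors++;
 *		return;
 *	}
 *
 * "buf" and "pktlen" stand for the device's receive buffer and frame
 * length; passing a NULL copy function makes m_devget() use memcpy().
 */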

/*
 * Copy data from a buffer back into the indicated mbuf chain,
 * starting "off" bytes from the beginning, extending the mbuf
 * chain if necessary.
 */
void
m_copyback(m0, off, len, cp)
	struct mbuf *m0;
	int off;
	int len;
	caddr_t cp;
{
	int mlen;
	struct mbuf *m = m0, *n;
	int totlen = 0;

	if (m0 == 0)
		return;
	while (off > (mlen = m->m_len)) {
		off -= mlen;
		totlen += mlen;
		if (m->m_next == 0) {
			n = m_getclr(M_DONTWAIT, m->m_type);
			if (n == 0)
				goto out;
			n->m_len = min(MLEN, len + off);
			m->m_next = n;
		}
		m = m->m_next;
	}
	while (len > 0) {
		mlen = min(m->m_len - off, len);
		memcpy(mtod(m, caddr_t) + off, cp, (unsigned)mlen);
		cp += mlen;
		len -= mlen;
		mlen += off;
		off = 0;
		totlen += mlen;
		if (len == 0)
			break;
		if (m->m_next == 0) {
			n = m_get(M_DONTWAIT, m->m_type);
			if (n == 0)
				break;
			n->m_len = min(MLEN, len);
			m->m_next = n;
		}
		m = m->m_next;
	}
out:	if (((m = m0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen))
		m->m_pkthdr.len = totlen;
}
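
/*
 * Illustrative example (editor's sketch, not part of the original file):
 * m_copyback() overwrites bytes in place and grows the chain with
 * zero-filled mbufs when the packet is shorter than off + len.  Storing
 * a recomputed checksum at a hypothetical offset "sumoff" looks like:
 *
 *	u_int16_t sum;
 *
 *	sum = ...;				(recomputed checksum)
 *	m_copyback(m, sumoff, sizeof(sum), (caddr_t)&sum);
 */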