/*	$NetBSD: uipc_mbuf.c,v 1.47 2000/06/27 17:41:44 mrg Exp $	*/

/*-
 * Copyright (c) 1999 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1982, 1986, 1988, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_mbuf.c	8.4 (Berkeley) 2/14/95
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/map.h>
#define MBTYPES
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/syslog.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/pool.h>
#include <sys/socket.h>
#include <net/if.h>

#include <uvm/uvm_extern.h>

#include <sys/sysctl.h>

struct	pool mbpool;		/* mbuf pool */
struct	pool mclpool;		/* mbuf cluster pool */

struct	mbstat mbstat;
int	max_linkhdr;
int	max_protohdr;
int	max_hdr;
int	max_datalen;

void	*mclpool_alloc __P((unsigned long, int, int));
void	mclpool_release __P((void *, unsigned long, int));
static struct mbuf *m_copym0 __P((struct mbuf *, int, int, int, int));

const char *mclpool_warnmsg =
    "WARNING: mclpool limit reached; increase NMBCLUSTERS";

/*
 * Initialize the mbuf allocator.
 */
void
mbinit()
{

	pool_init(&mbpool, msize, 0, 0, 0, "mbpl", 0, NULL, NULL, 0);
	pool_init(&mclpool, mclbytes, 0, 0, 0, "mclpl", 0, mclpool_alloc,
	    mclpool_release, 0);

	/*
	 * Set the hard limit on the mclpool to the number of
	 * mbuf clusters the kernel is to support.  Log the
	 * limit-reached message at most once a minute.
	 */
	pool_sethardlimit(&mclpool, nmbclusters, mclpool_warnmsg, 60);

	/*
	 * Set a low water mark for both mbufs and clusters.  This should
	 * help ensure that they can be allocated in a memory starvation
	 * situation.  This is important for e.g. diskless systems which
	 * must allocate mbufs in order for the pagedaemon to clean pages.
	 */
	pool_setlowat(&mbpool, mblowat);
	pool_setlowat(&mclpool, mcllowat);
}
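
/*
 * Usage sketch (illustrative, not part of this file): the limits set
 * above are exported through the kern.mbuf sysctl node serviced by
 * sysctl_dombuf() below, so nmbclusters and the low-water marks can be
 * read, and where permitted raised, at run time from userland with
 * sysctl(3).  The MIB names used here are the conventional NetBSD
 * ones; treat the exact names as an assumption.
 *
 *	int mib[3] = { CTL_KERN, KERN_MBUF, MBUF_NMBCLUSTERS };
 *	int nclusters;
 *	size_t len = sizeof(nclusters);
 *
 *	if (sysctl(mib, 3, &nclusters, &len, NULL, 0) == -1)
 *		err(1, "sysctl");
 */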

int
sysctl_dombuf(name, namelen, oldp, oldlenp, newp, newlen)
	int *name;
	u_int namelen;
	void *oldp;
	size_t *oldlenp;
	void *newp;
	size_t newlen;
{
	int error, newval;

	/* All sysctl names at this level are terminal. */
	if (namelen != 1)
		return (ENOTDIR);		/* overloaded */

	switch (name[0]) {
	case MBUF_MSIZE:
		return (sysctl_rdint(oldp, oldlenp, newp, msize));
	case MBUF_MCLBYTES:
		return (sysctl_rdint(oldp, oldlenp, newp, mclbytes));
	case MBUF_NMBCLUSTERS:
		/*
		 * If we have direct-mapped pool pages, we can adjust this
		 * number on the fly.  If not, we're limited by the size
		 * of mb_map, and cannot change this value.
		 *
		 * Note: we only allow the value to be increased, never
		 * decreased.
		 */
		if (mb_map == NULL) {
			newval = nmbclusters;
			error = sysctl_int(oldp, oldlenp, newp, newlen,
			    &newval);
			if (error != 0)
				return (error);
			if (newp != NULL) {
				if (newval >= nmbclusters) {
					nmbclusters = newval;
					pool_sethardlimit(&mclpool,
					    nmbclusters, mclpool_warnmsg, 60);
				} else
					error = EINVAL;
			}
			return (error);
		} else
			return (sysctl_rdint(oldp, oldlenp, newp,
			    nmbclusters));
	case MBUF_MBLOWAT:
	case MBUF_MCLLOWAT:
		/* New value must be >= 0. */
		newval = (name[0] == MBUF_MBLOWAT) ? mblowat : mcllowat;
		error = sysctl_int(oldp, oldlenp, newp, newlen, &newval);
		if (error != 0)
			return (error);
		if (newp != NULL) {
			if (newval >= 0) {
				if (name[0] == MBUF_MBLOWAT) {
					mblowat = newval;
					pool_setlowat(&mbpool, newval);
				} else {
					mcllowat = newval;
					pool_setlowat(&mclpool, newval);
				}
			} else
				error = EINVAL;
		}
		return (error);
	default:
		return (EOPNOTSUPP);
	}
	/* NOTREACHED */
}

void *
mclpool_alloc(sz, flags, mtype)
	unsigned long sz;
	int flags;
	int mtype;
{
	boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;

	return ((void *)uvm_km_alloc_poolpage1(mb_map, uvmexp.mb_object,
	    waitok));
}

void
mclpool_release(v, sz, mtype)
	void *v;
	unsigned long sz;
	int mtype;
{

	uvm_km_free_poolpage1(mb_map, (vaddr_t)v);
}

/*
 * When MGET fails, ask protocols to free space when short of memory,
 * then re-attempt to allocate an mbuf.
 */
struct mbuf *
m_retry(i, t)
	int i, t;
{
	struct mbuf *m;

	m_reclaim(i);
	/*
	 * The #define below makes the m_retry() call embedded in the
	 * MGET macro expand to NULL, so a second failure cannot recurse
	 * back into this function.
	 */
#define m_retry(i, t)	(struct mbuf *)0
	MGET(m, i, t);
#undef m_retry
	if (m != NULL)
		mbstat.m_wait++;
	else
		mbstat.m_drops++;
	return (m);
}

/*
 * As above; retry an MGETHDR.
 */
struct mbuf *
m_retryhdr(i, t)
	int i, t;
{
	struct mbuf *m;

	m_reclaim(i);
	/* Same recursion-prevention trick as in m_retry() above. */
#define m_retryhdr(i, t)	(struct mbuf *)0
	MGETHDR(m, i, t);
#undef m_retryhdr
	if (m != NULL)
		mbstat.m_wait++;
	else
		mbstat.m_drops++;
	return (m);
}

void
m_reclaim(how)
	int how;
{
	struct domain *dp;
	struct protosw *pr;
	struct ifnet *ifp;
	int s = splimp();

	for (dp = domains; dp; dp = dp->dom_next)
		for (pr = dp->dom_protosw;
		     pr < dp->dom_protoswNPROTOSW; pr++)
			if (pr->pr_drain)
				(*pr->pr_drain)();
	for (ifp = TAILQ_FIRST(&ifnet); ifp; ifp = TAILQ_NEXT(ifp, if_list))
		if (ifp->if_drain)
			(*ifp->if_drain)(ifp);
	splx(s);
	mbstat.m_drain++;
}
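
/*
 * For context (an approximation only; the authoritative macro bodies
 * live in <sys/mbuf.h> and vary between releases): normal allocation
 * goes through the MGET/MGETHDR macros, which take from mbpool and
 * fall back to m_retry()/m_retryhdr() above when the pool is
 * exhausted.  A typical caller therefore only checks the result:
 *
 *	struct mbuf *m;
 *
 *	MGET(m, M_DONTWAIT, MT_DATA);
 *	if (m == NULL)
 *		return (ENOBUFS);
 */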

/*
 * Space allocation routines.
 * These are also available as macros
 * for critical paths.
 */
struct mbuf *
m_get(nowait, type)
	int nowait, type;
{
	struct mbuf *m;

	MGET(m, nowait, type);
	return (m);
}

struct mbuf *
m_gethdr(nowait, type)
	int nowait, type;
{
	struct mbuf *m;

	MGETHDR(m, nowait, type);
	return (m);
}

struct mbuf *
m_getclr(nowait, type)
	int nowait, type;
{
	struct mbuf *m;

	MGET(m, nowait, type);
	if (m == 0)
		return (0);
	memset(mtod(m, caddr_t), 0, MLEN);
	return (m);
}

struct mbuf *
m_free(m)
	struct mbuf *m;
{
	struct mbuf *n;

	MFREE(m, n);
	return (n);
}

void
m_freem(m)
	struct mbuf *m;
{
	struct mbuf *n;

	if (m == NULL)
		return;
	if ((m->m_flags & M_PKTHDR) != 0 && m->m_pkthdr.aux) {
		m_freem(m->m_pkthdr.aux);
		m->m_pkthdr.aux = NULL;
	}
	do {
		MFREE(m, n);
		m = n;
	} while (m);
}

/*
 * Mbuffer utility routines.
 */

/*
 * Lesser-used path for M_PREPEND:
 * allocate new mbuf to prepend to chain,
 * copy junk along.
 */
struct mbuf *
m_prepend(m, len, how)
	struct mbuf *m;
	int len, how;
{
	struct mbuf *mn;

	MGET(mn, how, m->m_type);
	if (mn == (struct mbuf *)NULL) {
		m_freem(m);
		return ((struct mbuf *)NULL);
	}
	if (m->m_flags & M_PKTHDR) {
		M_COPY_PKTHDR(mn, m);
		m->m_flags &= ~M_PKTHDR;
	}
	mn->m_next = m;
	m = mn;
	if (len < MHLEN)
		MH_ALIGN(m, len);
	m->m_len = len;
	return (m);
}
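
/*
 * Usage sketch (illustrative only): callers normally use the M_PREPEND
 * macro, which takes the fast path when leading space is already
 * available and falls back to m_prepend() above otherwise.  A protocol
 * output routine typically prepends its header like this; "struct
 * myhdr" is a hypothetical header type:
 *
 *	M_PREPEND(m, sizeof(struct myhdr), M_DONTWAIT);
 *	if (m == NULL)
 *		return (ENOBUFS);	(the chain was freed on failure)
 */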

/*
 * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
 * continuing for "len" bytes.  If len is M_COPYALL, copy to end of mbuf.
 * The wait parameter is a choice of M_WAIT/M_DONTWAIT from caller.
 */
int MCFail;

struct mbuf *
m_copym(m, off0, len, wait)
	struct mbuf *m;
	int off0, wait;
	int len;
{
	return m_copym0(m, off0, len, wait, 0);	/* shallow copy on M_EXT */
}

struct mbuf *
m_dup(m, off0, len, wait)
	struct mbuf *m;
	int off0, wait;
	int len;
{
	return m_copym0(m, off0, len, wait, 1);	/* deep copy */
}

static struct mbuf *
m_copym0(m, off0, len, wait, deep)
	struct mbuf *m;
	int off0, wait;
	int len;
	int deep;	/* deep copy */
{
	struct mbuf *n, **np;
	int off = off0;
	struct mbuf *top;
	int copyhdr = 0;

	if (off < 0 || len < 0)
		panic("m_copym: off %d, len %d", off, len);
	if (off == 0 && m->m_flags & M_PKTHDR)
		copyhdr = 1;
	while (off > 0) {
		if (m == 0)
			panic("m_copym: m == 0");
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	np = &top;
	top = 0;
	while (len > 0) {
		if (m == 0) {
			if (len != M_COPYALL)
				panic("m_copym: m == 0 and not COPYALL");
			break;
		}
		MGET(n, wait, m->m_type);
		*np = n;
		if (n == 0)
			goto nospace;
		if (copyhdr) {
			M_COPY_PKTHDR(n, m);
			if (len == M_COPYALL)
				n->m_pkthdr.len -= off0;
			else
				n->m_pkthdr.len = len;
			copyhdr = 0;
		}
		n->m_len = min(len, m->m_len - off);
		if (m->m_flags & M_EXT) {
			if (!deep) {
				n->m_data = m->m_data + off;
				n->m_ext = m->m_ext;
				MCLADDREFERENCE(m, n);
			} else {
				MCLGET(n, wait);
				if ((n->m_flags & M_EXT) == 0) {
					/*
					 * Cluster allocation failed;
					 * n->m_len may exceed MLEN, so
					 * copying would overrun n.
					 */
					goto nospace;
				}
				memcpy(mtod(n, caddr_t), mtod(m, caddr_t)+off,
				    (unsigned)n->m_len);
			}
		} else
			memcpy(mtod(n, caddr_t), mtod(m, caddr_t)+off,
			    (unsigned)n->m_len);
		if (len != M_COPYALL)
			len -= n->m_len;
		off = 0;
		m = m->m_next;
		np = &n->m_next;
	}
	if (top == 0)
		MCFail++;
	return (top);
nospace:
	m_freem(top);
	MCFail++;
	return (0);
}

/*
 * Copy an entire packet, including header (which must be present).
 * An optimization of the common case `m_copym(m, 0, M_COPYALL, how)'.
 */
struct mbuf *
m_copypacket(m, how)
	struct mbuf *m;
	int how;
{
	struct mbuf *top, *n, *o;

	MGET(n, how, m->m_type);
	top = n;
	if (!n)
		goto nospace;

	M_COPY_PKTHDR(n, m);
	n->m_len = m->m_len;
	if (m->m_flags & M_EXT) {
		n->m_data = m->m_data;
		n->m_ext = m->m_ext;
		MCLADDREFERENCE(m, n);
	} else {
		memcpy(mtod(n, char *), mtod(m, char *), n->m_len);
	}

	m = m->m_next;
	while (m) {
		MGET(o, how, m->m_type);
		if (!o)
			goto nospace;

		n->m_next = o;
		n = n->m_next;

		n->m_len = m->m_len;
		if (m->m_flags & M_EXT) {
			n->m_data = m->m_data;
			n->m_ext = m->m_ext;
			MCLADDREFERENCE(m, n);
		} else {
			memcpy(mtod(n, char *), mtod(m, char *), n->m_len);
		}

		m = m->m_next;
	}
	return top;
nospace:
	m_freem(top);
	MCFail++;
	return 0;
}
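
/*
 * Usage sketch (illustrative only): the difference between the two
 * entry points above matters when the copy will be modified.  A
 * shallow copy shares cluster storage with the original, so writes
 * through one chain are visible through the other:
 *
 *	n = m_copym(m, 0, M_COPYALL, M_DONTWAIT);   (shares clusters)
 *	n = m_dup(m, 0, M_COPYALL, M_DONTWAIT);     (private data copy)
 *
 * Retransmission queues typically use m_copym(); code that must edit
 * the copy in place wants m_dup().
 */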

/*
 * Copy data from an mbuf chain starting "off" bytes from the beginning,
 * continuing for "len" bytes, into the indicated buffer.
 */
void
m_copydata(m, off, len, cp)
	struct mbuf *m;
	int off;
	int len;
	caddr_t cp;
{
	unsigned count;

	if (off < 0 || len < 0)
		panic("m_copydata");
	while (off > 0) {
		if (m == 0)
			panic("m_copydata");
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	while (len > 0) {
		if (m == 0)
			panic("m_copydata");
		count = min(m->m_len - off, len);
		memcpy(cp, mtod(m, caddr_t) + off, count);
		len -= count;
		cp += count;
		off = 0;
		m = m->m_next;
	}
}

/*
 * Concatenate mbuf chain n to m.
 * Both chains must be of the same type (e.g. MT_DATA).
 * Any m_pkthdr is not updated.
 */
void
m_cat(m, n)
	struct mbuf *m, *n;
{
	while (m->m_next)
		m = m->m_next;
	while (n) {
		if (m->m_flags & M_EXT ||
		    m->m_data + m->m_len + n->m_len >= &m->m_dat[MLEN]) {
			/* just join the two chains */
			m->m_next = n;
			return;
		}
		/* splat the data from one into the other */
		memcpy(mtod(m, caddr_t) + m->m_len, mtod(n, caddr_t),
		    (u_int)n->m_len);
		m->m_len += n->m_len;
		n = m_free(n);
	}
}

void
m_adj(mp, req_len)
	struct mbuf *mp;
	int req_len;
{
	int len = req_len;
	struct mbuf *m;
	int count;

	if ((m = mp) == NULL)
		return;
	if (len >= 0) {
		/*
		 * Trim from head.
		 */
		while (m != NULL && len > 0) {
			if (m->m_len <= len) {
				len -= m->m_len;
				m->m_len = 0;
				m = m->m_next;
			} else {
				m->m_len -= len;
				m->m_data += len;
				len = 0;
			}
		}
		m = mp;
		if (mp->m_flags & M_PKTHDR)
			m->m_pkthdr.len -= (req_len - len);
	} else {
		/*
		 * Trim from tail.  Scan the mbuf chain,
		 * calculating its length and finding the last mbuf.
		 * If the adjustment only affects this mbuf, then just
		 * adjust and return.  Otherwise, rescan and truncate
		 * after the remaining size.
		 */
		len = -len;
		count = 0;
		for (;;) {
			count += m->m_len;
			if (m->m_next == (struct mbuf *)0)
				break;
			m = m->m_next;
		}
		if (m->m_len >= len) {
			m->m_len -= len;
			if (mp->m_flags & M_PKTHDR)
				mp->m_pkthdr.len -= len;
			return;
		}
		count -= len;
		if (count < 0)
			count = 0;
		/*
		 * Correct length for chain is "count".
		 * Find the mbuf with last data, adjust its length,
		 * and toss data from remaining mbufs on chain.
		 */
		m = mp;
		if (m->m_flags & M_PKTHDR)
			m->m_pkthdr.len = count;
		for (; m; m = m->m_next) {
			if (m->m_len >= count) {
				m->m_len = count;
				break;
			}
			count -= m->m_len;
		}
		while (m->m_next)
			(m = m->m_next)->m_len = 0;
	}
}
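
/*
 * Usage sketch (illustrative only): m_adj() trims from the front with
 * a positive count and from the back with a negative one.  Stripping a
 * link-level header and a trailing checksum might look like:
 *
 *	m_adj(m, sizeof(struct ether_header));	(drop leading header)
 *	m_adj(m, -ETHER_CRC_LEN);		(drop trailing CRC)
 *
 * ETHER_CRC_LEN appears here only as an example constant.
 */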

/*
 * Rearrange an mbuf chain so that len bytes are contiguous
 * and in the data area of an mbuf (so that mtod and dtom
 * will work for a structure of size len).  Returns the resulting
 * mbuf chain on success, frees it and returns null on failure.
 * If there is room, it will add up to max_protohdr-len extra bytes to the
 * contiguous region in an attempt to avoid being called next time.
 */
int MPFail;

struct mbuf *
m_pullup(n, len)
	struct mbuf *n;
	int len;
{
	struct mbuf *m;
	int count;
	int space;

	/*
	 * If first mbuf has no cluster, and has room for len bytes
	 * without shifting current data, pullup into it,
	 * otherwise allocate a new mbuf to prepend to the chain.
	 */
	if ((n->m_flags & M_EXT) == 0 &&
	    n->m_data + len < &n->m_dat[MLEN] && n->m_next) {
		if (n->m_len >= len)
			return (n);
		m = n;
		n = n->m_next;
		len -= m->m_len;
	} else {
		if (len > MHLEN)
			goto bad;
		MGET(m, M_DONTWAIT, n->m_type);
		if (m == 0)
			goto bad;
		m->m_len = 0;
		if (n->m_flags & M_PKTHDR) {
			M_COPY_PKTHDR(m, n);
			n->m_flags &= ~M_PKTHDR;
		}
	}
	space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
	do {
		count = min(min(max(len, max_protohdr), space), n->m_len);
		memcpy(mtod(m, caddr_t) + m->m_len, mtod(n, caddr_t),
		    (unsigned)count);
		len -= count;
		m->m_len += count;
		n->m_len -= count;
		space -= count;
		if (n->m_len)
			n->m_data += count;
		else
			n = m_free(n);
	} while (len > 0 && n);
	if (len > 0) {
		(void) m_free(m);
		goto bad;
	}
	m->m_next = n;
	return (m);
bad:
	m_freem(n);
	MPFail++;
	return (0);
}
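
/*
 * Usage sketch (illustrative only): the canonical caller pattern makes
 * a fixed-size header contiguous before casting the data pointer.
 * "struct myhdr" is a hypothetical header type:
 *
 *	if (m->m_len < sizeof(struct myhdr) &&
 *	    (m = m_pullup(m, sizeof(struct myhdr))) == NULL) {
 *		... the chain was freed by m_pullup(); just bail out ...
 *	}
 *	hdr = mtod(m, struct myhdr *);
 */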

/*
 * Partition an mbuf chain in two pieces, returning the tail --
 * all but the first len0 bytes.  In case of failure, it returns NULL and
 * attempts to restore the chain to its original state.
 */
struct mbuf *
m_split(m0, len0, wait)
	struct mbuf *m0;
	int len0, wait;
{
	struct mbuf *m, *n;
	unsigned len = len0, remain, len_save;

	for (m = m0; m && len > m->m_len; m = m->m_next)
		len -= m->m_len;
	if (m == 0)
		return (0);
	remain = m->m_len - len;
	if (m0->m_flags & M_PKTHDR) {
		MGETHDR(n, wait, m0->m_type);
		if (n == 0)
			return (0);
		n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
		n->m_pkthdr.len = m0->m_pkthdr.len - len0;
		len_save = m0->m_pkthdr.len;
		m0->m_pkthdr.len = len0;
		if (m->m_flags & M_EXT)
			goto extpacket;
		if (remain > MHLEN) {
			/* m can't be the lead packet */
			MH_ALIGN(n, 0);
			n->m_next = m_split(m, len, wait);
			if (n->m_next == 0) {
				(void) m_free(n);
				m0->m_pkthdr.len = len_save;
				return (0);
			} else
				return (n);
		} else
			MH_ALIGN(n, remain);
	} else if (remain == 0) {
		n = m->m_next;
		m->m_next = 0;
		return (n);
	} else {
		MGET(n, wait, m->m_type);
		if (n == 0)
			return (0);
		M_ALIGN(n, remain);
	}
extpacket:
	if (m->m_flags & M_EXT) {
		n->m_ext = m->m_ext;
		MCLADDREFERENCE(m, n);
		n->m_data = m->m_data + len;
	} else {
		memcpy(mtod(n, caddr_t), mtod(m, caddr_t) + len, remain);
	}
	n->m_len = remain;
	m->m_len = len;
	n->m_next = m->m_next;
	m->m_next = 0;
	return (n);
}

/*
 * Routine to copy from device local memory into mbufs.
 */
struct mbuf *
m_devget(buf, totlen, off0, ifp, copy)
	char *buf;
	int totlen, off0;
	struct ifnet *ifp;
	void (*copy) __P((const void *from, void *to, size_t len));
{
	struct mbuf *m;
	struct mbuf *top = 0, **mp = &top;
	int off = off0, len;
	char *cp;
	char *epkt;

	cp = buf;
	epkt = cp + totlen;
	if (off) {
		/*
		 * If 'off' is non-zero, packet is trailer-encapsulated,
		 * so we have to skip the type and length fields.
		 */
		cp += off + 2 * sizeof(u_int16_t);
		totlen -= 2 * sizeof(u_int16_t);
	}
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == 0)
		return (0);
	m->m_pkthdr.rcvif = ifp;
	m->m_pkthdr.len = totlen;
	m->m_len = MHLEN;

	while (totlen > 0) {
		if (top) {
			MGET(m, M_DONTWAIT, MT_DATA);
			if (m == 0) {
				m_freem(top);
				return (0);
			}
			m->m_len = MLEN;
		}
		len = min(totlen, epkt - cp);
		if (len >= MINCLSIZE) {
			MCLGET(m, M_DONTWAIT);
			if ((m->m_flags & M_EXT) == 0) {
				m_free(m);
				m_freem(top);
				return (0);
			}
			m->m_len = len = min(len, MCLBYTES);
		} else {
			/*
			 * Place initial small packet/header at end of mbuf.
			 */
			if (len < m->m_len) {
				if (top == 0 && len + max_linkhdr <= m->m_len)
					m->m_data += max_linkhdr;
				m->m_len = len;
			} else
				len = m->m_len;
		}
		if (copy)
			copy(cp, mtod(m, caddr_t), (size_t)len);
		else
			memcpy(mtod(m, caddr_t), cp, (size_t)len);
		cp += len;
		*mp = m;
		mp = &m->m_next;
		totlen -= len;
		if (cp == epkt)
			cp = buf;
	}
	return (top);
}

/*
 * Copy data from a buffer back into the indicated mbuf chain,
 * starting "off" bytes from the beginning, extending the mbuf
 * chain if necessary.
 */
void
m_copyback(m0, off, len, cp)
	struct mbuf *m0;
	int off;
	int len;
	caddr_t cp;
{
	int mlen;
	struct mbuf *m = m0, *n;
	int totlen = 0;

	if (m0 == 0)
		return;
	while (off > (mlen = m->m_len)) {
		off -= mlen;
		totlen += mlen;
		if (m->m_next == 0) {
			n = m_getclr(M_DONTWAIT, m->m_type);
			if (n == 0)
				goto out;
			n->m_len = min(MLEN, len + off);
			m->m_next = n;
		}
		m = m->m_next;
	}
	while (len > 0) {
		mlen = min(m->m_len - off, len);
		memcpy(mtod(m, caddr_t) + off, cp, (unsigned)mlen);
		cp += mlen;
		len -= mlen;
		mlen += off;
		off = 0;
		totlen += mlen;
		if (len == 0)
			break;
		if (m->m_next == 0) {
			n = m_get(M_DONTWAIT, m->m_type);
			if (n == 0)
				break;
			n->m_len = min(MLEN, len);
			m->m_next = n;
		}
		m = m->m_next;
	}
out:	if (((m = m0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen))
		m->m_pkthdr.len = totlen;
}
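
/*
 * Usage sketch (illustrative only): m_copyback() is the natural way to
 * overwrite a field at a known offset, e.g. patching a checksum that
 * has just been computed; "struct myhdr" is a hypothetical header:
 *
 *	u_int16_t sum;
 *
 *	m_copyback(m, offsetof(struct myhdr, sum), sizeof(sum),
 *	    (caddr_t)&sum);
 *
 * Note that on allocation failure the function gives up silently, so
 * callers that extend the chain should verify m_pkthdr.len afterwards.
 */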