/*	$NetBSD: uipc_mbuf.c,v 1.45 2000/03/01 12:49:28 itojun Exp $	*/

/*-
 * Copyright (c) 1999 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1982, 1986, 1988, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.
 * IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_mbuf.c	8.4 (Berkeley) 2/14/95
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/map.h>
#define MBTYPES
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/syslog.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/pool.h>
#include <sys/socket.h>
#include <net/if.h>

#include <vm/vm.h>
#include <vm/vm_kern.h>

#include <uvm/uvm_extern.h>

#include <sys/sysctl.h>

struct pool mbpool;		/* mbuf pool */
struct pool mclpool;		/* mbuf cluster pool */

struct mbstat mbstat;
int	max_linkhdr;
int	max_protohdr;
int	max_hdr;
int	max_datalen;

void	*mclpool_alloc __P((unsigned long, int, int));
void	mclpool_release __P((void *, unsigned long, int));
static struct mbuf *m_copym0 __P((struct mbuf *, int, int, int, int));

const char *mclpool_warnmsg =
    "WARNING: mclpool limit reached; increase NMBCLUSTERS";

/*
 * Initialize the mbuf allocator.
 */
void
mbinit()
{

	pool_init(&mbpool, msize, 0, 0, 0, "mbpl", 0, NULL, NULL, 0);
	pool_init(&mclpool, mclbytes, 0, 0, 0, "mclpl", 0, mclpool_alloc,
	    mclpool_release, 0);

	/*
	 * Set the hard limit on the mclpool to the number of
	 * mbuf clusters the kernel is to support.  Log the limit
	 * reached message at most once a minute.
	 */
	pool_sethardlimit(&mclpool, nmbclusters, mclpool_warnmsg, 60);

	/*
	 * Set a low water mark for both mbufs and clusters.  This should
	 * help ensure that they can be allocated in a memory starvation
	 * situation.  This is important for e.g. diskless systems which
	 * must allocate mbufs in order for the pagedaemon to clean pages.
	 */
	pool_setlowat(&mbpool, mblowat);
	pool_setlowat(&mclpool, mcllowat);
}
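
/*
 * Usage sketch (illustrative, added by the editor): the cluster limit
 * installed above normally comes from the NMBCLUSTERS kernel option,
 * and on kernels whose pool pages are direct-mapped it can also be
 * raised at run time through the sysctl handler below, e.g.:
 *
 *	sysctl -w kern.mbuf.nmbclusters=2048
 *
 * (The MIB names are assumed from the MBUF_* constants handled below;
 * check sysctl(8) on the target system.)
 */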

int
sysctl_dombuf(name, namelen, oldp, oldlenp, newp, newlen)
	int *name;
	u_int namelen;
	void *oldp;
	size_t *oldlenp;
	void *newp;
	size_t newlen;
{
	int error, newval;

	/* All sysctl names at this level are terminal. */
	if (namelen != 1)
		return (ENOTDIR);		/* overloaded */

	switch (name[0]) {
	case MBUF_MSIZE:
		return (sysctl_rdint(oldp, oldlenp, newp, msize));
	case MBUF_MCLBYTES:
		return (sysctl_rdint(oldp, oldlenp, newp, mclbytes));
	case MBUF_NMBCLUSTERS:
		/*
		 * If we have direct-mapped pool pages, we can adjust this
		 * number on the fly.  If not, we're limited by the size
		 * of mb_map, and cannot change this value.
		 *
		 * Note: we only allow the value to be increased, never
		 * decreased.
		 */
		if (mb_map == NULL) {
			newval = nmbclusters;
			error = sysctl_int(oldp, oldlenp, newp, newlen,
			    &newval);
			if (error != 0)
				return (error);
			if (newp != NULL) {
				if (newval >= nmbclusters) {
					nmbclusters = newval;
					pool_sethardlimit(&mclpool,
					    nmbclusters, mclpool_warnmsg, 60);
				} else
					error = EINVAL;
			}
			return (error);
		} else
			return (sysctl_rdint(oldp, oldlenp, newp,
			    nmbclusters));
	case MBUF_MBLOWAT:
	case MBUF_MCLLOWAT:
		/* New value must be >= 0. */
		newval = (name[0] == MBUF_MBLOWAT) ? mblowat : mcllowat;
		error = sysctl_int(oldp, oldlenp, newp, newlen, &newval);
		if (error != 0)
			return (error);
		if (newp != NULL) {
			if (newval >= 0) {
				if (name[0] == MBUF_MBLOWAT) {
					mblowat = newval;
					pool_setlowat(&mbpool, newval);
				} else {
					mcllowat = newval;
					pool_setlowat(&mclpool, newval);
				}
			} else
				error = EINVAL;
		}
		return (error);
	default:
		return (EOPNOTSUPP);
	}
	/* NOTREACHED */
}

void *
mclpool_alloc(sz, flags, mtype)
	unsigned long sz;
	int flags;
	int mtype;
{
	boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;

	return ((void *)uvm_km_alloc_poolpage1(mb_map, uvmexp.mb_object,
	    waitok));
}

void
mclpool_release(v, sz, mtype)
	void *v;
	unsigned long sz;
	int mtype;
{

	uvm_km_free_poolpage1(mb_map, (vaddr_t)v);
}

/*
 * When MGET fails, ask protocols to free space when short of memory,
 * then re-attempt to allocate an mbuf.
 */
struct mbuf *
m_retry(i, t)
	int i, t;
{
	struct mbuf *m;

	m_reclaim(i);
#define m_retry(i, t)	(struct mbuf *)0
	MGET(m, i, t);
#undef m_retry
	if (m != NULL)
		mbstat.m_wait++;
	else
		mbstat.m_drops++;
	return (m);
}

/*
 * As above; retry an MGETHDR.
 */
struct mbuf *
m_retryhdr(i, t)
	int i, t;
{
	struct mbuf *m;

	m_reclaim(i);
#define m_retryhdr(i, t)	(struct mbuf *)0
	MGETHDR(m, i, t);
#undef m_retryhdr
	if (m != NULL)
		mbstat.m_wait++;
	else
		mbstat.m_drops++;
	return (m);
}

void
m_reclaim(how)
	int how;
{
	struct domain *dp;
	struct protosw *pr;
	struct ifnet *ifp;
	int s = splimp();

	for (dp = domains; dp; dp = dp->dom_next)
		for (pr = dp->dom_protosw;
		     pr < dp->dom_protoswNPROTOSW; pr++)
			if (pr->pr_drain)
				(*pr->pr_drain)();
	for (ifp = TAILQ_FIRST(&ifnet); ifp; ifp = TAILQ_NEXT(ifp, if_list))
		if (ifp->if_drain)
			(*ifp->if_drain)(ifp);
	splx(s);
	mbstat.m_drain++;
}
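
/*
 * Aside (illustrative sketch, added by the editor): the #define trick
 * in m_retry() and m_retryhdr() above exists because the MGET and
 * MGETHDR macros themselves fall back to these functions when the
 * pool is exhausted, roughly:
 *
 *	m = pool_get(&mbpool, ...);
 *	if (m == NULL)
 *		m = m_retry(how, type);
 *
 * Temporarily defining m_retry to a null constant makes the nested
 * expansion fail outright instead of recursing, so the protocol and
 * interface drain routines run at most once per allocation attempt.
 */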

/*
 * Space allocation routines.
 * These are also available as macros
 * for critical paths.
 */
struct mbuf *
m_get(nowait, type)
	int nowait, type;
{
	struct mbuf *m;

	MGET(m, nowait, type);
	return (m);
}

struct mbuf *
m_gethdr(nowait, type)
	int nowait, type;
{
	struct mbuf *m;

	MGETHDR(m, nowait, type);
	return (m);
}

struct mbuf *
m_getclr(nowait, type)
	int nowait, type;
{
	struct mbuf *m;

	MGET(m, nowait, type);
	if (m == 0)
		return (0);
	memset(mtod(m, caddr_t), 0, MLEN);
	return (m);
}

struct mbuf *
m_free(m)
	struct mbuf *m;
{
	struct mbuf *n;

	MFREE(m, n);
	return (n);
}

void
m_freem(m)
	struct mbuf *m;
{
	struct mbuf *n;

	if (m == NULL)
		return;
	if ((m->m_flags & M_PKTHDR) != 0 && m->m_pkthdr.aux) {
		m_freem(m->m_pkthdr.aux);
		m->m_pkthdr.aux = NULL;
	}
	do {
		MFREE(m, n);
		m = n;
	} while (m);
}

/*
 * Mbuffer utility routines.
 */

/*
 * Lesser-used path for M_PREPEND:
 * allocate new mbuf to prepend to chain,
 * copy junk along.
 */
struct mbuf *
m_prepend(m, len, how)
	struct mbuf *m;
	int len, how;
{
	struct mbuf *mn;

	MGET(mn, how, m->m_type);
	if (mn == (struct mbuf *)NULL) {
		m_freem(m);
		return ((struct mbuf *)NULL);
	}
	if (m->m_flags & M_PKTHDR) {
		M_COPY_PKTHDR(mn, m);
		m->m_flags &= ~M_PKTHDR;
	}
	mn->m_next = m;
	m = mn;
	if (len < MHLEN)
		MH_ALIGN(m, len);
	m->m_len = len;
	return (m);
}
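
/*
 * Usage sketch (illustrative, with a hypothetical header type, added
 * by the editor): callers normally reach m_prepend() through the
 * M_PREPEND macro, which uses existing leading space when it can and
 * falls back here otherwise.  A protocol output path might look like:
 *
 *	M_PREPEND(m, sizeof(struct myproto_hdr), M_DONTWAIT);
 *	if (m == NULL)
 *		return (ENOBUFS);
 *	hdr = mtod(m, struct myproto_hdr *);
 *
 * On failure M_PREPEND leaves m NULL with the original chain freed.
 */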

/*
 * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
 * continuing for "len" bytes.  If len is M_COPYALL, copy to end of mbuf.
 * The wait parameter is a choice of M_WAIT/M_DONTWAIT from caller.
 */
int MCFail;

struct mbuf *
m_copym(m, off0, len, wait)
	struct mbuf *m;
	int off0, wait;
	int len;
{
	return m_copym0(m, off0, len, wait, 0);	/* shallow copy on M_EXT */
}

struct mbuf *
m_dup(m, off0, len, wait)
	struct mbuf *m;
	int off0, wait;
	int len;
{
	return m_copym0(m, off0, len, wait, 1);	/* deep copy */
}

static struct mbuf *
m_copym0(m, off0, len, wait, deep)
	struct mbuf *m;
	int off0, wait;
	int len;
	int deep;	/* deep copy */
{
	struct mbuf *n, **np;
	int off = off0;
	struct mbuf *top;
	int copyhdr = 0;

	if (off < 0 || len < 0)
		panic("m_copym: off %d, len %d", off, len);
	if (off == 0 && m->m_flags & M_PKTHDR)
		copyhdr = 1;
	while (off > 0) {
		if (m == 0)
			panic("m_copym: m == 0");
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	np = &top;
	top = 0;
	while (len > 0) {
		if (m == 0) {
			if (len != M_COPYALL)
				panic("m_copym: m == 0 and not COPYALL");
			break;
		}
		MGET(n, wait, m->m_type);
		*np = n;
		if (n == 0)
			goto nospace;
		if (copyhdr) {
			M_COPY_PKTHDR(n, m);
			if (len == M_COPYALL)
				n->m_pkthdr.len -= off0;
			else
				n->m_pkthdr.len = len;
			copyhdr = 0;
		}
		n->m_len = min(len, m->m_len - off);
		if (m->m_flags & M_EXT) {
			if (!deep) {
				n->m_data = m->m_data + off;
				n->m_ext = m->m_ext;
				MCLADDREFERENCE(m, n);
			} else {
				MCLGET(n, wait);
				memcpy(mtod(n, caddr_t), mtod(m, caddr_t)+off,
				    (unsigned)n->m_len);
			}
		} else
			memcpy(mtod(n, caddr_t), mtod(m, caddr_t)+off,
			    (unsigned)n->m_len);
		if (len != M_COPYALL)
			len -= n->m_len;
		off = 0;
		m = m->m_next;
		np = &n->m_next;
	}
	if (top == 0)
		MCFail++;
	return (top);
nospace:
	m_freem(top);
	MCFail++;
	return (0);
}

/*
 * Copy an entire packet, including header (which must be present).
 * An optimization of the common case `m_copym(m, 0, M_COPYALL, how)'.
 */
struct mbuf *
m_copypacket(m, how)
	struct mbuf *m;
	int how;
{
	struct mbuf *top, *n, *o;

	MGET(n, how, m->m_type);
	top = n;
	if (!n)
		goto nospace;

	M_COPY_PKTHDR(n, m);
	n->m_len = m->m_len;
	if (m->m_flags & M_EXT) {
		n->m_data = m->m_data;
		n->m_ext = m->m_ext;
		MCLADDREFERENCE(m, n);
	} else {
		memcpy(mtod(n, char *), mtod(m, char *), n->m_len);
	}

	m = m->m_next;
	while (m) {
		MGET(o, how, m->m_type);
		if (!o)
			goto nospace;

		n->m_next = o;
		n = n->m_next;

		n->m_len = m->m_len;
		if (m->m_flags & M_EXT) {
			n->m_data = m->m_data;
			n->m_ext = m->m_ext;
			MCLADDREFERENCE(m, n);
		} else {
			memcpy(mtod(n, char *), mtod(m, char *), n->m_len);
		}

		m = m->m_next;
	}
	return top;
nospace:
	m_freem(top);
	MCFail++;
	return 0;
}
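
/*
 * Usage sketch (illustrative, added by the editor): the practical
 * difference between the two m_copym0() flavours is reference sharing
 * on M_EXT storage.  A read-only snapshot can use the cheap shallow
 * copy; a caller that will modify cluster data wants its own storage:
 *
 *	n = m_copym(m, 0, M_COPYALL, M_DONTWAIT);   shallow, shares clusters
 *	n = m_dup(m, 0, M_COPYALL, M_DONTWAIT);     deep, copies cluster data
 *
 * With the shallow copy, a write through either chain is visible in
 * the other wherever a cluster is shared.
 */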

/*
 * Copy data from an mbuf chain starting "off" bytes from the beginning,
 * continuing for "len" bytes, into the indicated buffer.
 */
void
m_copydata(m, off, len, cp)
	struct mbuf *m;
	int off;
	int len;
	caddr_t cp;
{
	unsigned count;

	if (off < 0 || len < 0)
		panic("m_copydata");
	while (off > 0) {
		if (m == 0)
			panic("m_copydata");
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	while (len > 0) {
		if (m == 0)
			panic("m_copydata");
		count = min(m->m_len - off, len);
		memcpy(cp, mtod(m, caddr_t) + off, count);
		len -= count;
		cp += count;
		off = 0;
		m = m->m_next;
	}
}

/*
 * Concatenate mbuf chain n to m.
 * Both chains must be of the same type (e.g. MT_DATA).
 * Any m_pkthdr is not updated.
 */
void
m_cat(m, n)
	struct mbuf *m, *n;
{
	while (m->m_next)
		m = m->m_next;
	while (n) {
		if (m->m_flags & M_EXT ||
		    m->m_data + m->m_len + n->m_len >= &m->m_dat[MLEN]) {
			/* just join the two chains */
			m->m_next = n;
			return;
		}
		/* splat the data from one into the other */
		memcpy(mtod(m, caddr_t) + m->m_len, mtod(n, caddr_t),
		    (u_int)n->m_len);
		m->m_len += n->m_len;
		n = m_free(n);
	}
}

void
m_adj(mp, req_len)
	struct mbuf *mp;
	int req_len;
{
	int len = req_len;
	struct mbuf *m;
	int count;

	if ((m = mp) == NULL)
		return;
	if (len >= 0) {
		/*
		 * Trim from head.
		 */
		while (m != NULL && len > 0) {
			if (m->m_len <= len) {
				len -= m->m_len;
				m->m_len = 0;
				m = m->m_next;
			} else {
				m->m_len -= len;
				m->m_data += len;
				len = 0;
			}
		}
		m = mp;
		if (mp->m_flags & M_PKTHDR)
			m->m_pkthdr.len -= (req_len - len);
	} else {
		/*
		 * Trim from tail.  Scan the mbuf chain,
		 * calculating its length and finding the last mbuf.
		 * If the adjustment only affects this mbuf, then just
		 * adjust and return.  Otherwise, rescan and truncate
		 * after the remaining size.
		 */
		len = -len;
		count = 0;
		for (;;) {
			count += m->m_len;
			if (m->m_next == (struct mbuf *)0)
				break;
			m = m->m_next;
		}
		if (m->m_len >= len) {
			m->m_len -= len;
			if (mp->m_flags & M_PKTHDR)
				mp->m_pkthdr.len -= len;
			return;
		}
		count -= len;
		if (count < 0)
			count = 0;
		/*
		 * Correct length for chain is "count".
		 * Find the mbuf with last data, adjust its length,
		 * and toss data from remaining mbufs on chain.
		 */
		m = mp;
		if (m->m_flags & M_PKTHDR)
			m->m_pkthdr.len = count;
		for (; m; m = m->m_next) {
			if (m->m_len >= count) {
				m->m_len = count;
				break;
			}
			count -= m->m_len;
		}
		while (m->m_next)
			(m = m->m_next)->m_len = 0;
	}
}
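
/*
 * Usage sketch (illustrative, added by the editor): m_adj()'s sign
 * convention is that a positive request trims from the head of the
 * chain and a negative one from the tail.  A driver might strip a
 * link-level header and a trailing CRC like so:
 *
 *	m_adj(m, sizeof(struct ether_header));	trim head
 *	m_adj(m, -ETHER_CRC_LEN);		trim tail
 *
 * Both calls keep m_pkthdr.len consistent when M_PKTHDR is set.
 */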

/*
 * Rearrange an mbuf chain so that len bytes are contiguous
 * and in the data area of an mbuf (so that mtod and dtom
 * will work for a structure of size len).  Returns the resulting
 * mbuf chain on success, frees it and returns null on failure.
 * If there is room, it will add up to max_protohdr-len extra bytes to the
 * contiguous region in an attempt to avoid being called next time.
 */
int MPFail;

struct mbuf *
m_pullup(n, len)
	struct mbuf *n;
	int len;
{
	struct mbuf *m;
	int count;
	int space;

	/*
	 * If first mbuf has no cluster, and has room for len bytes
	 * without shifting current data, pullup into it,
	 * otherwise allocate a new mbuf to prepend to the chain.
	 */
	if ((n->m_flags & M_EXT) == 0 &&
	    n->m_data + len < &n->m_dat[MLEN] && n->m_next) {
		if (n->m_len >= len)
			return (n);
		m = n;
		n = n->m_next;
		len -= m->m_len;
	} else {
		if (len > MHLEN)
			goto bad;
		MGET(m, M_DONTWAIT, n->m_type);
		if (m == 0)
			goto bad;
		m->m_len = 0;
		if (n->m_flags & M_PKTHDR) {
			M_COPY_PKTHDR(m, n);
			n->m_flags &= ~M_PKTHDR;
		}
	}
	space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
	do {
		count = min(min(max(len, max_protohdr), space), n->m_len);
		memcpy(mtod(m, caddr_t) + m->m_len, mtod(n, caddr_t),
		    (unsigned)count);
		len -= count;
		m->m_len += count;
		n->m_len -= count;
		space -= count;
		if (n->m_len)
			n->m_data += count;
		else
			n = m_free(n);
	} while (len > 0 && n);
	if (len > 0) {
		(void) m_free(m);
		goto bad;
	}
	m->m_next = n;
	return (m);
bad:
	m_freem(n);
	MPFail++;
	return (0);
}

/*
 * Partition an mbuf chain in two pieces, returning the tail --
 * all but the first len0 bytes.  In case of failure, it returns NULL and
 * attempts to restore the chain to its original state.
 */
struct mbuf *
m_split(m0, len0, wait)
	struct mbuf *m0;
	int len0, wait;
{
	struct mbuf *m, *n;
	unsigned len = len0, remain, len_save;

	for (m = m0; m && len > m->m_len; m = m->m_next)
		len -= m->m_len;
	if (m == 0)
		return (0);
	remain = m->m_len - len;
	if (m0->m_flags & M_PKTHDR) {
		MGETHDR(n, wait, m0->m_type);
		if (n == 0)
			return (0);
		n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
		n->m_pkthdr.len = m0->m_pkthdr.len - len0;
		len_save = m0->m_pkthdr.len;
		m0->m_pkthdr.len = len0;
		if (m->m_flags & M_EXT)
			goto extpacket;
		if (remain > MHLEN) {
			/* m can't be the lead packet */
			MH_ALIGN(n, 0);
			n->m_next = m_split(m, len, wait);
			if (n->m_next == 0) {
				(void) m_free(n);
				m0->m_pkthdr.len = len_save;
				return (0);
			} else
				return (n);
		} else
			MH_ALIGN(n, remain);
	} else if (remain == 0) {
		n = m->m_next;
		m->m_next = 0;
		return (n);
	} else {
		MGET(n, wait, m->m_type);
		if (n == 0)
			return (0);
		M_ALIGN(n, remain);
	}
extpacket:
	if (m->m_flags & M_EXT) {
		n->m_ext = m->m_ext;
		MCLADDREFERENCE(m, n);
		n->m_data = m->m_data + len;
	} else {
		memcpy(mtod(n, caddr_t), mtod(m, caddr_t) + len, remain);
	}
	n->m_len = remain;
	m->m_len = len;
	n->m_next = m->m_next;
	m->m_next = 0;
	return (n);
}
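
/*
 * Usage sketch (illustrative, added by the editor): the canonical
 * m_pullup() idiom in protocol input paths makes a header contiguous
 * before casting:
 *
 *	if (m->m_len < sizeof(struct ip) &&
 *	    (m = m_pullup(m, sizeof(struct ip))) == NULL)
 *		return;
 *	ip = mtod(m, struct ip *);
 *
 * On failure m_pullup() has already freed the chain, so the caller
 * must not touch the stale pointer.
 */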

/*
 * Routine to copy from device local memory into mbufs.
 */
struct mbuf *
m_devget(buf, totlen, off0, ifp, copy)
	char *buf;
	int totlen, off0;
	struct ifnet *ifp;
	void (*copy) __P((const void *from, void *to, size_t len));
{
	struct mbuf *m;
	struct mbuf *top = 0, **mp = &top;
	int off = off0, len;
	char *cp;
	char *epkt;

	cp = buf;
	epkt = cp + totlen;
	if (off) {
		/*
		 * If 'off' is non-zero, packet is trailer-encapsulated,
		 * so we have to skip the type and length fields.
		 */
		cp += off + 2 * sizeof(u_int16_t);
		totlen -= 2 * sizeof(u_int16_t);
	}
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == 0)
		return (0);
	m->m_pkthdr.rcvif = ifp;
	m->m_pkthdr.len = totlen;
	m->m_len = MHLEN;

	while (totlen > 0) {
		if (top) {
			MGET(m, M_DONTWAIT, MT_DATA);
			if (m == 0) {
				m_freem(top);
				return (0);
			}
			m->m_len = MLEN;
		}
		len = min(totlen, epkt - cp);
		if (len >= MINCLSIZE) {
			MCLGET(m, M_DONTWAIT);
			if ((m->m_flags & M_EXT) == 0) {
				m_free(m);
				m_freem(top);
				return (0);
			}
			m->m_len = len = min(len, MCLBYTES);
		} else {
			/*
			 * Place initial small packet/header at end of mbuf.
			 */
			if (len < m->m_len) {
				if (top == 0 && len + max_linkhdr <= m->m_len)
					m->m_data += max_linkhdr;
				m->m_len = len;
			} else
				len = m->m_len;
		}
		if (copy)
			copy(cp, mtod(m, caddr_t), (size_t)len);
		else
			memcpy(mtod(m, caddr_t), cp, (size_t)len);
		cp += len;
		*mp = m;
		mp = &m->m_next;
		totlen -= len;
		if (cp == epkt)
			cp = buf;
	}
	return (top);
}

/*
 * Copy data from a buffer back into the indicated mbuf chain,
 * starting "off" bytes from the beginning, extending the mbuf
 * chain if necessary.
 */
void
m_copyback(m0, off, len, cp)
	struct mbuf *m0;
	int off;
	int len;
	caddr_t cp;
{
	int mlen;
	struct mbuf *m = m0, *n;
	int totlen = 0;

	if (m0 == 0)
		return;
	while (off > (mlen = m->m_len)) {
		off -= mlen;
		totlen += mlen;
		if (m->m_next == 0) {
			n = m_getclr(M_DONTWAIT, m->m_type);
			if (n == 0)
				goto out;
			n->m_len = min(MLEN, len + off);
			m->m_next = n;
		}
		m = m->m_next;
	}
	while (len > 0) {
		mlen = min(m->m_len - off, len);
		memcpy(mtod(m, caddr_t) + off, cp, (unsigned)mlen);
		cp += mlen;
		len -= mlen;
		mlen += off;
		off = 0;
		totlen += mlen;
		if (len == 0)
			break;
		if (m->m_next == 0) {
			n = m_get(M_DONTWAIT, m->m_type);
			if (n == 0)
				break;
			n->m_len = min(MLEN, len);
			m->m_next = n;
		}
		m = m->m_next;
	}
out:	if (((m = m0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen))
		m->m_pkthdr.len = totlen;
}
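
/*
 * Usage sketch (illustrative, added by the editor; "offset" is a
 * hypothetical byte offset known to lie within the packet):
 * m_copydata() and m_copyback() act as inverses, which suits
 * read-modify-write of a field buried in a chain:
 *
 *	u_int16_t val;
 *
 *	m_copydata(m, offset, sizeof(val), (caddr_t)&val);
 *	val = htons(ntohs(val) + 1);
 *	m_copyback(m, offset, sizeof(val), (caddr_t)&val);
 *
 * m_copyback() would extend the chain if the write ran past its end.
 */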