/*	$OpenBSD: uipc_mbuf.c,v 1.62 2003/06/02 23:28:06 millert Exp $	*/
/*	$NetBSD: uipc_mbuf.c,v 1.15.4.1 1996/06/13 17:11:44 cgd Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1988, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_mbuf.c	8.2 (Berkeley) 1/4/94
 */

/*
 * @(#)COPYRIGHT	1.1 (NRL) 17 January 1995
 *
 * NRL grants permission for redistribution and use in source and binary
 * forms, with or without modification, of the software and documentation
 * created at NRL provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgements:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 *	This product includes software developed at the Information
 *	Technology Division, US Naval Research Laboratory.
 * 4. Neither the name of the NRL nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THE SOFTWARE PROVIDED BY NRL IS PROVIDED BY NRL AND CONTRIBUTORS ``AS
 * IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 * PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL NRL OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * The views and conclusions contained in the software and documentation
 * are those of the authors and should not be interpreted as representing
 * official policies, either expressed or implied, of the US Naval
 * Research Laboratory (NRL).
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#define MBTYPES
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/syslog.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/pool.h>

#include <machine/cpu.h>

#include <uvm/uvm_extern.h>

struct	mbstat mbstat;		/* mbuf stats */
struct	pool mbpool;		/* mbuf pool */
struct	pool mclpool;		/* mbuf cluster pool */

struct vm_map *mb_map;

int	max_linkhdr;		/* largest link-level header */
int	max_protohdr;		/* largest protocol header */
int	max_hdr;		/* largest link+protocol header */
int	max_datalen;		/* MHLEN - max_hdr */

void	*mclpool_alloc(struct pool *, int);
void	mclpool_release(struct pool *, void *);
struct mbuf *m_copym0(struct mbuf *, int, int, int, int);

const char *mclpool_warnmsg =
    "WARNING: mclpool limit reached; increase NMBCLUSTERS";

struct pool_allocator mclpool_allocator = {
	mclpool_alloc, mclpool_release, 0,
};

/*
 * Initialize the mbuf allocator.
 */
void
mbinit()
{
	vaddr_t minaddr, maxaddr;

	minaddr = vm_map_min(kernel_map);
	mb_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
	    nmbclust * MCLBYTES, VM_MAP_INTRSAFE, FALSE, NULL);

	pool_init(&mbpool, MSIZE, 0, 0, 0, "mbpl", NULL);
	pool_init(&mclpool, MCLBYTES, 0, 0, 0, "mclpl", &mclpool_allocator);

	pool_set_drain_hook(&mbpool, m_reclaim, NULL);
	pool_set_drain_hook(&mclpool, m_reclaim, NULL);

	/*
	 * Set the hard limit on the mclpool to the number of
	 * mbuf clusters the kernel is to support.  Log the limit
	 * reached message at most once a minute.
	 */
	(void)pool_sethardlimit(&mclpool, nmbclust, mclpool_warnmsg, 60);

	/*
	 * Set a low water mark for both mbufs and clusters.  This should
	 * help ensure that they can be allocated in a memory starvation
	 * situation.  This is important for e.g. diskless systems which
	 * must allocate mbufs in order for the pagedaemon to clean pages.
	 */
	pool_setlowat(&mbpool, mblowat);
	pool_setlowat(&mclpool, mcllowat);
}
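/*
 * Sizing illustration (figures are examples only, not committed
 * defaults): with nmbclust == 4096 and MCLBYTES == 2048, the mb_map
 * submap above reserves 4096 * 2048 bytes (8MB) of kernel virtual
 * address space for clusters, and pool_sethardlimit() caps mclpool
 * at the same 4096 clusters.
 */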
void *
mclpool_alloc(struct pool *pp, int flags)
{
	boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;

	return ((void *)uvm_km_alloc_poolpage1(mb_map, uvmexp.mb_object,
	    waitok));
}

void
mclpool_release(struct pool *pp, void *v)
{
	uvm_km_free_poolpage1(mb_map, (vaddr_t)v);
}

void
m_reclaim(void *arg, int flags)
{
	register struct domain *dp;
	register struct protosw *pr;
	int s = splimp();

	for (dp = domains; dp; dp = dp->dom_next)
		for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++)
			if (pr->pr_drain)
				(*pr->pr_drain)();
	splx(s);
	mbstat.m_drain++;
}

/*
 * Space allocation routines.
 * These are also available as macros
 * for critical paths.
 */
struct mbuf *
m_get(nowait, type)
	int nowait, type;
{
	register struct mbuf *m;

	MGET(m, nowait, type);
	return (m);
}

struct mbuf *
m_gethdr(nowait, type)
	int nowait, type;
{
	register struct mbuf *m;

	MGETHDR(m, nowait, type);
	return (m);
}

struct mbuf *
m_getclr(nowait, type)
	int nowait, type;
{
	register struct mbuf *m;

	MGET(m, nowait, type);
	if (m == NULL)
		return (NULL);
	memset(mtod(m, caddr_t), 0, MLEN);
	return (m);
}

struct mbuf *
m_free(m)
	struct mbuf *m;
{
	register struct mbuf *n;

	MFREE(m, n);
	return (n);
}

void
m_freem(m)
	register struct mbuf *m;
{
	register struct mbuf *n;

	if (m == NULL)
		return;
	do {
		MFREE(m, n);
	} while ((m = n) != NULL);
}

/*
 * Mbuffer utility routines.
 */

/*
 * Lesser-used path for M_PREPEND:
 * allocate new mbuf to prepend to chain,
 * copy junk along.
 */
struct mbuf *
m_prepend(m, len, how)
	register struct mbuf *m;
	int len, how;
{
	struct mbuf *mn;

	MGET(mn, how, m->m_type);
	if (mn == NULL) {
		m_freem(m);
		return (NULL);
	}
	if (m->m_flags & M_PKTHDR)
		M_MOVE_PKTHDR(mn, m);
	mn->m_next = m;
	m = mn;
	if (len < MHLEN)
		MH_ALIGN(m, len);
	m->m_len = len;
	return (m);
}

/*
 * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
 * continuing for "len" bytes.  If len is M_COPYALL, copy to the end of
 * the mbuf chain.  The wait parameter is a choice of M_WAIT/M_DONTWAIT
 * from the caller.
 */
int MCFail;

struct mbuf *
m_copym(m, off0, len, wait)
	struct mbuf *m;
	int off0, wait;
	int len;
{
	return m_copym0(m, off0, len, wait, 0);	/* shallow copy on M_EXT */
}
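/*
 * Usage sketch (hypothetical caller, not part of this file): duplicating
 * a packet for a tap before handing the original on:
 *
 *	struct mbuf *n = m_copym(m, 0, M_COPYALL, M_DONTWAIT);
 *	if (n == NULL)
 *		... allocation failed; the original chain m is intact ...
 *
 * Because the copy is shallow, cluster data is shared by reference;
 * use m_copym2() below when the copy will be written to.
 */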
/*
 * m_copym2() is like m_copym(), except it COPIES cluster mbufs, instead
 * of merely bumping the reference count.
 */
struct mbuf *
m_copym2(m, off0, len, wait)
	struct mbuf *m;
	int off0, wait;
	int len;
{
	return m_copym0(m, off0, len, wait, 1);	/* deep copy */
}

struct mbuf *
m_copym0(m, off0, len, wait, deep)
	struct mbuf *m;
	int off0, wait;
	int len;
	int deep;	/* deep copy */
{
	struct mbuf *n, **np;
	int off = off0;
	struct mbuf *top;
	int copyhdr = 0;

	if (off < 0 || len < 0)
		panic("m_copym0: off %d, len %d", off, len);
	if (off == 0 && m->m_flags & M_PKTHDR)
		copyhdr = 1;
	while (off > 0) {
		if (m == 0)
			panic("m_copym0: null mbuf");
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	np = &top;
	top = 0;
	while (len > 0) {
		if (m == 0) {
			if (len != M_COPYALL)
				panic("m_copym0: m == 0 and not COPYALL");
			break;
		}
		MGET(n, wait, m->m_type);
		*np = n;
		if (n == 0)
			goto nospace;
		if (copyhdr) {
			M_DUP_PKTHDR(n, m);
			if (len == M_COPYALL)
				n->m_pkthdr.len -= off0;
			else
				n->m_pkthdr.len = len;
			copyhdr = 0;
		}
		n->m_len = min(len, m->m_len - off);
		if (m->m_flags & M_EXT) {
			if (!deep) {
				n->m_data = m->m_data + off;
				n->m_ext = m->m_ext;
				MCLADDREFERENCE(m, n);
			} else {
				/*
				 * We are unsure about the way m was
				 * allocated, so copy the data into one or
				 * more MCLBYTES-sized cluster mbufs.
				 */
				MCLGET(n, wait);
				n->m_len = 0;
				n->m_len = M_TRAILINGSPACE(n);
				n->m_len = min(n->m_len, len);
				n->m_len = min(n->m_len, m->m_len - off);
				memcpy(mtod(n, caddr_t),
				    mtod(m, caddr_t) + off,
				    (unsigned)n->m_len);
			}
		} else
			memcpy(mtod(n, caddr_t), mtod(m, caddr_t) + off,
			    (unsigned)n->m_len);
		if (len != M_COPYALL)
			len -= n->m_len;
		off += n->m_len;
#ifdef DIAGNOSTIC
		if (off > m->m_len)
			panic("m_copym0 overrun");
#endif
		if (off == m->m_len) {
			m = m->m_next;
			off = 0;
		}
		np = &n->m_next;
	}
	if (top == 0)
		MCFail++;
	return (top);
nospace:
	m_freem(top);
	MCFail++;
	return (0);
}

/*
 * Copy data from an mbuf chain starting "off" bytes from the beginning,
 * continuing for "len" bytes, into the indicated buffer.
 */
void
m_copydata(m, off, len, cp)
	register struct mbuf *m;
	register int off;
	register int len;
	caddr_t cp;
{
	register unsigned count;

	if (off < 0)
		panic("m_copydata: off %d < 0", off);
	if (len < 0)
		panic("m_copydata: len %d < 0", len);
	while (off > 0) {
		if (m == NULL)
			panic("m_copydata: null mbuf in skip");
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	while (len > 0) {
		if (m == NULL)
			panic("m_copydata: null mbuf");
		count = min(m->m_len - off, len);
		bcopy(mtod(m, caddr_t) + off, cp, count);
		len -= count;
		cp += count;
		off = 0;
		m = m->m_next;
	}
}
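/*
 * Usage sketch (hypothetical caller): pulling a header that may span
 * several mbufs into a local structure; "hdroff" is an assumed offset
 * variable:
 *
 *	struct tcphdr th;
 *
 *	m_copydata(m, hdroff, sizeof(th), (caddr_t)&th);
 *
 * The caller must know that the chain holds at least
 * hdroff + sizeof(th) bytes; m_copydata() panics on a short chain
 * instead of returning an error.
 */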
/*
 * Copy data from a buffer back into the indicated mbuf chain,
 * starting "off" bytes from the beginning, extending the mbuf
 * chain if necessary.  The mbuf chain must be properly initialized,
 * including the setting of m_len.
 */
void
m_copyback(m0, off, len, cp)
	struct mbuf *m0;
	register int off;
	register int len;
	caddr_t cp;
{
	register int mlen;
	register struct mbuf *m = m0, *n;
	int totlen = 0;

	if (m0 == 0)
		return;
	while (off > (mlen = m->m_len)) {
		off -= mlen;
		totlen += mlen;
		if (m->m_next == 0) {
			n = m_getclr(M_DONTWAIT, m->m_type);
			if (n == 0)
				goto out;
			n->m_len = min(MLEN, len + off);
			m->m_next = n;
		}
		m = m->m_next;
	}
	while (len > 0) {
		mlen = min(m->m_len - off, len);
		bcopy(cp, off + mtod(m, caddr_t), (unsigned)mlen);
		cp += mlen;
		len -= mlen;
		mlen += off;
		off = 0;
		totlen += mlen;
		if (len == 0)
			break;
		if (m->m_next == 0) {
			n = m_get(M_DONTWAIT, m->m_type);
			if (n == 0)
				break;
			n->m_len = min(MLEN, len);
			m->m_next = n;
		}
		m = m->m_next;
	}
out:	if (((m = m0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen))
		m->m_pkthdr.len = totlen;
}

/*
 * Concatenate mbuf chain n to m.
 * Both chains must be of the same type (e.g. MT_DATA).
 * Any m_pkthdr is not updated.
 */
void
m_cat(m, n)
	register struct mbuf *m, *n;
{
	while (m->m_next)
		m = m->m_next;
	while (n) {
		if (m->m_flags & M_EXT ||
		    m->m_data + m->m_len + n->m_len >= &m->m_dat[MLEN]) {
			/* just join the two chains */
			m->m_next = n;
			return;
		}
		/* splat the data from one into the other */
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (u_int)n->m_len);
		m->m_len += n->m_len;
		n = m_free(n);
	}
}

/*
 * Trim req_len bytes from the mbuf chain: from the head if req_len is
 * positive, from the tail if it is negative.
 */
void
m_adj(mp, req_len)
	struct mbuf *mp;
	int req_len;
{
	register int len = req_len;
	register struct mbuf *m;
	register int count;

	if ((m = mp) == NULL)
		return;
	if (len >= 0) {
		/*
		 * Trim from head.
		 */
		while (m != NULL && len > 0) {
			if (m->m_len <= len) {
				len -= m->m_len;
				m->m_len = 0;
				m = m->m_next;
			} else {
				m->m_len -= len;
				m->m_data += len;
				len = 0;
			}
		}
		m = mp;
		if (mp->m_flags & M_PKTHDR)
			m->m_pkthdr.len -= (req_len - len);
	} else {
		/*
		 * Trim from tail.  Scan the mbuf chain,
		 * calculating its length and finding the last mbuf.
		 * If the adjustment only affects this mbuf, then just
		 * adjust and return.  Otherwise, rescan and truncate
		 * after the remaining size.
		 */
		len = -len;
		count = 0;
		for (;;) {
			count += m->m_len;
			if (m->m_next == NULL)
				break;
			m = m->m_next;
		}
		if (m->m_len >= len) {
			m->m_len -= len;
			if (mp->m_flags & M_PKTHDR)
				mp->m_pkthdr.len -= len;
			return;
		}
		count -= len;
		if (count < 0)
			count = 0;
		/*
		 * Correct length for chain is "count".
		 * Find the mbuf with last data, adjust its length,
		 * and toss data from remaining mbufs on chain.
		 */
		m = mp;
		if (m->m_flags & M_PKTHDR)
			m->m_pkthdr.len = count;
		for (; m; m = m->m_next) {
			if (m->m_len >= count) {
				m->m_len = count;
				break;
			}
			count -= m->m_len;
		}
		while ((m = m->m_next) != NULL)
			m->m_len = 0;
	}
}
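/*
 * Usage sketch (hypothetical caller): stripping an "iphlen"-byte header
 * from the front of a packet and "padlen" bytes of padding from the
 * tail; both length names are assumptions for illustration:
 *
 *	m_adj(m, iphlen);	positive: trim from the head
 *	m_adj(m, -padlen);	negative: trim from the tail
 */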
/*
 * Rearrange an mbuf chain so that len bytes are contiguous
 * and in the data area of an mbuf (so that mtod and dtom
 * will work for a structure of size len).  Returns the resulting
 * mbuf chain on success, frees it and returns NULL on failure.
 * If there is room, it will add up to max_protohdr-len extra bytes to the
 * contiguous region in an attempt to avoid being called next time.
 */
int MPFail;

struct mbuf *
m_pullup(n, len)
	register struct mbuf *n;
	int len;
{
	register struct mbuf *m;
	register int count;
	int space;

	/*
	 * If first mbuf has no cluster, and has room for len bytes
	 * without shifting current data, pullup into it,
	 * otherwise allocate a new mbuf to prepend to the chain.
	 */
	if ((n->m_flags & M_EXT) == 0 &&
	    n->m_data + len < &n->m_dat[MLEN] && n->m_next) {
		if (n->m_len >= len)
			return (n);
		m = n;
		n = n->m_next;
		len -= m->m_len;
	} else {
		if (len > MHLEN)
			goto bad;
		MGET(m, M_DONTWAIT, n->m_type);
		if (m == NULL)
			goto bad;
		m->m_len = 0;
		if (n->m_flags & M_PKTHDR)
			M_MOVE_PKTHDR(m, n);
	}
	space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
	do {
		count = min(min(max(len, max_protohdr), space), n->m_len);
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (unsigned)count);
		len -= count;
		m->m_len += count;
		n->m_len -= count;
		space -= count;
		if (n->m_len)
			n->m_data += count;
		else
			n = m_free(n);
	} while (len > 0 && n);
	if (len > 0) {
		(void)m_free(m);
		goto bad;
	}
	m->m_next = n;
	return (m);
bad:
	m_freem(n);
	MPFail++;
	return (NULL);
}

/*
 * m_pullup2() works like m_pullup(), save that len can be <= MCLBYTES.
 * m_pullup2() only works on values of len such that MHLEN < len <= MCLBYTES;
 * it calls m_pullup() for values <= MHLEN.  It also only coagulates the
 * requested number of bytes.  (For those of us who expect unwieldy option
 * headers.)
 *
 * KEBE SAYS:  Remember that dtom() calls with data in clusters does not work!
 */
struct mbuf *
m_pullup2(n, len)
	register struct mbuf *n;
	int len;
{
	register struct mbuf *m;
	register int count;

	if (len <= MHLEN)
		return m_pullup(n, len);
	if ((n->m_flags & M_EXT) != 0 &&
	    n->m_data + len < &n->m_data[MCLBYTES] && n->m_next) {
		if (n->m_len >= len)
			return (n);
		m = n;
		n = n->m_next;
		len -= m->m_len;
	} else {
		if (len > MCLBYTES)
			goto bad;
		MGET(m, M_DONTWAIT, n->m_type);
		if (m == NULL)
			goto bad;
		MCLGET(m, M_DONTWAIT);
		if ((m->m_flags & M_EXT) == 0) {
			/* don't leak the new mbuf on cluster failure */
			m_free(m);
			goto bad;
		}
		m->m_len = 0;
		if (n->m_flags & M_PKTHDR) {
			/* Too many adverse side effects. */
			/* M_MOVE_PKTHDR(m, n); */
			m->m_flags = (n->m_flags & M_COPYFLAGS) | M_EXT;
			M_MOVE_HDR(m, n);
			/* n->m_data is cool. */
		}
	}

	do {
		count = min(len, n->m_len);
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (unsigned)count);
		len -= count;
		m->m_len += count;
		n->m_len -= count;
		if (n->m_len)
			n->m_data += count;
		else
			n = m_free(n);
	} while (len > 0 && n);
	if (len > 0) {
		(void)m_free(m);
		goto bad;
	}
	m->m_next = n;

	return (m);
bad:
	m_freem(n);
	MPFail++;
	return (NULL);
}
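/*
 * Usage sketch (the classic m_pullup() idiom, hypothetical caller):
 * make a protocol header contiguous before casting the data pointer:
 *
 *	if (m->m_len < sizeof(struct ip) &&
 *	    (m = m_pullup(m, sizeof(struct ip))) == NULL)
 *		return;			(m_pullup freed the chain)
 *	ip = mtod(m, struct ip *);
 */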
/*
 * Return a pointer to mbuf/offset of location in mbuf chain.
 */
struct mbuf *
m_getptr(m, loc, off)
	struct mbuf *m;
	int loc;
	int *off;
{
	while (loc >= 0) {
		/* Normal end of search */
		if (m->m_len > loc) {
			*off = loc;
			return (m);
		} else {
			loc -= m->m_len;

			if (m->m_next == NULL) {
				if (loc == 0) {
					/* Point at the end of valid data */
					*off = m->m_len;
					return (m);
				} else
					return (NULL);
			} else
				m = m->m_next;
		}
	}

	return (NULL);
}

/*
 * Inject a new mbuf chain of length siz in mbuf chain m0 at
 * position len0.  Returns a pointer to the first injected mbuf, or
 * NULL on failure (m0 is left undisturbed).  Note that if there is
 * enough space for an object of size siz in the appropriate position,
 * no memory will be allocated.  Also, there will be no data movement in
 * the first len0 bytes (pointers to that will remain valid).
 *
 * XXX It is assumed that siz is less than the size of an mbuf at the moment.
 */
struct mbuf *
m_inject(m0, len0, siz, wait)
	register struct mbuf *m0;
	int len0, siz, wait;
{
	register struct mbuf *m, *n, *n2 = NULL, *n3;
	unsigned len = len0, remain;

	if ((siz >= MHLEN) || (len0 <= 0))
		return (NULL);
	for (m = m0; m && len > m->m_len; m = m->m_next)
		len -= m->m_len;
	if (m == NULL)
		return (NULL);
	remain = m->m_len - len;
	if (remain == 0) {
		if ((m->m_next) && (M_LEADINGSPACE(m->m_next) >= siz)) {
			m->m_next->m_len += siz;
			if (m0->m_flags & M_PKTHDR)
				m0->m_pkthdr.len += siz;
			m->m_next->m_data -= siz;
			return m->m_next;
		}
	} else {
		n2 = m_copym2(m, len, remain, wait);
		if (n2 == NULL)
			return (NULL);
	}

	MGET(n, wait, MT_DATA);
	if (n == NULL) {
		if (n2)
			m_freem(n2);
		return (NULL);
	}

	n->m_len = siz;
	if (m0->m_flags & M_PKTHDR)
		m0->m_pkthdr.len += siz;
	m->m_len -= remain;		/* Trim */
	if (n2) {
		for (n3 = n; n3->m_next != NULL; n3 = n3->m_next)
			;
		n3->m_next = n2;
	} else
		n3 = n;
	for (; n3->m_next != NULL; n3 = n3->m_next)
		;
	n3->m_next = m->m_next;
	m->m_next = n;
	return n;
}

/*
 * Partition an mbuf chain in two pieces, returning the tail --
 * all but the first len0 bytes.  In case of failure, it returns NULL and
 * attempts to restore the chain to its original state.
 */
struct mbuf *
m_split(m0, len0, wait)
	register struct mbuf *m0;
	int len0, wait;
{
	register struct mbuf *m, *n;
	unsigned len = len0, remain, olen;

	for (m = m0; m && len > m->m_len; m = m->m_next)
		len -= m->m_len;
	if (m == NULL)
		return (NULL);
	remain = m->m_len - len;
	if (m0->m_flags & M_PKTHDR) {
		MGETHDR(n, wait, m0->m_type);
		if (n == NULL)
			return (NULL);
		M_DUP_PKTHDR(n, m0);
		n->m_pkthdr.len -= len0;
		olen = m0->m_pkthdr.len;
		m0->m_pkthdr.len = len0;
		if (m->m_flags & M_EXT)
			goto extpacket;
		if (remain > MHLEN) {
			/* m can't be the lead packet */
			MH_ALIGN(n, 0);
			n->m_next = m_split(m, len, wait);
			if (n->m_next == NULL) {
				(void) m_free(n);
				m0->m_pkthdr.len = olen;
				return (NULL);
			} else
				return (n);
		} else
			MH_ALIGN(n, remain);
	} else if (remain == 0) {
		n = m->m_next;
		m->m_next = NULL;
		return (n);
	} else {
		MGET(n, wait, m->m_type);
		if (n == NULL)
			return (NULL);
		M_ALIGN(n, remain);
	}
extpacket:
	if (m->m_flags & M_EXT) {
		n->m_flags |= M_EXT;
		MCLADDREFERENCE(m, n);
		n->m_data = m->m_data + len;
	} else {
		bcopy(mtod(m, caddr_t) + len, mtod(n, caddr_t), remain);
	}
	n->m_len = remain;
	m->m_len = len;
	n->m_next = m->m_next;
	m->m_next = NULL;
	return (n);
}
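/*
 * Usage sketch (hypothetical caller): splitting an oversized packet for
 * fragmentation, where "mtu" is an assumed payload boundary:
 *
 *	struct mbuf *tail = m_split(m, mtu, M_DONTWAIT);
 *	if (tail == NULL)
 *		... m is left undisturbed; handle the failure ...
 *
 * On success m holds the first mtu bytes and tail holds the remainder.
 */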
/*
 * Routine to copy from device local memory into mbufs.
 */
struct mbuf *
m_devget(buf, totlen, off0, ifp, copy)
	char *buf;
	int totlen, off0;
	struct ifnet *ifp;
	void (*copy)(const void *, void *, size_t);
{
	register struct mbuf *m;
	struct mbuf *top = NULL, **mp = &top;
	register int off = off0, len;
	register char *cp;
	char *epkt;

	cp = buf;
	epkt = cp + totlen;
	if (off) {
		/*
		 * If 'off' is non-zero, packet is trailer-encapsulated,
		 * so we have to skip the type and length fields.
		 */
		cp += off + 2 * sizeof(u_int16_t);
		totlen -= 2 * sizeof(u_int16_t);
	}
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (NULL);
	m->m_pkthdr.rcvif = ifp;
	m->m_pkthdr.len = totlen;
	m->m_len = MHLEN;

	while (totlen > 0) {
		if (top != NULL) {
			MGET(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				m_freem(top);
				return (NULL);
			}
			m->m_len = MLEN;
		}
		len = min(totlen, epkt - cp);
		if (len >= MINCLSIZE) {
			MCLGET(m, M_DONTWAIT);
			if (m->m_flags & M_EXT)
				m->m_len = len = min(len, MCLBYTES);
			else
				len = m->m_len;
		} else {
			/*
			 * Place initial small packet/header at end of mbuf.
			 */
			if (len < m->m_len) {
				if (top == NULL &&
				    len + max_linkhdr <= m->m_len)
					m->m_data += max_linkhdr;
				m->m_len = len;
			} else
				len = m->m_len;
		}
		if (copy)
			copy(cp, mtod(m, caddr_t), (size_t)len);
		else
			bcopy(cp, mtod(m, caddr_t), (size_t)len);
		cp += len;
		*mp = m;
		mp = &m->m_next;
		totlen -= len;
		if (cp == epkt)
			cp = buf;
	}
	return (top);
}

void
m_zero(m)
	struct mbuf *m;
{
	while (m) {
#ifdef DIAGNOSTIC
		if (M_READONLY(m))
			panic("m_zero: M_READONLY");
#endif /* DIAGNOSTIC */
		if (m->m_flags & M_EXT)
			memset(m->m_ext.ext_buf, 0, m->m_ext.ext_size);
		else {
			if (m->m_flags & M_PKTHDR)
				memset(m->m_pktdat, 0, MHLEN);
			else
				memset(m->m_dat, 0, MLEN);
		}
		m = m->m_next;
	}
}

/*
 * Apply function f to the data in an mbuf chain starting "off" bytes from
 * the beginning, continuing for "len" bytes.
 */
int
m_apply(m, off, len, f, fstate)
	struct mbuf *m;
	int off;
	int len;
	/* fstate, data, len */
	int (*f)(caddr_t, caddr_t, unsigned int);
	caddr_t fstate;
{
	int rval;
	unsigned int count;

	if (len < 0)
		panic("m_apply: len %d < 0", len);
	if (off < 0)
		panic("m_apply: off %d < 0", off);
	while (off > 0) {
		if (m == NULL)
			panic("m_apply: null mbuf in skip");
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	while (len > 0) {
		if (m == NULL)
			panic("m_apply: null mbuf");
		count = min(m->m_len - off, len);

		rval = f(fstate, mtod(m, caddr_t) + off, count);
		if (rval)
			return (rval);

		len -= count;
		off = 0;
		m = m->m_next;
	}

	return (0);
}
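/*
 * Usage sketch (hypothetical caller): walking a chain segment with a
 * callback instead of flattening it first.  The helper name and its
 * state variable are assumptions for illustration:
 *
 *	static int
 *	sum_bytes(caddr_t state, caddr_t data, unsigned int len)
 *	{
 *		u_int32_t *sum = (u_int32_t *)state;
 *
 *		while (len--)
 *			*sum += (u_char)*data++;
 *		return (0);	(nonzero would abort the walk)
 *	}
 *
 *	u_int32_t sum = 0;
 *	(void)m_apply(m, off, len, sum_bytes, (caddr_t)&sum);
 */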
#ifdef SMALL_KERNEL
/*
 * Adding code to a small kernel might look absurd, but the functions
 * below are used in place of the corresponding macros to keep the
 * compiled code size down.
 */
struct mbuf *
_sk_mget(int how, int type)
{
	struct mbuf *m;
	_MGET(m, how, type);
	return m;
}

struct mbuf *
_sk_mgethdr(int how, int type)
{
	struct mbuf *m;
	_MGETHDR(m, how, type);
	return m;
}

void
_sk_mclget(struct mbuf *m, int how)
{
	_MCLGET(m, how);
}
#endif /* SMALL_KERNEL */
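/*
 * Illustration (assumed sys/mbuf.h wiring, not verified here): under
 * SMALL_KERNEL the allocation macros are expected to expand to calls of
 * the functions above, e.g.
 *
 *	MGET(m, how, type)	->	m = _sk_mget(how, type)
 *
 * trading the speed of inline expansion for smaller compiled code.
 */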