/*	$NetBSD: uipc_mbuf.c,v 1.123 2007/11/14 14:11:57 yamt Exp $	*/

/*-
 * Copyright (c) 1999, 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1982, 1986, 1988, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_mbuf.c	8.4 (Berkeley) 2/14/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uipc_mbuf.c,v 1.123 2007/11/14 14:11:57 yamt Exp $");

#include "opt_mbuftrace.h"
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#define MBTYPES
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/syslog.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/pool.h>
#include <sys/socket.h>
#include <sys/sysctl.h>

#include <net/if.h>

#include <uvm/uvm.h>

pool_cache_t mb_cache;	/* mbuf cache */
pool_cache_t mcl_cache;	/* mbuf cluster cache */

struct mbstat mbstat;
int max_linkhdr;
int max_protohdr;
int max_hdr;
int max_datalen;

static int mb_ctor(void *, void *, int);

static void *mclpool_alloc(struct pool *, int);
static void mclpool_release(struct pool *, void *);

static struct pool_allocator mclpool_allocator = {
	.pa_alloc = mclpool_alloc,
	.pa_free = mclpool_release,
};

static struct mbuf *m_copym0(struct mbuf *, int, int, int, int);
static struct mbuf *m_split0(struct mbuf *, int, int, int);
static int m_copyback0(struct mbuf **, int, int, const void *, int, int);

/* flags for m_copyback0 */
#define	M_COPYBACK0_COPYBACK	0x0001	/* copyback from cp */
#define	M_COPYBACK0_PRESERVE	0x0002	/* preserve original data */
#define	M_COPYBACK0_COW		0x0004	/* do copy-on-write */
#define	M_COPYBACK0_EXTEND	0x0008	/* extend chain */

static const char mclpool_warnmsg[] =
    "WARNING: mclpool limit reached; increase NMBCLUSTERS";

MALLOC_DEFINE(M_MBUF, "mbuf", "mbuf");

#ifdef MBUFTRACE
struct mownerhead mowners = LIST_HEAD_INITIALIZER(mowners);
struct mowner unknown_mowners[] = {
	MOWNER_INIT("unknown", "free"),
	MOWNER_INIT("unknown", "data"),
	MOWNER_INIT("unknown", "header"),
	MOWNER_INIT("unknown", "soname"),
	MOWNER_INIT("unknown", "soopts"),
	MOWNER_INIT("unknown", "ftable"),
	MOWNER_INIT("unknown", "control"),
	MOWNER_INIT("unknown", "oobdata"),
};
struct mowner revoked_mowner = MOWNER_INIT("revoked", "");
#endif

/*
 * Initialize the mbuf allocator.
 */
void
mbinit(void)
{

	KASSERT(sizeof(struct _m_ext) <= MHLEN);
	KASSERT(sizeof(struct mbuf) == MSIZE);

	mclpool_allocator.pa_backingmap = mb_map;

	mb_cache = pool_cache_init(msize, 0, 0, 0, "mbpl",
	    NULL, IPL_VM, mb_ctor, NULL, NULL);
	KASSERT(mb_cache != NULL);

	mcl_cache = pool_cache_init(mclbytes, 0, 0, 0, "mclpl",
	    &mclpool_allocator, IPL_VM, NULL, NULL, NULL);
	KASSERT(mcl_cache != NULL);

	pool_cache_set_drain_hook(mb_cache, m_reclaim, NULL);
	pool_cache_set_drain_hook(mcl_cache, m_reclaim, NULL);

	/*
	 * Set the hard limit on the mclpool to the number of
	 * mbuf clusters the kernel is to support.  Log the limit
	 * reached message at most once a minute.
	 */
	pool_cache_sethardlimit(mcl_cache, nmbclusters, mclpool_warnmsg, 60);

	/*
	 * Set a low water mark for both mbufs and clusters.  This should
	 * help ensure that they can be allocated in a memory starvation
	 * situation.  This is important for e.g. diskless systems which
	 * must allocate mbufs in order for the pagedaemon to clean pages.
	 */
	pool_cache_setlowat(mb_cache, mblowat);
	pool_cache_setlowat(mcl_cache, mcllowat);

#ifdef MBUFTRACE
	{
		/*
		 * Attach the unknown mowners.
		 */
		int i;
		MOWNER_ATTACH(&revoked_mowner);
		for (i = sizeof(unknown_mowners)/sizeof(unknown_mowners[0]);
		     i-- > 0; )
			MOWNER_ATTACH(&unknown_mowners[i]);
	}
#endif
}

/*
 * sysctl helper routine for the kern.mbuf subtree.  nmbclusters may
 * or may not be writable, and mblowat and mcllowat need range
 * checking and pool tweaking after being reset.
 */
static int
sysctl_kern_mbuf(SYSCTLFN_ARGS)
{
	int error, newval;
	struct sysctlnode node;

	node = *rnode;
	node.sysctl_data = &newval;
	switch (rnode->sysctl_num) {
	case MBUF_NMBCLUSTERS:
		if (mb_map != NULL) {
			node.sysctl_flags &= ~CTLFLAG_READWRITE;
			node.sysctl_flags |= CTLFLAG_READONLY;
		}
		/* FALLTHROUGH */
	case MBUF_MBLOWAT:
	case MBUF_MCLLOWAT:
		newval = *(int*)rnode->sysctl_data;
		break;
	default:
		return (EOPNOTSUPP);
	}

	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return (error);
	if (newval < 0)
		return (EINVAL);

	switch (node.sysctl_num) {
	case MBUF_NMBCLUSTERS:
		if (newval < nmbclusters)
			return (EINVAL);
		nmbclusters = newval;
		pool_cache_sethardlimit(mcl_cache, nmbclusters,
		    mclpool_warnmsg, 60);
		break;
	case MBUF_MBLOWAT:
		mblowat = newval;
		pool_cache_setlowat(mb_cache, mblowat);
		break;
	case MBUF_MCLLOWAT:
		mcllowat = newval;
		pool_cache_setlowat(mcl_cache, mcllowat);
		break;
	}

	return (0);
}

#ifdef MBUFTRACE
static int
sysctl_kern_mbuf_mowners(SYSCTLFN_ARGS)
{
	struct mowner *mo;
	size_t len = 0;
	int error = 0;

	if (namelen != 0)
		return (EINVAL);
	if (newp != NULL)
		return (EPERM);

	LIST_FOREACH(mo, &mowners, mo_link) {
		if (oldp != NULL) {
			if (*oldlenp - len < sizeof(*mo)) {
				error = ENOMEM;
				break;
			}
			error = copyout(mo, (char *)oldp + len, sizeof(*mo));
			if (error)
				break;
		}
		len += sizeof(*mo);
	}

	if (error == 0)
		*oldlenp = len;

	return (error);
}
#endif /* MBUFTRACE */

SYSCTL_SETUP(sysctl_kern_mbuf_setup, "sysctl kern.mbuf subtree setup")
{

	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_NODE, "kern", NULL,
	    NULL, 0, NULL, 0,
	    CTL_KERN, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_NODE, "mbuf",
	    SYSCTL_DESCR("mbuf control variables"),
	    NULL, 0, NULL, 0,
	    CTL_KERN, KERN_MBUF, CTL_EOL);

	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_IMMEDIATE,
	    CTLTYPE_INT, "msize",
	    SYSCTL_DESCR("mbuf base size"),
	    NULL, msize, NULL, 0,
	    CTL_KERN, KERN_MBUF, MBUF_MSIZE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_IMMEDIATE,
	    CTLTYPE_INT, "mclbytes",
	    SYSCTL_DESCR("mbuf cluster size"),
	    NULL, mclbytes, NULL, 0,
	    CTL_KERN, KERN_MBUF, MBUF_MCLBYTES, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
	    CTLTYPE_INT, "nmbclusters",
	    SYSCTL_DESCR("Limit on the number of mbuf clusters"),
	    sysctl_kern_mbuf, 0, &nmbclusters, 0,
	    CTL_KERN, KERN_MBUF, MBUF_NMBCLUSTERS, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
	    CTLTYPE_INT, "mblowat",
	    SYSCTL_DESCR("mbuf low water mark"),
	    sysctl_kern_mbuf, 0, &mblowat, 0,
	    CTL_KERN, KERN_MBUF, MBUF_MBLOWAT, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
	    CTLTYPE_INT, "mcllowat",
	    SYSCTL_DESCR("mbuf cluster low water mark"),
	    sysctl_kern_mbuf, 0, &mcllowat, 0,
	    CTL_KERN, KERN_MBUF, MBUF_MCLLOWAT, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_STRUCT, "stats",
	    SYSCTL_DESCR("mbuf allocation statistics"),
	    NULL, 0, &mbstat, sizeof(mbstat),
	    CTL_KERN, KERN_MBUF, MBUF_STATS, CTL_EOL);
#ifdef MBUFTRACE
	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_STRUCT, "mowners",
	    SYSCTL_DESCR("Information about mbuf owners"),
	    sysctl_kern_mbuf_mowners, 0, NULL, 0,
	    CTL_KERN, KERN_MBUF, MBUF_MOWNERS, CTL_EOL);
#endif /* MBUFTRACE */
}

static void *
mclpool_alloc(struct pool *pp, int flags)
{
	bool waitok = (flags & PR_WAITOK) ? true : false;

	return ((void *)uvm_km_alloc_poolpage(mb_map, waitok));
}

static void
mclpool_release(struct pool *pp, void *v)
{

	uvm_km_free_poolpage(mb_map, (vaddr_t)v);
}

/*ARGSUSED*/
static int
mb_ctor(void *arg, void *object, int flags)
{
	struct mbuf *m = object;

#ifdef POOL_VTOPHYS
	m->m_paddr = POOL_VTOPHYS(m);
#else
	m->m_paddr = M_PADDR_INVALID;
#endif
	return (0);
}

void
m_reclaim(void *arg, int flags)
{
	struct domain *dp;
	const struct protosw *pr;
	struct ifnet *ifp;
	int s;

	KERNEL_LOCK(1, NULL);
	s = splvm();
	DOMAIN_FOREACH(dp) {
		for (pr = dp->dom_protosw;
		     pr < dp->dom_protoswNPROTOSW; pr++)
			if (pr->pr_drain)
				(*pr->pr_drain)();
	}
	IFNET_FOREACH(ifp) {
		if (ifp->if_drain)
			(*ifp->if_drain)(ifp);
	}
	splx(s);
	mbstat.m_drain++;
	KERNEL_UNLOCK_ONE(NULL);
}

/*
 * Space allocation routines.
 * These are also available as macros
 * for critical paths.
 */
struct mbuf *
m_get(int nowait, int type)
{
	struct mbuf *m;

	MGET(m, nowait, type);
	return (m);
}

struct mbuf *
m_gethdr(int nowait, int type)
{
	struct mbuf *m;

	MGETHDR(m, nowait, type);
	return (m);
}

struct mbuf *
m_getclr(int nowait, int type)
{
	struct mbuf *m;

	MGET(m, nowait, type);
	if (m == 0)
		return (NULL);
	memset(mtod(m, void *), 0, MLEN);
	return (m);
}

void
m_clget(struct mbuf *m, int nowait)
{

	MCLGET(m, nowait);
}

struct mbuf *
m_free(struct mbuf *m)
{
	struct mbuf *n;

	MFREE(m, n);
	return (n);
}

void
m_freem(struct mbuf *m)
{
	struct mbuf *n;

	if (m == NULL)
		return;
	do {
		MFREE(m, n);
		m = n;
	} while (m);
}

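/*
 * Illustrative, non-compiled usage sketch of the allocators above (the
 * function name "example_alloc" and the parameter "len" are hypothetical):
 * allocate a packet header mbuf, attach a cluster when the payload will
 * not fit in the internal buffer, and release everything on failure.
 * A minimal sketch, assuming a caller for which M_DONTWAIT is appropriate.
 */
#if 0
static struct mbuf *
example_alloc(int len)
{
	struct mbuf *m;

	m = m_gethdr(M_DONTWAIT, MT_DATA);	/* header mbuf */
	if (m == NULL)
		return NULL;
	if (len > MHLEN) {
		m_clget(m, M_DONTWAIT);		/* try to add a cluster */
		if ((m->m_flags & M_EXT) == 0) {
			m_freem(m);		/* cluster pool exhausted */
			return NULL;
		}
	}
	m->m_len = m->m_pkthdr.len = len;
	return m;
}
#endif
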
#ifdef MBUFTRACE
/*
 * Walk a chain of mbufs, claiming ownership of each mbuf in the chain.
 */
void
m_claimm(struct mbuf *m, struct mowner *mo)
{

	for (; m != NULL; m = m->m_next)
		MCLAIM(m, mo);
}
#endif

/*
 * Mbuf utility routines.
 */

/*
 * Lesser-used path for M_PREPEND:
 * allocate a new mbuf to prepend to the chain,
 * copy junk along.
 */
struct mbuf *
m_prepend(struct mbuf *m, int len, int how)
{
	struct mbuf *mn;

	MGET(mn, how, m->m_type);
	if (mn == (struct mbuf *)NULL) {
		m_freem(m);
		return ((struct mbuf *)NULL);
	}
	if (m->m_flags & M_PKTHDR) {
		M_MOVE_PKTHDR(mn, m);
	} else {
		MCLAIM(mn, m->m_owner);
	}
	mn->m_next = m;
	m = mn;
	if (len < MHLEN)
		MH_ALIGN(m, len);
	m->m_len = len;
	return (m);
}

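/*
 * Illustrative, non-compiled sketch (the struct "example_hdr" and the
 * function are hypothetical): prepend room for a fixed-size header to an
 * existing chain.  On failure m_prepend() has already freed the chain,
 * so the caller must not touch it again.
 */
#if 0
static struct mbuf *
example_prepend(struct mbuf *m)
{
	struct example_hdr { uint32_t eh_tag; } *eh;

	m = m_prepend(m, sizeof(*eh), M_DONTWAIT);
	if (m == NULL)
		return NULL;		/* chain already freed */
	eh = mtod(m, struct example_hdr *);
	eh->eh_tag = 0;
	return m;
}
#endif
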
/*
 * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
 * continuing for "len" bytes.  If len is M_COPYALL, copy to the end of
 * the mbuf chain.  The wait parameter is a choice of M_WAIT/M_DONTWAIT
 * from the caller.
 */
int MCFail;

struct mbuf *
m_copym(struct mbuf *m, int off0, int len, int wait)
{

	return m_copym0(m, off0, len, wait, 0);	/* shallow copy on M_EXT */
}

struct mbuf *
m_dup(struct mbuf *m, int off0, int len, int wait)
{

	return m_copym0(m, off0, len, wait, 1);	/* deep copy */
}

static struct mbuf *
m_copym0(struct mbuf *m, int off0, int len, int wait, int deep)
{
	struct mbuf *n, **np;
	int off = off0;
	struct mbuf *top;
	int copyhdr = 0;

	if (off < 0 || len < 0)
		panic("m_copym: off %d, len %d", off, len);
	if (off == 0 && m->m_flags & M_PKTHDR)
		copyhdr = 1;
	while (off > 0) {
		if (m == 0)
			panic("m_copym: m == 0, off %d", off);
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	np = &top;
	top = 0;
	while (len > 0) {
		if (m == 0) {
			if (len != M_COPYALL)
				panic("m_copym: m == 0, len %d [!COPYALL]",
				    len);
			break;
		}
		MGET(n, wait, m->m_type);
		*np = n;
		if (n == 0)
			goto nospace;
		MCLAIM(n, m->m_owner);
		if (copyhdr) {
			M_COPY_PKTHDR(n, m);
			if (len == M_COPYALL)
				n->m_pkthdr.len -= off0;
			else
				n->m_pkthdr.len = len;
			copyhdr = 0;
		}
		n->m_len = min(len, m->m_len - off);
		if (m->m_flags & M_EXT) {
			if (!deep) {
				n->m_data = m->m_data + off;
				n->m_ext = m->m_ext;
				MCLADDREFERENCE(m, n);
			} else {
				/*
				 * we are unsure about the way m was
				 * allocated.  copy into multiple
				 * MCLBYTES cluster mbufs.
				 */
				MCLGET(n, wait);
				n->m_len = 0;
				n->m_len = M_TRAILINGSPACE(n);
				n->m_len = min(n->m_len, len);
				n->m_len = min(n->m_len, m->m_len - off);
				memcpy(mtod(n, void *), mtod(m, char *) + off,
				    (unsigned)n->m_len);
			}
		} else
			memcpy(mtod(n, void *), mtod(m, char *) + off,
			    (unsigned)n->m_len);
		if (len != M_COPYALL)
			len -= n->m_len;
		off += n->m_len;
#ifdef DIAGNOSTIC
		if (off > m->m_len)
			panic("m_copym0 overrun");
#endif
		if (off == m->m_len) {
			m = m->m_next;
			off = 0;
		}
		np = &n->m_next;
	}
	if (top == 0)
		MCFail++;
	return (top);
nospace:
	m_freem(top);
	MCFail++;
	return (NULL);
}

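/*
 * Illustrative, non-compiled sketch ("example_copies" is hypothetical):
 * m_copym() shares cluster storage with the original, so its result must
 * be treated as read-only; m_dup() pays for a deep copy but the result
 * may be modified freely.
 */
#if 0
static void
example_copies(struct mbuf *m)
{
	struct mbuf *ro, *rw;

	ro = m_copym(m, 0, M_COPYALL, M_DONTWAIT);	/* shares clusters */
	rw = m_dup(m, 0, M_COPYALL, M_DONTWAIT);	/* private storage */
	if (ro != NULL)
		m_freem(ro);
	if (rw != NULL)
		m_freem(rw);
}
#endif
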
/*
 * Copy an entire packet, including the header (which must be present).
 * An optimization of the common case `m_copym(m, 0, M_COPYALL, how)'.
 */
struct mbuf *
m_copypacket(struct mbuf *m, int how)
{
	struct mbuf *top, *n, *o;

	MGET(n, how, m->m_type);
	top = n;
	if (!n)
		goto nospace;

	MCLAIM(n, m->m_owner);
	M_COPY_PKTHDR(n, m);
	n->m_len = m->m_len;
	if (m->m_flags & M_EXT) {
		n->m_data = m->m_data;
		n->m_ext = m->m_ext;
		MCLADDREFERENCE(m, n);
	} else {
		memcpy(mtod(n, char *), mtod(m, char *), n->m_len);
	}

	m = m->m_next;
	while (m) {
		MGET(o, how, m->m_type);
		if (!o)
			goto nospace;

		MCLAIM(o, m->m_owner);
		n->m_next = o;
		n = n->m_next;

		n->m_len = m->m_len;
		if (m->m_flags & M_EXT) {
			n->m_data = m->m_data;
			n->m_ext = m->m_ext;
			MCLADDREFERENCE(m, n);
		} else {
			memcpy(mtod(n, char *), mtod(m, char *), n->m_len);
		}

		m = m->m_next;
	}
	return top;
nospace:
	m_freem(top);
	MCFail++;
	return NULL;
}

/*
 * Copy data from an mbuf chain starting "off" bytes from the beginning,
 * continuing for "len" bytes, into the indicated buffer.
 */
void
m_copydata(struct mbuf *m, int off, int len, void *vp)
{
	unsigned count;
	void *cp = vp;

	if (off < 0 || len < 0)
		panic("m_copydata: off %d, len %d", off, len);
	while (off > 0) {
		if (m == NULL)
			panic("m_copydata: m == NULL, off %d", off);
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	while (len > 0) {
		if (m == NULL)
			panic("m_copydata: m == NULL, len %d", len);
		count = min(m->m_len - off, len);
		memcpy(cp, mtod(m, char *) + off, count);
		len -= count;
		cp = (char *)cp + count;
		off = 0;
		m = m->m_next;
	}
}

/*
 * Concatenate mbuf chain n to m.
 * n might be copied into m (when n->m_len is small), therefore the data
 * portion of n could end up in an mbuf of a different mbuf type.
 * Any m_pkthdr is not updated.
 */
void
m_cat(struct mbuf *m, struct mbuf *n)
{

	while (m->m_next)
		m = m->m_next;
	while (n) {
		if (M_READONLY(m) || n->m_len > M_TRAILINGSPACE(m)) {
			/* just join the two chains */
			m->m_next = n;
			return;
		}
		/* splat the data from one into the other */
		memcpy(mtod(m, char *) + m->m_len, mtod(n, void *),
		    (u_int)n->m_len);
		m->m_len += n->m_len;
		n = m_free(n);
	}
}

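/*
 * Illustrative, non-compiled sketch ("example_peek" and "buf" are
 * hypothetical): m_copydata() is the safe way to read a range that may
 * span several mbufs, e.g. to linearize a small header into a stack
 * buffer before parsing it.
 */
#if 0
static void
example_peek(struct mbuf *m)
{
	uint8_t buf[16];

	if (m->m_pkthdr.len < sizeof(buf))
		return;			/* not enough data queued */
	m_copydata(m, 0, sizeof(buf), buf);
	/* ... parse buf ... */
}
#endif
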
void
m_adj(struct mbuf *mp, int req_len)
{
	int len = req_len;
	struct mbuf *m;
	int count;

	if ((m = mp) == NULL)
		return;
	if (len >= 0) {
		/*
		 * Trim from head.
		 */
		while (m != NULL && len > 0) {
			if (m->m_len <= len) {
				len -= m->m_len;
				m->m_len = 0;
				m = m->m_next;
			} else {
				m->m_len -= len;
				m->m_data += len;
				len = 0;
			}
		}
		m = mp;
		if (mp->m_flags & M_PKTHDR)
			m->m_pkthdr.len -= (req_len - len);
	} else {
		/*
		 * Trim from tail.  Scan the mbuf chain,
		 * calculating its length and finding the last mbuf.
		 * If the adjustment only affects this mbuf, then just
		 * adjust and return.  Otherwise, rescan and truncate
		 * after the remaining size.
		 */
		len = -len;
		count = 0;
		for (;;) {
			count += m->m_len;
			if (m->m_next == (struct mbuf *)0)
				break;
			m = m->m_next;
		}
		if (m->m_len >= len) {
			m->m_len -= len;
			if (mp->m_flags & M_PKTHDR)
				mp->m_pkthdr.len -= len;
			return;
		}
		count -= len;
		if (count < 0)
			count = 0;
		/*
		 * Correct length for chain is "count".
		 * Find the mbuf with last data, adjust its length,
		 * and toss data from remaining mbufs on chain.
		 */
		m = mp;
		if (m->m_flags & M_PKTHDR)
			m->m_pkthdr.len = count;
		for (; m; m = m->m_next) {
			if (m->m_len >= count) {
				m->m_len = count;
				break;
			}
			count -= m->m_len;
		}
		if (m)
			while (m->m_next)
				(m = m->m_next)->m_len = 0;
	}
}

/*
 * Rearrange an mbuf chain so that len bytes are contiguous
 * and in the data area of an mbuf (so that mtod and dtom
 * will work for a structure of size len).  Returns the resulting
 * mbuf chain on success, frees it and returns null on failure.
 * If there is room, it will add up to max_protohdr-len extra bytes to the
 * contiguous region in an attempt to avoid being called next time.
 */
int MPFail;

struct mbuf *
m_pullup(struct mbuf *n, int len)
{
	struct mbuf *m;
	int count;
	int space;

	/*
	 * If the first mbuf has no cluster, and has room for len bytes
	 * without shifting current data, pullup into it,
	 * otherwise allocate a new mbuf to prepend to the chain.
	 */
	if ((n->m_flags & M_EXT) == 0 &&
	    n->m_data + len < &n->m_dat[MLEN] && n->m_next) {
		if (n->m_len >= len)
			return (n);
		m = n;
		n = n->m_next;
		len -= m->m_len;
	} else {
		if (len > MHLEN)
			goto bad;
		MGET(m, M_DONTWAIT, n->m_type);
		if (m == 0)
			goto bad;
		MCLAIM(m, n->m_owner);
		m->m_len = 0;
		if (n->m_flags & M_PKTHDR) {
			M_MOVE_PKTHDR(m, n);
		}
	}
	space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
	do {
		count = min(min(max(len, max_protohdr), space), n->m_len);
		memcpy(mtod(m, char *) + m->m_len, mtod(n, void *),
		    (unsigned)count);
		len -= count;
		m->m_len += count;
		n->m_len -= count;
		space -= count;
		if (n->m_len)
			n->m_data += count;
		else
			n = m_free(n);
	} while (len > 0 && n);
	if (len > 0) {
		(void) m_free(m);
		goto bad;
	}
	m->m_next = n;
	return (m);
bad:
	m_freem(n);
	MPFail++;
	return (NULL);
}

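/*
 * Illustrative, non-compiled sketch (the struct "example_hdr" and the
 * function are hypothetical): the classic m_pullup() idiom makes the
 * first sizeof(struct example_hdr) bytes contiguous so mtod() is safe.
 * Note that on failure the chain has already been freed.
 */
#if 0
static void
example_pullup(struct mbuf *m)
{
	struct example_hdr { uint32_t eh_tag; } *eh;

	if (m->m_len < sizeof(*eh) &&
	    (m = m_pullup(m, sizeof(*eh))) == NULL)
		return;			/* chain already freed */
	eh = mtod(m, struct example_hdr *);
	(void)eh;			/* ... parse header ... */
}
#endif
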
/*
 * Like m_pullup(), except a new mbuf is always allocated, and we allow
 * the amount of empty space before the data in the new mbuf to be specified
 * (in the event that the caller expects to prepend later).
 */
int MSFail;

struct mbuf *
m_copyup(struct mbuf *n, int len, int dstoff)
{
	struct mbuf *m;
	int count, space;

	if (len > (MHLEN - dstoff))
		goto bad;
	MGET(m, M_DONTWAIT, n->m_type);
	if (m == NULL)
		goto bad;
	MCLAIM(m, n->m_owner);
	m->m_len = 0;
	if (n->m_flags & M_PKTHDR) {
		M_MOVE_PKTHDR(m, n);
	}
	m->m_data += dstoff;
	space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
	do {
		count = min(min(max(len, max_protohdr), space), n->m_len);
		memcpy(mtod(m, char *) + m->m_len, mtod(n, void *),
		    (unsigned)count);
		len -= count;
		m->m_len += count;
		n->m_len -= count;
		space -= count;
		if (n->m_len)
			n->m_data += count;
		else
			n = m_free(n);
	} while (len > 0 && n);
	if (len > 0) {
		(void) m_free(m);
		goto bad;
	}
	m->m_next = n;
	return (m);
bad:
	m_freem(n);
	MSFail++;
	return (NULL);
}

/*
 * Partition an mbuf chain in two pieces, returning the tail --
 * all but the first len0 bytes.  In case of failure, it returns NULL and
 * attempts to restore the chain to its original state.
 */
struct mbuf *
m_split(struct mbuf *m0, int len0, int wait)
{

	return m_split0(m0, len0, wait, 1);
}

static struct mbuf *
m_split0(struct mbuf *m0, int len0, int wait, int copyhdr)
{
	struct mbuf *m, *n;
	unsigned len = len0, remain, len_save;

	for (m = m0; m && len > m->m_len; m = m->m_next)
		len -= m->m_len;
	if (m == 0)
		return (NULL);
	remain = m->m_len - len;
	if (copyhdr && (m0->m_flags & M_PKTHDR)) {
		MGETHDR(n, wait, m0->m_type);
		if (n == 0)
			return (NULL);
		MCLAIM(n, m0->m_owner);
		n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
		n->m_pkthdr.len = m0->m_pkthdr.len - len0;
		len_save = m0->m_pkthdr.len;
		m0->m_pkthdr.len = len0;
		if (m->m_flags & M_EXT)
			goto extpacket;
		if (remain > MHLEN) {
			/* m can't be the lead packet */
			MH_ALIGN(n, 0);
			n->m_next = m_split(m, len, wait);
			if (n->m_next == 0) {
				(void) m_free(n);
				m0->m_pkthdr.len = len_save;
				return (NULL);
			} else
				return (n);
		} else
			MH_ALIGN(n, remain);
	} else if (remain == 0) {
		n = m->m_next;
		m->m_next = 0;
		return (n);
	} else {
		MGET(n, wait, m->m_type);
		if (n == 0)
			return (NULL);
		MCLAIM(n, m->m_owner);
		M_ALIGN(n, remain);
	}
extpacket:
	if (m->m_flags & M_EXT) {
		n->m_ext = m->m_ext;
		MCLADDREFERENCE(m, n);
		n->m_data = m->m_data + len;
	} else {
		memcpy(mtod(n, void *), mtod(m, char *) + len, remain);
	}
	n->m_len = remain;
	m->m_len = len;
	n->m_next = m->m_next;
	m->m_next = 0;
	return (n);
}
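
/*
 * Illustrative, non-compiled sketch ("example_split" and "hdrlen" are
 * hypothetical): split a record off the front of a chain; m0 keeps the
 * first hdrlen bytes (its pkthdr length is adjusted by m_split()) and
 * the returned tail carries the rest.
 */
#if 0
static struct mbuf *
example_split(struct mbuf *m0, int hdrlen)
{
	struct mbuf *tail;

	tail = m_split(m0, hdrlen, M_DONTWAIT);
	if (tail == NULL)
		return NULL;	/* m0 is restored on failure */
	return tail;
}
#endif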
/*
 * Routine to copy from device local memory into mbufs.
 */
struct mbuf *
m_devget(char *buf, int totlen, int off0, struct ifnet *ifp,
    void (*copy)(const void *from, void *to, size_t len))
{
	struct mbuf *m;
	struct mbuf *top = 0, **mp = &top;
	int off = off0, len;
	char *cp;
	char *epkt;

	cp = buf;
	epkt = cp + totlen;
	if (off) {
		/*
		 * If 'off' is non-zero, packet is trailer-encapsulated,
		 * so we have to skip the type and length fields.
		 */
		cp += off + 2 * sizeof(uint16_t);
		totlen -= 2 * sizeof(uint16_t);
	}
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == 0)
		return (NULL);
	m->m_pkthdr.rcvif = ifp;
	m->m_pkthdr.len = totlen;
	m->m_len = MHLEN;

	while (totlen > 0) {
		if (top) {
			MGET(m, M_DONTWAIT, MT_DATA);
			if (m == 0) {
				m_freem(top);
				return (NULL);
			}
			m->m_len = MLEN;
		}
		len = min(totlen, epkt - cp);
		if (len >= MINCLSIZE) {
			MCLGET(m, M_DONTWAIT);
			if ((m->m_flags & M_EXT) == 0) {
				m_free(m);
				m_freem(top);
				return (NULL);
			}
			m->m_len = len = min(len, MCLBYTES);
		} else {
			/*
			 * Place initial small packet/header at end of mbuf.
			 */
			if (len < m->m_len) {
				if (top == 0 && len + max_linkhdr <= m->m_len)
					m->m_data += max_linkhdr;
				m->m_len = len;
			} else
				len = m->m_len;
		}
		if (copy)
			copy(cp, mtod(m, void *), (size_t)len);
		else
			memcpy(mtod(m, void *), cp, (size_t)len);
		cp += len;
		*mp = m;
		mp = &m->m_next;
		totlen -= len;
		if (cp == epkt)
			cp = buf;
	}
	return (top);
}

/*
 * Copy data from a buffer back into the indicated mbuf chain,
 * starting "off" bytes from the beginning, extending the mbuf
 * chain if necessary.
 */
void
m_copyback(struct mbuf *m0, int off, int len, const void *cp)
{
#if defined(DEBUG)
	struct mbuf *origm = m0;
	int error;
#endif /* defined(DEBUG) */

	if (m0 == NULL)
		return;

#if defined(DEBUG)
	error =
#endif /* defined(DEBUG) */
	m_copyback0(&m0, off, len, cp,
	    M_COPYBACK0_COPYBACK|M_COPYBACK0_EXTEND, M_DONTWAIT);

#if defined(DEBUG)
	if (error != 0 || (m0 != NULL && origm != m0))
		panic("m_copyback");
#endif /* defined(DEBUG) */
}

struct mbuf *
m_copyback_cow(struct mbuf *m0, int off, int len, const void *cp, int how)
{
	int error;

	/* don't support chain expansion */
	KDASSERT(off + len <= m_length(m0));

	error = m_copyback0(&m0, off, len, cp,
	    M_COPYBACK0_COPYBACK|M_COPYBACK0_COW, how);
	if (error) {
		/*
		 * no way to recover from partial success.
		 * just free the chain.
		 */
		m_freem(m0);
		return NULL;
	}
	return m0;
}

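/*
 * Illustrative, non-compiled sketch ("example_patch" is hypothetical):
 * overwrite a small field at a known offset.  Plain m_copyback() may
 * write into shared clusters; m_copyback_cow() is the variant to use
 * when the chain may be read-only (e.g. after m_copym()).
 */
#if 0
static struct mbuf *
example_patch(struct mbuf *m, int off, uint16_t val)
{

	return m_copyback_cow(m, off, sizeof(val), &val, M_DONTWAIT);
}
#endif
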
/*
 * m_makewritable: ensure the specified range is writable.
 */
int
m_makewritable(struct mbuf **mp, int off, int len, int how)
{
	int error;
#if defined(DEBUG)
	struct mbuf *n;
	int origlen, reslen;

	origlen = m_length(*mp);
#endif /* defined(DEBUG) */

#if 0 /* M_COPYALL is large enough */
	if (len == M_COPYALL)
		len = m_length(*mp) - off;	/* XXX */
#endif

	error = m_copyback0(mp, off, len, NULL,
	    M_COPYBACK0_PRESERVE|M_COPYBACK0_COW, how);

#if defined(DEBUG)
	reslen = 0;
	for (n = *mp; n; n = n->m_next)
		reslen += n->m_len;
	if (origlen != reslen)
		panic("m_makewritable: length changed");
	if (((*mp)->m_flags & M_PKTHDR) != 0 && reslen != (*mp)->m_pkthdr.len)
		panic("m_makewritable: inconsistent");
#endif /* defined(DEBUG) */

	return error;
}

int
m_copyback0(struct mbuf **mp0, int off, int len, const void *vp, int flags,
    int how)
{
	int mlen;
	struct mbuf *m, *n;
	struct mbuf **mp;
	int totlen = 0;
	const char *cp = vp;

	KASSERT(mp0 != NULL);
	KASSERT(*mp0 != NULL);
	KASSERT((flags & M_COPYBACK0_PRESERVE) == 0 || cp == NULL);
	KASSERT((flags & M_COPYBACK0_COPYBACK) == 0 || cp != NULL);

	/*
	 * we don't bother to update "totlen" in the case of
	 * M_COPYBACK0_COW, assuming that M_COPYBACK0_EXTEND and
	 * M_COPYBACK0_COW are mutually exclusive.
	 */

	KASSERT((~flags & (M_COPYBACK0_EXTEND|M_COPYBACK0_COW)) != 0);

	mp = mp0;
	m = *mp;
	while (off > (mlen = m->m_len)) {
		off -= mlen;
		totlen += mlen;
		if (m->m_next == NULL) {
			int tspace;
extend:
			if ((flags & M_COPYBACK0_EXTEND) == 0)
				goto out;

			/*
			 * try to make some space at the end of "m".
			 */

			mlen = m->m_len;
			if (off + len >= MINCLSIZE &&
			    (m->m_flags & M_EXT) == 0 && m->m_len == 0) {
				MCLGET(m, how);
			}
			tspace = M_TRAILINGSPACE(m);
			if (tspace > 0) {
				tspace = min(tspace, off + len);
				KASSERT(tspace > 0);
				memset(mtod(m, char *) + m->m_len, 0,
				    min(off, tspace));
				m->m_len += tspace;
				off += mlen;
				totlen -= mlen;
				continue;
			}

			/*
			 * need to allocate an mbuf.
			 */

			if (off + len >= MINCLSIZE) {
				n = m_getcl(how, m->m_type, 0);
			} else {
				n = m_get(how, m->m_type);
			}
			if (n == NULL) {
				goto out;
			}
			n->m_len = 0;
			n->m_len = min(M_TRAILINGSPACE(n), off + len);
			memset(mtod(n, char *), 0, min(n->m_len, off));
			m->m_next = n;
		}
		mp = &m->m_next;
		m = m->m_next;
	}
	while (len > 0) {
		mlen = m->m_len - off;
		if (mlen != 0 && M_READONLY(m)) {
			char *datap;
			int eatlen;

			/*
			 * this mbuf is read-only.
			 * allocate a new writable mbuf and try again.
			 */

#if defined(DIAGNOSTIC)
			if ((flags & M_COPYBACK0_COW) == 0)
				panic("m_copyback0: read-only");
#endif /* defined(DIAGNOSTIC) */

			/*
			 * if we're going to write into the middle of
			 * a mbuf, split it first.
			 */
			if (off > 0 && len < mlen) {
				n = m_split0(m, off, how, 0);
				if (n == NULL)
					goto enobufs;
				m->m_next = n;
				mp = &m->m_next;
				m = n;
				off = 0;
				continue;
			}

			/*
			 * XXX TODO coalesce into the trailingspace of
			 * the previous mbuf when possible.
			 */

			/*
			 * allocate a new mbuf.  copy packet header if
			 * needed.
			 */
			MGET(n, how, m->m_type);
			if (n == NULL)
				goto enobufs;
			MCLAIM(n, m->m_owner);
			if (off == 0 && (m->m_flags & M_PKTHDR) != 0) {
				M_MOVE_PKTHDR(n, m);
				n->m_len = MHLEN;
			} else {
				if (len >= MINCLSIZE)
					MCLGET(n, M_DONTWAIT);
				n->m_len =
				    (n->m_flags & M_EXT) ? MCLBYTES : MLEN;
			}
			if (n->m_len > len)
				n->m_len = len;

			/*
			 * free the region which has been overwritten.
			 * copying data from old mbufs if requested.
			 */
			if (flags & M_COPYBACK0_PRESERVE)
				datap = mtod(n, char *);
			else
				datap = NULL;
			eatlen = n->m_len;
			KDASSERT(off == 0 || eatlen >= mlen);
			if (off > 0) {
				KDASSERT(len >= mlen);
				m->m_len = off;
				m->m_next = n;
				if (datap) {
					m_copydata(m, off, mlen, datap);
					datap += mlen;
				}
				eatlen -= mlen;
				mp = &m->m_next;
				m = m->m_next;
			}
			while (m != NULL && M_READONLY(m) &&
			    n->m_type == m->m_type && eatlen > 0) {
				mlen = min(eatlen, m->m_len);
				if (datap) {
					m_copydata(m, 0, mlen, datap);
					datap += mlen;
				}
				m->m_data += mlen;
				m->m_len -= mlen;
				eatlen -= mlen;
				if (m->m_len == 0)
					*mp = m = m_free(m);
			}
			if (eatlen > 0)
				n->m_len -= eatlen;
			n->m_next = m;
			*mp = m = n;
			continue;
		}
		mlen = min(mlen, len);
		if (flags & M_COPYBACK0_COPYBACK) {
			memcpy(mtod(m, char *) + off, cp, (unsigned)mlen);
			cp += mlen;
		}
		len -= mlen;
		mlen += off;
		off = 0;
		totlen += mlen;
		if (len == 0)
			break;
		if (m->m_next == NULL) {
			goto extend;
		}
		mp = &m->m_next;
		m = m->m_next;
	}
out:	if (((m = *mp0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen)) {
		KASSERT((flags & M_COPYBACK0_EXTEND) != 0);
		m->m_pkthdr.len = totlen;
	}

	return 0;

enobufs:
	return ENOBUFS;
}

void
m_move_pkthdr(struct mbuf *to, struct mbuf *from)
{

	KASSERT((to->m_flags & M_EXT) == 0);
	KASSERT((to->m_flags & M_PKTHDR) == 0 || m_tag_first(to) == NULL);
	KASSERT((from->m_flags & M_PKTHDR) != 0);

	to->m_pkthdr = from->m_pkthdr;
	to->m_flags = from->m_flags & M_COPYFLAGS;
	to->m_data = to->m_pktdat;

	from->m_flags &= ~M_PKTHDR;
}

/*
 * Apply function f to the data in an mbuf chain starting "off" bytes from
 * the beginning, continuing for "len" bytes.
 */
int
m_apply(struct mbuf *m, int off, int len,
    int (*f)(void *, void *, unsigned int), void *arg)
{
	unsigned int count;
	int rval;

	KASSERT(len >= 0);
	KASSERT(off >= 0);

	while (off > 0) {
		KASSERT(m != NULL);
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	while (len > 0) {
		KASSERT(m != NULL);
		count = min(m->m_len - off, len);

		rval = (*f)(arg, mtod(m, char *) + off, count);
		if (rval)
			return (rval);

		len -= count;
		off = 0;
		m = m->m_next;
	}

	return (0);
}

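/*
 * Illustrative, non-compiled sketch ("example_xor_cb" and "example_xor"
 * are hypothetical): m_apply() visits each contiguous region exactly
 * once, which is the usual way to run a checksum or cipher over a chain
 * without linearizing it first.
 */
#if 0
static int
example_xor_cb(void *arg, void *data, unsigned int len)
{
	uint8_t *acc = arg, *p = data;
	unsigned int i;

	for (i = 0; i < len; i++)
		*acc ^= p[i];
	return 0;		/* non-zero would abort the walk */
}

static uint8_t
example_xor(struct mbuf *m, int len)
{
	uint8_t acc = 0;

	(void)m_apply(m, 0, len, example_xor_cb, &acc);
	return acc;
}
#endif
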
/*
 * Return a pointer to the mbuf/offset of a location in an mbuf chain.
 */
struct mbuf *
m_getptr(struct mbuf *m, int loc, int *off)
{

	while (loc >= 0) {
		/* Normal end of search */
		if (m->m_len > loc) {
			*off = loc;
			return (m);
		} else {
			loc -= m->m_len;

			if (m->m_next == NULL) {
				if (loc == 0) {
					/* Point at the end of valid data */
					*off = m->m_len;
					return (m);
				} else
					return (NULL);
			} else
				m = m->m_next;
		}
	}

	return (NULL);
}

#if defined(DDB)
void
m_print(const struct mbuf *m, const char *modif, void (*pr)(const char *, ...))
{
	char ch;
	bool opt_c = false;
	char buf[512];

	while ((ch = *(modif++)) != '\0') {
		switch (ch) {
		case 'c':
			opt_c = true;
			break;
		}
	}

nextchain:
	(*pr)("MBUF %p\n", m);
	bitmask_snprintf((u_int)m->m_flags, M_FLAGS_BITS, buf, sizeof(buf));
	(*pr)("  data=%p, len=%d, type=%d, flags=0x%s\n",
	    m->m_data, m->m_len, m->m_type, buf);
	(*pr)("  owner=%p, next=%p, nextpkt=%p\n", m->m_owner, m->m_next,
	    m->m_nextpkt);
	(*pr)("  leadingspace=%u, trailingspace=%u, readonly=%u\n",
	    (int)M_LEADINGSPACE(m), (int)M_TRAILINGSPACE(m),
	    (int)M_READONLY(m));
	if ((m->m_flags & M_PKTHDR) != 0) {
		bitmask_snprintf(m->m_pkthdr.csum_flags, M_CSUM_BITS, buf,
		    sizeof(buf));
		(*pr)("  pktlen=%d, rcvif=%p, csum_flags=0x%s, csum_data=0x%"
		    PRIx32 ", segsz=%u\n",
		    m->m_pkthdr.len, m->m_pkthdr.rcvif,
		    buf, m->m_pkthdr.csum_data, m->m_pkthdr.segsz);
	}
	if ((m->m_flags & M_EXT)) {
		(*pr)("  shared=%u, ext_buf=%p, ext_size=%zd, "
		    "ext_free=%p, ext_arg=%p\n",
		    (int)MCLISREFERENCED(m),
		    m->m_ext.ext_buf, m->m_ext.ext_size,
		    m->m_ext.ext_free, m->m_ext.ext_arg);
	}
	if ((~m->m_flags & (M_EXT|M_EXT_PAGES)) == 0) {
		vaddr_t sva = (vaddr_t)m->m_ext.ext_buf;
		vaddr_t eva = sva + m->m_ext.ext_size;
		int n = (round_page(eva) - trunc_page(sva)) >> PAGE_SHIFT;
		int i;

		(*pr)("  pages:");
		for (i = 0; i < n; i++) {
			(*pr)(" %p", m->m_ext.ext_pgs[i]);
		}
		(*pr)("\n");
	}

	if (opt_c) {
		m = m->m_next;
		if (m != NULL) {
			goto nextchain;
		}
	}
}
#endif /* defined(DDB) */
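
/*
 * Illustrative, non-compiled sketch ("example_byte_at" is hypothetical):
 * m_getptr() translates a chain-relative offset into an (mbuf, offset)
 * pair, so a single byte can be inspected without copying.
 */
#if 0
static int
example_byte_at(struct mbuf *m, int loc)
{
	int off;

	m = m_getptr(m, loc, &off);
	if (m == NULL || off == m->m_len)
		return -1;		/* offset at or past end of data */
	return mtod(m, uint8_t *)[off];
}
#endif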