/*
 * (MPSAFE)
 *
 * Copyright (c) 2004 Jeffrey M. Hsu.  All rights reserved.
 * Copyright (c) 2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Jeffrey M. Hsu.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Copyright (c) 1982, 1986, 1988, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)uipc_mbuf.c	8.2 (Berkeley) 1/4/94
 * $FreeBSD: src/sys/kern/uipc_mbuf.c,v 1.51.2.24 2003/04/15 06:59:29 silby Exp $
 */

#include "opt_param.h"
#include "opt_mbuf_stress_test.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/file.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/domain.h>
#include <sys/objcache.h>
#include <sys/tree.h>
#include <sys/protosw.h>
#include <sys/uio.h>
#include <sys/thread.h>
#include <sys/globaldata.h>

#include <sys/thread2.h>
#include <sys/spinlock2.h>

#include <machine/atomic.h>
#include <machine/limits.h>

#include <vm/vm.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>

#ifdef INVARIANTS
#include <machine/cpu.h>
#endif

/*
 * mbuf cluster meta-data
 */
struct mbcluster {
	int32_t	mcl_refs;
	void	*mcl_data;
};

/*
 * mbuf tracking for debugging purposes
 */
#ifdef MBUF_DEBUG

static MALLOC_DEFINE(M_MTRACK, "mtrack", "mtrack");

struct mbtrack;
RB_HEAD(mbuf_rb_tree, mbtrack);
RB_PROTOTYPE2(mbuf_rb_tree, mbtrack, rb_node, mbtrack_cmp, struct mbuf *);

struct mbtrack {
	RB_ENTRY(mbtrack) rb_node;
	int trackid;
	struct mbuf *m;
};

static int
mbtrack_cmp(struct mbtrack *mb1, struct mbtrack *mb2)
{
	if (mb1->m < mb2->m)
		return(-1);
	if (mb1->m > mb2->m)
		return(1);
	return(0);
}

RB_GENERATE2(mbuf_rb_tree, mbtrack, rb_node, mbtrack_cmp, struct mbuf *, m);

struct mbuf_rb_tree	mbuf_track_root;
static struct spinlock	mbuf_track_spin = SPINLOCK_INITIALIZER(mbuf_track_spin);

static void
mbuftrack(struct mbuf *m)
{
	struct mbtrack *mbt;

	mbt = kmalloc(sizeof(*mbt), M_MTRACK, M_INTWAIT|M_ZERO);
	spin_lock(&mbuf_track_spin);
	mbt->m = m;
	if (mbuf_rb_tree_RB_INSERT(&mbuf_track_root, mbt)) {
		spin_unlock(&mbuf_track_spin);
		panic("mbuftrack: mbuf %p already being tracked\n", m);
	}
	spin_unlock(&mbuf_track_spin);
}

static void
mbufuntrack(struct mbuf *m)
{
	struct mbtrack *mbt;

	spin_lock(&mbuf_track_spin);
	mbt = mbuf_rb_tree_RB_LOOKUP(&mbuf_track_root, m);
	if (mbt == NULL) {
		spin_unlock(&mbuf_track_spin);
		panic("mbufuntrack: mbuf %p was not tracked\n", m);
	} else {
		mbuf_rb_tree_RB_REMOVE(&mbuf_track_root, mbt);
		spin_unlock(&mbuf_track_spin);
		kfree(mbt, M_MTRACK);
	}
}

void
mbuftrackid(struct mbuf *m, int trackid)
{
	struct mbtrack *mbt;
	struct mbuf *n;

	spin_lock(&mbuf_track_spin);
	while (m) {
		n = m->m_nextpkt;
		while (m) {
			mbt = mbuf_rb_tree_RB_LOOKUP(&mbuf_track_root, m);
			if (mbt == NULL) {
				spin_unlock(&mbuf_track_spin);
				panic("mbuftrackid: mbuf %p not tracked", m);
			}
			mbt->trackid = trackid;
			m = m->m_next;
		}
		m = n;
	}
	spin_unlock(&mbuf_track_spin);
}

static int
mbuftrack_callback(struct mbtrack *mbt, void *arg)
{
	struct sysctl_req *req = arg;
	char buf[64];
	int error;

	ksnprintf(buf, sizeof(buf), "mbuf %p track %d\n", mbt->m, mbt->trackid);

	spin_unlock(&mbuf_track_spin);
	error = SYSCTL_OUT(req, buf, strlen(buf));
	spin_lock(&mbuf_track_spin);
	if (error)
		return(-error);
	return(0);
}

static int
mbuftrack_show(SYSCTL_HANDLER_ARGS)
{
	int error;

	spin_lock(&mbuf_track_spin);
	error = mbuf_rb_tree_RB_SCAN(&mbuf_track_root, NULL,
				     mbuftrack_callback, req);
	spin_unlock(&mbuf_track_spin);
	return (-error);
}
SYSCTL_PROC(_kern_ipc, OID_AUTO, showmbufs, CTLFLAG_RD|CTLTYPE_STRING,
	    0, 0, mbuftrack_show, "A", "Show all in-use mbufs");

#else

#define mbuftrack(m)
#define mbufuntrack(m)

#endif

static void mbinit(void *);
SYSINIT(mbuf, SI_BOOT2_MACHDEP, SI_ORDER_FIRST, mbinit, NULL)

static u_long	mbtypes[SMP_MAXCPU][MT_NTYPES];

static struct mbstat mbstat[SMP_MAXCPU];
int	max_linkhdr;
int	max_protohdr;
int	max_hdr;
int	max_datalen;
int	m_defragpackets;
int	m_defragbytes;
int	m_defraguseless;
int	m_defragfailure;
#ifdef MBUF_STRESS_TEST
int	m_defragrandomfailures;
#endif

struct objcache *mbuf_cache, *mbufphdr_cache;
struct objcache *mclmeta_cache, *mjclmeta_cache;
struct objcache *mbufcluster_cache, *mbufphdrcluster_cache;
struct objcache *mbufjcluster_cache, *mbufphdrjcluster_cache;

int	nmbclusters;
int	nmbufs;

SYSCTL_INT(_kern_ipc, KIPC_MAX_LINKHDR, max_linkhdr, CTLFLAG_RW,
	   &max_linkhdr, 0, "Max size of a link-level header");
SYSCTL_INT(_kern_ipc, KIPC_MAX_PROTOHDR, max_protohdr, CTLFLAG_RW,
	   &max_protohdr, 0, "Max size of a protocol header");
SYSCTL_INT(_kern_ipc, KIPC_MAX_HDR, max_hdr, CTLFLAG_RW, &max_hdr, 0,
	   "Max size of link+protocol headers");
SYSCTL_INT(_kern_ipc, KIPC_MAX_DATALEN, max_datalen, CTLFLAG_RW,
	   &max_datalen, 0, "Max data payload size without headers");
SYSCTL_INT(_kern_ipc, OID_AUTO, mbuf_wait, CTLFLAG_RW,
	   &mbuf_wait, 0, "Time in ticks to sleep after failed mbuf allocations");

static int do_mbstat(SYSCTL_HANDLER_ARGS);

SYSCTL_PROC(_kern_ipc, KIPC_MBSTAT, mbstat, CTLTYPE_STRUCT|CTLFLAG_RD,
	    0, 0, do_mbstat, "S,mbstat", "mbuf usage statistics");

static int do_mbtypes(SYSCTL_HANDLER_ARGS);

SYSCTL_PROC(_kern_ipc, OID_AUTO, mbtypes, CTLTYPE_ULONG|CTLFLAG_RD,
	    0, 0, do_mbtypes, "LU", "");

static int
do_mbstat(SYSCTL_HANDLER_ARGS)
{
	struct mbstat mbstat_total;
	struct mbstat *mbstat_totalp;
	int i;

	bzero(&mbstat_total, sizeof(mbstat_total));
	mbstat_totalp = &mbstat_total;

	for (i = 0; i < ncpus; i++) {
		mbstat_total.m_mbufs += mbstat[i].m_mbufs;
		mbstat_total.m_clusters += mbstat[i].m_clusters;
		mbstat_total.m_spare += mbstat[i].m_spare;
		mbstat_total.m_clfree += mbstat[i].m_clfree;
		mbstat_total.m_drops += mbstat[i].m_drops;
		mbstat_total.m_wait += mbstat[i].m_wait;
		mbstat_total.m_drain += mbstat[i].m_drain;
		mbstat_total.m_mcfail += mbstat[i].m_mcfail;
		mbstat_total.m_mpfail += mbstat[i].m_mpfail;
	}
	/*
	 * The following fields are not cumulative fields so just
	 * get their values once.
	 */
	mbstat_total.m_msize = mbstat[0].m_msize;
	mbstat_total.m_mclbytes = mbstat[0].m_mclbytes;
	mbstat_total.m_minclsize = mbstat[0].m_minclsize;
	mbstat_total.m_mlen = mbstat[0].m_mlen;
	mbstat_total.m_mhlen = mbstat[0].m_mhlen;

	return(sysctl_handle_opaque(oidp, mbstat_totalp, sizeof(mbstat_total), req));
}

static int
do_mbtypes(SYSCTL_HANDLER_ARGS)
{
	u_long totals[MT_NTYPES];
	int i, j;

	for (i = 0; i < MT_NTYPES; i++)
		totals[i] = 0;

	for (i = 0; i < ncpus; i++) {
		for (j = 0; j < MT_NTYPES; j++)
			totals[j] += mbtypes[i][j];
	}

	return(sysctl_handle_opaque(oidp, totals, sizeof(totals), req));
}

/*
 * These are read-only because we do not currently have any code
 * to adjust the objcache limits after the fact.  The variables
 * may only be set as boot-time tunables.
 */
SYSCTL_INT(_kern_ipc, KIPC_NMBCLUSTERS, nmbclusters, CTLFLAG_RD,
	   &nmbclusters, 0, "Maximum number of mbuf clusters available");
SYSCTL_INT(_kern_ipc, OID_AUTO, nmbufs, CTLFLAG_RD, &nmbufs, 0,
	   "Maximum number of mbufs available");

SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragpackets, CTLFLAG_RD,
	   &m_defragpackets, 0, "Number of defragment packets");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragbytes, CTLFLAG_RD,
	   &m_defragbytes, 0, "Number of defragment bytes");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defraguseless, CTLFLAG_RD,
	   &m_defraguseless, 0, "Number of useless defragment mbuf chain operations");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragfailure, CTLFLAG_RD,
	   &m_defragfailure, 0, "Number of failed defragment mbuf chain operations");
#ifdef MBUF_STRESS_TEST
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragrandomfailures, CTLFLAG_RW,
	   &m_defragrandomfailures, 0, "");
#endif

static MALLOC_DEFINE(M_MBUF, "mbuf", "mbuf");
static MALLOC_DEFINE(M_MBUFCL, "mbufcl", "mbufcl");
static MALLOC_DEFINE(M_MJBUFCL, "mbufcl", "mbufcl");
static MALLOC_DEFINE(M_MCLMETA, "mclmeta", "mclmeta");
static MALLOC_DEFINE(M_MJCLMETA, "mjclmeta", "mjclmeta");

static void m_reclaim (void);
static void m_mclref(void *arg);
static void m_mclfree(void *arg);

/*
 * NOTE: Default NMBUFS must take into account a possible DOS attack
 *	 using fd passing on unix domain sockets.
 */
#ifndef NMBCLUSTERS
#define NMBCLUSTERS	(512 + maxusers * 16)
#endif
#ifndef NMBUFS
#define NMBUFS		(nmbclusters * 2 + maxfiles)
#endif

/*
 * Perform sanity checks of tunables declared above.
 */
static void
tunable_mbinit(void *dummy)
{
	/*
	 * This has to be done before VM init.
	 */
	nmbclusters = NMBCLUSTERS;
	TUNABLE_INT_FETCH("kern.ipc.nmbclusters", &nmbclusters);
	nmbufs = NMBUFS;
	TUNABLE_INT_FETCH("kern.ipc.nmbufs", &nmbufs);

	/* Sanity checks */
	if (nmbufs < nmbclusters * 2)
		nmbufs = nmbclusters * 2;
}
SYSINIT(tunable_mbinit, SI_BOOT1_TUNABLES, SI_ORDER_ANY,
	tunable_mbinit, NULL);

/* "number of clusters of pages" */
#define NCL_INIT	1

#define NMB_INIT	16

/*
 * The mbuf object cache only guarantees that m_next and m_nextpkt are
 * NULL and that m_data points to the beginning of the data area.  In
 * particular, m_len and m_pkthdr.len are uninitialized.  It is the
 * responsibility of the caller to initialize those fields before use.
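 *
 * Illustrative note (not part of the original comment): the allocator
 * wrappers further below compensate for this after fetching an mbuf
 * from these caches, e.g. m_get() and m_gethdr() both do
 *
 *	m->m_len = 0;
 *
 * and m_gethdr() additionally zeroes m_pkthdr.len before handing the
 * mbuf to the caller.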
 */

static __inline boolean_t
mbuf_ctor(void *obj, void *private, int ocflags)
{
	struct mbuf *m = obj;

	m->m_next = NULL;
	m->m_nextpkt = NULL;
	m->m_data = m->m_dat;
	m->m_flags = 0;

	return (TRUE);
}

/*
 * Initialize the mbuf and the packet header fields.
 */
static boolean_t
mbufphdr_ctor(void *obj, void *private, int ocflags)
{
	struct mbuf *m = obj;

	m->m_next = NULL;
	m->m_nextpkt = NULL;
	m->m_data = m->m_pktdat;
	m->m_flags = M_PKTHDR | M_PHCACHE;

	m->m_pkthdr.rcvif = NULL;	/* eliminate XXX JH */
	SLIST_INIT(&m->m_pkthdr.tags);
	m->m_pkthdr.csum_flags = 0;	/* eliminate XXX JH */
	m->m_pkthdr.fw_flags = 0;	/* eliminate XXX JH */

	return (TRUE);
}

/*
 * An mbcluster object consists of a 2K (MCLBYTES) cluster and a refcount.
 */
static boolean_t
mclmeta_ctor(void *obj, void *private, int ocflags)
{
	struct mbcluster *cl = obj;
	void *buf;

	if (ocflags & M_NOWAIT)
		buf = kmalloc(MCLBYTES, M_MBUFCL, M_NOWAIT | M_ZERO);
	else
		buf = kmalloc(MCLBYTES, M_MBUFCL, M_INTWAIT | M_ZERO);
	if (buf == NULL)
		return (FALSE);
	cl->mcl_refs = 0;
	cl->mcl_data = buf;
	return (TRUE);
}

static boolean_t
mjclmeta_ctor(void *obj, void *private, int ocflags)
{
	struct mbcluster *cl = obj;
	void *buf;

	if (ocflags & M_NOWAIT)
		buf = kmalloc(MJUMPAGESIZE, M_MBUFCL, M_NOWAIT | M_ZERO);
	else
		buf = kmalloc(MJUMPAGESIZE, M_MBUFCL, M_INTWAIT | M_ZERO);
	if (buf == NULL)
		return (FALSE);
	cl->mcl_refs = 0;
	cl->mcl_data = buf;
	return (TRUE);
}

static void
mclmeta_dtor(void *obj, void *private)
{
	struct mbcluster *mcl = obj;

	KKASSERT(mcl->mcl_refs == 0);
	kfree(mcl->mcl_data, M_MBUFCL);
}

static void
linkjcluster(struct mbuf *m, struct mbcluster *cl, uint size)
{
	/*
	 * Add the cluster to the mbuf.  The caller will detect that the
	 * mbuf now has an attached cluster.
	 */
	m->m_ext.ext_arg = cl;
	m->m_ext.ext_buf = cl->mcl_data;
	m->m_ext.ext_ref = m_mclref;
	m->m_ext.ext_free = m_mclfree;
	m->m_ext.ext_size = size;
	atomic_add_int(&cl->mcl_refs, 1);

	m->m_data = m->m_ext.ext_buf;
	m->m_flags |= M_EXT | M_EXT_CLUSTER;
}

static void
linkcluster(struct mbuf *m, struct mbcluster *cl)
{
	linkjcluster(m, cl, MCLBYTES);
}

static boolean_t
mbufphdrcluster_ctor(void *obj, void *private, int ocflags)
{
	struct mbuf *m = obj;
	struct mbcluster *cl;

	mbufphdr_ctor(obj, private, ocflags);
	cl = objcache_get(mclmeta_cache, ocflags);
	if (cl == NULL) {
		++mbstat[mycpu->gd_cpuid].m_drops;
		return (FALSE);
	}
	m->m_flags |= M_CLCACHE;
	linkcluster(m, cl);
	return (TRUE);
}

static boolean_t
mbufphdrjcluster_ctor(void *obj, void *private, int ocflags)
{
	struct mbuf *m = obj;
	struct mbcluster *cl;

	mbufphdr_ctor(obj, private, ocflags);
	cl = objcache_get(mjclmeta_cache, ocflags);
	if (cl == NULL) {
		++mbstat[mycpu->gd_cpuid].m_drops;
		return (FALSE);
	}
	m->m_flags |= M_CLCACHE;
	linkjcluster(m, cl, MJUMPAGESIZE);
	return (TRUE);
}

static boolean_t
mbufcluster_ctor(void *obj, void *private, int ocflags)
{
	struct mbuf *m = obj;
	struct mbcluster *cl;

	mbuf_ctor(obj, private, ocflags);
	cl = objcache_get(mclmeta_cache, ocflags);
	if (cl == NULL) {
		++mbstat[mycpu->gd_cpuid].m_drops;
		return (FALSE);
	}
	m->m_flags |= M_CLCACHE;
	linkcluster(m, cl);
	return (TRUE);
}

static boolean_t
mbufjcluster_ctor(void *obj, void *private, int ocflags)
{
	struct mbuf *m = obj;
	struct mbcluster *cl;

	mbuf_ctor(obj, private, ocflags);
	cl = objcache_get(mjclmeta_cache, ocflags);
	if (cl == NULL) {
		++mbstat[mycpu->gd_cpuid].m_drops;
		return (FALSE);
	}
	m->m_flags |= M_CLCACHE;
	linkjcluster(m, cl, MJUMPAGESIZE);
	return (TRUE);
}

/*
 * Used for both the cluster and cluster PHDR caches.
 *
 * The mbuf may have lost its cluster due to sharing, deal
 * with the situation by checking M_EXT.
 */
static void
mbufcluster_dtor(void *obj, void *private)
{
	struct mbuf *m = obj;
	struct mbcluster *mcl;

	if (m->m_flags & M_EXT) {
		KKASSERT((m->m_flags & M_EXT_CLUSTER) != 0);
		mcl = m->m_ext.ext_arg;
		KKASSERT(mcl->mcl_refs == 1);
		mcl->mcl_refs = 0;
		if (m->m_flags & M_EXT && m->m_ext.ext_size != MCLBYTES)
			objcache_put(mjclmeta_cache, mcl);
		else
			objcache_put(mclmeta_cache, mcl);
	}
}

struct objcache_malloc_args mbuf_malloc_args = { MSIZE, M_MBUF };
struct objcache_malloc_args mclmeta_malloc_args =
	{ sizeof(struct mbcluster), M_MCLMETA };

/* ARGSUSED*/
static void
mbinit(void *dummy)
{
	int mb_limit, cl_limit;
	int limit;
	int i;

	/*
	 * Initialize statistics
	 */
	for (i = 0; i < ncpus; i++) {
		atomic_set_long_nonlocked(&mbstat[i].m_msize, MSIZE);
		atomic_set_long_nonlocked(&mbstat[i].m_mclbytes, MCLBYTES);
		atomic_set_long_nonlocked(&mbstat[i].m_mjumpagesize, MJUMPAGESIZE);
		atomic_set_long_nonlocked(&mbstat[i].m_minclsize, MINCLSIZE);
		atomic_set_long_nonlocked(&mbstat[i].m_mlen, MLEN);
		atomic_set_long_nonlocked(&mbstat[i].m_mhlen, MHLEN);
	}

	/*
	 * Create object caches and save cluster limits, which will
	 * be used to adjust backing kmalloc pools' limit later.
	 */

	mb_limit = cl_limit = 0;

	limit = nmbufs;
	mbuf_cache = objcache_create("mbuf",
	    &limit, 0,
	    mbuf_ctor, NULL, NULL,
	    objcache_malloc_alloc, objcache_malloc_free, &mbuf_malloc_args);
	mb_limit += limit;

	limit = nmbufs;
	mbufphdr_cache = objcache_create("mbuf pkt hdr",
	    &limit, nmbufs / 4,
	    mbufphdr_ctor, NULL, NULL,
	    objcache_malloc_alloc, objcache_malloc_free, &mbuf_malloc_args);
	mb_limit += limit;

	cl_limit = nmbclusters;
	mclmeta_cache = objcache_create("cluster mbuf",
	    &cl_limit, 0,
	    mclmeta_ctor, mclmeta_dtor, NULL,
	    objcache_malloc_alloc, objcache_malloc_free, &mclmeta_malloc_args);

	cl_limit = nmbclusters;
	mjclmeta_cache = objcache_create("jcluster mbuf",
	    &cl_limit, 0,
	    mjclmeta_ctor, mclmeta_dtor, NULL,
	    objcache_malloc_alloc, objcache_malloc_free, &mclmeta_malloc_args);

	limit = nmbclusters;
	mbufcluster_cache = objcache_create("mbuf + cluster",
	    &limit, 0,
	    mbufcluster_ctor, mbufcluster_dtor, NULL,
	    objcache_malloc_alloc, objcache_malloc_free, &mbuf_malloc_args);
	mb_limit += limit;

	limit = nmbclusters;
	mbufphdrcluster_cache = objcache_create("mbuf pkt hdr + cluster",
	    &limit, nmbclusters / 16,
	    mbufphdrcluster_ctor, mbufcluster_dtor, NULL,
	    objcache_malloc_alloc, objcache_malloc_free, &mbuf_malloc_args);
	mb_limit += limit;

	limit = nmbclusters;
	mbufjcluster_cache = objcache_create("mbuf + jcluster",
	    &limit, 0,
	    mbufjcluster_ctor, mbufcluster_dtor, NULL,
	    objcache_malloc_alloc, objcache_malloc_free, &mbuf_malloc_args);
	mb_limit += limit;

	limit = nmbclusters;
	mbufphdrjcluster_cache = objcache_create("mbuf pkt hdr + jcluster",
	    &limit, nmbclusters / 16,
	    mbufphdrjcluster_ctor, mbufcluster_dtor, NULL,
	    objcache_malloc_alloc, objcache_malloc_free, &mbuf_malloc_args);
	mb_limit += limit;

	/*
	 * Adjust backing kmalloc pools' limit
	 *
	 * NOTE: We raise the limit by another 1/8 to take the effect
	 * of loosememuse into account.
	 */
	cl_limit += cl_limit / 8;
	kmalloc_raise_limit(mclmeta_malloc_args.mtype,
			    mclmeta_malloc_args.objsize * cl_limit);
	kmalloc_raise_limit(M_MBUFCL,
			    MCLBYTES * cl_limit * 3/4 +
			    MJUMPAGESIZE * cl_limit / 4);
	/*kmalloc_raise_limit(M_MBUFCL, MCLBYTES * cl_limit);*/

	mb_limit += mb_limit / 8;
	kmalloc_raise_limit(mbuf_malloc_args.mtype,
			    mbuf_malloc_args.objsize * mb_limit);
}

/*
 * Return the number of references to this mbuf's data.  0 is returned
 * if the mbuf is not M_EXT, a reference count is returned if it is
 * M_EXT | M_EXT_CLUSTER, and 99 is returned if it is a special M_EXT.
 */
int
m_sharecount(struct mbuf *m)
{
	switch (m->m_flags & (M_EXT | M_EXT_CLUSTER)) {
	case 0:
		return (0);
	case M_EXT:
		return (99);
	case M_EXT | M_EXT_CLUSTER:
		return (((struct mbcluster *)m->m_ext.ext_arg)->mcl_refs);
	}
	/* NOTREACHED */
	return (0);		/* to shut up compiler */
}

/*
 * Change mbuf to new type.
 */
void
m_chtype(struct mbuf *m, int type)
{
	struct globaldata *gd = mycpu;

	atomic_add_long_nonlocked(&mbtypes[gd->gd_cpuid][type], 1);
	atomic_subtract_long_nonlocked(&mbtypes[gd->gd_cpuid][m->m_type], 1);
	atomic_set_short_nonlocked(&m->m_type, type);
}

static void
m_reclaim(void)
{
	struct domain *dp;
	struct protosw *pr;

	kprintf("Debug: m_reclaim() called\n");

	SLIST_FOREACH(dp, &domains, dom_next) {
		for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++) {
			if (pr->pr_drain)
				(*pr->pr_drain)();
		}
	}
	atomic_add_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_drain, 1);
}

static __inline void
updatestats(struct mbuf *m, int type)
{
	struct globaldata *gd = mycpu;

	m->m_type = type;
	mbuftrack(m);
#ifdef MBUF_DEBUG
	KASSERT(m->m_next == NULL, ("mbuf %p: bad m_next in get", m));
	KASSERT(m->m_nextpkt == NULL, ("mbuf %p: bad m_nextpkt in get", m));
#endif

	atomic_add_long_nonlocked(&mbtypes[gd->gd_cpuid][type], 1);
	atomic_add_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_mbufs, 1);
}

/*
 * Allocate an mbuf.
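 *
 * Illustrative sketch (not part of the original comment) of the common
 * calling pattern:
 *
 *	struct mbuf *m;
 *
 *	m = m_get(MB_DONTWAIT, MT_DATA);
 *	if (m == NULL)
 *		return (ENOBUFS);
 *
 * When called with MB_TRYWAIT, a failed allocation first tries to
 * reclaim space from the other mbuf object caches (see the reclaimlist
 * below) before giving up.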
 */
struct mbuf *
m_get(int how, int type)
{
	struct mbuf *m;
	int ntries = 0;
	int ocf = MBTOM(how);

retryonce:

	m = objcache_get(mbuf_cache, ocf);

	if (m == NULL) {
		if ((how & MB_TRYWAIT) && ntries++ == 0) {
			struct objcache *reclaimlist[] = {
				mbufphdr_cache,
				mbufcluster_cache,
				mbufphdrcluster_cache,
				mbufjcluster_cache,
				mbufphdrjcluster_cache
			};
			const int nreclaims = NELEM(reclaimlist);

			if (!objcache_reclaimlist(reclaimlist, nreclaims, ocf))
				m_reclaim();
			goto retryonce;
		}
		++mbstat[mycpu->gd_cpuid].m_drops;
		return (NULL);
	}
#ifdef MBUF_DEBUG
	KASSERT(m->m_data == m->m_dat, ("mbuf %p: bad m_data in get", m));
#endif
	m->m_len = 0;

	updatestats(m, type);
	return (m);
}

struct mbuf *
m_gethdr(int how, int type)
{
	struct mbuf *m;
	int ocf = MBTOM(how);
	int ntries = 0;

retryonce:

	m = objcache_get(mbufphdr_cache, ocf);

	if (m == NULL) {
		if ((how & MB_TRYWAIT) && ntries++ == 0) {
			struct objcache *reclaimlist[] = {
				mbuf_cache,
				mbufcluster_cache, mbufphdrcluster_cache,
				mbufjcluster_cache, mbufphdrjcluster_cache
			};
			const int nreclaims = NELEM(reclaimlist);

			if (!objcache_reclaimlist(reclaimlist, nreclaims, ocf))
				m_reclaim();
			goto retryonce;
		}
		++mbstat[mycpu->gd_cpuid].m_drops;
		return (NULL);
	}
#ifdef MBUF_DEBUG
	KASSERT(m->m_data == m->m_pktdat, ("mbuf %p: bad m_data in get", m));
#endif
	m->m_len = 0;
	m->m_pkthdr.len = 0;

	updatestats(m, type);
	return (m);
}

/*
 * Get an mbuf (not an mbuf cluster!) and zero it.
 * Deprecated.
 */
struct mbuf *
m_getclr(int how, int type)
{
	struct mbuf *m;

	m = m_get(how, type);
	if (m != NULL)
		bzero(m->m_data, MLEN);
	return (m);
}

struct mbuf *
m_getjcl(int how, short type, int flags, size_t size)
{
	struct mbuf *m = NULL;
	struct objcache *mbclc, *mbphclc;
	int ocflags = MBTOM(how);
	int ntries = 0;

	switch (size) {
	case MCLBYTES:
		mbclc = mbufcluster_cache;
		mbphclc = mbufphdrcluster_cache;
		break;
	default:
		mbclc = mbufjcluster_cache;
		mbphclc = mbufphdrjcluster_cache;
		break;
	}

retryonce:

	if (flags & M_PKTHDR)
		m = objcache_get(mbphclc, ocflags);
	else
		m = objcache_get(mbclc, ocflags);

	if (m == NULL) {
		if ((how & MB_TRYWAIT) && ntries++ == 0) {
			struct objcache *reclaimlist[1];

			if (flags & M_PKTHDR)
				reclaimlist[0] = mbclc;
			else
				reclaimlist[0] = mbphclc;
			if (!objcache_reclaimlist(reclaimlist, 1, ocflags))
				m_reclaim();
			goto retryonce;
		}
		++mbstat[mycpu->gd_cpuid].m_drops;
		return (NULL);
	}

#ifdef MBUF_DEBUG
	KASSERT(m->m_data == m->m_ext.ext_buf,
		("mbuf %p: bad m_data in get", m));
#endif
	m->m_type = type;
	m->m_len = 0;
	m->m_pkthdr.len = 0;	/* just do it unconditionally */

	mbuftrack(m);

	atomic_add_long_nonlocked(&mbtypes[mycpu->gd_cpuid][type], 1);
	atomic_add_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_clusters, 1);
	return (m);
}

/*
 * Returns an mbuf with an attached cluster.
 * Because many network drivers use this kind of buffer a lot, it is
 * convenient to keep a small pool of free buffers of this kind.
 * Even a small size such as 10 gives about 10% improvement in the
 * forwarding rate in a bridge or router.
 */
struct mbuf *
m_getcl(int how, short type, int flags)
{
	return (m_getjcl(how, type, flags, MCLBYTES));
}

/*
 * Allocate a chain of mbufs of the requested length.
 */
struct mbuf *
m_getc(int len, int how, int type)
{
	struct mbuf *n, *nfirst = NULL, **ntail = &nfirst;
	int nsize;

	while (len > 0) {
		n = m_getl(len, how, type, 0, &nsize);
		if (n == NULL)
			goto failed;
		n->m_len = 0;
		*ntail = n;
		ntail = &n->m_next;
		len -= nsize;
	}
	return (nfirst);

failed:
	m_freem(nfirst);
	return (NULL);
}

/*
 * Allocate len-worth of mbufs and/or mbuf clusters (whatever fits best)
 * and return a pointer to the head of the allocated chain.  If m0 is
 * non-null, then we assume that it is a single mbuf or an mbuf chain to
 * which we want len bytes worth of mbufs and/or clusters attached, and so
 * if we succeed in allocating it, we will just return a pointer to m0.
 *
 * If we happen to fail at any point during the allocation, we will free
 * up everything we have already allocated and return NULL.
 *
 * Deprecated.  Use m_getc() and m_cat() instead.
 */
struct mbuf *
m_getm(struct mbuf *m0, int len, int type, int how)
{
	struct mbuf *nfirst;

	nfirst = m_getc(len, how, type);

	if (m0 != NULL) {
		m_last(m0)->m_next = nfirst;
		return (m0);
	}

	return (nfirst);
}

/*
 * Adds a cluster to a normal mbuf, M_EXT is set on success.
 * Deprecated.  Use m_getcl() instead.
 */
void
m_mclget(struct mbuf *m, int how)
{
	struct mbcluster *mcl;

	KKASSERT((m->m_flags & M_EXT) == 0);
	mcl = objcache_get(mclmeta_cache, MBTOM(how));
	if (mcl != NULL) {
		linkcluster(m, mcl);
		atomic_add_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_clusters,
					  1);
	} else {
		++mbstat[mycpu->gd_cpuid].m_drops;
	}
}

/*
 * Updates to mbcluster must be MPSAFE.  Only an entity which already has
 * a reference to the cluster can ref it, so we are in no danger of
 * racing an add with a subtract.  But the operation must still be atomic
 * since multiple entities may have a reference on the cluster.
 *
 * m_mclfree() is almost the same but it must contend with two entities
 * freeing the cluster at the same time.
 */
static void
m_mclref(void *arg)
{
	struct mbcluster *mcl = arg;

	atomic_add_int(&mcl->mcl_refs, 1);
}

/*
 * When dereferencing a cluster we have to deal with a N->0 race, where
 * N entities free their references simultaneously.  To do this we use
 * atomic_fetchadd_int().
 */
static void
m_mclfree(void *arg)
{
	struct mbcluster *mcl = arg;

	if (atomic_fetchadd_int(&mcl->mcl_refs, -1) == 1)
		objcache_put(mclmeta_cache, mcl);
}

/*
 * Free a single mbuf and any associated external storage.  The successor,
 * if any, is returned.
 *
 * We do need to check non-first mbuf for m_aux, since some existing
 * code does not call M_PREPEND properly.
 * (example: call to bpf_mtap from drivers)
 */

#ifdef MBUF_DEBUG

struct mbuf *
_m_free(struct mbuf *m, const char *func)

#else

struct mbuf *
m_free(struct mbuf *m)

#endif
{
	struct mbuf *n;
	struct globaldata *gd = mycpu;

	KASSERT(m->m_type != MT_FREE, ("freeing free mbuf %p", m));
	KASSERT(M_TRAILINGSPACE(m) >= 0, ("overflowed mbuf %p", m));
	atomic_subtract_long_nonlocked(&mbtypes[gd->gd_cpuid][m->m_type], 1);

	n = m->m_next;

	/*
	 * Make sure the mbuf is in constructed state before returning it
	 * to the objcache.
	 */
	m->m_next = NULL;
	mbufuntrack(m);
#ifdef MBUF_DEBUG
	m->m_hdr.mh_lastfunc = func;
#endif
#ifdef notyet
	KKASSERT(m->m_nextpkt == NULL);
#else
	if (m->m_nextpkt != NULL) {
		static int afewtimes = 10;

		if (afewtimes-- > 0) {
			kprintf("mfree: m->m_nextpkt != NULL\n");
			print_backtrace(-1);
		}
		m->m_nextpkt = NULL;
	}
#endif
	if (m->m_flags & M_PKTHDR) {
		m_tag_delete_chain(m);		/* eliminate XXX JH */
	}

	m->m_flags &= (M_EXT | M_EXT_CLUSTER | M_CLCACHE | M_PHCACHE);

	/*
	 * Clean the M_PKTHDR state so we can return the mbuf to its original
	 * cache.  This is based on the PHCACHE flag which tells us whether
	 * the mbuf was originally allocated out of a packet-header cache
	 * or a non-packet-header cache.
	 */
	if (m->m_flags & M_PHCACHE) {
		m->m_flags |= M_PKTHDR;
		m->m_pkthdr.rcvif = NULL;	/* eliminate XXX JH */
		m->m_pkthdr.csum_flags = 0;	/* eliminate XXX JH */
		m->m_pkthdr.fw_flags = 0;	/* eliminate XXX JH */
		SLIST_INIT(&m->m_pkthdr.tags);
	}

	/*
	 * Handle remaining flags combinations.  M_CLCACHE tells us whether
	 * the mbuf was originally allocated from a cluster cache or not,
	 * and is totally separate from whether the mbuf is currently
	 * associated with a cluster.
	 */
	switch(m->m_flags & (M_CLCACHE | M_EXT | M_EXT_CLUSTER)) {
	case M_CLCACHE | M_EXT | M_EXT_CLUSTER:
		/*
		 * mbuf+cluster cache case.  The mbuf was allocated from the
		 * combined mbuf_cluster cache and can be returned to the
		 * cache if the cluster hasn't been shared.
		 */
		if (m_sharecount(m) == 1) {
			/*
			 * The cluster has not been shared, we can just
			 * reset the data pointer and return the mbuf
			 * to the cluster cache.  Note that the reference
			 * count is left intact (it is still associated with
			 * an mbuf).
			 */
			m->m_data = m->m_ext.ext_buf;
			if (m->m_flags & M_EXT &&
			    m->m_ext.ext_size != MCLBYTES) {
				if (m->m_flags & M_PHCACHE)
					objcache_put(mbufphdrjcluster_cache, m);
				else
					objcache_put(mbufjcluster_cache, m);
			} else {
				if (m->m_flags & M_PHCACHE)
					objcache_put(mbufphdrcluster_cache, m);
				else
					objcache_put(mbufcluster_cache, m);
			}
			atomic_subtract_long_nonlocked(
			    &mbstat[mycpu->gd_cpuid].m_clusters, 1);
		} else {
			/*
			 * Hell.  Someone else has a ref on this cluster,
			 * we have to disconnect it which means we can't
			 * put it back into the mbufcluster_cache, we
			 * have to destroy the mbuf.
			 *
			 * Other mbuf references to the cluster will typically
			 * be M_EXT | M_EXT_CLUSTER but without M_CLCACHE.
			 *
			 * XXX we could try to connect another cluster to
			 * it.
			 */

			m->m_ext.ext_free(m->m_ext.ext_arg);
			m->m_flags &= ~(M_EXT | M_EXT_CLUSTER);
			if (m->m_ext.ext_size == MCLBYTES) {
				if (m->m_flags & M_PHCACHE)
					objcache_dtor(mbufphdrcluster_cache, m);
				else
					objcache_dtor(mbufcluster_cache, m);
			} else {
				if (m->m_flags & M_PHCACHE)
					objcache_dtor(mbufphdrjcluster_cache, m);
				else
					objcache_dtor(mbufjcluster_cache, m);
			}
		}
		break;
	case M_EXT | M_EXT_CLUSTER:
		/*
		 * Normal cluster associated with an mbuf that was allocated
		 * from the normal mbuf pool rather than the cluster pool.
		 * The cluster has to be independently disassociated from the
		 * mbuf.
		 */
		if (m_sharecount(m) == 1)
			atomic_subtract_long_nonlocked(
			    &mbstat[mycpu->gd_cpuid].m_clusters, 1);
		/* fall through */
	case M_EXT:
		/*
		 * Normal cluster association case, disconnect the cluster from
		 * the mbuf.  The cluster may or may not be custom.
		 */
		m->m_ext.ext_free(m->m_ext.ext_arg);
		m->m_flags &= ~(M_EXT | M_EXT_CLUSTER);
		/* fall through */
	case 0:
		/*
		 * Return the mbuf to the mbuf cache.
		 */
		if (m->m_flags & M_PHCACHE) {
			m->m_data = m->m_pktdat;
			objcache_put(mbufphdr_cache, m);
		} else {
			m->m_data = m->m_dat;
			objcache_put(mbuf_cache, m);
		}
		atomic_subtract_long_nonlocked(
		    &mbstat[mycpu->gd_cpuid].m_mbufs, 1);
		break;
	default:
		if (!panicstr)
			panic("bad mbuf flags %p %08x\n", m, m->m_flags);
		break;
	}
	return (n);
}

#ifdef MBUF_DEBUG

void
_m_freem(struct mbuf *m, const char *func)
{
	while (m)
		m = _m_free(m, func);
}

#else

void
m_freem(struct mbuf *m)
{
	while (m)
		m = m_free(m);
}

#endif

void
m_extadd(struct mbuf *m, caddr_t buf, u_int size, void (*reff)(void *),
	 void (*freef)(void *), void *arg)
{
	m->m_ext.ext_arg = arg;
	m->m_ext.ext_buf = buf;
	m->m_ext.ext_ref = reff;
	m->m_ext.ext_free = freef;
	m->m_ext.ext_size = size;
	reff(arg);
	m->m_data = buf;
	m->m_flags |= M_EXT;
}

/*
 * mbuf utility routines
 */

/*
 * Lesser-used path for M_PREPEND: allocate a new mbuf to prepend to the
 * chain and copy the junk along.
 */
struct mbuf *
m_prepend(struct mbuf *m, int len, int how)
{
	struct mbuf *mn;

	if (m->m_flags & M_PKTHDR)
		mn = m_gethdr(how, m->m_type);
	else
		mn = m_get(how, m->m_type);
	if (mn == NULL) {
		m_freem(m);
		return (NULL);
	}
	if (m->m_flags & M_PKTHDR)
		M_MOVE_PKTHDR(mn, m);
	mn->m_next = m;
	m = mn;
	if (len < MHLEN)
		MH_ALIGN(m, len);
	m->m_len = len;
	return (m);
}

/*
 * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
 * continuing for "len" bytes.  If len is M_COPYALL, copy to end of mbuf.
 * The wait parameter is a choice of MB_WAIT/MB_DONTWAIT from caller.
 * Note that the copy is read-only, because clusters are not copied,
 * only their reference counts are incremented.
 */
struct mbuf *
m_copym(const struct mbuf *m, int off0, int len, int wait)
{
	struct mbuf *n, **np;
	int off = off0;
	struct mbuf *top;
	int copyhdr = 0;

	KASSERT(off >= 0, ("m_copym, negative off %d", off));
	KASSERT(len >= 0, ("m_copym, negative len %d", len));
	if (off == 0 && (m->m_flags & M_PKTHDR))
		copyhdr = 1;
	while (off > 0) {
		KASSERT(m != NULL, ("m_copym, offset > size of mbuf chain"));
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	np = &top;
	top = NULL;
	while (len > 0) {
		if (m == NULL) {
			KASSERT(len == M_COPYALL,
			    ("m_copym, length > size of mbuf chain"));
			break;
		}
		/*
		 * Because we are sharing any cluster attachment below,
		 * be sure to get an mbuf that does not have a cluster
		 * associated with it.
		 */
		if (copyhdr)
			n = m_gethdr(wait, m->m_type);
		else
			n = m_get(wait, m->m_type);
		*np = n;
		if (n == NULL)
			goto nospace;
		if (copyhdr) {
			if (!m_dup_pkthdr(n, m, wait))
				goto nospace;
			if (len == M_COPYALL)
				n->m_pkthdr.len -= off0;
			else
				n->m_pkthdr.len = len;
			copyhdr = 0;
		}
		n->m_len = min(len, m->m_len - off);
		if (m->m_flags & M_EXT) {
			KKASSERT((n->m_flags & M_EXT) == 0);
			n->m_data = m->m_data + off;
			m->m_ext.ext_ref(m->m_ext.ext_arg);
			n->m_ext = m->m_ext;
			n->m_flags |= m->m_flags & (M_EXT | M_EXT_CLUSTER);
		} else {
			bcopy(mtod(m, caddr_t) + off, mtod(n, caddr_t),
			    (unsigned)n->m_len);
		}
		if (len != M_COPYALL)
			len -= n->m_len;
		off = 0;
		m = m->m_next;
		np = &n->m_next;
	}
	if (top == NULL)
		atomic_add_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_mcfail, 1);
	return (top);
nospace:
	m_freem(top);
	atomic_add_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_mcfail, 1);
	return (NULL);
}

/*
 * Copy an entire packet, including header (which must be present).
 * An optimization of the common case `m_copym(m, 0, M_COPYALL, how)'.
 * Note that the copy is read-only, because clusters are not copied,
 * only their reference counts are incremented.
 * Preserve alignment of the first mbuf so if the creator has left
 * some room at the beginning (e.g. for inserting protocol headers)
 * the copies also have the room available.
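 *
 * Illustrative note (not part of the original comment): since the copy
 * shares the original's clusters it must be treated as read-only.  A
 * caller that intends to modify the data should use m_dup() below,
 * which performs a deep copy:
 *
 *	n = m_copypacket(m, MB_DONTWAIT);	shared clusters, read-only
 *	n = m_dup(m, MB_DONTWAIT);		deep copy, writable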
 */
struct mbuf *
m_copypacket(struct mbuf *m, int how)
{
	struct mbuf *top, *n, *o;

	n = m_gethdr(how, m->m_type);
	top = n;
	if (!n)
		goto nospace;

	if (!m_dup_pkthdr(n, m, how))
		goto nospace;
	n->m_len = m->m_len;
	if (m->m_flags & M_EXT) {
		KKASSERT((n->m_flags & M_EXT) == 0);
		n->m_data = m->m_data;
		m->m_ext.ext_ref(m->m_ext.ext_arg);
		n->m_ext = m->m_ext;
		n->m_flags |= m->m_flags & (M_EXT | M_EXT_CLUSTER);
	} else {
		n->m_data = n->m_pktdat + (m->m_data - m->m_pktdat);
		bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
	}

	m = m->m_next;
	while (m) {
		o = m_get(how, m->m_type);
		if (!o)
			goto nospace;

		n->m_next = o;
		n = n->m_next;

		n->m_len = m->m_len;
		if (m->m_flags & M_EXT) {
			KKASSERT((n->m_flags & M_EXT) == 0);
			n->m_data = m->m_data;
			m->m_ext.ext_ref(m->m_ext.ext_arg);
			n->m_ext = m->m_ext;
			n->m_flags |= m->m_flags & (M_EXT | M_EXT_CLUSTER);
		} else {
			bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
		}

		m = m->m_next;
	}
	return top;
nospace:
	m_freem(top);
	atomic_add_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_mcfail, 1);
	return (NULL);
}

/*
 * Copy data from an mbuf chain starting "off" bytes from the beginning,
 * continuing for "len" bytes, into the indicated buffer.
 */
void
m_copydata(const struct mbuf *m, int off, int len, caddr_t cp)
{
	unsigned count;

	KASSERT(off >= 0, ("m_copydata, negative off %d", off));
	KASSERT(len >= 0, ("m_copydata, negative len %d", len));
	while (off > 0) {
		KASSERT(m != NULL, ("m_copydata, offset > size of mbuf chain"));
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	while (len > 0) {
		KASSERT(m != NULL, ("m_copydata, length > size of mbuf chain"));
		count = min(m->m_len - off, len);
		bcopy(mtod(m, caddr_t) + off, cp, count);
		len -= count;
		cp += count;
		off = 0;
		m = m->m_next;
	}
}

/*
 * Copy a packet header mbuf chain into a completely new chain, including
 * copying any mbuf clusters.  Use this instead of m_copypacket() when
 * you need a writable copy of an mbuf chain.
 */
struct mbuf *
m_dup(struct mbuf *m, int how)
{
	struct mbuf **p, *top = NULL;
	int remain, moff, nsize;

	/* Sanity check */
	if (m == NULL)
		return (NULL);
	KASSERT((m->m_flags & M_PKTHDR) != 0, ("%s: !PKTHDR", __func__));

	/* While there's more data, get a new mbuf, tack it on, and fill it */
	remain = m->m_pkthdr.len;
	moff = 0;
	p = &top;
	while (remain > 0 || top == NULL) {	/* allow m->m_pkthdr.len == 0 */
		struct mbuf *n;

		/* Get the next new mbuf */
		n = m_getl(remain, how, m->m_type, top == NULL ? M_PKTHDR : 0,
			   &nsize);
		if (n == NULL)
			goto nospace;
		if (top == NULL)
			if (!m_dup_pkthdr(n, m, how))
				goto nospace0;

		/* Link it into the new chain */
		*p = n;
		p = &n->m_next;

		/* Copy data from original mbuf(s) into new mbuf */
		n->m_len = 0;
		while (n->m_len < nsize && m != NULL) {
			int chunk = min(nsize - n->m_len, m->m_len - moff);

			bcopy(m->m_data + moff, n->m_data + n->m_len, chunk);
			moff += chunk;
			n->m_len += chunk;
			remain -= chunk;
			if (moff == m->m_len) {
				m = m->m_next;
				moff = 0;
			}
		}

		/* Check correct total mbuf length */
		KASSERT((remain > 0 && m != NULL) || (remain == 0 && m == NULL),
			("%s: bogus m_pkthdr.len", __func__));
	}
	return (top);

nospace:
	m_freem(top);
nospace0:
	atomic_add_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_mcfail, 1);
	return (NULL);
}

/*
 * Copy the non-packet mbuf data chain into a new set of mbufs, including
 * copying any mbuf clusters.  This is typically used to realign a data
 * chain by nfs_realign().
 *
 * The original chain is left intact.  how should be MB_WAIT or MB_DONTWAIT
 * and NULL can be returned if MB_DONTWAIT is passed.
 *
 * Be careful to use cluster mbufs, a large mbuf chain converted to non
 * cluster mbufs can exhaust our supply of mbufs.
 */
struct mbuf *
m_dup_data(struct mbuf *m, int how)
{
	struct mbuf **p, *n, *top = NULL;
	int mlen, moff, chunk, gsize, nsize;

	/*
	 * Degenerate case
	 */
	if (m == NULL)
		return (NULL);

	/*
	 * Optimize the mbuf allocation but do not get too carried away.
	 */
	if (m->m_next || m->m_len > MLEN) {
		if (m->m_flags & M_EXT && m->m_ext.ext_size == MCLBYTES)
			gsize = MCLBYTES;
		else
			gsize = MJUMPAGESIZE;
	} else {
		gsize = MLEN;
	}

	/* Chain control */
	p = &top;
	n = NULL;
	nsize = 0;

	/*
	 * Scan the mbuf chain until nothing is left, the new mbuf chain
	 * will be allocated on the fly as needed.
	 */
	while (m) {
		mlen = m->m_len;
		moff = 0;

		while (mlen) {
			KKASSERT(m->m_type == MT_DATA);
			if (n == NULL) {
				n = m_getl(gsize, how, MT_DATA, 0, &nsize);
				if (n == NULL)
					goto nospace;
				n->m_len = 0;
				*p = n;
				p = &n->m_next;
			}
			chunk = imin(mlen, nsize);
			bcopy(m->m_data + moff, n->m_data + n->m_len, chunk);
			mlen -= chunk;
			moff += chunk;
			n->m_len += chunk;
			nsize -= chunk;
			if (nsize == 0)
				n = NULL;
		}
		m = m->m_next;
	}
	*p = NULL;
	return(top);
nospace:
	*p = NULL;
	m_freem(top);
	atomic_add_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_mcfail, 1);
	return (NULL);
}

/*
 * Concatenate mbuf chain n to m.
 * Both chains must be of the same type (e.g. MT_DATA).
 * Any m_pkthdr is not updated.
 */
void
m_cat(struct mbuf *m, struct mbuf *n)
{
	m = m_last(m);
	while (n) {
		if (m->m_flags & M_EXT ||
		    m->m_data + m->m_len + n->m_len >= &m->m_dat[MLEN]) {
			/* just join the two chains */
			m->m_next = n;
			return;
		}
		/* splat the data from one into the other */
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		      (u_int)n->m_len);
		m->m_len += n->m_len;
		n = m_free(n);
	}
}

void
m_adj(struct mbuf *mp, int req_len)
{
	int len = req_len;
	struct mbuf *m;
	int count;

	if ((m = mp) == NULL)
		return;
	if (len >= 0) {
		/*
		 * Trim from head.
		 */
		while (m != NULL && len > 0) {
			if (m->m_len <= len) {
				len -= m->m_len;
				m->m_len = 0;
				m = m->m_next;
			} else {
				m->m_len -= len;
				m->m_data += len;
				len = 0;
			}
		}
		m = mp;
		if (mp->m_flags & M_PKTHDR)
			m->m_pkthdr.len -= (req_len - len);
	} else {
		/*
		 * Trim from tail.  Scan the mbuf chain,
		 * calculating its length and finding the last mbuf.
		 * If the adjustment only affects this mbuf, then just
		 * adjust and return.  Otherwise, rescan and truncate
		 * after the remaining size.
		 */
		len = -len;
		count = 0;
		for (;;) {
			count += m->m_len;
			if (m->m_next == NULL)
				break;
			m = m->m_next;
		}
		if (m->m_len >= len) {
			m->m_len -= len;
			if (mp->m_flags & M_PKTHDR)
				mp->m_pkthdr.len -= len;
			return;
		}
		count -= len;
		if (count < 0)
			count = 0;
		/*
		 * Correct length for chain is "count".
		 * Find the mbuf with last data, adjust its length,
		 * and toss data from remaining mbufs on chain.
		 */
		m = mp;
		if (m->m_flags & M_PKTHDR)
			m->m_pkthdr.len = count;
		for (; m; m = m->m_next) {
			if (m->m_len >= count) {
				m->m_len = count;
				break;
			}
			count -= m->m_len;
		}
		while (m->m_next)
			(m = m->m_next)->m_len = 0;
	}
}

/*
 * Set the m_data pointer of a newly-allocated mbuf
 * to place an object of the specified size at the
 * end of the mbuf, longword aligned.
 */
void
m_align(struct mbuf *m, int len)
{
	int adjust;

	if (m->m_flags & M_EXT)
		adjust = m->m_ext.ext_size - len;
	else if (m->m_flags & M_PKTHDR)
		adjust = MHLEN - len;
	else
		adjust = MLEN - len;
	m->m_data += adjust &~ (sizeof(long)-1);
}

/*
 * Rearrange an mbuf chain so that len bytes are contiguous
 * and in the data area of an mbuf (so that mtod will work for a structure
 * of size len).  Returns the resulting mbuf chain on success, frees it and
 * returns null on failure.  If there is room, it will add up to
 * max_protohdr-len extra bytes to the contiguous region in an attempt to
 * avoid being called next time.
 */
struct mbuf *
m_pullup(struct mbuf *n, int len)
{
	struct mbuf *m;
	int count;
	int space;

	/*
	 * If first mbuf has no cluster, and has room for len bytes
	 * without shifting current data, pullup into it,
	 * otherwise allocate a new mbuf to prepend to the chain.
	 */
	if (!(n->m_flags & M_EXT) &&
	    n->m_data + len < &n->m_dat[MLEN] &&
	    n->m_next) {
		if (n->m_len >= len)
			return (n);
		m = n;
		n = n->m_next;
		len -= m->m_len;
	} else {
		if (len > MHLEN)
			goto bad;
		if (n->m_flags & M_PKTHDR)
			m = m_gethdr(MB_DONTWAIT, n->m_type);
		else
			m = m_get(MB_DONTWAIT, n->m_type);
		if (m == NULL)
			goto bad;
		m->m_len = 0;
		if (n->m_flags & M_PKTHDR)
			M_MOVE_PKTHDR(m, n);
	}
	space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
	do {
		count = min(min(max(len, max_protohdr), space), n->m_len);
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		      (unsigned)count);
		len -= count;
		m->m_len += count;
		n->m_len -= count;
		space -= count;
		if (n->m_len)
			n->m_data += count;
		else
			n = m_free(n);
	} while (len > 0 && n);
	if (len > 0) {
		m_free(m);
		goto bad;
	}
	m->m_next = n;
	return (m);
bad:
	m_freem(n);
	atomic_add_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_mcfail, 1);
	return (NULL);
}

/*
 * Partition an mbuf chain in two pieces, returning the tail --
 * all but the first len0 bytes.  In case of failure, it returns NULL and
 * attempts to restore the chain to its original state.
 *
 * Note that the resulting mbufs might be read-only, because the new
 * mbuf can end up sharing an mbuf cluster with the original mbuf if
 * the "breaking point" happens to lie within a cluster mbuf.  Use the
 * M_WRITABLE() macro to check for this case.
 */
struct mbuf *
m_split(struct mbuf *m0, int len0, int wait)
{
	struct mbuf *m, *n;
	unsigned len = len0, remain;

	for (m = m0; m && len > m->m_len; m = m->m_next)
		len -= m->m_len;
	if (m == NULL)
		return (NULL);
	remain = m->m_len - len;
	if (m0->m_flags & M_PKTHDR) {
		n = m_gethdr(wait, m0->m_type);
		if (n == NULL)
			return (NULL);
		n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
		n->m_pkthdr.len = m0->m_pkthdr.len - len0;
		m0->m_pkthdr.len = len0;
		if (m->m_flags & M_EXT)
			goto extpacket;
		if (remain > MHLEN) {
			/* m can't be the lead packet */
			MH_ALIGN(n, 0);
			n->m_next = m_split(m, len, wait);
			if (n->m_next == NULL) {
				m_free(n);
				return (NULL);
			} else {
				n->m_len = 0;
				return (n);
			}
		} else
			MH_ALIGN(n, remain);
	} else if (remain == 0) {
		n = m->m_next;
		m->m_next = 0;
		return (n);
	} else {
		n = m_get(wait, m->m_type);
		if (n == NULL)
			return (NULL);
		M_ALIGN(n, remain);
	}
extpacket:
	if (m->m_flags & M_EXT) {
		KKASSERT((n->m_flags & M_EXT) == 0);
		n->m_data = m->m_data + len;
		m->m_ext.ext_ref(m->m_ext.ext_arg);
		n->m_ext = m->m_ext;
		n->m_flags |= m->m_flags & (M_EXT | M_EXT_CLUSTER);
	} else {
		bcopy(mtod(m, caddr_t) + len, mtod(n, caddr_t), remain);
	}
	n->m_len = remain;
	m->m_len = len;
	n->m_next = m->m_next;
	m->m_next = 0;
	return (n);
}

/*
 * Routine to copy from device local memory into mbufs.
 * Note: "offset" is ill-defined and always called as 0, so ignore it.
 */
struct mbuf *
m_devget(char *buf, int len, int offset, struct ifnet *ifp,
	 void (*copy)(volatile const void *from, volatile void *to,
		      size_t length))
{
	struct mbuf *m, *mfirst = NULL, **mtail;
	int nsize, flags;

	if (copy == NULL)
		copy = bcopy;
	mtail = &mfirst;
	flags = M_PKTHDR;

	while (len > 0) {
		m = m_getl(len, MB_DONTWAIT, MT_DATA, flags, &nsize);
		if (m == NULL) {
			m_freem(mfirst);
			return (NULL);
		}
		m->m_len = min(len, nsize);

		if (flags & M_PKTHDR) {
			if (len + max_linkhdr <= nsize)
				m->m_data += max_linkhdr;
			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = len;
			flags = 0;
		}

		copy(buf, m->m_data, (unsigned)m->m_len);
		buf += m->m_len;
		len -= m->m_len;
		*mtail = m;
		mtail = &m->m_next;
	}

	return (mfirst);
}

/*
 * Routine to pad mbuf to the specified length 'padto'.
 */
int
m_devpad(struct mbuf *m, int padto)
{
	struct mbuf *last = NULL;
	int padlen;

	if (padto <= m->m_pkthdr.len)
		return 0;

	padlen = padto - m->m_pkthdr.len;

	/* if there's only the packet-header and we can pad there, use it. */
	if (m->m_pkthdr.len == m->m_len && M_TRAILINGSPACE(m) >= padlen) {
		last = m;
	} else {
		/*
		 * Walk packet chain to find last mbuf. We will either
		 * pad there, or append a new mbuf and pad it
		 */
		for (last = m; last->m_next != NULL; last = last->m_next)
			; /* EMPTY */

		/* `last' now points to last in chain. */
		if (M_TRAILINGSPACE(last) < padlen) {
			struct mbuf *n;

			/* Allocate new empty mbuf, pad it.  Compact later. */
			MGET(n, MB_DONTWAIT, MT_DATA);
			if (n == NULL)
				return ENOBUFS;
			n->m_len = 0;
			last->m_next = n;
			last = n;
		}
	}
	KKASSERT(M_TRAILINGSPACE(last) >= padlen);
	KKASSERT(M_WRITABLE(last));

	/* Now zero the pad area */
	bzero(mtod(last, char *) + last->m_len, padlen);
	last->m_len += padlen;
	m->m_pkthdr.len += padlen;
	return 0;
}

/*
 * Copy data from a buffer back into the indicated mbuf chain,
 * starting "off" bytes from the beginning, extending the mbuf
 * chain if necessary.
 */
void
m_copyback(struct mbuf *m0, int off, int len, caddr_t cp)
{
	int mlen;
	struct mbuf *m = m0, *n;
	int totlen = 0;

	if (m0 == NULL)
		return;
	while (off > (mlen = m->m_len)) {
		off -= mlen;
		totlen += mlen;
		if (m->m_next == NULL) {
			n = m_getclr(MB_DONTWAIT, m->m_type);
			if (n == NULL)
				goto out;
			n->m_len = min(MLEN, len + off);
			m->m_next = n;
		}
		m = m->m_next;
	}
	while (len > 0) {
		mlen = min(m->m_len - off, len);
		bcopy(cp, off + mtod(m, caddr_t), (unsigned)mlen);
		cp += mlen;
		len -= mlen;
		mlen += off;
		off = 0;
		totlen += mlen;
		if (len == 0)
			break;
		if (m->m_next == NULL) {
			n = m_get(MB_DONTWAIT, m->m_type);
			if (n == NULL)
				break;
			n->m_len = min(MLEN, len);
			m->m_next = n;
		}
		m = m->m_next;
	}
out:
	if (((m = m0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen))
		m->m_pkthdr.len = totlen;
}

/*
 * Append the specified data to the indicated mbuf chain.
 * Extend the mbuf chain if the new data does not fit in
 * existing space.
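 *
 * Illustrative sketch (not part of the original comment), appending a
 * hypothetical trailer structure to a packet:
 *
 *	struct example_trailer et;
 *
 *	if (!m_append(m, sizeof(et), (c_caddr_t)&et))
 *		return (ENOBUFS);
 *
 * m_pkthdr.len is adjusted automatically when m0 has M_PKTHDR set.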
 *
 * Return 1 if able to complete the job; otherwise 0.
 */
int
m_append(struct mbuf *m0, int len, c_caddr_t cp)
{
	struct mbuf *m, *n;
	int remainder, space;

	for (m = m0; m->m_next != NULL; m = m->m_next)
		;
	remainder = len;
	space = M_TRAILINGSPACE(m);
	if (space > 0) {
		/*
		 * Copy into available space.
		 */
		if (space > remainder)
			space = remainder;
		bcopy(cp, mtod(m, caddr_t) + m->m_len, space);
		m->m_len += space;
		cp += space, remainder -= space;
	}
	while (remainder > 0) {
		/*
		 * Allocate a new mbuf; could check space
		 * and allocate a cluster instead.
		 */
		n = m_get(MB_DONTWAIT, m->m_type);
		if (n == NULL)
			break;
		n->m_len = min(MLEN, remainder);
		bcopy(cp, mtod(n, caddr_t), n->m_len);
		cp += n->m_len, remainder -= n->m_len;
		m->m_next = n;
		m = n;
	}
	if (m0->m_flags & M_PKTHDR)
		m0->m_pkthdr.len += len - remainder;
	return (remainder == 0);
}

/*
 * Apply function f to the data in an mbuf chain starting "off" bytes from
 * the beginning, continuing for "len" bytes.
 */
int
m_apply(struct mbuf *m, int off, int len,
	int (*f)(void *, void *, u_int), void *arg)
{
	u_int count;
	int rval;

	KASSERT(off >= 0, ("m_apply, negative off %d", off));
	KASSERT(len >= 0, ("m_apply, negative len %d", len));
	while (off > 0) {
		KASSERT(m != NULL, ("m_apply, offset > size of mbuf chain"));
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	while (len > 0) {
		KASSERT(m != NULL, ("m_apply, length > size of mbuf chain"));
		count = min(m->m_len - off, len);
		rval = (*f)(arg, mtod(m, caddr_t) + off, count);
		if (rval)
			return (rval);
		len -= count;
		off = 0;
		m = m->m_next;
	}
	return (0);
}

/*
 * Return a pointer to mbuf/offset of location in mbuf chain.
 */
struct mbuf *
m_getptr(struct mbuf *m, int loc, int *off)
{
	while (loc >= 0) {
		/* Normal end of search. */
		if (m->m_len > loc) {
			*off = loc;
			return (m);
		} else {
			loc -= m->m_len;
			if (m->m_next == NULL) {
				if (loc == 0) {
					/* Point at the end of valid data. */
					*off = m->m_len;
					return (m);
				}
				return (NULL);
			}
			m = m->m_next;
		}
	}
	return (NULL);
}

void
m_print(const struct mbuf *m)
{
	int len;
	const struct mbuf *m2;

	len = m->m_pkthdr.len;
	m2 = m;
	while (len) {
		kprintf("%p %*D\n", m2, m2->m_len, (u_char *)m2->m_data, "-");
		len -= m2->m_len;
		m2 = m2->m_next;
	}
	return;
}

/*
 * "Move" mbuf pkthdr from "from" to "to".
 * "from" must have M_PKTHDR set, and "to" must be empty.
 */
void
m_move_pkthdr(struct mbuf *to, struct mbuf *from)
{
	KASSERT((to->m_flags & M_PKTHDR), ("m_move_pkthdr: not packet header"));

	to->m_flags |= from->m_flags & M_COPYFLAGS;
	to->m_pkthdr = from->m_pkthdr;		/* especially tags */
	SLIST_INIT(&from->m_pkthdr.tags);	/* purge tags from src */
}

/*
 * Duplicate "from"'s mbuf pkthdr in "to".
 * "from" must have M_PKTHDR set, and "to" must be empty.
 * In particular, this does a deep copy of the packet tags.
 */
int
m_dup_pkthdr(struct mbuf *to, const struct mbuf *from, int how)
{
	KASSERT((to->m_flags & M_PKTHDR), ("m_dup_pkthdr: not packet header"));

	to->m_flags = (from->m_flags & M_COPYFLAGS) |
		      (to->m_flags & ~M_COPYFLAGS);
	to->m_pkthdr = from->m_pkthdr;
	SLIST_INIT(&to->m_pkthdr.tags);
	return (m_tag_copy_chain(to, from, how));
}

/*
 * Defragment an mbuf chain, returning the shortest possible
 * chain of mbufs and clusters.  If allocation fails and
 * this cannot be completed, NULL will be returned, but
 * the passed in chain will be unchanged.  Upon success,
 * the original chain will be freed, and the new chain
 * will be returned.
 *
 * If a non-packet header is passed in, the original
 * mbuf (chain?) will be returned unharmed.
 *
 * m_defrag_nofree doesn't free the passed in mbuf.
 */
struct mbuf *
m_defrag(struct mbuf *m0, int how)
{
	struct mbuf *m_new;

	if ((m_new = m_defrag_nofree(m0, how)) == NULL)
		return (NULL);
	if (m_new != m0)
		m_freem(m0);
	return (m_new);
}

struct mbuf *
m_defrag_nofree(struct mbuf *m0, int how)
{
	struct mbuf *m_new = NULL, *m_final = NULL;
	int progress = 0, length, nsize;

	if (!(m0->m_flags & M_PKTHDR))
		return (m0);

#ifdef MBUF_STRESS_TEST
	if (m_defragrandomfailures) {
		int temp = karc4random() & 0xff;
		if (temp == 0xba)
			goto nospace;
	}
#endif

	m_final = m_getl(m0->m_pkthdr.len, how, MT_DATA, M_PKTHDR, &nsize);
	if (m_final == NULL)
		goto nospace;
	m_final->m_len = 0;	/* in case m0->m_pkthdr.len is zero */

	if (m_dup_pkthdr(m_final, m0, how) == 0)
		goto nospace;

	m_new = m_final;

	while (progress < m0->m_pkthdr.len) {
		length = m0->m_pkthdr.len - progress;
		if (length > MCLBYTES)
			length = MCLBYTES;

		if (m_new == NULL) {
			m_new = m_getl(length, how, MT_DATA, 0, &nsize);
			if (m_new == NULL)
				goto nospace;
		}

		m_copydata(m0, progress, length, mtod(m_new, caddr_t));
		progress += length;
		m_new->m_len = length;
		if (m_new != m_final)
			m_cat(m_final, m_new);
		m_new = NULL;
	}
	if (m0->m_next == NULL)
		m_defraguseless++;
	m_defragpackets++;
	m_defragbytes += m_final->m_pkthdr.len;
	return (m_final);
nospace:
	m_defragfailure++;
	if (m_new)
		m_free(m_new);
	m_freem(m_final);
	return (NULL);
}

/*
 * Move data from uio into mbufs.
 */
struct mbuf *
m_uiomove(struct uio *uio)
{
	struct mbuf *m;			/* current working mbuf */
	struct mbuf *head = NULL;	/* result mbuf chain */
	struct mbuf **mp = &head;
	int flags = M_PKTHDR;
	int nsize;
	int error;
	int resid;

	do {
		if (uio->uio_resid > INT_MAX)
			resid = INT_MAX;
		else
			resid = (int)uio->uio_resid;
		m = m_getl(resid, MB_WAIT, MT_DATA, flags, &nsize);
		if (flags) {
			m->m_pkthdr.len = 0;
			/* Leave room for protocol headers. */
			if (resid < MHLEN)
				MH_ALIGN(m, resid);
			flags = 0;
		}
		m->m_len = imin(nsize, resid);
		error = uiomove(mtod(m, caddr_t), m->m_len, uio);
		if (error) {
			m_free(m);
			goto failed;
		}
		*mp = m;
		mp = &m->m_next;
		head->m_pkthdr.len += m->m_len;
	} while (uio->uio_resid > 0);

	return (head);

failed:
	m_freem(head);
	return (NULL);
}

struct mbuf *
m_last(struct mbuf *m)
{
	while (m->m_next)
		m = m->m_next;
	return (m);
}

/*
 * Return the number of bytes in an mbuf chain.
 * If lastm is not NULL, also return the last mbuf.
 */
u_int
m_lengthm(struct mbuf *m, struct mbuf **lastm)
{
	u_int len = 0;
	struct mbuf *prev = m;

	while (m) {
		len += m->m_len;
		prev = m;
		m = m->m_next;
	}
	if (lastm != NULL)
		*lastm = prev;
	return (len);
}

/*
 * Like m_lengthm(), except also keep track of mbuf usage.
 */
u_int
m_countm(struct mbuf *m, struct mbuf **lastm, u_int *pmbcnt)
{
	u_int len = 0, mbcnt = 0;
	struct mbuf *prev = m;

	while (m) {
		len += m->m_len;
		mbcnt += MSIZE;
		if (m->m_flags & M_EXT)
			mbcnt += m->m_ext.ext_size;
		prev = m;
		m = m->m_next;
	}
	if (lastm != NULL)
		*lastm = prev;
	*pmbcnt = mbcnt;
	return (len);
}