/*	$NetBSD: uipc_mbuf.c,v 1.13 1994/10/30 21:48:06 cgd Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1988, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_mbuf.c	8.2 (Berkeley) 1/4/94
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/map.h>
#define MBTYPES
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/syslog.h>
#include <sys/domain.h>
#include <sys/protosw.h>

#include <vm/vm.h>

extern	vm_map_t mb_map;
struct	mbuf *mbutl;
char	*mclrefcnt;

void
mbinit()
{
	int s;

	s = splimp();
	if (m_clalloc(max(4096/CLBYTES, 1), M_DONTWAIT) == 0)
		goto bad;
	splx(s);
	return;
bad:
	panic("mbinit");
}

/*
 * Allocate some number of mbuf clusters
 * and place on cluster free list.
 * Must be called at splimp.
 */
/* ARGSUSED */
m_clalloc(ncl, nowait)
	register int ncl;
	int nowait;
{
	static int logged;
	register caddr_t p;
	register int i;
	int npg;

	npg = ncl * CLSIZE;
	p = (caddr_t)kmem_malloc(mb_map, ctob(npg), !nowait);
	if (p == NULL) {
		if (logged == 0) {
			logged++;
			log(LOG_ERR, "mb_map full\n");
		}
		return (0);
	}
	ncl = ncl * CLBYTES / MCLBYTES;
	for (i = 0; i < ncl; i++) {
		((union mcluster *)p)->mcl_next = mclfree;
		mclfree = (union mcluster *)p;
		p += MCLBYTES;
		mbstat.m_clfree++;
	}
	mbstat.m_clusters += ncl;
	return (1);
}
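/*
 * Illustrative sketch, not part of the original file: this is how a
 * caller typically consumes the cluster free list that m_clalloc()
 * fills.  MCLGET() takes a cluster off mclfree and attaches it to an
 * mbuf as external storage; the caller must test M_EXT to learn
 * whether the attach succeeded.  The function name is hypothetical and
 * the block is disabled so it does not enter the build.
 */
#if 0
static struct mbuf *
example_get_cluster_mbuf(how)
	int how;
{
	struct mbuf *m;

	MGET(m, how, MT_DATA);			/* ordinary mbuf first */
	if (m == 0)
		return (0);
	MCLGET(m, how);				/* attach an external cluster */
	if ((m->m_flags & M_EXT) == 0) {
		(void) m_free(m);		/* no cluster was available */
		return (0);
	}
	return (m);				/* m_data now points into the cluster */
}
#endif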
/*
 * When MGET fails, ask protocols to free space when short of memory,
 * then re-attempt to allocate an mbuf.
 */
struct mbuf *
m_retry(i, t)
	int i, t;
{
	register struct mbuf *m;

	m_reclaim();
	/* Temporarily map m_retry to a constant NULL so MGET cannot recurse. */
#define m_retry(i, t)	(struct mbuf *)0
	MGET(m, i, t);
#undef m_retry
	return (m);
}

/*
 * As above; retry an MGETHDR.
 */
struct mbuf *
m_retryhdr(i, t)
	int i, t;
{
	register struct mbuf *m;

	m_reclaim();
	/* Same trick as in m_retry() to keep MGETHDR from recursing. */
#define m_retryhdr(i, t) (struct mbuf *)0
	MGETHDR(m, i, t);
#undef m_retryhdr
	return (m);
}

m_reclaim()
{
	register struct domain *dp;
	register struct protosw *pr;
	int s = splimp();

	for (dp = domains; dp; dp = dp->dom_next)
		for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++)
			if (pr->pr_drain)
				(*pr->pr_drain)();
	splx(s);
	mbstat.m_drain++;
}

/*
 * Space allocation routines.
 * These are also available as macros
 * for critical paths.
 */
struct mbuf *
m_get(nowait, type)
	int nowait, type;
{
	register struct mbuf *m;

	MGET(m, nowait, type);
	return (m);
}

struct mbuf *
m_gethdr(nowait, type)
	int nowait, type;
{
	register struct mbuf *m;

	MGETHDR(m, nowait, type);
	return (m);
}

struct mbuf *
m_getclr(nowait, type)
	int nowait, type;
{
	register struct mbuf *m;

	MGET(m, nowait, type);
	if (m == 0)
		return (0);
	bzero(mtod(m, caddr_t), MLEN);
	return (m);
}

struct mbuf *
m_free(m)
	struct mbuf *m;
{
	register struct mbuf *n;

	MFREE(m, n);
	return (n);
}

void
m_freem(m)
	register struct mbuf *m;
{
	register struct mbuf *n;

	if (m == NULL)
		return;
	do {
		MFREE(m, n);
	} while (m = n);
}

/*
 * Mbuffer utility routines.
 */

/*
 * Lesser-used path for M_PREPEND:
 * allocate new mbuf to prepend to chain,
 * copy junk along.
 */
struct mbuf *
m_prepend(m, len, how)
	register struct mbuf *m;
	int len, how;
{
	struct mbuf *mn;

	MGET(mn, how, m->m_type);
	if (mn == (struct mbuf *)NULL) {
		m_freem(m);
		return ((struct mbuf *)NULL);
	}
	if (m->m_flags & M_PKTHDR) {
		M_COPY_PKTHDR(mn, m);
		m->m_flags &= ~M_PKTHDR;
	}
	mn->m_next = m;
	m = mn;
	if (len < MHLEN)
		MH_ALIGN(m, len);
	m->m_len = len;
	return (m);
}
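/*
 * Illustrative sketch, not part of the original file: callers normally
 * get header space through the M_PREPEND() macro, which falls back to
 * m_prepend() above only when the first mbuf has no leading room.
 * "hdrlen" is a hypothetical header size and the function name is made
 * up; the block is disabled so it does not enter the build.
 */
#if 0
static struct mbuf *
example_prepend_header(m, hdrlen)
	struct mbuf *m;
	int hdrlen;
{
	M_PREPEND(m, hdrlen, M_DONTWAIT);	/* sets m to NULL on failure */
	if (m == 0)
		return (0);
	bzero(mtod(m, caddr_t), (unsigned)hdrlen);	/* caller fills in header */
	return (m);
}
#endif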
/*
 * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
 * continuing for "len" bytes.  If len is M_COPYALL, copy to the end of the
 * mbuf chain.  The wait parameter is a choice of M_WAIT/M_DONTWAIT from caller.
 */
int MCFail;

struct mbuf *
m_copym(m, off0, len, wait)
	register struct mbuf *m;
	int off0, wait;
	register int len;
{
	register struct mbuf *n, **np;
	register int off = off0;
	struct mbuf *top;
	int copyhdr = 0;

	if (off < 0 || len < 0)
		panic("m_copym");
	if (off == 0 && m->m_flags & M_PKTHDR)
		copyhdr = 1;
	while (off > 0) {
		if (m == 0)
			panic("m_copym");
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	np = &top;
	top = 0;
	while (len > 0) {
		if (m == 0) {
			if (len != M_COPYALL)
				panic("m_copym");
			break;
		}
		MGET(n, wait, m->m_type);
		*np = n;
		if (n == 0)
			goto nospace;
		if (copyhdr) {
			M_COPY_PKTHDR(n, m);
			if (len == M_COPYALL)
				n->m_pkthdr.len -= off0;
			else
				n->m_pkthdr.len = len;
			copyhdr = 0;
		}
		n->m_len = min(len, m->m_len - off);
		if (m->m_flags & M_EXT) {
			n->m_data = m->m_data + off;
			mclrefcnt[mtocl(m->m_ext.ext_buf)]++;
			n->m_ext = m->m_ext;
			n->m_flags |= M_EXT;
		} else
			bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
			    (unsigned)n->m_len);
		if (len != M_COPYALL)
			len -= n->m_len;
		off = 0;
		m = m->m_next;
		np = &n->m_next;
	}
	if (top == 0)
		MCFail++;
	return (top);
nospace:
	m_freem(top);
	MCFail++;
	return (0);
}

/*
 * Copy data from an mbuf chain starting "off" bytes from the beginning,
 * continuing for "len" bytes, into the indicated buffer.
 */
m_copydata(m, off, len, cp)
	register struct mbuf *m;
	register int off;
	register int len;
	caddr_t cp;
{
	register unsigned count;

	if (off < 0 || len < 0)
		panic("m_copydata");
	while (off > 0) {
		if (m == 0)
			panic("m_copydata");
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	while (len > 0) {
		if (m == 0)
			panic("m_copydata");
		count = min(m->m_len - off, len);
		bcopy(mtod(m, caddr_t) + off, cp, count);
		len -= count;
		cp += count;
		off = 0;
		m = m->m_next;
	}
}

/*
 * Concatenate mbuf chain n to m.
 * Both chains must be of the same type (e.g. MT_DATA).
 * Any m_pkthdr is not updated.
 */
m_cat(m, n)
	register struct mbuf *m, *n;
{
	while (m->m_next)
		m = m->m_next;
	while (n) {
		if (m->m_flags & M_EXT ||
		    m->m_data + m->m_len + n->m_len >= &m->m_dat[MLEN]) {
			/* just join the two chains */
			m->m_next = n;
			return;
		}
		/* splat the data from one into the other */
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (u_int)n->m_len);
		m->m_len += n->m_len;
		n = m_free(n);
	}
}
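/*
 * Illustrative sketch, not part of the original file: a caller that
 * wants its own reference to a chain while also examining the leading
 * bytes might combine m_copym() and m_copydata() as below.  The 16-byte
 * peek buffer is arbitrary and the chain is assumed to hold at least
 * that much data (m_copydata panics otherwise); the function name is
 * hypothetical and the block is disabled.
 */
#if 0
static struct mbuf *
example_copy_and_peek(m)
	struct mbuf *m;
{
	struct mbuf *n;
	char peek[16];

	n = m_copym(m, 0, M_COPYALL, M_DONTWAIT);	/* clusters are shared, not copied */
	if (n == 0)
		return (0);
	m_copydata(m, 0, sizeof(peek), peek);		/* flat copy of the first bytes */
	return (n);
}
#endif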
void
m_adj(mp, req_len)
	struct mbuf *mp;
	int req_len;
{
	register int len = req_len;
	register struct mbuf *m;
	register int count;

	if ((m = mp) == NULL)
		return;
	if (len >= 0) {
		/*
		 * Trim from head.
		 */
		while (m != NULL && len > 0) {
			if (m->m_len <= len) {
				len -= m->m_len;
				m->m_len = 0;
				m = m->m_next;
			} else {
				m->m_len -= len;
				m->m_data += len;
				len = 0;
			}
		}
		m = mp;
		if (mp->m_flags & M_PKTHDR)
			m->m_pkthdr.len -= (req_len - len);
	} else {
		/*
		 * Trim from tail.  Scan the mbuf chain,
		 * calculating its length and finding the last mbuf.
		 * If the adjustment only affects this mbuf, then just
		 * adjust and return.  Otherwise, rescan and truncate
		 * after the remaining size.
		 */
		len = -len;
		count = 0;
		for (;;) {
			count += m->m_len;
			if (m->m_next == (struct mbuf *)0)
				break;
			m = m->m_next;
		}
		if (m->m_len >= len) {
			m->m_len -= len;
			if (mp->m_flags & M_PKTHDR)
				mp->m_pkthdr.len -= len;
			return;
		}
		count -= len;
		if (count < 0)
			count = 0;
		/*
		 * Correct length for chain is "count".
		 * Find the mbuf with last data, adjust its length,
		 * and toss data from remaining mbufs on chain.
		 */
		m = mp;
		if (m->m_flags & M_PKTHDR)
			m->m_pkthdr.len = count;
		for (; m; m = m->m_next) {
			if (m->m_len >= count) {
				m->m_len = count;
				break;
			}
			count -= m->m_len;
		}
		while (m = m->m_next)
			m->m_len = 0;
	}
}

/*
 * Rearrange an mbuf chain so that len bytes are contiguous
 * and in the data area of an mbuf (so that mtod and dtom
 * will work for a structure of size len).  Returns the resulting
 * mbuf chain on success, frees it and returns null on failure.
 * If there is room, it will add up to max_protohdr-len extra bytes to the
 * contiguous region in an attempt to avoid being called next time.
 */
int MPFail;

struct mbuf *
m_pullup(n, len)
	register struct mbuf *n;
	int len;
{
	register struct mbuf *m;
	register int count;
	int space;

	/*
	 * If first mbuf has no cluster, and has room for len bytes
	 * without shifting current data, pullup into it,
	 * otherwise allocate a new mbuf to prepend to the chain.
	 */
	if ((n->m_flags & M_EXT) == 0 &&
	    n->m_data + len < &n->m_dat[MLEN] && n->m_next) {
		if (n->m_len >= len)
			return (n);
		m = n;
		n = n->m_next;
		len -= m->m_len;
	} else {
		if (len > MHLEN)
			goto bad;
		MGET(m, M_DONTWAIT, n->m_type);
		if (m == 0)
			goto bad;
		m->m_len = 0;
		if (n->m_flags & M_PKTHDR) {
			M_COPY_PKTHDR(m, n);
			n->m_flags &= ~M_PKTHDR;
		}
	}
	space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
	do {
		count = min(min(max(len, max_protohdr), space), n->m_len);
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (unsigned)count);
		len -= count;
		m->m_len += count;
		n->m_len -= count;
		space -= count;
		if (n->m_len)
			n->m_data += count;
		else
			n = m_free(n);
	} while (len > 0 && n);
	if (len > 0) {
		(void) m_free(m);
		goto bad;
	}
	m->m_next = n;
	return (m);
bad:
	m_freem(n);
	MPFail++;
	return (0);
}
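/*
 * Illustrative sketch, not part of the original file: protocol input
 * routines normally call m_pullup() so that a fixed-size header can be
 * addressed through mtod() as one structure.  "struct exhdr" and the
 * function name are hypothetical; the block is disabled so it does not
 * enter the build.  Note that on failure m_pullup() has already freed
 * the chain.
 */
#if 0
struct exhdr {
	u_int32_t eh_src;
	u_int32_t eh_dst;
};

static struct exhdr *
example_pullup_header(mp)
	struct mbuf **mp;
{
	register struct mbuf *m = *mp;

	if (m->m_len < sizeof(struct exhdr) &&
	    (m = m_pullup(m, sizeof(struct exhdr))) == 0) {
		*mp = 0;			/* chain was freed by m_pullup */
		return (0);
	}
	*mp = m;
	return (mtod(m, struct exhdr *));	/* header is now contiguous */
}
#endif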
/*
 * Partition an mbuf chain in two pieces, returning the tail --
 * all but the first len0 bytes.  In case of failure, it returns NULL and
 * attempts to restore the chain to its original state.
 */
struct mbuf *
m_split(m0, len0, wait)
	register struct mbuf *m0;
	int len0, wait;
{
	register struct mbuf *m, *n;
	unsigned len = len0, remain;

	for (m = m0; m && len > m->m_len; m = m->m_next)
		len -= m->m_len;
	if (m == 0)
		return (0);
	remain = m->m_len - len;
	if (m0->m_flags & M_PKTHDR) {
		MGETHDR(n, wait, m0->m_type);
		if (n == 0)
			return (0);
		n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
		n->m_pkthdr.len = m0->m_pkthdr.len - len0;
		m0->m_pkthdr.len = len0;
		if (m->m_flags & M_EXT)
			goto extpacket;
		if (remain > MHLEN) {
			/* m can't be the lead packet */
			MH_ALIGN(n, 0);
			n->m_next = m_split(m, len, wait);
			if (n->m_next == 0) {
				(void) m_free(n);
				return (0);
			} else
				return (n);
		} else
			MH_ALIGN(n, remain);
	} else if (remain == 0) {
		n = m->m_next;
		m->m_next = 0;
		return (n);
	} else {
		MGET(n, wait, m->m_type);
		if (n == 0)
			return (0);
		M_ALIGN(n, remain);
	}
extpacket:
	if (m->m_flags & M_EXT) {
		n->m_flags |= M_EXT;
		n->m_ext = m->m_ext;
		mclrefcnt[mtocl(m->m_ext.ext_buf)]++;
		m->m_ext.ext_size = 0;	/* For Accounting XXXXXX danger */
		n->m_data = m->m_data + len;
	} else {
		bcopy(mtod(m, caddr_t) + len, mtod(n, caddr_t), remain);
	}
	n->m_len = remain;
	m->m_len = len;
	n->m_next = m->m_next;
	m->m_next = 0;
	return (n);
}

/*
 * Routine to copy from device local memory into mbufs.
 */
struct mbuf *
m_devget(buf, totlen, off0, ifp, copy)
	char *buf;
	int totlen, off0;
	struct ifnet *ifp;
	void (*copy)();
{
	register struct mbuf *m;
	struct mbuf *top = 0, **mp = &top;
	register int off = off0, len;
	register char *cp;
	char *epkt;

	cp = buf;
	epkt = cp + totlen;
	if (off) {
		/*
		 * If 'off' is non-zero, packet is trailer-encapsulated,
		 * so we have to skip the type and length fields.
		 */
		cp += off + 2 * sizeof(u_int16_t);
		totlen -= 2 * sizeof(u_int16_t);
	}
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == 0)
		return (0);
	m->m_pkthdr.rcvif = ifp;
	m->m_pkthdr.len = totlen;
	m->m_len = MHLEN;

	while (totlen > 0) {
		if (top) {
			MGET(m, M_DONTWAIT, MT_DATA);
			if (m == 0) {
				m_freem(top);
				return (0);
			}
			m->m_len = MLEN;
		}
		len = min(totlen, epkt - cp);
		if (len >= MINCLSIZE) {
			MCLGET(m, M_DONTWAIT);
			if (m->m_flags & M_EXT)
				m->m_len = len = min(len, MCLBYTES);
			else
				len = m->m_len;
		} else {
			/*
			 * Place initial small packet/header at end of mbuf.
			 */
			if (len < m->m_len) {
				if (top == 0 && len + max_linkhdr <= m->m_len)
					m->m_data += max_linkhdr;
				m->m_len = len;
			} else
				len = m->m_len;
		}
		if (copy)
			copy(cp, mtod(m, caddr_t), (unsigned)len);
		else
			bcopy(cp, mtod(m, caddr_t), (unsigned)len);
		cp += len;
		*mp = m;
		mp = &m->m_next;
		totlen -= len;
		if (cp == epkt)
			cp = buf;
	}
	return (top);
}
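/*
 * Illustrative sketch, not part of the original file: a network driver
 * that has already copied a received frame of "totlen" bytes to a flat
 * buffer "buf" can turn it into a packet-header mbuf chain with
 * m_devget(); passing a null copy function selects bcopy.  The function
 * name is hypothetical and the block is disabled.
 */
#if 0
static struct mbuf *
example_devget_frame(buf, totlen, ifp)
	char *buf;
	int totlen;
	struct ifnet *ifp;
{
	/* off0 of 0 means the frame is not trailer-encapsulated */
	return (m_devget(buf, totlen, 0, ifp, (void (*)())0));
}
#endif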