/*
 * Copyright (c) 1982, 1986, 1988 Regents of the University of California.
 * All rights reserved.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)uipc_mbuf.c	7.17 (Berkeley) 07/25/90
 */

#include "param.h"
#include "user.h"
#include "proc.h"
#include "cmap.h"
#include "malloc.h"
#include "map.h"
#define MBTYPES
#include "mbuf.h"
#include "vm.h"
#include "kernel.h"
#include "syslog.h"
#include "domain.h"
#include "protosw.h"
#include "machine/pte.h"

mbinit()
{
	int s;

#if MCLBYTES < 4096
#define NCL_INIT	(4096/CLBYTES)
#else
#define NCL_INIT	1
#endif
	s = splimp();
	if (m_clalloc(NCL_INIT, M_DONTWAIT) == 0)
		goto bad;
	splx(s);
	return;
bad:
	panic("mbinit");
}

/*
 * Allocate some number of mbuf clusters
 * and place on cluster free list.
 * Must be called at splimp.
 */
/* ARGSUSED */
m_clalloc(ncl, canwait)
	register int ncl;
{
	int npg, mbx;
	register caddr_t p;
	register int i;
	static int logged;

	npg = ncl * CLSIZE;
	mbx = rmalloc(mbmap, (long)npg);
	if (mbx == 0) {
		if (logged == 0) {
			logged++;
			log(LOG_ERR, "mbuf map full\n");
		}
		return (0);
	}
	p = cltom(mbx * NBPG / MCLBYTES);
	if (memall(&Mbmap[mbx], npg, proc, CSYS) == 0) {
		rmfree(mbmap, (long)npg, (long)mbx);
		return (0);
	}
	vmaccess(&Mbmap[mbx], p, npg);
	ncl = ncl * CLBYTES / MCLBYTES;
	for (i = 0; i < ncl; i++) {
		((union mcluster *)p)->mcl_next = mclfree;
		mclfree = (union mcluster *)p;
		p += MCLBYTES;
		mbstat.m_clfree++;
	}
	mbstat.m_clusters += ncl;
	return (1);
}

/*
 * When MGET fails, ask protocols to free space when short of memory,
 * then re-attempt to allocate an mbuf.
 */
struct mbuf *
m_retry(i, t)
	int i, t;
{
	register struct mbuf *m;

	m_reclaim();
#define m_retry(i, t)	(struct mbuf *)0
	MGET(m, i, t);
#undef m_retry
	return (m);
}
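/*
 * Example (illustrative sketch): a typical non-blocking allocation as
 * seen from a caller.  MGET first tries the mbuf free list; on failure
 * it falls back to m_retry() above, which drains the protocols via
 * m_reclaim() and tries once more.  The temporary "#define m_retry"
 * inside m_retry() turns that second fallback into a nil constant, so
 * the retry cannot recurse.  The name example_alloc is hypothetical.
 */
struct mbuf *
example_alloc(type)
	int type;
{
	register struct mbuf *m;

	MGET(m, M_DONTWAIT, type);	/* may drain and retry internally */
	if (m == 0)
		return ((struct mbuf *)0);	/* no mbufs even after m_reclaim() */
	m->m_len = 0;			/* caller fills in data and length */
	return (m);
}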
/*
 * As above; retry an MGETHDR.
 */
struct mbuf *
m_retryhdr(i, t)
	int i, t;
{
	register struct mbuf *m;

	m_reclaim();
#define m_retryhdr(i, t)	(struct mbuf *)0
	MGETHDR(m, i, t);
#undef m_retryhdr
	return (m);
}

m_reclaim()
{
	register struct domain *dp;
	register struct protosw *pr;
	int s = splimp();

	for (dp = domains; dp; dp = dp->dom_next)
		for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++)
			if (pr->pr_drain)
				(*pr->pr_drain)();
	splx(s);
	mbstat.m_drain++;
}

/*
 * Space allocation routines.
 * These are also available as macros
 * for critical paths.
 */
struct mbuf *
m_get(canwait, type)
	int canwait, type;
{
	register struct mbuf *m;

	MGET(m, canwait, type);
	return (m);
}

struct mbuf *
m_gethdr(canwait, type)
	int canwait, type;
{
	register struct mbuf *m;

	MGETHDR(m, canwait, type);
	return (m);
}

struct mbuf *
m_getclr(canwait, type)
	int canwait, type;
{
	register struct mbuf *m;

	MGET(m, canwait, type);
	if (m == 0)
		return (0);
	bzero(mtod(m, caddr_t), MLEN);
	return (m);
}

struct mbuf *
m_free(m)
	struct mbuf *m;
{
	register struct mbuf *n;

	MFREE(m, n);
	return (n);
}

m_freem(m)
	register struct mbuf *m;
{
	register struct mbuf *n;

	if (m == NULL)
		return;
	do {
		MFREE(m, n);
	} while (m = n);
}

/*
 * Mbuffer utility routines.
 */

/*
 * Lesser-used path for M_PREPEND:
 * allocate new mbuf to prepend to chain,
 * copy junk along.
 */
struct mbuf *
m_prepend(m, len, how)
	register struct mbuf *m;
	int len, how;
{
	struct mbuf *mn;

	MGET(mn, how, m->m_type);
	if (mn == (struct mbuf *)NULL) {
		m_freem(m);
		return ((struct mbuf *)NULL);
	}
	if (m->m_flags & M_PKTHDR) {
		M_COPY_PKTHDR(mn, m);
		m->m_flags &= ~M_PKTHDR;
	}
	mn->m_next = m;
	m = mn;
	if (len < MHLEN)
		MH_ALIGN(m, len);
	m->m_len = len;
	return (m);
}
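/*
 * Example (illustrative sketch): prepend room for a protocol header to
 * an existing packet.  Protocols normally use the M_PREPEND macro, which
 * falls back to m_prepend() above only when the first mbuf has no
 * leading space.  Note that m_prepend() carries the packet header to the
 * new first mbuf but does not adjust m_pkthdr.len; the caller does.  The
 * name example_prepend_hdr and the hdrlen parameter are hypothetical.
 */
struct mbuf *
example_prepend_hdr(m0, hdrlen)
	register struct mbuf *m0;
	int hdrlen;
{
	m0 = m_prepend(m0, hdrlen, M_DONTWAIT);
	if (m0 == 0)
		return ((struct mbuf *)0);	/* m_prepend() freed the chain */
	if (m0->m_flags & M_PKTHDR)
		m0->m_pkthdr.len += hdrlen;	/* count the new header bytes */
	return (m0);
}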
/*
 * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
 * continuing for "len" bytes.  If len is M_COPYALL, copy to end of mbuf.
 * The wait parameter is a choice of M_WAIT/M_DONTWAIT from caller.
 */
int MCFail;

struct mbuf *
m_copym(m, off0, len, wait)
	register struct mbuf *m;
	int off0, wait;
	register int len;
{
	register struct mbuf *n, **np;
	register int off = off0;
	struct mbuf *top;
	int copyhdr = 0;

	if (off < 0 || len < 0)
		panic("m_copym");
	if (off == 0 && m->m_flags & M_PKTHDR)
		copyhdr = 1;
	while (off > 0) {
		if (m == 0)
			panic("m_copym");
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	np = &top;
	top = 0;
	while (len > 0) {
		if (m == 0) {
			if (len != M_COPYALL)
				panic("m_copym");
			break;
		}
		MGET(n, wait, m->m_type);
		*np = n;
		if (n == 0)
			goto nospace;
		if (copyhdr) {
			M_COPY_PKTHDR(n, m);
			if (len == M_COPYALL)
				n->m_pkthdr.len -= off0;
			else
				n->m_pkthdr.len = len;
			copyhdr = 0;
		}
		n->m_len = MIN(len, m->m_len - off);
		if (m->m_flags & M_EXT) {
			n->m_data = m->m_data + off;
			mclrefcnt[mtocl(m->m_ext.ext_buf)]++;
			n->m_ext = m->m_ext;
			n->m_flags |= M_EXT;
		} else
			bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
			    (unsigned)n->m_len);
		if (len != M_COPYALL)
			len -= n->m_len;
		off = 0;
		m = m->m_next;
		np = &n->m_next;
	}
	if (top == 0)
		MCFail++;
	return (top);
nospace:
	m_freem(top);
	MCFail++;
	return (0);
}

/*
 * Copy data from an mbuf chain starting "off" bytes from the beginning,
 * continuing for "len" bytes, into the indicated buffer.
 */
m_copydata(m, off, len, cp)
	register struct mbuf *m;
	register int off;
	register int len;
	caddr_t cp;
{
	register unsigned count;

	if (off < 0 || len < 0)
		panic("m_copydata");
	while (off > 0) {
		if (m == 0)
			panic("m_copydata");
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	while (len > 0) {
		if (m == 0)
			panic("m_copydata");
		count = MIN(m->m_len - off, len);
		bcopy(mtod(m, caddr_t) + off, cp, count);
		len -= count;
		cp += count;
		off = 0;
		m = m->m_next;
	}
}
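/*
 * Example (illustrative sketch): take a copy of an entire packet, as a
 * retransmission path might, then extract its first hdrlen bytes into a
 * flat buffer.  m_copym() shares cluster (M_EXT) data by reference
 * rather than copying it.  hdrlen must not exceed the packet length,
 * since m_copydata() panics when it runs off the end of the chain.  The
 * names example_snapshot, hdrlen and buf are hypothetical.
 */
int
example_snapshot(m, hdrlen, buf)
	register struct mbuf *m;
	int hdrlen;
	caddr_t buf;
{
	register struct mbuf *copy;

	copy = m_copym(m, 0, M_COPYALL, M_DONTWAIT);
	if (copy == 0)
		return (0);		/* allocation failed; MCFail was bumped */
	m_copydata(copy, 0, hdrlen, buf);
	m_freem(copy);
	return (1);
}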
/*
 * Concatenate mbuf chain n to m.
 * Both chains must be of the same type (e.g. MT_DATA).
 * Any m_pkthdr is not updated.
 */
m_cat(m, n)
	register struct mbuf *m, *n;
{
	while (m->m_next)
		m = m->m_next;
	while (n) {
		if (m->m_flags & M_EXT ||
		    m->m_data + m->m_len + n->m_len >= &m->m_dat[MLEN]) {
			/* just join the two chains */
			m->m_next = n;
			return;
		}
		/* splat the data from one into the other */
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (u_int)n->m_len);
		m->m_len += n->m_len;
		n = m_free(n);
	}
}

m_adj(mp, req_len)
	struct mbuf *mp;
{
	register int len = req_len;
	register struct mbuf *m;
	register count;

	if ((m = mp) == NULL)
		return;
	if (len >= 0) {
		/*
		 * Trim from head.
		 */
		while (m != NULL && len > 0) {
			if (m->m_len <= len) {
				len -= m->m_len;
				m->m_len = 0;
				m = m->m_next;
			} else {
				m->m_len -= len;
				m->m_data += len;
				len = 0;
			}
		}
		m = mp;
		if (mp->m_flags & M_PKTHDR)
			m->m_pkthdr.len -= (req_len - len);
	} else {
		/*
		 * Trim from tail.  Scan the mbuf chain,
		 * calculating its length and finding the last mbuf.
		 * If the adjustment only affects this mbuf, then just
		 * adjust and return.  Otherwise, rescan and truncate
		 * after the remaining size.
		 */
		len = -len;
		count = 0;
		for (;;) {
			count += m->m_len;
			if (m->m_next == (struct mbuf *)0)
				break;
			m = m->m_next;
		}
		if (m->m_len >= len) {
			m->m_len -= len;
			if ((mp = m)->m_flags & M_PKTHDR)
				m->m_pkthdr.len -= len;
			return;
		}
		count -= len;
		if (count < 0)
			count = 0;
		/*
		 * Correct length for chain is "count".
		 * Find the mbuf with last data, adjust its length,
		 * and toss data from remaining mbufs on chain.
		 */
		m = mp;
		if (m->m_flags & M_PKTHDR)
			m->m_pkthdr.len = count;
		for (; m; m = m->m_next) {
			if (m->m_len >= count) {
				m->m_len = count;
				break;
			}
			count -= m->m_len;
		}
		while (m = m->m_next)
			m->m_len = 0;
	}
}
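/*
 * Example (illustrative sketch): strip a fixed-size header from the
 * front of a packet and a trailer from the end, as a protocol input
 * routine might after validating both.  A positive length to m_adj()
 * trims from the head, a negative length trims from the tail; mbufs
 * emptied at the tail are left on the chain with a zero length rather
 * than freed.  The names example_trim, hdrlen and trlrlen are
 * hypothetical.
 */
example_trim(m, hdrlen, trlrlen)
	register struct mbuf *m;
	int hdrlen, trlrlen;
{
	m_adj(m, hdrlen);	/* drop the header bytes */
	m_adj(m, -trlrlen);	/* drop the trailer bytes */
}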
/*
 * Rearrange an mbuf chain so that len bytes are contiguous
 * and in the data area of an mbuf (so that mtod and dtom
 * will work for a structure of size len).  Returns the resulting
 * mbuf chain on success, frees it and returns null on failure.
 * If there is room, it will add up to max_protohdr-len extra bytes to the
 * contiguous region in an attempt to avoid being called next time.
 */
int MPFail;

struct mbuf *
m_pullup(n, len)
	register struct mbuf *n;
	int len;
{
	register struct mbuf *m;
	register int count;
	int space;

	/*
	 * If first mbuf has no cluster, and has room for len bytes
	 * without shifting current data, pullup into it,
	 * otherwise allocate a new mbuf to prepend to the chain.
	 */
	if ((n->m_flags & M_EXT) == 0 &&
	    n->m_data + len < &n->m_dat[MLEN] && n->m_next) {
		if (n->m_len >= len)
			return (n);
		m = n;
		n = n->m_next;
		len -= m->m_len;
	} else {
		if (len > MHLEN)
			goto bad;
		MGET(m, M_DONTWAIT, n->m_type);
		if (m == 0)
			goto bad;
		m->m_len = 0;
		if (n->m_flags & M_PKTHDR) {
			M_COPY_PKTHDR(m, n);
			n->m_flags &= ~M_PKTHDR;
		}
	}
	space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
	do {
		count = min(min(max(len, max_protohdr), space), n->m_len);
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (unsigned)count);
		len -= count;
		m->m_len += count;
		n->m_len -= count;
		space -= count;
		if (n->m_len)
			n->m_data += count;
		else
			n = m_free(n);
	} while (len > 0 && n);
	if (len > 0) {
		(void) m_free(m);
		goto bad;
	}
	m->m_next = n;
	return (m);
bad:
	m_freem(n);
	MPFail++;
	return (0);
}
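/*
 * Example (illustrative sketch): make the first hdrlen bytes of a packet
 * contiguous so a header structure can be read through mtod(), the usual
 * pattern in protocol input routines.  hdrlen may be at most MHLEN, or
 * m_pullup() fails and frees the chain.  The name example_header and the
 * mp/hdrlen parameters are hypothetical.
 */
caddr_t
example_header(mp, hdrlen)
	struct mbuf **mp;
	int hdrlen;
{
	register struct mbuf *m = *mp;

	if (m->m_len < hdrlen) {
		m = m_pullup(m, hdrlen);
		if (m == 0) {
			*mp = (struct mbuf *)0;	/* chain was freed */
			return ((caddr_t)0);
		}
		*mp = m;
	}
	return (mtod(m, caddr_t));	/* hdrlen contiguous bytes start here */
}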