/*
 * Copyright (c) 1982, 1986, 1988 Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms are permitted
 * provided that the above copyright notice and this paragraph are
 * duplicated in all such forms and that any documentation,
 * advertising materials, and other materials related to such
 * distribution and use acknowledge that the software was developed
 * by the University of California, Berkeley.  The name of the
 * University may not be used to endorse or promote products derived
 * from this software without specific prior written permission.
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 *
 *	@(#)uipc_mbuf.c	7.12 (Berkeley) 09/26/89
 */

#include "param.h"
#include "dir.h"
#include "user.h"
#include "proc.h"
#include "cmap.h"
#include "malloc.h"
#include "map.h"
#define MBTYPES
#include "mbuf.h"
#include "vm.h"
#include "kernel.h"
#include "syslog.h"
#include "domain.h"
#include "protosw.h"
#include "machine/pte.h"

mbinit()
{
	int s;

#if MCLBYTES < 4096
#define NCL_INIT	(4096/MCLBYTES)
#else
#define NCL_INIT	1
#endif
	s = splimp();
	if (m_clalloc(NCL_INIT, M_DONTWAIT) == 0)
		goto bad;
	splx(s);
	return;
bad:
	panic("mbinit");
}

/*
 * Allocate some number of mbuf clusters
 * and place on cluster free list.
 * Must be called at splimp.
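 *
 * For illustration, a sketch of a later call to grow the cluster pool,
 * following the same pattern mbinit() uses above (the count of 8 is an
 * arbitrary example value):
 *
 *	int s = splimp();
 *	(void) m_clalloc(8, M_DONTWAIT);
 *	splx(s);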
 */
/* ARGSUSED */
m_clalloc(ncl, canwait)
	register int ncl;
{
	int npg, mbx;
	register caddr_t p;
	register int i;
	static int logged;

	npg = ncl * CLSIZE;
	mbx = rmalloc(mbmap, (long)npg);
	if (mbx == 0) {
		if (logged == 0) {
			logged++;
			log(LOG_ERR, "mbuf map full\n");
		}
		return (0);
	}
	p = cltom(mbx * NBPG / MCLBYTES);
	if (memall(&Mbmap[mbx], npg, proc, CSYS) == 0) {
		rmfree(mbmap, (long)npg, (long)mbx);
		return (0);
	}
	vmaccess(&Mbmap[mbx], p, npg);
	ncl = ncl * CLBYTES / MCLBYTES;
	for (i = 0; i < ncl; i++) {
		((union mcluster *)p)->mcl_next = mclfree;
		mclfree = (union mcluster *)p;
		p += MCLBYTES;
		mbstat.m_clfree++;
	}
	mbstat.m_clusters += ncl;
	return (1);
}

/*
 * When MGET fails, ask protocols to free space when short of memory,
 * then re-attempt to allocate an mbuf.
 */
struct mbuf *
m_retry(i, t)
	int i, t;
{
	register struct mbuf *m;

	m_reclaim();
	/*
	 * The temporary redefinition keeps the MGET below from calling
	 * back into m_retry() if the allocation fails a second time.
	 */
#define m_retry(i, t)	(struct mbuf *)0
	MGET(m, i, t);
#undef m_retry
	return (m);
}

/*
 * As above; retry an MGETHDR.
 */
struct mbuf *
m_retryhdr(i, t)
	int i, t;
{
	register struct mbuf *m;

	m_reclaim();
#define m_retryhdr(i, t) (struct mbuf *)0
	MGETHDR(m, i, t);
#undef m_retryhdr
	return (m);
}

m_reclaim()
{
	register struct domain *dp;
	register struct protosw *pr;
	int s = splimp();

	for (dp = domains; dp; dp = dp->dom_next)
		for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++)
			if (pr->pr_drain)
				(*pr->pr_drain)();
	splx(s);
	mbstat.m_drain++;
}

/*
 * Space allocation routines.
 * These are also available as macros
 * for critical paths.
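 *
 * For illustration, a sketch of a typical caller at interrupt level,
 * which must use M_DONTWAIT and check for failure (callers that may
 * sleep pass M_WAIT instead):
 *
 *	MGET(m, M_DONTWAIT, MT_DATA);
 *	if (m == 0)
 *		return (ENOBUFS);
 *	m->m_len = 0;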
 */
struct mbuf *
m_get(canwait, type)
	int canwait, type;
{
	register struct mbuf *m;

	MGET(m, canwait, type);
	return (m);
}

struct mbuf *
m_gethdr(canwait, type)
	int canwait, type;
{
	register struct mbuf *m;

	MGETHDR(m, canwait, type);
	return (m);
}

struct mbuf *
m_getclr(canwait, type)
	int canwait, type;
{
	register struct mbuf *m;

	MGET(m, canwait, type);
	if (m == 0)
		return (0);
	bzero(mtod(m, caddr_t), MLEN);
	return (m);
}

struct mbuf *
m_free(m)
	struct mbuf *m;
{
	register struct mbuf *n;

	MFREE(m, n);
	return (n);
}

m_freem(m)
	register struct mbuf *m;
{
	register struct mbuf *n;

	if (m == NULL)
		return;
	do {
		MFREE(m, n);
	} while (m = n);
}

/*
 * Mbuffer utility routines.
 */

/*
 * Lesser-used path for M_PREPEND:
 * allocate new mbuf to prepend to chain,
 * copy junk along.
 */
struct mbuf *
m_prepend(m, len, how)
	register struct mbuf *m;
	int len, how;
{
	struct mbuf *mn;

	MGET(mn, how, m->m_type);
	if (mn == (struct mbuf *)NULL) {
		m_freem(m);
		return ((struct mbuf *)NULL);
	}
	if (m->m_flags & M_PKTHDR) {
		M_COPY_PKTHDR(mn, m);
		m->m_flags &= ~M_PKTHDR;
	}
	mn->m_next = m;
	m = mn;
	if (len < MHLEN)
		MH_ALIGN(m, len);
	m->m_len = len;
	return (m);
}

/*
 * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
 * continuing for "len" bytes.  If len is M_COPYALL, copy to end of mbuf.
 * The wait parameter is a choice of M_WAIT/M_DONTWAIT from caller.
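 *
 * For illustration, a sketch of a transport protocol that keeps the
 * original chain for retransmission and sends a copy ("off" and "len"
 * stand for the caller's offset and length):
 *
 *	n = m_copym(m, off, (int)len, M_DONTWAIT);
 *	if (n == 0)
 *		return (ENOBUFS);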
 */
struct mbuf *
m_copym(m, off0, len, wait)
	register struct mbuf *m;
	int off0, wait;
	register int len;
{
	register struct mbuf *n, **np;
	register int off = off0;
	struct mbuf *top;
	int copyhdr = 0;

	if (off < 0 || len < 0)
		panic("m_copym");
	if (off == 0 && m->m_flags & M_PKTHDR)
		copyhdr = 1;
	while (off > 0) {
		if (m == 0)
			panic("m_copym");
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	np = &top;
	top = 0;
	while (len > 0) {
		if (m == 0) {
			if (len != M_COPYALL)
				panic("m_copym");
			break;
		}
		MGET(n, wait, m->m_type);
		*np = n;
		if (n == 0)
			goto nospace;
		if (copyhdr) {
			M_COPY_PKTHDR(n, m);
			if (len == M_COPYALL)
				n->m_pkthdr.len -= off0;
			else
				n->m_pkthdr.len = len;
			copyhdr = 0;
		}
		n->m_len = MIN(len, m->m_len - off);
		if (m->m_flags & M_EXT) {
			n->m_data = m->m_data + off;
			mclrefcnt[mtocl(m->m_ext.ext_buf)]++;
			n->m_ext = m->m_ext;
			n->m_flags |= M_EXT;
		} else
			bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
			    (unsigned)n->m_len);
		if (len != M_COPYALL)
			len -= n->m_len;
		off = 0;
		m = m->m_next;
		np = &n->m_next;
	}
	return (top);
nospace:
	m_freem(top);
	return (0);
}

/*
 * Copy data from an mbuf chain starting "off" bytes from the beginning,
 * continuing for "len" bytes, into the indicated buffer.
 */
m_copydata(m, off, len, cp)
	register struct mbuf *m;
	register int off;
	register int len;
	caddr_t cp;
{
	register unsigned count;

	if (off < 0 || len < 0)
		panic("m_copydata");
	while (off > 0) {
		if (m == 0)
			panic("m_copydata");
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	while (len > 0) {
		if (m == 0)
			panic("m_copydata");
		count = MIN(m->m_len - off, len);
		bcopy(mtod(m, caddr_t) + off, cp, count);
		len -= count;
		cp += count;
		off = 0;
		m = m->m_next;
	}
}

/*
 * Concatenate mbuf chain n to m.
 * Both chains must be of the same type (e.g. MT_DATA).
 * Any m_pkthdr is not updated.
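 *
 * For illustration, a reassembly routine holding two chains of the
 * same type could append the second to the first with
 *
 *	m_cat(m, n);
 *
 * after which n must not be used again by the caller, and any packet
 * header length in m must be fixed up by hand, per the note above.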
 */
m_cat(m, n)
	register struct mbuf *m, *n;
{
	while (m->m_next)
		m = m->m_next;
	while (n) {
		if (m->m_flags & M_EXT ||
		    m->m_data + m->m_len + n->m_len >= &m->m_dat[MLEN]) {
			/* just join the two chains */
			m->m_next = n;
			return;
		}
		/* splat the data from one into the other */
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (u_int)n->m_len);
		m->m_len += n->m_len;
		n = m_free(n);
	}
}

m_adj(mp, req_len)
	struct mbuf *mp;
{
	register int len = req_len;
	register struct mbuf *m;
	register count;

	if ((m = mp) == NULL)
		return;
	if (len >= 0) {
		/*
		 * Trim from head.
		 */
		while (m != NULL && len > 0) {
			if (m->m_len <= len) {
				len -= m->m_len;
				m->m_len = 0;
				m = m->m_next;
			} else {
				m->m_len -= len;
				m->m_data += len;
				len = 0;
			}
		}
		m = mp;
		if (mp->m_flags & M_PKTHDR)
			m->m_pkthdr.len -= (req_len - len);
	} else {
		/*
		 * Trim from tail.  Scan the mbuf chain,
		 * calculating its length and finding the last mbuf.
		 * If the adjustment only affects this mbuf, then just
		 * adjust and return.  Otherwise, rescan and truncate
		 * after the remaining size.
		 */
		len = -len;
		count = 0;
		for (;;) {
			count += m->m_len;
			if (m->m_next == (struct mbuf *)0)
				break;
			m = m->m_next;
		}
		if (m->m_len >= len) {
			m->m_len -= len;
			if (mp->m_flags & M_PKTHDR)
				mp->m_pkthdr.len -= len;
			return;
		}
		count -= len;
		if (count < 0)
			count = 0;
		/*
		 * Correct length for chain is "count".
		 * Find the mbuf with last data, adjust its length,
		 * and toss data from remaining mbufs on chain.
		 */
		m = mp;
		if (m->m_flags & M_PKTHDR)
			m->m_pkthdr.len = count;
		for (; m; m = m->m_next) {
			if (m->m_len >= count) {
				m->m_len = count;
				break;
			}
			count -= m->m_len;
		}
		while (m = m->m_next)
			m->m_len = 0;
	}
}

/*
 * Rearrange an mbuf chain so that len bytes are contiguous
 * and in the data area of an mbuf (so that mtod and dtom
 * will work for a structure of size len).  Returns the resulting
 * mbuf chain on success, frees it and returns null on failure.
 * If there is room, it will add up to max_protohdr-len extra bytes to the
 * contiguous region in an attempt to avoid being called next time.
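 *
 * For illustration, a sketch of a protocol input routine making its
 * header contiguous before using mtod ("struct xxhdr" is a stand-in
 * for the caller's header structure):
 *
 *	if (m->m_len < sizeof (struct xxhdr) &&
 *	    (m = m_pullup(m, sizeof (struct xxhdr))) == 0)
 *		goto drop;
 *	xx = mtod(m, struct xxhdr *);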
 */
struct mbuf *
m_pullup(n, len)
	register struct mbuf *n;
	int len;
{
	register struct mbuf *m;
	register int count;
	int space;

	/*
	 * If first mbuf has no cluster, and has room for len bytes
	 * without shifting current data, pullup into it,
	 * otherwise allocate a new mbuf to prepend to the chain.
	 */
	if ((n->m_flags & M_EXT) == 0 &&
	    n->m_data + len < &n->m_dat[MLEN] && n->m_next) {
		if (n->m_len >= len)
			return (n);
		m = n;
		n = n->m_next;
		len -= m->m_len;
	} else {
		if (len > MHLEN)
			goto bad;
		MGET(m, M_DONTWAIT, n->m_type);
		if (m == 0)
			goto bad;
		m->m_len = 0;
		if (n->m_flags & M_PKTHDR)
			M_COPY_PKTHDR(m, n);
	}
	space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
	do {
		count = min(min(max(len, max_protohdr), space), n->m_len);
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (unsigned)count);
		len -= count;
		m->m_len += count;
		n->m_len -= count;
		space -= count;
		if (n->m_len)
			n->m_data += count;
		else
			n = m_free(n);
	} while (len > 0 && n);
	if (len > 0) {
		(void) m_free(m);
		goto bad;
	}
	m->m_next = n;
	return (m);
bad:
	m_freem(n);
	return (0);
}
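
/*
 * For illustration, a sketch of the common fast path for prepending a
 * header: the M_PREPEND macro, which uses m_prepend() above only when
 * it cannot make room in the first mbuf ("struct xxhdr" is again a
 * stand-in for the caller's header structure):
 *
 *	M_PREPEND(m, sizeof (struct xxhdr), M_DONTWAIT);
 *	if (m == 0)
 *		return (ENOBUFS);
 *	xx = mtod(m, struct xxhdr *);
 */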