/*
 * Copyright (c) 1982, 1986, 1988 Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms are permitted
 * provided that the above copyright notice and this paragraph are
 * duplicated in all such forms and that any documentation,
 * advertising materials, and other materials related to such
 * distribution and use acknowledge that the software was developed
 * by the University of California, Berkeley.  The name of the
 * University may not be used to endorse or promote products derived
 * from this software without specific prior written permission.
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 *
 *	@(#)uipc_mbuf.c	7.11 (Berkeley) 09/04/89
 */

#include "param.h"
#include "dir.h"
#include "user.h"
#include "proc.h"
#include "cmap.h"
#include "malloc.h"
#include "map.h"
#define MBTYPES
#include "mbuf.h"
#include "vm.h"
#include "kernel.h"
#include "syslog.h"
#include "domain.h"
#include "protosw.h"
#include "machine/pte.h"

mbinit()
{
	int s;

#if MCLBYTES < 4096
#define NCL_INIT	(4096/MCLBYTES)
#else
#define NCL_INIT	1
#endif
	s = splimp();
	if (m_clalloc(NCL_INIT, M_DONTWAIT) == 0)
		goto bad;
	splx(s);
	return;
bad:
	panic("mbinit");
}
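
/*
 * Illustrative note (not part of the original source): NCL_INIT above
 * sizes the boot-time cluster allocation so that at least 4096 bytes of
 * cluster space exist after mbinit().  For example, with MCLBYTES of 1024
 * this gives NCL_INIT = 4096/1024 = 4 clusters; with MCLBYTES of 4096 or
 * larger a single cluster suffices.
 */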

/*
 * Allocate some number of mbuf clusters
 * and place on cluster free list.
 * Must be called at splimp.
 */
/* ARGSUSED */
m_clalloc(ncl, canwait)
	register int ncl;
{
	int npg, mbx;
	register caddr_t p;
	register int i;
	static int logged;

	npg = ncl * CLSIZE;
	mbx = rmalloc(mbmap, (long)npg);
	if (mbx == 0) {
		if (logged == 0) {
			logged++;
			log(LOG_ERR, "mbuf map full\n");
		}
		return (0);
	}
	p = cltom(mbx * NBPG / MCLBYTES);
	if (memall(&Mbmap[mbx], npg, proc, CSYS) == 0) {
		rmfree(mbmap, (long)npg, (long)mbx);
		return (0);
	}
	vmaccess(&Mbmap[mbx], p, npg);
	ncl = ncl * CLBYTES / MCLBYTES;
	for (i = 0; i < ncl; i++) {
		((union mcluster *)p)->mcl_next = mclfree;
		mclfree = (union mcluster *)p;
		p += MCLBYTES;
		mbstat.m_clfree++;
	}
	mbstat.m_clusters += ncl;
	return (1);
}

/*
 * When MGET fails, ask protocols to free space when short of memory,
 * then re-attempt to allocate an mbuf.
 */
struct mbuf *
m_retry(i, t)
	int i, t;
{
	register struct mbuf *m;

	m_reclaim();
#define m_retry(i, t)	(struct mbuf *)0
	MGET(m, i, t);
#undef m_retry
	return (m);
}

/*
 * As above; retry an MGETHDR.
 */
struct mbuf *
m_retryhdr(i, t)
	int i, t;
{
	register struct mbuf *m;

	m_reclaim();
#define m_retryhdr(i, t) (struct mbuf *)0
	MGETHDR(m, i, t);
#undef m_retryhdr
	return (m);
}

m_reclaim()
{
	register struct domain *dp;
	register struct protosw *pr;
	int s = splimp();

	for (dp = domains; dp; dp = dp->dom_next)
		for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++)
			if (pr->pr_drain)
				(*pr->pr_drain)();
	splx(s);
	mbstat.m_drain++;
}
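
/*
 * Illustrative note (not part of the original source): MGET falls back to
 * calling m_retry() when no mbuf can be allocated, so m_retry() itself must
 * not recurse through that same path.  The temporary
 *
 *	#define m_retry(i, t)	(struct mbuf *)0
 *
 * makes the MGET expansion inside m_retry() (and likewise MGETHDR inside
 * m_retryhdr()) evaluate to a null pointer on a second failure instead of
 * calling m_retry() again; the #undef then restores the real function for
 * later callers.
 */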

/*
 * Space allocation routines.
 * These are also available as macros
 * for critical paths.
 */
struct mbuf *
m_get(canwait, type)
	int canwait, type;
{
	register struct mbuf *m;

	MGET(m, canwait, type);
	return (m);
}

struct mbuf *
m_gethdr(canwait, type)
	int canwait, type;
{
	register struct mbuf *m;

	MGETHDR(m, canwait, type);
	return (m);
}

struct mbuf *
m_getclr(canwait, type)
	int canwait, type;
{
	register struct mbuf *m;

	MGET(m, canwait, type);
	if (m == 0)
		return (0);
	bzero(mtod(m, caddr_t), MLEN);
	return (m);
}

struct mbuf *
m_free(m)
	struct mbuf *m;
{
	register struct mbuf *n;

	MFREE(m, n);
	return (n);
}

m_freem(m)
	register struct mbuf *m;
{
	register struct mbuf *n;

	if (m == NULL)
		return;
	do {
		MFREE(m, n);
	} while (m = n);
}

/*
 * Mbuffer utility routines.
 */

/*
 * Lesser-used path for M_PREPEND:
 * allocate new mbuf to prepend to chain,
 * copy junk along.
 */
struct mbuf *
m_prepend(m, len, how)
	register struct mbuf *m;
	int len, how;
{
	struct mbuf *mn;

	MGET(mn, how, m->m_type);
	if (mn == (struct mbuf *)NULL) {
		m_freem(m);
		return ((struct mbuf *)NULL);
	}
	if (m->m_flags & M_PKTHDR) {
		M_COPY_PKTHDR(mn, m);
		m->m_flags &= ~M_PKTHDR;
	}
	mn->m_next = m;
	m = mn;
	if (len < MHLEN)
		MH_ALIGN(m, len);
	m->m_len = len;
	return (m);
}
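
#ifdef notdef
/*
 * Usage sketch (hypothetical, not part of the original source): callers
 * normally use the M_PREPEND macro, which adjusts m_data in place when the
 * first mbuf has leading space and falls back to m_prepend() otherwise.
 * EX_PROTOHDRLEN is an invented placeholder for a protocol header size.
 */
#define	EX_PROTOHDRLEN	16

struct mbuf *
example_prepend(m)
	register struct mbuf *m;
{

	M_PREPEND(m, EX_PROTOHDRLEN, M_DONTWAIT);
	if (m == 0)
		return ((struct mbuf *)0);	/* chain was freed on failure */
	/* the new header may now be built at mtod(m, caddr_t) */
	return (m);
}
#endif /* notdef */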

/*
 * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
 * continuing for "len" bytes.  If len is M_COPYALL, copy to the end of
 * the mbuf chain.  The wait parameter is a choice of M_WAIT/M_DONTWAIT
 * from caller.
 */
struct mbuf *
m_copym(m, off0, len, wait)
	register struct mbuf *m;
	int off0, wait;
	register int len;
{
	register struct mbuf *n, **np;
	register int off = off0;
	struct mbuf *top;
	int copyhdr = 0;

	if (off < 0 || len < 0)
		panic("m_copym");
	if (off == 0 && m->m_flags & M_PKTHDR)
		copyhdr = 1;
	while (off > 0) {
		if (m == 0)
			panic("m_copym");
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	np = &top;
	top = 0;
	while (len > 0) {
		if (m == 0) {
			if (len != M_COPYALL)
				panic("m_copym");
			break;
		}
		MGET(n, wait, m->m_type);
		*np = n;
		if (n == 0)
			goto nospace;
		if (copyhdr) {
			M_COPY_PKTHDR(n, m);
			if (len == M_COPYALL)
				n->m_pkthdr.len -= off0;
			else
				n->m_pkthdr.len = len;
			copyhdr = 0;
		}
		n->m_len = MIN(len, m->m_len - off);
		if (m->m_flags & M_EXT) {
			n->m_data = m->m_data + off;
			mclrefcnt[mtocl(m->m_ext.ext_buf)]++;
			n->m_ext = m->m_ext;
			n->m_flags |= M_EXT;
		} else
			bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
			    (unsigned)n->m_len);
		if (len != M_COPYALL)
			len -= n->m_len;
		off = 0;
		m = m->m_next;
		np = &n->m_next;
	}
	return (top);
nospace:
	m_freem(top);
	return (0);
}

/*
 * Copy data from an mbuf chain starting "off" bytes from the beginning,
 * continuing for "len" bytes, into the indicated buffer.
 */
m_copydata(m, off, len, cp)
	register struct mbuf *m;
	register int off;
	register int len;
	caddr_t cp;
{
	register unsigned count;

	if (off < 0 || len < 0)
		panic("m_copydata");
	while (off > 0) {
		if (m == 0)
			panic("m_copydata");
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	while (len > 0) {
		if (m == 0)
			panic("m_copydata");
		count = MIN(m->m_len - off, len);
		bcopy(mtod(m, caddr_t) + off, cp, count);
		len -= count;
		cp += count;
		off = 0;
		m = m->m_next;
	}
}
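
#ifdef notdef
/*
 * Usage sketch (hypothetical, not part of the original source): m_copym()
 * produces a copy of part of a chain, sharing cluster data by reference,
 * while m_copydata() flattens bytes into caller storage.  The chain is
 * assumed to hold at least sizeof (buf) bytes, since m_copydata() panics
 * if it runs off the end.
 */
example_copy(m)
	register struct mbuf *m;
{
	struct mbuf *n;
	char buf[32];

	/* duplicate the whole chain, e.g. to keep data for retransmission */
	n = m_copym(m, 0, M_COPYALL, M_DONTWAIT);
	if (n == 0)
		return (0);			/* allocation failed */
	/* flatten the first 32 bytes into a local buffer */
	m_copydata(m, 0, sizeof (buf), (caddr_t)buf);
	m_freem(n);
	return (1);
}
#endif /* notdef */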

/*
 * Concatenate mbuf chain n to m.
 * Both chains must be of the same type (e.g. MT_DATA).
 * Any m_pkthdr is not updated.
 */
m_cat(m, n)
	register struct mbuf *m, *n;
{
	while (m->m_next)
		m = m->m_next;
	while (n) {
		if (m->m_flags & M_EXT ||
		    m->m_data + m->m_len + n->m_len >= &m->m_dat[MLEN]) {
			/* just join the two chains */
			m->m_next = n;
			return;
		}
		/* splat the data from one into the other */
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (u_int)n->m_len);
		m->m_len += n->m_len;
		n = m_free(n);
	}
}

m_adj(mp, req_len)
	struct mbuf *mp;
{
	register int len = req_len;
	register struct mbuf *m;
	register count;

	if ((m = mp) == NULL)
		return;
	if (len >= 0) {
		/*
		 * Trim from head.
		 */
		while (m != NULL && len > 0) {
			if (m->m_len <= len) {
				len -= m->m_len;
				m->m_len = 0;
				m = m->m_next;
			} else {
				m->m_len -= len;
				m->m_data += len;
				len = 0;
			}
		}
		m = mp;
		if (mp->m_flags & M_PKTHDR)
			m->m_pkthdr.len -= (req_len - len);
	} else {
		/*
		 * Trim from tail.  Scan the mbuf chain,
		 * calculating its length and finding the last mbuf.
		 * If the adjustment only affects this mbuf, then just
		 * adjust and return.  Otherwise, rescan and truncate
		 * after the remaining size.
		 */
		len = -len;
		count = 0;
		for (;;) {
			count += m->m_len;
			if (m->m_next == (struct mbuf *)0)
				break;
			m = m->m_next;
		}
		if (m->m_len >= len) {
			m->m_len -= len;
			return;
		}
		count -= len;
		if (count < 0)
			count = 0;
		/*
		 * Correct length for chain is "count".
		 * Find the mbuf with last data, adjust its length,
		 * and toss data from remaining mbufs on chain.
		 */
		m = mp;
		if (m->m_flags & M_PKTHDR)
			m->m_pkthdr.len = count;
		for (; m; m = m->m_next) {
			if (m->m_len >= count) {
				m->m_len = count;
				break;
			}
			count -= m->m_len;
		}
		while (m = m->m_next)
			m->m_len = 0;
	}
}
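
#ifdef notdef
/*
 * Usage sketch (hypothetical, not part of the original source): a positive
 * length trims bytes from the front of the chain, a negative length trims
 * from the back.  EX_LLHDRLEN and EX_CRCLEN are invented placeholders for
 * a link-level header and a trailing checksum size.
 */
#define	EX_LLHDRLEN	14
#define	EX_CRCLEN	4

example_trim(m)
	register struct mbuf *m;
{

	m_adj(m, EX_LLHDRLEN);		/* drop header bytes from the front */
	m_adj(m, -EX_CRCLEN);		/* drop checksum bytes from the back */
}
#endif /* notdef */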

/*
 * Rearrange an mbuf chain so that len bytes are contiguous
 * and in the data area of an mbuf (so that mtod and dtom
 * will work for a structure of size len).  Returns the resulting
 * mbuf chain on success, frees it and returns null on failure.
 * If there is room, it will add up to max_protohdr-len extra bytes to the
 * contiguous region in an attempt to avoid being called next time.
 */
struct mbuf *
m_pullup(n, len)
	register struct mbuf *n;
	int len;
{
	register struct mbuf *m;
	register int count;
	int space;

	/*
	 * If first mbuf has no cluster, and has room for len bytes
	 * without shifting current data, pullup into it,
	 * otherwise allocate a new mbuf to prepend to the chain.
	 */
	if ((n->m_flags & M_EXT) == 0 &&
	    n->m_data + len < &n->m_dat[MLEN] && n->m_next) {
		if (n->m_len >= len)
			return (n);
		m = n;
		n = n->m_next;
		len -= m->m_len;
	} else {
		if (len > MHLEN)
			goto bad;
		MGET(m, M_DONTWAIT, n->m_type);
		if (m == 0)
			goto bad;
		m->m_len = 0;
		if (n->m_flags & M_PKTHDR)
			M_COPY_PKTHDR(m, n);
	}
	space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
	do {
		count = min(min(max(len, max_protohdr), space), n->m_len);
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (unsigned)count);
		len -= count;
		m->m_len += count;
		n->m_len -= count;
		space -= count;
		if (n->m_len)
			n->m_data += count;
		else
			n = m_free(n);
	} while (len > 0 && n);
	if (len > 0) {
		(void) m_free(m);
		goto bad;
	}
	m->m_next = n;
	return (m);
bad:
	m_freem(n);
	return (0);
}
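
#ifdef notdef
/*
 * Usage sketch (hypothetical, not part of the original source): protocol
 * input routines call m_pullup() before casting mtod() to a header
 * structure, so that at least that many bytes are contiguous in the first
 * mbuf.  EX_HDRSIZE is an invented placeholder for the header size.
 */
#define	EX_HDRSIZE	20

struct mbuf *
example_pullup(m)
	register struct mbuf *m;
{

	if (m->m_len < EX_HDRSIZE &&
	    (m = m_pullup(m, EX_HDRSIZE)) == 0)
		return ((struct mbuf *)0);	/* m_pullup freed the chain */
	/* the header is now addressable via mtod(m, caddr_t) */
	return (m);
}
#endif /* notdef */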