/*
 * Copyright (c) 1982, 1986, 1988 Regents of the University of California.
 * All rights reserved.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)uipc_mbuf.c	7.18 (Berkeley) 12/05/90
 */

#include "param.h"
#include "user.h"
#include "proc.h"
#include "cmap.h"
#include "malloc.h"
#include "map.h"
#define MBTYPES
#include "mbuf.h"
#include "kernel.h"
#include "syslog.h"
#include "domain.h"
#include "protosw.h"
#include "../vm/vm_param.h"
#include "../vm/vm_map.h"

extern vm_map_t mb_map;
struct mbuf *mbutl;
char *mclrefcnt;

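/*
 * Initialize the mbuf allocator at boot time by priming the
 * cluster free list with NCL_INIT clusters.
 */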
mbinit()
{
	int s;

#if CLBYTES < 4096
#define NCL_INIT	(4096/CLBYTES)
#else
#define NCL_INIT	1
#endif
	s = splimp();
	if (m_clalloc(NCL_INIT, M_DONTWAIT) == 0)
		goto bad;
	splx(s);
	return;
bad:
	panic("mbinit");
}

/*
 * Allocate some number of mbuf clusters
 * and place on cluster free list.
 * Must be called at splimp.
 */
/* ARGSUSED */
m_clalloc(ncl, canwait)
	register int ncl;
{
	int npg;
	register caddr_t p;
	register int i;
	static int logged;

	npg = ncl * CLSIZE;
	p = (caddr_t)kmem_malloc(mb_map, ctob(npg), canwait);
	if (p == NULL) {
		if (logged == 0) {
			logged++;
			log(LOG_ERR, "mb_map full\n");
		}
		return (0);
	}
	ncl = ncl * CLBYTES / MCLBYTES;
	for (i = 0; i < ncl; i++) {
		((union mcluster *)p)->mcl_next = mclfree;
		mclfree = (union mcluster *)p;
		p += MCLBYTES;
		mbstat.m_clfree++;
	}
	mbstat.m_clusters += ncl;
	return (1);
}

/*
 * When MGET fails, ask protocols to free space when short of memory,
 * then re-attempt to allocate an mbuf.
 */
struct mbuf *
m_retry(i, t)
	int i, t;
{
	register struct mbuf *m;

	m_reclaim();
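	/*
	 * Redefine m_retry here so that the m_retry reference hidden
	 * inside the MGET macro expands to a null pointer instead of
	 * calling this routine recursively.
	 */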
#define m_retry(i, t)	(struct mbuf *)0
	MGET(m, i, t);
#undef m_retry
	return (m);
}

/*
 * As above; retry an MGETHDR.
 */
struct mbuf *
m_retryhdr(i, t)
	int i, t;
{
	register struct mbuf *m;

	m_reclaim();
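	/*
	 * Same trick as in m_retry above: keep MGETHDR from recursing.
	 */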
#define m_retryhdr(i, t) (struct mbuf *)0
	MGETHDR(m, i, t);
#undef m_retryhdr
	return (m);
}

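/*
 * Run each protocol's drain routine to reclaim whatever mbuf
 * space it can spare.
 */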
m_reclaim()
{
	register struct domain *dp;
	register struct protosw *pr;
	int s = splimp();

	for (dp = domains; dp; dp = dp->dom_next)
		for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++)
			if (pr->pr_drain)
				(*pr->pr_drain)();
	splx(s);
	mbstat.m_drain++;
}

/*
 * Space allocation routines.
 * These are also available as macros
 * for critical paths.
 */
struct mbuf *
m_get(canwait, type)
	int canwait, type;
{
	register struct mbuf *m;

	MGET(m, canwait, type);
	return (m);
}

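/*
 * As m_get, but the new mbuf is initialized as a packet header
 * (M_PKTHDR set) by MGETHDR.
 */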
struct mbuf *
m_gethdr(canwait, type)
	int canwait, type;
{
	register struct mbuf *m;

	MGETHDR(m, canwait, type);
	return (m);
}

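/*
 * As m_get, but the mbuf's data area is cleared to zero.
 */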
struct mbuf *
m_getclr(canwait, type)
	int canwait, type;
{
	register struct mbuf *m;

	MGET(m, canwait, type);
	if (m == 0)
		return (0);
	bzero(mtod(m, caddr_t), MLEN);
	return (m);
}

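/*
 * Free a single mbuf and return its successor in the chain.
 */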
struct mbuf *
m_free(m)
	struct mbuf *m;
{
	register struct mbuf *n;

	MFREE(m, n);
	return (n);
}

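/*
 * Free an entire mbuf chain.
 */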
m_freem(m)
	register struct mbuf *m;
{
	register struct mbuf *n;

	if (m == NULL)
		return;
	do {
		MFREE(m, n);
	} while (m = n);
}

/*
 * Mbuffer utility routines.
 */

/*
 * Lesser-used path for M_PREPEND:
 * allocate new mbuf to prepend to chain,
 * copy junk along.
 */
struct mbuf *
m_prepend(m, len, how)
	register struct mbuf *m;
	int len, how;
{
	struct mbuf *mn;

	MGET(mn, how, m->m_type);
	if (mn == (struct mbuf *)NULL) {
		m_freem(m);
		return ((struct mbuf *)NULL);
	}
	if (m->m_flags & M_PKTHDR) {
		M_COPY_PKTHDR(mn, m);
		m->m_flags &= ~M_PKTHDR;
	}
	mn->m_next = m;
	m = mn;
	if (len < MHLEN)
		MH_ALIGN(m, len);
	m->m_len = len;
	return (m);
}

/*
 * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
 * continuing for "len" bytes.  If len is M_COPYALL, copy to the end of the
 * mbuf chain.  The wait parameter is a choice of M_WAIT/M_DONTWAIT from caller.
 */
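/*
 * Illustrative use (sketch only): copy part of a chain without
 * consuming the original, e.g.
 *	n = m_copym(m, off, len, M_DONTWAIT);
 * Cluster (M_EXT) data is shared by reference count rather than copied.
 */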
int MCFail;

struct mbuf *
m_copym(m, off0, len, wait)
	register struct mbuf *m;
	int off0, wait;
	register int len;
{
	register struct mbuf *n, **np;
	register int off = off0;
	struct mbuf *top;
	int copyhdr = 0;

	if (off < 0 || len < 0)
		panic("m_copym");
	if (off == 0 && m->m_flags & M_PKTHDR)
		copyhdr = 1;
	while (off > 0) {
		if (m == 0)
			panic("m_copym");
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	np = &top;
	top = 0;
	while (len > 0) {
		if (m == 0) {
			if (len != M_COPYALL)
				panic("m_copym");
			break;
		}
		MGET(n, wait, m->m_type);
		*np = n;
		if (n == 0)
			goto nospace;
		if (copyhdr) {
			M_COPY_PKTHDR(n, m);
			if (len == M_COPYALL)
				n->m_pkthdr.len -= off0;
			else
				n->m_pkthdr.len = len;
			copyhdr = 0;
		}
		n->m_len = MIN(len, m->m_len - off);
		if (m->m_flags & M_EXT) {
			n->m_data = m->m_data + off;
			mclrefcnt[mtocl(m->m_ext.ext_buf)]++;
			n->m_ext = m->m_ext;
			n->m_flags |= M_EXT;
		} else
			bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
			    (unsigned)n->m_len);
		if (len != M_COPYALL)
			len -= n->m_len;
		off = 0;
		m = m->m_next;
		np = &n->m_next;
	}
	if (top == 0)
		MCFail++;
	return (top);
nospace:
	m_freem(top);
	MCFail++;
	return (0);
}

/*
 * Copy data from an mbuf chain starting "off" bytes from the beginning,
 * continuing for "len" bytes, into the indicated buffer.
 */
m_copydata(m, off, len, cp)
	register struct mbuf *m;
	register int off;
	register int len;
	caddr_t cp;
{
	register unsigned count;

	if (off < 0 || len < 0)
		panic("m_copydata");
	while (off > 0) {
		if (m == 0)
			panic("m_copydata");
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	while (len > 0) {
		if (m == 0)
			panic("m_copydata");
		count = MIN(m->m_len - off, len);
		bcopy(mtod(m, caddr_t) + off, cp, count);
		len -= count;
		cp += count;
		off = 0;
		m = m->m_next;
	}
}

/*
 * Concatenate mbuf chain n to m.
 * Both chains must be of the same type (e.g. MT_DATA).
 * Any m_pkthdr is not updated.
 */
m_cat(m, n)
	register struct mbuf *m, *n;
{
	while (m->m_next)
		m = m->m_next;
	while (n) {
		if (m->m_flags & M_EXT ||
		    m->m_data + m->m_len + n->m_len >= &m->m_dat[MLEN]) {
			/* just join the two chains */
			m->m_next = n;
			return;
		}
		/* splat the data from one into the other */
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (u_int)n->m_len);
		m->m_len += n->m_len;
		n = m_free(n);
	}
}

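/*
 * Trim req_len bytes of data from the chain: from the head if
 * req_len is positive, from the tail if it is negative.  The
 * packet header length, if present, is adjusted to match.
 */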
m_adj(mp, req_len)
	struct mbuf *mp;
	int req_len;
{
	register int len = req_len;
	register struct mbuf *m;
	register int count;

	if ((m = mp) == NULL)
		return;
	if (len >= 0) {
		/*
		 * Trim from head.
		 */
		while (m != NULL && len > 0) {
			if (m->m_len <= len) {
				len -= m->m_len;
				m->m_len = 0;
				m = m->m_next;
			} else {
				m->m_len -= len;
				m->m_data += len;
				len = 0;
			}
		}
		m = mp;
		if (mp->m_flags & M_PKTHDR)
			m->m_pkthdr.len -= (req_len - len);
	} else {
		/*
		 * Trim from tail.  Scan the mbuf chain,
		 * calculating its length and finding the last mbuf.
		 * If the adjustment only affects this mbuf, then just
		 * adjust and return.  Otherwise, rescan and truncate
		 * after the remaining size.
		 */
		len = -len;
		count = 0;
		for (;;) {
			count += m->m_len;
			if (m->m_next == (struct mbuf *)0)
				break;
			m = m->m_next;
		}
		if (m->m_len >= len) {
			m->m_len -= len;
			if (mp->m_flags & M_PKTHDR)
				mp->m_pkthdr.len -= len;
			return;
		}
		count -= len;
		if (count < 0)
			count = 0;
		/*
		 * Correct length for chain is "count".
		 * Find the mbuf with last data, adjust its length,
		 * and toss data from remaining mbufs on chain.
		 */
		m = mp;
		if (m->m_flags & M_PKTHDR)
			m->m_pkthdr.len = count;
		for (; m; m = m->m_next) {
			if (m->m_len >= count) {
				m->m_len = count;
				break;
			}
			count -= m->m_len;
		}
		while (m = m->m_next)
			m->m_len = 0;
	}
}

/*
 * Rearrange an mbuf chain so that len bytes are contiguous
 * and in the data area of an mbuf (so that mtod and dtom
 * will work for a structure of size len).  Returns the resulting
 * mbuf chain on success, frees it and returns null on failure.
 * If there is room, it will add up to max_protohdr-len extra bytes to the
 * contiguous region in an attempt to avoid being called next time.
 */
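/*
 * A typical caller (illustrative sketch only) makes a protocol header
 * contiguous before taking a pointer to it:
 *	if ((m = m_pullup(m, sizeof(struct ip))) == 0)
 *		return;
 *	ip = mtod(m, struct ip *);
 */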
int MPFail;

struct mbuf *
m_pullup(n, len)
	register struct mbuf *n;
	int len;
{
	register struct mbuf *m;
	register int count;
	int space;

	/*
	 * If first mbuf has no cluster, and has room for len bytes
	 * without shifting current data, pullup into it,
	 * otherwise allocate a new mbuf to prepend to the chain.
	 */
	if ((n->m_flags & M_EXT) == 0 &&
	    n->m_data + len < &n->m_dat[MLEN] && n->m_next) {
		if (n->m_len >= len)
			return (n);
		m = n;
		n = n->m_next;
		len -= m->m_len;
	} else {
		if (len > MHLEN)
			goto bad;
		MGET(m, M_DONTWAIT, n->m_type);
		if (m == 0)
			goto bad;
		m->m_len = 0;
		if (n->m_flags & M_PKTHDR) {
			M_COPY_PKTHDR(m, n);
			n->m_flags &= ~M_PKTHDR;
		}
	}
	space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
	do {
		count = min(min(max(len, max_protohdr), space), n->m_len);
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		  (unsigned)count);
		len -= count;
		m->m_len += count;
		n->m_len -= count;
		space -= count;
		if (n->m_len)
			n->m_data += count;
		else
			n = m_free(n);
	} while (len > 0 && n);
	if (len > 0) {
		(void) m_free(m);
		goto bad;
	}
	m->m_next = n;
	return (m);
bad:
	m_freem(n);
	MPFail++;
	return (0);
}
494