xref: /csrg-svn/sys/kern/uipc_mbuf.c (revision 50941)
123417Smckusick /*
248445Skarels  * Copyright (c) 1982, 1986, 1988, 1991 Regents of the University of California.
333185Sbostic  * All rights reserved.
423417Smckusick  *
544448Sbostic  * %sccs.include.redist.c%
633185Sbostic  *
7*50941Ssklower  *	@(#)uipc_mbuf.c	7.20 (Berkeley) 08/30/91
823417Smckusick  */
94585Swnj 
1017102Sbloom #include "param.h"
1117102Sbloom #include "proc.h"
1238962Skarels #include "malloc.h"
1317102Sbloom #include "map.h"
1438962Skarels #define MBTYPES
1517102Sbloom #include "mbuf.h"
1617102Sbloom #include "kernel.h"
1730442Skarels #include "syslog.h"
1830442Skarels #include "domain.h"
1930442Skarels #include "protosw.h"
2048445Skarels #include "vm/vm.h"
214585Swnj 
2248445Skarels extern	vm_map_t mb_map;
2348445Skarels struct	mbuf *mbutl;
2448445Skarels char	*mclrefcnt;
2545736Smckusick 
265228Swnj mbinit()
275228Swnj {
2821107Skarels 	int s;
295228Swnj 
3045736Smckusick #if CLBYTES < 4096
3141998Smckusick #define NCL_INIT	(4096/CLBYTES)
3232659Skarels #else
3332659Skarels #define NCL_INIT	1
3432659Skarels #endif
3521107Skarels 	s = splimp();
3638962Skarels 	if (m_clalloc(NCL_INIT, M_DONTWAIT) == 0)
375228Swnj 		goto bad;
3821107Skarels 	splx(s);
395228Swnj 	return;
405228Swnj bad:
415228Swnj 	panic("mbinit");
425228Swnj }
435228Swnj 
4421107Skarels /*
4538962Skarels  * Allocate some number of mbuf clusters
4638962Skarels  * and place on cluster free list.
4721107Skarels  * Must be called at splimp.
4821107Skarels  */
4930442Skarels /* ARGSUSED */
5038962Skarels m_clalloc(ncl, canwait)
515228Swnj 	register int ncl;
	int canwait;
525228Swnj {
535228Swnj 	int npg, mbx;
5438962Skarels 	register caddr_t p;
555228Swnj 	register int i;
5630442Skarels 	static int logged;
575228Swnj 
585228Swnj 	npg = ncl * CLSIZE;
5945736Smckusick 	p = (caddr_t)kmem_malloc(mb_map, ctob(npg), canwait);
6045736Smckusick 	if (p == NULL) {
6130442Skarels 		if (logged == 0) {
6230442Skarels 			logged++;
6345736Smckusick 			log(LOG_ERR, "mb_map full\n");
6430442Skarels 		}
655228Swnj 		return (0);
6621107Skarels 	}
6738962Skarels 	ncl = ncl * CLBYTES / MCLBYTES;
6838962Skarels 	for (i = 0; i < ncl; i++) {
6938962Skarels 		((union mcluster *)p)->mcl_next = mclfree;
7038962Skarels 		mclfree = (union mcluster *)p;
7138962Skarels 		p += MCLBYTES;
7238962Skarels 		mbstat.m_clfree++;
7338962Skarels 	}
7438962Skarels 	mbstat.m_clusters += ncl;
7538962Skarels 	return (1);
7638962Skarels }
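
#ifdef notdef
/*
 * Illustrative sketch (not part of this file): m_clalloc() must be
 * called at splimp, so a hypothetical caller growing the cluster pool
 * would bracket the call the way mbinit() does above.
 */
example_grow_clusters(ncl)
	int ncl;
{
	int s = splimp();

	(void) m_clalloc(ncl, M_DONTWAIT);
	splx(s);
}
#endif /* notdef */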
775228Swnj 
7838962Skarels /*
7938962Skarels  * When MGET fails, ask protocols to free space when short of memory,
8038962Skarels  * then re-attempt to allocate an mbuf.
8138962Skarels  */
8238962Skarels struct mbuf *
8338962Skarels m_retry(i, t)
8438962Skarels 	int i, t;
8538962Skarels {
8638962Skarels 	register struct mbuf *m;
875228Swnj 
8838962Skarels 	m_reclaim();
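	/*
	 * MGET normally calls m_retry itself when it runs out of mbufs;
	 * the temporary #define below turns that inner retry into a null
	 * pointer so the macro expansion cannot recurse back into here.
	 */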
8938962Skarels #define m_retry(i, t)	(struct mbuf *)0
9038962Skarels 	MGET(m, i, t);
9138962Skarels #undef m_retry
9238962Skarels 	return (m);
935228Swnj }
945228Swnj 
9521107Skarels /*
9638962Skarels  * As above; retry an MGETHDR.
9721107Skarels  */
9838962Skarels struct mbuf *
9938962Skarels m_retryhdr(i, t)
10038962Skarels 	int i, t;
1015228Swnj {
10238962Skarels 	register struct mbuf *m;
10338962Skarels 
10438962Skarels 	m_reclaim();
10538962Skarels #define m_retryhdr(i, t) (struct mbuf *)0
10638962Skarels 	MGETHDR(m, i, t);
10738962Skarels #undef m_retryhdr
10838962Skarels 	return (m);
10938962Skarels }
11038962Skarels 
11138962Skarels m_reclaim()
11238962Skarels {
11330442Skarels 	register struct domain *dp;
11430442Skarels 	register struct protosw *pr;
11538962Skarels 	int s = splimp();
1165228Swnj 
11738962Skarels 	for (dp = domains; dp; dp = dp->dom_next)
11838962Skarels 		for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++)
11938962Skarels 			if (pr->pr_drain)
12038962Skarels 				(*pr->pr_drain)();
12138962Skarels 	splx(s);
12238962Skarels 	mbstat.m_drain++;
1235228Swnj }
1245228Swnj 
1255228Swnj /*
1265228Swnj  * Space allocation routines.
1275228Swnj  * These are also available as macros
1285228Swnj  * for critical paths.
1295228Swnj  */
1304585Swnj struct mbuf *
1319634Ssam m_get(canwait, type)
1329634Ssam 	int canwait, type;
1334585Swnj {
1344585Swnj 	register struct mbuf *m;
1354585Swnj 
1369634Ssam 	MGET(m, canwait, type);
1374585Swnj 	return (m);
1384585Swnj }
1394585Swnj 
1404585Swnj struct mbuf *
14138962Skarels m_gethdr(canwait, type)
14238962Skarels 	int canwait, type;
14338962Skarels {
14438962Skarels 	register struct mbuf *m;
14538962Skarels 
14638962Skarels 	MGETHDR(m, canwait, type);
14738962Skarels 	return (m);
14838962Skarels }
14938962Skarels 
15038962Skarels struct mbuf *
1519634Ssam m_getclr(canwait, type)
1529634Ssam 	int canwait, type;
1534890Swnj {
1544890Swnj 	register struct mbuf *m;
1554890Swnj 
15621107Skarels 	MGET(m, canwait, type);
1574890Swnj 	if (m == 0)
1584890Swnj 		return (0);
1594890Swnj 	bzero(mtod(m, caddr_t), MLEN);
1604890Swnj 	return (m);
1614890Swnj }
1624890Swnj 
1634890Swnj struct mbuf *
1644585Swnj m_free(m)
1654585Swnj 	struct mbuf *m;
1664585Swnj {
1674585Swnj 	register struct mbuf *n;
1684585Swnj 
1694585Swnj 	MFREE(m, n);
1704585Swnj 	return (n);
1714585Swnj }
1724585Swnj 
1734669Swnj m_freem(m)
1744585Swnj 	register struct mbuf *m;
1754585Swnj {
1764585Swnj 	register struct mbuf *n;
1774585Swnj 
1784585Swnj 	if (m == NULL)
1794916Swnj 		return;
1804585Swnj 	do {
1814585Swnj 		MFREE(m, n);
1824585Swnj 	} while (m = n);
1834585Swnj }
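
#ifdef notdef
/*
 * Illustrative sketch (not part of this file): a typical caller builds
 * a small mbuf with the MGET macro (or m_get()), fills it, and later
 * releases the chain with m_freem().  "example_data" and "example_len"
 * are hypothetical.
 */
struct mbuf *
example_alloc(example_data, example_len)
	caddr_t example_data;
	int example_len;
{
	register struct mbuf *m;

	if (example_len > MLEN)
		return (0);
	MGET(m, M_DONTWAIT, MT_DATA);
	if (m == 0)
		return (0);
	m->m_len = example_len;
	bcopy(example_data, mtod(m, caddr_t), (unsigned)example_len);
	return (m);		/* caller eventually does m_freem(m) */
}
#endif /* notdef */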
1844585Swnj 
1855228Swnj /*
1865228Swnj  * Mbuffer utility routines.
1875228Swnj  */
18821107Skarels 
18921107Skarels /*
19038962Skarels  * Lesser-used path for M_PREPEND:
19138962Skarels  * allocate new mbuf to prepend to chain,
19238962Skarels  * copy junk along.
19338962Skarels  */
19438962Skarels struct mbuf *
19538962Skarels m_prepend(m, len, how)
19638962Skarels 	register struct mbuf *m;
19738962Skarels 	int len, how;
19838962Skarels {
19938962Skarels 	struct mbuf *mn;
20038962Skarels 
20138962Skarels 	MGET(mn, how, m->m_type);
20238962Skarels 	if (mn == (struct mbuf *)NULL) {
20338962Skarels 		m_freem(m);
20438962Skarels 		return ((struct mbuf *)NULL);
20538962Skarels 	}
20638962Skarels 	if (m->m_flags & M_PKTHDR) {
20738962Skarels 		M_COPY_PKTHDR(mn, m);
20838962Skarels 		m->m_flags &= ~M_PKTHDR;
20938962Skarels 	}
21038962Skarels 	mn->m_next = m;
21138962Skarels 	m = mn;
21238962Skarels 	if (len < MHLEN)
21338962Skarels 		MH_ALIGN(m, len);
21438962Skarels 	m->m_len = len;
21538962Skarels 	return (m);
21638962Skarels }
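
#ifdef notdef
/*
 * Illustrative sketch (not part of this file): protocol output code
 * normally uses the M_PREPEND macro, which falls back on m_prepend()
 * above only when the first mbuf has no leading space.
 * "example_hdrlen" is a hypothetical header size.
 */
struct mbuf *
example_add_header(m, example_hdrlen)
	register struct mbuf *m;
	int example_hdrlen;
{

	M_PREPEND(m, example_hdrlen, M_DONTWAIT);
	if (m == 0)
		return (0);	/* original chain was freed on failure */
	/* the header can now be built at mtod(m, caddr_t) */
	return (m);
}
#endif /* notdef */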
21738962Skarels 
21833606Skarels /*
21938962Skarels  * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
22021107Skarels  * continuing for "len" bytes.  If len is M_COPYALL, copy to end of mbuf.
22138962Skarels  * The wait parameter is a choice of M_WAIT/M_DONTWAIT from caller.
22221107Skarels  */
22341999Smckusick int MCFail;
22441999Smckusick 
2254927Swnj struct mbuf *
22638962Skarels m_copym(m, off0, len, wait)
2274927Swnj 	register struct mbuf *m;
22838962Skarels 	int off0, wait;
2294927Swnj 	register int len;
2304927Swnj {
2314927Swnj 	register struct mbuf *n, **np;
23238962Skarels 	register int off = off0;
23338962Skarels 	struct mbuf *top;
23438962Skarels 	int copyhdr = 0;
2354927Swnj 
2364927Swnj 	if (off < 0 || len < 0)
23738962Skarels 		panic("m_copym");
23838962Skarels 	if (off == 0 && m->m_flags & M_PKTHDR)
23938962Skarels 		copyhdr = 1;
2404927Swnj 	while (off > 0) {
2414927Swnj 		if (m == 0)
24238962Skarels 			panic("m_copym");
2434927Swnj 		if (off < m->m_len)
2444927Swnj 			break;
2454927Swnj 		off -= m->m_len;
2464927Swnj 		m = m->m_next;
2474927Swnj 	}
2484927Swnj 	np = &top;
2494927Swnj 	top = 0;
2504927Swnj 	while (len > 0) {
2515609Swnj 		if (m == 0) {
2525609Swnj 			if (len != M_COPYALL)
25338962Skarels 				panic("m_copym");
2545609Swnj 			break;
2555609Swnj 		}
25638962Skarels 		MGET(n, wait, m->m_type);
2574927Swnj 		*np = n;
2584927Swnj 		if (n == 0)
2594927Swnj 			goto nospace;
26038962Skarels 		if (copyhdr) {
26138962Skarels 			M_COPY_PKTHDR(n, m);
26238962Skarels 			if (len == M_COPYALL)
26338962Skarels 				n->m_pkthdr.len -= off0;
26438962Skarels 			else
26538962Skarels 				n->m_pkthdr.len = len;
26638962Skarels 			copyhdr = 0;
26738962Skarels 		}
2684927Swnj 		n->m_len = MIN(len, m->m_len - off);
26938962Skarels 		if (m->m_flags & M_EXT) {
27038962Skarels 			n->m_data = m->m_data + off;
27138962Skarels 			mclrefcnt[mtocl(m->m_ext.ext_buf)]++;
27238962Skarels 			n->m_ext = m->m_ext;
27338962Skarels 			n->m_flags |= M_EXT;
2748318Sroot 		} else
2754927Swnj 			bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
2764927Swnj 			    (unsigned)n->m_len);
2775609Swnj 		if (len != M_COPYALL)
2785609Swnj 			len -= n->m_len;
2794927Swnj 		off = 0;
2804927Swnj 		m = m->m_next;
2814927Swnj 		np = &n->m_next;
2824927Swnj 	}
28341999Smckusick 	if (top == 0)
28441999Smckusick 		MCFail++;
2854927Swnj 	return (top);
2864927Swnj nospace:
2874927Swnj 	m_freem(top);
28841999Smckusick 	MCFail++;
2894927Swnj 	return (0);
2904927Swnj }
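
#ifdef notdef
/*
 * Illustrative sketch (not part of this file): a protocol that must
 * retain the original chain (e.g. for retransmission) hands a copy to
 * the output routine instead.  Cluster data is shared by reference
 * (the mclrefcnt bump above), not duplicated.
 */
struct mbuf *
example_copy_for_output(m)
	register struct mbuf *m;
{

	return (m_copym(m, 0, M_COPYALL, M_DONTWAIT));
}
#endif /* notdef */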
2914927Swnj 
29233606Skarels /*
29333606Skarels  * Copy data from an mbuf chain starting "off" bytes from the beginning,
29433606Skarels  * continuing for "len" bytes, into the indicated buffer.
29533606Skarels  */
29633606Skarels m_copydata(m, off, len, cp)
29733606Skarels 	register struct mbuf *m;
29833988Skarels 	register int off;
29933606Skarels 	register int len;
30033988Skarels 	caddr_t cp;
30133606Skarels {
30233606Skarels 	register unsigned count;
30333606Skarels 
30433606Skarels 	if (off < 0 || len < 0)
30533606Skarels 		panic("m_copydata");
30633606Skarels 	while (off > 0) {
30733606Skarels 		if (m == 0)
30833606Skarels 			panic("m_copydata");
30933606Skarels 		if (off < m->m_len)
31033606Skarels 			break;
31133606Skarels 		off -= m->m_len;
31233606Skarels 		m = m->m_next;
31333606Skarels 	}
31433606Skarels 	while (len > 0) {
31533606Skarels 		if (m == 0)
31633606Skarels 			panic("m_copydata");
31733988Skarels 		count = MIN(m->m_len - off, len);
31833606Skarels 		bcopy(mtod(m, caddr_t) + off, cp, count);
31933606Skarels 		len -= count;
32033988Skarels 		cp += count;
32133606Skarels 		off = 0;
32233606Skarels 		m = m->m_next;
32333606Skarels 	}
32433606Skarels }
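
#ifdef notdef
/*
 * Illustrative sketch (not part of this file): copying a fixed-size
 * header out of a chain into a local buffer, regardless of how the
 * bytes are split across mbufs.  Note that m_copydata() panics if the
 * chain holds fewer than off + len bytes.
 */
example_peek_header(m, buf, len)
	struct mbuf *m;
	caddr_t buf;
	int len;
{

	m_copydata(m, 0, len, buf);
}
#endif /* notdef */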
32533606Skarels 
32638962Skarels /*
32738962Skarels  * Concatenate mbuf chain n to m.
32838962Skarels  * Both chains must be of the same type (e.g. MT_DATA).
32938962Skarels  * The m_pkthdr of the destination chain, if any, is not updated.
33038962Skarels  */
3314669Swnj m_cat(m, n)
3324669Swnj 	register struct mbuf *m, *n;
3334669Swnj {
3344669Swnj 	while (m->m_next)
3354669Swnj 		m = m->m_next;
3366091Sroot 	while (n) {
33738962Skarels 		if (m->m_flags & M_EXT ||
33838962Skarels 		    m->m_data + m->m_len + n->m_len >= &m->m_dat[MLEN]) {
3396091Sroot 			/* just join the two chains */
3404669Swnj 			m->m_next = n;
3416091Sroot 			return;
3424669Swnj 		}
3436091Sroot 		/* splat the data from one into the other */
3446091Sroot 		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
3456091Sroot 		    (u_int)n->m_len);
3466091Sroot 		m->m_len += n->m_len;
3476091Sroot 		n = m_free(n);
3486091Sroot 	}
3494669Swnj }
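
#ifdef notdef
/*
 * Illustrative sketch (not part of this file): appending a newly
 * arrived fragment "n" onto an accumulated chain "m", as a reassembly
 * routine might.  "n" is absorbed (linked in or freed piecemeal), and
 * the caller must fix up any packet header length itself.
 */
example_append_fragment(m, n, nlen)
	register struct mbuf *m, *n;
	int nlen;			/* total data bytes in chain n */
{

	m_cat(m, n);			/* do not reference n afterwards */
	if (m->m_flags & M_PKTHDR)
		m->m_pkthdr.len += nlen;	/* m_cat does not do this */
}
#endif /* notdef */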
3504669Swnj 
35138962Skarels m_adj(mp, req_len)
3524585Swnj 	struct mbuf *mp;
	int req_len;
3534585Swnj {
35438962Skarels 	register int len = req_len;
35524764Skarels 	register struct mbuf *m;
35624764Skarels 	register count;
3574585Swnj 
3584585Swnj 	if ((m = mp) == NULL)
3594585Swnj 		return;
3604822Swnj 	if (len >= 0) {
36138962Skarels 		/*
36238962Skarels 		 * Trim from head.
36338962Skarels 		 */
3644585Swnj 		while (m != NULL && len > 0) {
3654822Swnj 			if (m->m_len <= len) {
3664585Swnj 				len -= m->m_len;
3674585Swnj 				m->m_len = 0;
3684585Swnj 				m = m->m_next;
3694822Swnj 			} else {
3704585Swnj 				m->m_len -= len;
37138962Skarels 				m->m_data += len;
37238962Skarels 				len = 0;
3734585Swnj 			}
3744585Swnj 		}
37538962Skarels 		m = mp;
37638962Skarels 		if (mp->m_flags & M_PKTHDR)
37738962Skarels 			m->m_pkthdr.len -= (req_len - len);
3784822Swnj 	} else {
37924764Skarels 		/*
38024764Skarels 		 * Trim from tail.  Scan the mbuf chain,
38124764Skarels 		 * calculating its length and finding the last mbuf.
38224764Skarels 		 * If the adjustment only affects this mbuf, then just
38324764Skarels 		 * adjust and return.  Otherwise, rescan and truncate
38424764Skarels 		 * after the remaining size.
38524764Skarels 		 */
3864585Swnj 		len = -len;
38724764Skarels 		count = 0;
38824764Skarels 		for (;;) {
38924764Skarels 			count += m->m_len;
39024764Skarels 			if (m->m_next == (struct mbuf *)0)
3914585Swnj 				break;
39224764Skarels 			m = m->m_next;
39324764Skarels 		}
39424764Skarels 		if (m->m_len >= len) {
39524764Skarels 			m->m_len -= len;
39639227Ssklower 			if ((mp = m)->m_flags & M_PKTHDR)
39739227Ssklower 				m->m_pkthdr.len -= len;
39824764Skarels 			return;
39924764Skarels 		}
40024764Skarels 		count -= len;
40138962Skarels 		if (count < 0)
40238962Skarels 			count = 0;
40324764Skarels 		/*
40424764Skarels 		 * Correct length for chain is "count".
40524764Skarels 		 * Find the mbuf with last data, adjust its length,
40624764Skarels 		 * and toss data from remaining mbufs on chain.
40724764Skarels 		 */
40838962Skarels 		m = mp;
40938962Skarels 		if (m->m_flags & M_PKTHDR)
41038962Skarels 			m->m_pkthdr.len = count;
41138962Skarels 		for (; m; m = m->m_next) {
41224764Skarels 			if (m->m_len >= count) {
41324764Skarels 				m->m_len = count;
41424764Skarels 				break;
4154585Swnj 			}
41624764Skarels 			count -= m->m_len;
4174585Swnj 		}
41824764Skarels 		while (m = m->m_next)
41924764Skarels 			m->m_len = 0;
4204585Swnj 	}
4214585Swnj }
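
#ifdef notdef
/*
 * Illustrative sketch (not part of this file): stripping a link-level
 * header from the front of a received packet and a trailing checksum
 * from the end.  The sizes are hypothetical.
 */
#define	EXAMPLE_HDRLEN	14
#define	EXAMPLE_CRCLEN	4
example_strip(m)
	register struct mbuf *m;
{

	m_adj(m, EXAMPLE_HDRLEN);	/* positive count trims from head */
	m_adj(m, -EXAMPLE_CRCLEN);	/* negative count trims from tail */
}
#endif /* notdef */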
4225228Swnj 
42323816Skarels /*
42423816Skarels  * Rearrange an mbuf chain so that len bytes are contiguous
42523816Skarels  * and in the data area of an mbuf (so that mtod and dtom
42624764Skarels  * will work for a structure of size len).  Returns the resulting
42724764Skarels  * mbuf chain on success, frees it and returns null on failure.
42838962Skarels  * If there is room, it will add up to max_protohdr-len extra bytes to the
42924764Skarels  * contiguous region in an attempt to avoid being called next time.
43023816Skarels  */
43141999Smckusick int MPFail;
43241999Smckusick 
4335310Sroot struct mbuf *
43424764Skarels m_pullup(n, len)
43524764Skarels 	register struct mbuf *n;
4365228Swnj 	int len;
4375228Swnj {
43824764Skarels 	register struct mbuf *m;
43924764Skarels 	register int count;
44024764Skarels 	int space;
4415228Swnj 
44238962Skarels 	/*
44338962Skarels 	 * If first mbuf has no cluster, and has room for len bytes
44438962Skarels 	 * without shifting current data, pullup into it,
44538962Skarels 	 * otherwise allocate a new mbuf to prepend to the chain.
44638962Skarels 	 */
44738962Skarels 	if ((n->m_flags & M_EXT) == 0 &&
44838962Skarels 	    n->m_data + len < &n->m_dat[MLEN] && n->m_next) {
44938962Skarels 		if (n->m_len >= len)
45038962Skarels 			return (n);
45124764Skarels 		m = n;
45224764Skarels 		n = n->m_next;
45324764Skarels 		len -= m->m_len;
45424764Skarels 	} else {
45538962Skarels 		if (len > MHLEN)
45624764Skarels 			goto bad;
45724764Skarels 		MGET(m, M_DONTWAIT, n->m_type);
45824764Skarels 		if (m == 0)
45924764Skarels 			goto bad;
46024764Skarels 		m->m_len = 0;
46144969Skarels 		if (n->m_flags & M_PKTHDR) {
46238962Skarels 			M_COPY_PKTHDR(m, n);
46344969Skarels 			n->m_flags &= ~M_PKTHDR;
46444969Skarels 		}
46524764Skarels 	}
46638962Skarels 	space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
4675310Sroot 	do {
46838962Skarels 		count = min(min(max(len, max_protohdr), space), n->m_len);
46938962Skarels 		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
4706164Ssam 		  (unsigned)count);
4716164Ssam 		len -= count;
4726164Ssam 		m->m_len += count;
4736164Ssam 		n->m_len -= count;
47438962Skarels 		space -= count;
4755310Sroot 		if (n->m_len)
47638962Skarels 			n->m_data += count;
47723816Skarels 		else
47823816Skarels 			n = m_free(n);
47924764Skarels 	} while (len > 0 && n);
48024764Skarels 	if (len > 0) {
4815310Sroot 		(void) m_free(m);
4825310Sroot 		goto bad;
4835310Sroot 	}
4845310Sroot 	m->m_next = n;
4855310Sroot 	return (m);
4865310Sroot bad:
4875732Sroot 	m_freem(n);
48841999Smckusick 	MPFail++;
4895228Swnj 	return (0);
4905228Swnj }
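
#ifdef notdef
/*
 * Illustrative sketch (not part of this file): the usual protocol-input
 * idiom, making sure a fixed-size header is contiguous before mtod()
 * is cast to a structure pointer.  "example_hdrlen" is hypothetical.
 */
struct mbuf *
example_check_header(m, example_hdrlen)
	register struct mbuf *m;
	int example_hdrlen;
{

	if (m->m_len < example_hdrlen &&
	    (m = m_pullup(m, example_hdrlen)) == 0)
		return (0);		/* m_pullup freed the chain */
	/* mtod(m, caddr_t) now covers example_hdrlen contiguous bytes */
	return (m);
}
#endif /* notdef */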
491*50941Ssklower 
492*50941Ssklower /*
493*50941Ssklower  * Partition an mbuf chain in two pieces, returning the tail --
494*50941Ssklower  * all but the first len0 bytes.  In case of failure, it returns NULL and
495*50941Ssklower  * attempts to restore the chain to its original state.
496*50941Ssklower  */
497*50941Ssklower struct mbuf *
498*50941Ssklower m_split(m0, len0, wait)
499*50941Ssklower 	register struct mbuf *m0;
500*50941Ssklower 	int len0, wait;
501*50941Ssklower {
502*50941Ssklower 	register struct mbuf *m, *n;
503*50941Ssklower 	unsigned len = len0, remain;
504*50941Ssklower 
505*50941Ssklower 	for (m = m0; m && len > m->m_len; m = m->m_next)
506*50941Ssklower 		len -= m->m_len;
507*50941Ssklower 	if (m == 0)
508*50941Ssklower 		return (0);
509*50941Ssklower 	remain = m->m_len - len;
510*50941Ssklower 	if (m0->m_flags & M_PKTHDR) {
511*50941Ssklower 		MGETHDR(n, wait, m0->m_type);
512*50941Ssklower 		if (n == 0)
513*50941Ssklower 			return (0);
514*50941Ssklower 		n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
515*50941Ssklower 		n->m_pkthdr.len = m0->m_pkthdr.len - len0;
516*50941Ssklower 		m0->m_pkthdr.len = len0;
517*50941Ssklower 		if (m->m_flags & M_EXT)
518*50941Ssklower 			goto extpacket;
519*50941Ssklower 		if (remain > MHLEN) {
520*50941Ssklower 			/* m can't be the lead packet */
521*50941Ssklower 			MH_ALIGN(n, 0);
522*50941Ssklower 			n->m_next = m_split(m, len, wait);
523*50941Ssklower 			if (n->m_next == 0) {
524*50941Ssklower 				(void) m_free(n);
525*50941Ssklower 				return (0);
526*50941Ssklower 			} else
527*50941Ssklower 				return (n);
528*50941Ssklower 		} else
529*50941Ssklower 			MH_ALIGN(n, remain);
530*50941Ssklower 	} else if (remain == 0) {
531*50941Ssklower 		n = m->m_next;
532*50941Ssklower 		m->m_next = 0;
533*50941Ssklower 		return (n);
534*50941Ssklower 	} else {
535*50941Ssklower 		MGET(n, wait, m->m_type);
536*50941Ssklower 		if (n == 0)
537*50941Ssklower 			return (0);
538*50941Ssklower 		M_ALIGN(n, remain);
539*50941Ssklower 	}
540*50941Ssklower extpacket:
541*50941Ssklower 	if (m->m_flags & M_EXT) {
542*50941Ssklower 		n->m_flags |= M_EXT;
543*50941Ssklower 		n->m_ext = m->m_ext;
544*50941Ssklower 		mclrefcnt[mtocl(m->m_ext.ext_buf)]++;
545*50941Ssklower 		m->m_ext.ext_size = 0; /* For Accounting XXXXXX danger */
546*50941Ssklower 		n->m_data = m->m_data + len;
547*50941Ssklower 	} else {
548*50941Ssklower 		bcopy(mtod(m, caddr_t) + len, mtod(n, caddr_t), remain);
549*50941Ssklower 	}
550*50941Ssklower 	n->m_len = remain;
551*50941Ssklower 	m->m_len = len;
552*50941Ssklower 	n->m_next = m->m_next;
553*50941Ssklower 	m->m_next = 0;
554*50941Ssklower 	return (n);
555*50941Ssklower }
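
#ifdef notdef
/*
 * Illustrative sketch (not part of this file): breaking a chain into a
 * record of "example_reclen" bytes plus the remainder, as a
 * record-oriented protocol might.  The tail is returned; "m" itself is
 * trimmed to the first example_reclen bytes.  On failure the original
 * chain is left intact and a null pointer comes back.
 */
struct mbuf *
example_take_record(m, example_reclen)
	register struct mbuf *m;
	int example_reclen;
{

	return (m_split(m, example_reclen, M_DONTWAIT));
}
#endif /* notdef */
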
556*50941Ssklower /*
557*50941Ssklower  * Routine to copy from device local memory into mbufs.
558*50941Ssklower  */
559*50941Ssklower struct mbuf *
560*50941Ssklower m_devget(buf, totlen, off0, ifp, copy)
561*50941Ssklower 	char *buf;
562*50941Ssklower 	int totlen, off0;
563*50941Ssklower 	struct ifnet *ifp;
564*50941Ssklower 	void (*copy)();
565*50941Ssklower {
566*50941Ssklower 	register struct mbuf *m;
567*50941Ssklower 	struct mbuf *top = 0, **mp = &top;
568*50941Ssklower 	register int off = off0, len;
569*50941Ssklower 	register char *cp;
570*50941Ssklower 	char *epkt;
571*50941Ssklower 
572*50941Ssklower 	cp = buf;
573*50941Ssklower 	epkt = cp + totlen;
574*50941Ssklower 	if (off) {
575*50941Ssklower 		cp += off + 2 * sizeof(u_short);
576*50941Ssklower 		totlen -= 2 * sizeof(u_short);
577*50941Ssklower 	}
578*50941Ssklower 	MGETHDR(m, M_DONTWAIT, MT_DATA);
579*50941Ssklower 	if (m == 0)
580*50941Ssklower 		return (0);
581*50941Ssklower 	m->m_pkthdr.rcvif = ifp;
582*50941Ssklower 	m->m_pkthdr.len = totlen;
583*50941Ssklower 	m->m_len = MHLEN;
584*50941Ssklower 
585*50941Ssklower 	while (totlen > 0) {
586*50941Ssklower 		if (top) {
587*50941Ssklower 			MGET(m, M_DONTWAIT, MT_DATA);
588*50941Ssklower 			if (m == 0) {
589*50941Ssklower 				m_freem(top);
590*50941Ssklower 				return (0);
591*50941Ssklower 			}
592*50941Ssklower 			m->m_len = MLEN;
593*50941Ssklower 		}
594*50941Ssklower 		len = min(totlen, epkt - cp);
595*50941Ssklower 		if (len >= MINCLSIZE) {
596*50941Ssklower 			MCLGET(m, M_DONTWAIT);
597*50941Ssklower 			if (m->m_flags & M_EXT)
598*50941Ssklower 				m->m_len = len = min(len, MCLBYTES);
599*50941Ssklower 			else
600*50941Ssklower 				len = m->m_len;
601*50941Ssklower 		} else {
602*50941Ssklower 			/*
603*50941Ssklower 			 * Place initial small packet/header at end of mbuf.
604*50941Ssklower 			 */
605*50941Ssklower 			if (len < m->m_len) {
606*50941Ssklower 				if (top == 0 && len + max_linkhdr <= m->m_len)
607*50941Ssklower 					m->m_data += max_linkhdr;
608*50941Ssklower 				m->m_len = len;
609*50941Ssklower 			} else
610*50941Ssklower 				len = m->m_len;
611*50941Ssklower 		}
612*50941Ssklower 		if (copy)
613*50941Ssklower 			copy(cp, mtod(m, caddr_t), (unsigned)len);
614*50941Ssklower 		else
615*50941Ssklower 			bcopy(cp, mtod(m, caddr_t), (unsigned)len);
616*50941Ssklower 		cp += len;
617*50941Ssklower 		*mp = m;
618*50941Ssklower 		mp = &m->m_next;
619*50941Ssklower 		totlen -= len;
620*50941Ssklower 		if (cp == epkt)
621*50941Ssklower 			cp = buf;
622*50941Ssklower 	}
623*50941Ssklower 	return (top);
624*50941Ssklower }
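
#ifdef notdef
/*
 * Illustrative sketch (not part of this file): a network driver's
 * receive side copying a frame out of board memory into an mbuf chain.
 * Passing a null copy function makes m_devget() fall back on bcopy();
 * a driver with special board-memory access would supply its own.
 */
struct mbuf *
example_rx(buf, totlen, ifp)
	char *buf;
	int totlen;
	struct ifnet *ifp;
{

	return (m_devget(buf, totlen, 0, ifp, (void (*)())0));
}
#endif /* notdef */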