xref: /csrg-svn/sys/kern/uipc_mbuf.c (revision 32659)
/*
 * Copyright (c) 1982, 1986 Regents of the University of California.
 * All rights reserved.  The Berkeley software License Agreement
 * specifies the terms and conditions for redistribution.
 *
 *	@(#)uipc_mbuf.c	7.4 (Berkeley) 11/23/87
 */

#include "../machine/pte.h"

#include "param.h"
#include "dir.h"
#include "user.h"
#include "proc.h"
#include "cmap.h"
#include "map.h"
#include "mbuf.h"
#include "vm.h"
#include "kernel.h"
#include "syslog.h"
#include "domain.h"
#include "protosw.h"

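/*
 * Allocate the initial pool of mbufs and mbuf clusters.
 * Called once at boot; failure to get the initial pages is fatal.
 */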
mbinit()
{
	int s;

#if CLBYTES < 4096
#define NCL_INIT	(4096/CLBYTES)
#else
#define NCL_INIT	1
#endif
	s = splimp();
	if (m_clalloc(NCL_INIT, MPG_MBUFS, M_DONTWAIT) == 0)
		goto bad;
	if (m_clalloc(NCL_INIT, MPG_CLUSTERS, M_DONTWAIT) == 0)
		goto bad;
	splx(s);
	return;
bad:
	panic("mbinit");
}

/*
 * Allocate pages from the mbuf map for ncl clusters' worth of memory
 * and add them to the appropriate free list, as mbufs or as clusters
 * according to "how".  Must be called at splimp.
 */
/* ARGSUSED */
caddr_t
m_clalloc(ncl, how, canwait)
	register int ncl;
	int how;
	int canwait;
{
	int npg, mbx;
	register struct mbuf *m;
	register int i;
	static int logged;

	npg = ncl * CLSIZE;
	mbx = rmalloc(mbmap, (long)npg);
	if (mbx == 0) {
		/* mbuf map is exhausted; log the condition only once */
		if (logged == 0) {
			logged++;
			log(LOG_ERR, "mbuf map full\n");
		}
		return (0);
	}
	m = cltom(mbx * NBPG / MCLBYTES);
	/* get physical memory for the new pages and map them in */
	if (memall(&Mbmap[mbx], npg, proc, CSYS) == 0) {
		rmfree(mbmap, (long)npg, (long)mbx);
		return (0);
	}
	vmaccess(&Mbmap[mbx], (caddr_t)m, npg);
	switch (how) {

	case MPG_CLUSTERS:
		/* thread the new clusters onto the cluster free list */
		ncl = ncl * CLBYTES / MCLBYTES;
		for (i = 0; i < ncl; i++) {
			m->m_off = 0;
			m->m_next = mclfree;
			mclfree = m;
			m += MCLBYTES / sizeof (*m);
			mbstat.m_clfree++;
		}
		mbstat.m_clusters += ncl;
		break;

	case MPG_MBUFS:
		/* carve the pages into mbufs and put them on the free list */
		for (i = ncl * CLBYTES / sizeof (*m); i > 0; i--) {
			m->m_off = 0;
			m->m_type = MT_DATA;
			mbstat.m_mtypes[MT_DATA]++;
			mbstat.m_mbufs++;
			(void) m_free(m);
			m++;
		}
		break;
	}
	return ((caddr_t)m);
}

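/*
 * Release pages of mbuf memory back to the system.
 * Currently a no-op: once allocated, mbuf pages are never returned
 * (see the note following m_expand below).
 */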
m_pgfree(addr, n)
	caddr_t addr;
	int n;
{

#ifdef lint
	addr = addr; n = n;
#endif
}

/*
 * Grow the mbuf pool by a page of mbufs.  If the allocation fails
 * and the caller can wait, ask the protocols to drain cached data
 * and try once more.  Must be called at splimp.
 */
m_expand(canwait)
	int canwait;
{
	register struct domain *dp;
	register struct protosw *pr;
	int tries;

	for (tries = 0;; ) {
		if (m_clalloc(1, MPG_MBUFS, canwait))
			return (1);
		if (canwait == 0 || tries++)
			return (0);

		/* ask protocols to free space */
		for (dp = domains; dp; dp = dp->dom_next)
			for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW;
			    pr++)
				if (pr->pr_drain)
					(*pr->pr_drain)();
		mbstat.m_drain++;
	}
}

/* NEED SOME WAY TO RELEASE SPACE */

/*
 * Space allocation routines.
 * These are also available as macros
 * for critical paths.
 */
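/*
 * Illustrative use (the MGET/MFREE macros are preferred on critical
 * paths; these functions are the out-of-line equivalents):
 *
 *	struct mbuf *m;
 *
 *	m = m_get(M_DONTWAIT, MT_DATA);
 *	if (m == 0)
 *		return (ENOBUFS);
 *	...
 *	m_freem(m);
 */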
struct mbuf *
m_get(canwait, type)
	int canwait, type;
{
	register struct mbuf *m;

	MGET(m, canwait, type);
	return (m);
}

struct mbuf *
m_getclr(canwait, type)
	int canwait, type;
{
	register struct mbuf *m;

	MGET(m, canwait, type);
	if (m == 0)
		return (0);
	bzero(mtod(m, caddr_t), MLEN);
	return (m);
}

struct mbuf *
m_free(m)
	struct mbuf *m;
{
	register struct mbuf *n;

	MFREE(m, n);
	return (n);
}

/*
 * Get more mbufs; called from the MGET macro when the mfree list is
 * empty.  If canwait is M_WAIT, sleeps until mbuf space becomes
 * available; otherwise the allocation is dropped.  Must be called
 * at splimp.
 */
/*ARGSUSED*/
struct mbuf *
m_more(canwait, type)
	int canwait, type;
{
	register struct mbuf *m;

	while (m_expand(canwait) == 0) {
		if (canwait == M_WAIT) {
			mbstat.m_wait++;
			m_want++;
			sleep((caddr_t)&mfree, PZERO - 1);
		} else {
			mbstat.m_drops++;
			return (NULL);
		}
	}
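	/*
	 * MGET expands to a call of m_more() when the free list is
	 * still empty; redefine m_more here so that such a recursive
	 * call panics rather than looping forever.
	 */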
#define m_more(x,y) (panic("m_more"), (struct mbuf *)0)
	MGET(m, canwait, type);
#undef m_more
	return (m);
}

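/*
 * Free an entire mbuf chain.
 */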
m_freem(m)
	register struct mbuf *m;
{
	register struct mbuf *n;
	register int s;

	if (m == NULL)
		return;
	s = splimp();
	do {
		MFREE(m, n);
	} while (m = n);
	splx(s);
}

/*
 * Mbuf utility routines.
 */

/*
 * Make a copy of an mbuf chain starting "off" bytes from the beginning,
 * continuing for "len" bytes.  If len is M_COPYALL, copy to the end of
 * the mbuf chain.  The copy is made with M_DONTWAIT; it should really
 * take an M_WAIT/M_DONTWAIT argument from the caller.
 */
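/*
 * Illustrative use: duplicate a whole chain without consuming the
 * original, e.g. before handing a copy to another protocol:
 *
 *	n = m_copy(m, 0, (int)M_COPYALL);
 *
 * Data held in clusters is shared by reference (the cluster
 * reference count is bumped) rather than copied.
 */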
struct mbuf *
m_copy(m, off, len)
	register struct mbuf *m;
	int off;
	register int len;
{
	register struct mbuf *n, **np;
	struct mbuf *top, *p;

	if (len == 0)
		return (0);
	if (off < 0 || len < 0)
		panic("m_copy");
	/* skip over the first "off" bytes of the chain */
	while (off > 0) {
		if (m == 0)
			panic("m_copy");
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	np = &top;
	top = 0;
	while (len > 0) {
		if (m == 0) {
			if (len != M_COPYALL)
				panic("m_copy");
			break;
		}
		MGET(n, M_DONTWAIT, m->m_type);
		*np = n;
		if (n == 0)
			goto nospace;
		n->m_len = MIN(len, m->m_len - off);
		if (m->m_off > MMAXOFF) {
			/* data lives in a cluster; share it by reference */
			p = mtod(m, struct mbuf *);
			n->m_off = ((int)p - (int)n) + off;
			mclrefcnt[mtocl(p)]++;
		} else
			bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
			    (unsigned)n->m_len);
		if (len != M_COPYALL)
			len -= n->m_len;
		off = 0;
		m = m->m_next;
		np = &n->m_next;
	}
	return (top);
nospace:
	m_freem(top);
	return (0);
}

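/*
 * Concatenate mbuf chain n onto the tail of chain m.
 * Data is compacted into m's last mbuf where it fits;
 * otherwise the two chains are simply linked together.
 */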
m_cat(m, n)
	register struct mbuf *m, *n;
{
	while (m->m_next)
		m = m->m_next;
	while (n) {
		if (m->m_off >= MMAXOFF ||
		    m->m_off + m->m_len + n->m_len > MMAXOFF) {
			/* just join the two chains */
			m->m_next = n;
			return;
		}
		/* splat the data from one into the other */
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (u_int)n->m_len);
		m->m_len += n->m_len;
		n = m_free(n);
	}
}

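/*
 * Trim "len" bytes of data from the mbuf chain: from the head if
 * len is positive, from the tail if len is negative.
 */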
m_adj(mp, len)
	struct mbuf *mp;
	register int len;
{
	register struct mbuf *m;
	register count;

	if ((m = mp) == NULL)
		return;
	if (len >= 0) {
		while (m != NULL && len > 0) {
			if (m->m_len <= len) {
				len -= m->m_len;
				m->m_len = 0;
				m = m->m_next;
			} else {
				m->m_len -= len;
				m->m_off += len;
				break;
			}
		}
	} else {
		/*
		 * Trim from tail.  Scan the mbuf chain,
		 * calculating its length and finding the last mbuf.
		 * If the adjustment only affects this mbuf, then just
		 * adjust and return.  Otherwise, rescan and truncate
		 * after the remaining size.
		 */
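		/*
		 * Worked example: for a chain of lengths 10, 10 and 5
		 * with len == -8, count becomes 25; the last mbuf (5
		 * bytes) cannot absorb the trim, so the correct chain
		 * length is 25 - 8 = 17, and the rescan below leaves
		 * lengths 10, 7 and 0.
		 */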
		len = -len;
		count = 0;
		for (;;) {
			count += m->m_len;
			if (m->m_next == (struct mbuf *)0)
				break;
			m = m->m_next;
		}
		if (m->m_len >= len) {
			m->m_len -= len;
			return;
		}
		count -= len;
		/*
		 * Correct length for chain is "count".
		 * Find the mbuf with last data, adjust its length,
		 * and toss data from remaining mbufs on chain.
		 */
		for (m = mp; m; m = m->m_next) {
			if (m->m_len >= count) {
				m->m_len = count;
				break;
			}
			count -= m->m_len;
		}
		while (m = m->m_next)
			m->m_len = 0;
	}
}

/*
 * Rearrange an mbuf chain so that len bytes are contiguous
 * and in the data area of an mbuf (so that mtod and dtom
 * will work for a structure of size len).  Returns the resulting
 * mbuf chain on success, frees it and returns null on failure.
 * If there is room, it will add up to MPULL_EXTRA bytes to the
 * contiguous region in an attempt to avoid being called next time.
 */
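/*
 * Illustrative use, as in a protocol input routine that needs a
 * contiguous header of hlen bytes at the front of the chain:
 *
 *	if (m->m_len < hlen && (m = m_pullup(m, hlen)) == 0) {
 *		...count the drop and return...
 *	}
 *	hp = mtod(m, struct hdr *);
 *
 * ("hlen" and "struct hdr" are placeholders for the caller's
 * actual header size and type.)
 */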
struct mbuf *
m_pullup(n, len)
	register struct mbuf *n;
	int len;
{
	register struct mbuf *m;
	register int count;
	int space;

	if (n->m_off + len <= MMAXOFF && n->m_next) {
		m = n;
		n = n->m_next;
		len -= m->m_len;
	} else {
		if (len > MLEN)
			goto bad;
		MGET(m, M_DONTWAIT, n->m_type);
		if (m == 0)
			goto bad;
		m->m_len = 0;
	}
	space = MMAXOFF - m->m_off;
	do {
		count = MIN(MIN(space - m->m_len, len + MPULL_EXTRA), n->m_len);
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t)+m->m_len,
		    (unsigned)count);
		len -= count;
		m->m_len += count;
		n->m_len -= count;
		if (n->m_len)
			n->m_off += count;
		else
			n = m_free(n);
	} while (len > 0 && n);
	if (len > 0) {
		(void) m_free(m);
		goto bad;
	}
	m->m_next = n;
	return (m);
bad:
	m_freem(n);
	return (0);
}