/*
 * Copyright (c) 1982, 1986, 1988 Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms are permitted
 * provided that this notice is preserved and that due credit is given
 * to the University of California at Berkeley. The name of the University
 * may not be used to endorse or promote products derived from this
 * software without specific prior written permission. This software
 * is provided ``as is'' without express or implied warranty.
 *
 *	@(#)uipc_mbuf.c	7.9 (Berkeley) 05/26/88
 */

#include "../machine/pte.h"

#include "param.h"
#include "dir.h"
#include "user.h"
#include "proc.h"
#include "cmap.h"
#include "map.h"
#include "mbuf.h"
#include "vm.h"
#include "kernel.h"
#include "syslog.h"
#include "domain.h"
#include "protosw.h"

mbinit()
{
	int s;

#if CLBYTES < 4096
#define NCL_INIT	(4096/CLBYTES)
#else
#define NCL_INIT	1
#endif
	s = splimp();
	if (m_clalloc(NCL_INIT, MPG_MBUFS, M_DONTWAIT) == 0)
		goto bad;
	if (m_clalloc(NCL_INIT, MPG_CLUSTERS, M_DONTWAIT) == 0)
		goto bad;
	splx(s);
	return;
bad:
	panic("mbinit");
}

/*
 * Must be called at splimp.
 */
/* ARGSUSED */
caddr_t
m_clalloc(ncl, how, canwait)
	register int ncl;
	int how;
	int canwait;
{
	int npg, mbx;
	register struct mbuf *m;
	register int i;
	static int logged;

	npg = ncl * CLSIZE;
	mbx = rmalloc(mbmap, (long)npg);
	if (mbx == 0) {
		if (logged == 0) {
			logged++;
			log(LOG_ERR, "mbuf map full\n");
		}
		return (0);
	}
	m = cltom(mbx * NBPG / MCLBYTES);
	if (memall(&Mbmap[mbx], npg, proc, CSYS) == 0) {
		rmfree(mbmap, (long)npg, (long)mbx);
		return (0);
	}
	vmaccess(&Mbmap[mbx], (caddr_t)m, npg);
	switch (how) {

	case MPG_CLUSTERS:
		ncl = ncl * CLBYTES / MCLBYTES;
		for (i = 0; i < ncl; i++) {
			m->m_off = 0;
			m->m_next = mclfree;
			mclfree = m;
			m += MCLBYTES / sizeof (*m);
			mbstat.m_clfree++;
		}
		mbstat.m_clusters += ncl;
		break;

	case MPG_MBUFS:
		for (i = ncl * CLBYTES / sizeof (*m); i > 0; i--) {
			m->m_off = 0;
			m->m_type = MT_DATA;
			mbstat.m_mtypes[MT_DATA]++;
			mbstat.m_mbufs++;
			(void) m_free(m);
			m++;
		}
		break;
	}
	return ((caddr_t)m);
}

/*
 * Must be called at splimp.
 */
m_expand(canwait)
	int canwait;
{
	register struct domain *dp;
	register struct protosw *pr;
	int tries;

	for (tries = 0;; ) {
		if (m_clalloc(1, MPG_MBUFS, canwait))
			return (1);
		if (canwait == 0 || tries++)
			return (0);

		/* ask protocols to free space */
		for (dp = domains; dp; dp = dp->dom_next)
			for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW;
			    pr++)
				if (pr->pr_drain)
					(*pr->pr_drain)();
		mbstat.m_drain++;
	}
}
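
/*
 * Example sketch: a drain routine of the kind a protocol might hang off
 * its protosw pr_drain entry, so that the loop above can reclaim cached
 * mbufs when the pool runs dry.  The protocol name and its cache queue
 * are hypothetical.
 */
#ifdef notdef
struct mbuf *xp_cacheq;			/* hypothetical cached-packet queue */

xp_drain()
{
	register struct mbuf *m;

	while (m = xp_cacheq) {
		xp_cacheq = m->m_act;	/* m_act links queued packets */
		m_freem(m);
	}
}
#endif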

/* NEED SOME WAY TO RELEASE SPACE */

/*
 * Space allocation routines.
 * These are also available as macros
 * for critical paths.
 */
struct mbuf *
m_get(canwait, type)
	int canwait, type;
{
	register struct mbuf *m;

	MGET(m, canwait, type);
	return (m);
}

struct mbuf *
m_getclr(canwait, type)
	int canwait, type;
{
	register struct mbuf *m;

	MGET(m, canwait, type);
	if (m == 0)
		return (0);
	bzero(mtod(m, caddr_t), MLEN);
	return (m);
}

struct mbuf *
m_free(m)
	struct mbuf *m;
{
	register struct mbuf *n;

	MFREE(m, n);
	return (n);
}
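
/*
 * Example sketch: the function and macro forms of allocation are
 * interchangeable; the MGET macro simply avoids the call overhead on
 * critical paths.  Allocation must be done at splimp.
 */
#ifdef notdef
example_alloc()
{
	register struct mbuf *m;
	int s;

	s = splimp();
	m = m_get(M_DONTWAIT, MT_DATA);		/* function form */
	if (m)
		(void) m_free(m);
	MGET(m, M_DONTWAIT, MT_DATA);		/* macro form, same effect */
	if (m)
		(void) m_free(m);
	splx(s);
}
#endif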

/*
 * Get more mbufs; called from MGET macro if mfree list is empty.
 * Must be called at splimp.
 */
/*ARGSUSED*/
struct mbuf *
m_more(canwait, type)
	int canwait, type;
{
	register struct mbuf *m;

	while (m_expand(canwait) == 0) {
		if (canwait == M_WAIT) {
			mbstat.m_wait++;
			m_want++;
			sleep((caddr_t)&mfree, PZERO - 1);
			if (mfree)
				break;
		} else {
			mbstat.m_drops++;
			return (NULL);
		}
	}
	/*
	 * Redefine m_more locally so that the MGET expansion below
	 * panics rather than recursing if the free list is still empty.
	 */
#define m_more(x,y) (panic("m_more"), (struct mbuf *)0)
	MGET(m, canwait, type);
#undef m_more
	return (m);
}

/*
 * Free an entire mbuf chain.
 */
m_freem(m)
	register struct mbuf *m;
{
	register struct mbuf *n;
	register int s;

	if (m == NULL)
		return;
	s = splimp();
	do {
		MFREE(m, n);
	} while (m = n);
	splx(s);
}
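
/*
 * Example sketch: building a short chain by hand and releasing all of
 * it with m_freem().  The lengths are arbitrary.
 */
#ifdef notdef
example_chain()
{
	register struct mbuf *m, *m2;
	int s;

	s = splimp();
	MGET(m, M_DONTWAIT, MT_DATA);
	if (m == 0) {
		splx(s);
		return;
	}
	m->m_len = 10;				/* pretend 10 bytes of data */
	MGET(m2, M_DONTWAIT, MT_DATA);
	if (m2)
		m2->m_len = 20;			/* and 20 more in a second mbuf */
	m->m_next = m2;				/* link into one chain */
	m_freem(m);				/* frees both mbufs */
	splx(s);
}
#endif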

/*
 * Mbuffer utility routines.
 */

/*
 * Make a copy of an mbuf chain starting "off" bytes from the beginning,
 * continuing for "len" bytes.  If len is M_COPYALL, copy to end of mbuf.
 * Should get M_WAIT/M_DONTWAIT from caller.
 */
struct mbuf *
m_copy(m, off, len)
	register struct mbuf *m;
	int off;
	register int len;
{
	register struct mbuf *n, **np;
	struct mbuf *top, *p;

	if (len == 0)
		return (0);
	if (off < 0 || len < 0)
		panic("m_copy");
	while (off > 0) {
		if (m == 0)
			panic("m_copy");
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	np = &top;
	top = 0;
	while (len > 0) {
		if (m == 0) {
			if (len != M_COPYALL)
				panic("m_copy");
			break;
		}
		MGET(n, M_DONTWAIT, m->m_type);
		*np = n;
		if (n == 0)
			goto nospace;
		n->m_len = MIN(len, m->m_len - off);
		if (m->m_off > MMAXOFF) {
			/* cluster: share the data and bump its reference count */
			p = mtod(m, struct mbuf *);
			n->m_off = ((int)p - (int)n) + off;
			mclrefcnt[mtocl(p)]++;
		} else
			bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
			    (unsigned)n->m_len);
		if (len != M_COPYALL)
			len -= n->m_len;
		off = 0;
		m = m->m_next;
		np = &n->m_next;
	}
	return (top);
nospace:
	m_freem(top);
	return (0);
}
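
/*
 * Example sketch: taking a copy of an entire chain, as a transport
 * protocol might before handing data to a lower layer while keeping
 * the original queued for retransmission.  Cluster data is shared by
 * reference, so the copy should be treated as read-only.
 */
#ifdef notdef
struct mbuf *
example_copy(m)
	struct mbuf *m;
{
	register struct mbuf *n;
	int s;

	s = splimp();
	n = m_copy(m, 0, M_COPYALL);	/* returns 0 if mbufs are exhausted */
	splx(s);
	return (n);
}
#endif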

/*
 * Copy data from an mbuf chain starting "off" bytes from the beginning,
 * continuing for "len" bytes, into the indicated buffer.
 */
m_copydata(m, off, len, cp)
	register struct mbuf *m;
	register int off;
	register int len;
	caddr_t cp;
{
	register unsigned count;

	if (off < 0 || len < 0)
		panic("m_copydata");
	while (off > 0) {
		if (m == 0)
			panic("m_copydata");
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	while (len > 0) {
		if (m == 0)
			panic("m_copydata");
		count = MIN(m->m_len - off, len);
		bcopy(mtod(m, caddr_t) + off, cp, count);
		len -= count;
		cp += count;
		off = 0;
		m = m->m_next;
	}
}
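
/*
 * Example sketch: extracting a fixed-size header that may span mbuf
 * boundaries into local storage.  "struct example_hdr" is hypothetical.
 */
#ifdef notdef
struct example_hdr {
	u_short	eh_id;
	u_short	eh_len;
};

example_peek(m, hp)
	struct mbuf *m;
	struct example_hdr *hp;
{

	/* panics if the chain is shorter than the requested length */
	m_copydata(m, 0, sizeof (struct example_hdr), (caddr_t)hp);
}
#endif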

/*
 * Concatenate mbuf chain n onto the end of chain m,
 * coalescing data into m where there is room.
 */
m_cat(m, n)
	register struct mbuf *m, *n;
{
	while (m->m_next)
		m = m->m_next;
	while (n) {
		if (m->m_off >= MMAXOFF ||
		    m->m_off + m->m_len + n->m_len > MMAXOFF) {
			/* just join the two chains */
			m->m_next = n;
			return;
		}
		/* splat the data from one into the other */
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (u_int)n->m_len);
		m->m_len += n->m_len;
		n = m_free(n);
	}
}

/*
 * Trim "len" bytes of data from the head of an mbuf chain if len is
 * positive, or from the tail if len is negative.
 */
m_adj(mp, len)
	struct mbuf *mp;
	register int len;
{
	register struct mbuf *m;
	register count;

	if ((m = mp) == NULL)
		return;
	if (len >= 0) {
		while (m != NULL && len > 0) {
			if (m->m_len <= len) {
				len -= m->m_len;
				m->m_len = 0;
				m = m->m_next;
			} else {
				m->m_len -= len;
				m->m_off += len;
				break;
			}
		}
	} else {
		/*
		 * Trim from tail.  Scan the mbuf chain,
		 * calculating its length and finding the last mbuf.
		 * If the adjustment only affects this mbuf, then just
		 * adjust and return.  Otherwise, rescan and truncate
		 * after the remaining size.
		 */
		len = -len;
		count = 0;
		for (;;) {
			count += m->m_len;
			if (m->m_next == (struct mbuf *)0)
				break;
			m = m->m_next;
		}
		if (m->m_len >= len) {
			m->m_len -= len;
			return;
		}
		count -= len;
		/*
		 * Correct length for chain is "count".
		 * Find the mbuf with last data, adjust its length,
		 * and toss data from remaining mbufs on chain.
		 */
		for (m = mp; m; m = m->m_next) {
			if (m->m_len >= count) {
				m->m_len = count;
				break;
			}
			count -= m->m_len;
		}
		while (m = m->m_next)
			m->m_len = 0;
	}
}
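
/*
 * Example sketch: stripping a leading header and a trailing checksum
 * from a received chain.  The sizes are hypothetical.
 */
#ifdef notdef
example_trim(m)
	struct mbuf *m;
{

	m_adj(m, 16);		/* drop a 16-byte header from the front */
	m_adj(m, -4);		/* drop a 4-byte trailer from the tail */
}
#endif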

/*
 * Rearrange an mbuf chain so that len bytes are contiguous
 * and in the data area of an mbuf (so that mtod and dtom
 * will work for a structure of size len).  Returns the resulting
 * mbuf chain on success, frees it and returns null on failure.
 * If there is room, it will add up to MPULL_EXTRA bytes to the
 * contiguous region in an attempt to avoid being called next time.
 */
struct mbuf *
m_pullup(n, len)
	register struct mbuf *n;
	int len;
{
	register struct mbuf *m;
	register int count;
	int space;

	if (n->m_off + len <= MMAXOFF && n->m_next) {
		m = n;
		n = n->m_next;
		len -= m->m_len;
	} else {
		if (len > MLEN)
			goto bad;
		MGET(m, M_DONTWAIT, n->m_type);
		if (m == 0)
			goto bad;
		m->m_len = 0;
	}
	space = MMAXOFF - m->m_off;
	do {
		count = MIN(MIN(space - m->m_len, len + MPULL_EXTRA), n->m_len);
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t)+m->m_len,
		    (unsigned)count);
		len -= count;
		m->m_len += count;
		n->m_len -= count;
		if (n->m_len)
			n->m_off += count;
		else
			n = m_free(n);
	} while (len > 0 && n);
	if (len > 0) {
		(void) m_free(m);
		goto bad;
	}
	m->m_next = n;
	return (m);
bad:
	m_freem(n);
	return (0);
}
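
/*
 * Example sketch: the usual calling pattern, as a protocol input routine
 * might use it to make its header contiguous before using mtod().  The
 * header size of 16 bytes is hypothetical.
 */
#ifdef notdef
struct mbuf *
example_input(m)
	register struct mbuf *m;
{

	if (m->m_len < 16 && (m = m_pullup(m, 16)) == 0)
		return (0);		/* m_pullup freed the chain */
	/* the first 16 bytes are now contiguous at mtod(m, caddr_t) */
	return (m);
}
#endif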