/*	uipc_mbuf.c	1.26	82/01/19	*/

#include "../h/param.h"
#include "../h/dir.h"
#include "../h/user.h"
#include "../h/proc.h"
#include "../h/pte.h"
#include "../h/cmap.h"
#include "../h/map.h"
#include "../h/mbuf.h"
#include "../net/in_systm.h"		/* XXX */
#include "../h/vm.h"

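/*
 * Bootstrap mbuf allocation: reserve space and preallocate
 * a few pages of small mbufs and clusters, panicking on failure.
 */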
mbinit()
{

COUNT(MBINIT);
	if (m_reserve(32) == 0)
		goto bad;
	if (m_clalloc(4, MPG_MBUFS) == 0)
		goto bad;
	if (m_clalloc(32, MPG_CLUSTERS) == 0)
		goto bad;
	return;
bad:
	panic("mbinit");
}

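/*
 * Allocate system memory for ncl clusters and map it into the
 * mbuf area.  Depending on `how', carve the new pages up either
 * as page-sized clusters or as small mbufs and add them to the
 * appropriate free list.  Returns 0 if no resources are available.
 */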
caddr_t
m_clalloc(ncl, how)
	register int ncl;
	int how;
{
	int npg, mbx;
	register struct mbuf *m;
	register int i;
	int s;

COUNT(M_CLALLOC);
	npg = ncl * CLSIZE;
	s = splimp();		/* careful: rmalloc isn't reentrant */
	mbx = rmalloc(mbmap, npg);
	splx(s);
	if (mbx == 0)
		return (0);
	m = cltom(mbx / CLSIZE);
	if (memall(&Mbmap[mbx], ncl * CLSIZE, proc, CSYS) == 0)
		return (0);
	vmaccess(&Mbmap[mbx], (caddr_t)m, npg);
	switch (how) {

	case MPG_CLUSTERS:
		s = splimp();
		for (i = 0; i < ncl; i++) {
			m->m_off = 0;
			m->m_next = mclfree;
			mclfree = m;
			m += CLBYTES / sizeof (*m);
			nmclfree++;
		}
		mbstat.m_clusters += ncl;
		splx(s);
		break;

	case MPG_MBUFS:
		for (i = ncl * CLBYTES / sizeof (*m); i > 0; i--) {
			m->m_off = 0;
			m->m_free = 0;
			(void) m_free(m);
			m++;
		}
		mbstat.m_clusters += ncl;
		break;
	}
	return ((caddr_t)m);
}

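/*
 * Free pages previously allocated by m_clalloc.  Not yet implemented.
 */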
m_pgfree(addr, n)
	caddr_t addr;
	int n;
{

COUNT(M_PGFREE);
}

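/*
 * Add a page of small mbufs to the pool, provided the high-water
 * mark has not been reached.  Returns 1 on success, 0 if no more
 * mbufs could be obtained.
 */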
m_expand()
{

COUNT(M_EXPAND);
	if (mbstat.m_bufs >= mbstat.m_hiwat)
		return (0);
	if (m_clalloc(1, MPG_MBUFS) == 0)
		goto steal;
	return (1);
steal:
	/* should ask protocols to free up space */
	return (0);
}

/* NEED SOME WAY TO RELEASE SPACE */

/*
 * Space reservation routines
 */
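/*
 * Reserve space for `mbufs' additional mbufs, raising the
 * high-water mark and setting the low-water mark to half of it.
 * Fails if the request would exceed the space available.
 */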
m_reserve(mbufs)
	int mbufs;
{

	if (mbstat.m_lowat + (mbufs>>1) > (NMBCLUSTERS-32) * CLBYTES)
		return (0);
	mbstat.m_hiwat += mbufs;
	mbstat.m_lowat = mbstat.m_hiwat >> 1;
	return (1);
}

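/*
 * Release a previous reservation, lowering the water marks.
 */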
m_release(mbufs)
	int mbufs;
{

	mbstat.m_hiwat -= mbufs;
	mbstat.m_lowat = mbstat.m_hiwat >> 1;
}

/*
 * Space allocation routines.
 * These are also available as macros
 * for critical paths.
 */
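/*
 * Allocate an mbuf (function form of the MGET macro).
 */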
struct mbuf *
m_get(canwait)
	int canwait;
{
	register struct mbuf *m;

COUNT(M_GET);
	MGET(m, canwait);
	return (m);
}

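/*
 * Allocate an mbuf and zero its data area.
 */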
struct mbuf *
m_getclr(canwait)
	int canwait;
{
	register struct mbuf *m;

COUNT(M_GETCLR);
	m = m_get(canwait);
	if (m == 0)
		return (0);
	m->m_off = MMINOFF;
	bzero(mtod(m, caddr_t), MLEN);
	return (m);
}

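/*
 * Free a single mbuf (function form of the MFREE macro)
 * and return its successor in the chain.
 */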
struct mbuf *
m_free(m)
	struct mbuf *m;
{
	register struct mbuf *n;

COUNT(M_FREE);
	MFREE(m, n);
	return (n);
}

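/*
 * Called when the mbuf free list is exhausted: try to expand
 * the pool and retry the allocation.  Returns NULL (and counts
 * a drop) if no more space can be obtained.
 */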
/*ARGSUSED*/
struct mbuf *
m_more(type)
	int type;
{
	register struct mbuf *m;

COUNT(M_MORE);
	if (!m_expand()) {
		mbstat.m_drops++;
		return (NULL);
	}
#define m_more(x) (panic("m_more"), (struct mbuf *)0)
	MGET(m, type);
#undef m_more
	return (m);
}

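/*
 * Free an entire mbuf chain.
 */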
m_freem(m)
	register struct mbuf *m;
{
	register struct mbuf *n;
	register int s;

COUNT(M_FREEM);
	if (m == NULL)
		return;
	s = splimp();
	do {
		MFREE(m, n);
	} while (m = n);
	splx(s);
}

/*
 * Mbuffer utility routines.
 */
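/*
 * Copy len bytes of an mbuf chain, starting off bytes from the
 * beginning.  Cluster data is shared by reference rather than
 * copied.  Returns the new chain, or 0 if mbufs run out.
 */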
struct mbuf *
m_copy(m, off, len)
	register struct mbuf *m;
	int off;
	register int len;
{
	register struct mbuf *n, **np;
	struct mbuf *top, *p;
COUNT(M_COPY);

	if (len == 0)
		return (0);
	if (off < 0 || len < 0)
		panic("m_copy");
	while (off > 0) {
		if (m == 0)
			panic("m_copy");
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	np = &top;
	top = 0;
	while (len > 0) {
		MGET(n, 1);
		*np = n;
		if (n == 0)
			goto nospace;
		if (m == 0)
			panic("m_copy");
		n->m_len = MIN(len, m->m_len - off);
		if (m->m_off > MMAXOFF) {
			p = mtod(m, struct mbuf *);
			n->m_off = ((int)p - (int)n) + off;
			mclrefcnt[mtocl(p)]++;
		} else {
			n->m_off = MMINOFF;
			bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
			    (unsigned)n->m_len);
		}
		len -= n->m_len;
		off = 0;
		m = m->m_next;
		np = &n->m_next;
	}
	return (top);
nospace:
	m_freem(top);
	return (0);
}

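/*
 * Concatenate mbuf chain n onto the end of chain m, copying data
 * into m's last mbuf where it fits and linking whole mbufs otherwise.
 */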
m_cat(m, n)
	register struct mbuf *m, *n;
{

	while (m->m_next)
		m = m->m_next;
	while (n)
		if (m->m_off + m->m_len + n->m_len <= MMAXOFF) {
			bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
			    (u_int)n->m_len);
			m->m_len += n->m_len;
			n = m_free(n);
		} else {
			m->m_next = n;
			m = n;
			n = m->m_next;
		}
}

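/*
 * Trim len bytes from the mbuf chain: from the head if len is
 * positive, from the tail if len is negative.
 */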
m_adj(mp, len)
	struct mbuf *mp;
	register int len;
{
	register struct mbuf *m, *n;

COUNT(M_ADJ);
	if ((m = mp) == NULL)
		return;
	if (len >= 0) {
		while (m != NULL && len > 0) {
			if (m->m_len <= len) {
				len -= m->m_len;
				m->m_len = 0;
				m = m->m_next;
			} else {
				m->m_len -= len;
				m->m_off += len;
				break;
			}
		}
	} else {
		/* a 2 pass algorithm might be better */
		len = -len;
		while (len > 0 && m->m_len != 0) {
			while (m != NULL && m->m_len != 0) {
				n = m;
				m = m->m_next;
			}
			if (n->m_len <= len) {
				len -= n->m_len;
				n->m_len = 0;
				m = mp;
			} else {
				n->m_len -= len;
				break;
			}
		}
	}
}

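/*
 * Rearrange the chain m0 so that its first len bytes are
 * contiguous in a single small mbuf.  On failure the chain
 * is freed and 0 is returned.
 */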
struct mbuf *
m_pullup(m0, len)
	struct mbuf *m0;
	int len;
{
	register struct mbuf *m, *n;
	int cnt;

	if (len > MLEN)
		goto bad;
	MGET(m, 0);
	if (m == 0)
		goto bad;
	m->m_off = MMINOFF;
	m->m_len = 0;
	n = m0;
	do {
		cnt = MIN(MLEN - m->m_len, len);
		if (cnt > n->m_len)
			cnt = n->m_len;
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t)+m->m_len, cnt);
		len -= cnt;
		m->m_len += cnt;
		n->m_off += cnt;
		n->m_len -= cnt;
		if (n->m_len)
			break;
		n = m_free(n);
	} while (n);
	if (len) {
		(void) m_free(m);
		goto bad;
	}
	m->m_next = n;
	return (m);
bad:
	m_freem(m0);
	return (0);
}