xref: /csrg-svn/sys/kern/uipc_mbuf.c (revision 5282)
1 /*	uipc_mbuf.c	1.24	81/12/21	*/
2 
3 #include "../h/param.h"
4 #include "../h/dir.h"
5 #include "../h/user.h"
6 #include "../h/proc.h"
7 #include "../h/pte.h"
8 #include "../h/cmap.h"
9 #include "../h/map.h"
10 #include "../h/mbuf.h"
11 #include "../net/in_systm.h"		/* XXX */
12 #include "../h/vm.h"
13 
14 mbinit()
15 {
16 
17 COUNT(MBINIT);
18 	if (m_reserve(32) == 0)
19 		goto bad;
20 	if (m_clalloc(4, MPG_MBUFS) == 0)
21 		goto bad;
22 	if (m_clalloc(32, MPG_CLUSTERS) == 0)
23 		goto bad;
24 	return;
25 bad:
26 	panic("mbinit");
27 }
28 
29 caddr_t
30 m_clalloc(ncl, how)
31 	register int ncl;
32 	int how;
33 {
34 	int npg, mbx;
35 	register struct mbuf *m;
36 	register int i;
37 	int s;
38 
39 COUNT(M_CLALLOC);
40 	npg = ncl * CLSIZE;
41 	mbx = rmalloc(mbmap, npg);
42 	if (mbx == 0)
43 		return (0);
44 	m = cltom(mbx / CLSIZE);
45 	if (memall(&Mbmap[mbx], ncl * CLSIZE, proc, CSYS) == 0)
46 		return (0);
47 	vmaccess(&Mbmap[mbx], (caddr_t)m, npg);
48 	switch (how) {
49 
50 	case MPG_CLUSTERS:
51 		s = splimp();
52 		for (i = 0; i < ncl; i++) {
53 			m->m_off = 0;
54 			m->m_next = mclfree;
55 			mclfree = m;
56 			m += CLBYTES / sizeof (*m);
57 			nmclfree++;
58 		}
59 		mbstat.m_clusters += ncl;
60 		splx(s);
61 		break;
62 
63 	case MPG_MBUFS:
64 		for (i = ncl * CLBYTES / sizeof (*m); i > 0; i--) {
65 			m->m_off = 0;
66 			m->m_free = 0;
67 			(void) m_free(m);
68 			m++;
69 		}
70 		mbstat.m_clusters += ncl;
71 		break;
72 	}
73 	return ((caddr_t)m);
74 }
75 
/*
 * Release n pages of mbuf space at addr back to the system.
 * Not yet implemented -- mbuf pages are currently never
 * returned once allocated (see the note following m_expand).
 */
m_pgfree(addr, n)
	caddr_t addr;
	int n;
{

COUNT(M_PGFREE);
}
83 
84 m_expand()
85 {
86 
87 COUNT(M_EXPAND);
88 	if (mbstat.m_bufs >= mbstat.m_hiwat)
89 		return (0);
90 	if (m_clalloc(1, MPG_MBUFS) == 0)
91 		goto steal;
92 	return (1);
93 steal:
94 	/* should ask protocols to free code */
95 	return (0);
96 }
97 
98 /* NEED SOME WAY TO RELEASE SPACE */
99 
100 /*
101  * Space reservation routines
102  */
103 m_reserve(mbufs)
104 	int mbufs;
105 {
106 
107 	if (mbstat.m_lowat + (mbufs>>1) > (NMBCLUSTERS-32) * CLBYTES)
108 		return (0);
109 	mbstat.m_hiwat += mbufs;
110 	mbstat.m_lowat = mbstat.m_hiwat >> 1;
111 	return (1);
112 }
113 
114 m_release(mbufs)
115 	int mbufs;
116 {
117 
118 	mbstat.m_hiwat -= mbufs;
119 	mbstat.m_lowat = mbstat.m_hiwat >> 1;
120 }
121 
122 /*
123  * Space allocation routines.
124  * These are also available as macros
125  * for critical paths.
126  */
127 struct mbuf *
128 m_get(canwait)
129 	int canwait;
130 {
131 	register struct mbuf *m;
132 
133 COUNT(M_GET);
134 	MGET(m, canwait);
135 	return (m);
136 }
137 
138 struct mbuf *
139 m_getclr(canwait)
140 	int canwait;
141 {
142 	register struct mbuf *m;
143 
144 COUNT(M_GETCLR);
145 	m = m_get(canwait);
146 	if (m == 0)
147 		return (0);
148 	m->m_off = MMINOFF;
149 	bzero(mtod(m, caddr_t), MLEN);
150 	return (m);
151 }
152 
153 struct mbuf *
154 m_free(m)
155 	struct mbuf *m;
156 {
157 	register struct mbuf *n;
158 
159 COUNT(M_FREE);
160 	MFREE(m, n);
161 	return (n);
162 }
163 
/*
 * Called by the MGET macro when the free list is empty: try
 * to expand the pool, then retry the allocation.  Returns a
 * new mbuf, or NULL (counting a drop) if no space remains.
 */
/*ARGSUSED*/
struct mbuf *
m_more(type)
	int type;
{
	register struct mbuf *m;

COUNT(M_MORE);
	if (!m_expand()) {
		mbstat.m_drops++;
		return (NULL);
	}
	/*
	 * MGET expands to code that calls m_more on failure;
	 * temporarily redefine m_more so a second failure panics
	 * instead of recursing here indefinitely.
	 */
#define m_more(x) (panic("m_more"), (struct mbuf *)0)
	MGET(m, type);
#undef m_more
	return (m);
}
181 
182 m_freem(m)
183 	register struct mbuf *m;
184 {
185 	register struct mbuf *n;
186 	register int s;
187 
188 COUNT(M_FREEM);
189 	if (m == NULL)
190 		return;
191 	s = splimp();
192 	do {
193 		MFREE(m, n);
194 	} while (m = n);
195 	splx(s);
196 }
197 
198 /*
199  * Mbuffer utility routines.
200  */
/*
 * Make a copy of the mbuf chain m, starting "off" bytes from
 * the beginning and continuing for "len" bytes.  Data held in
 * clusters (m_off > MMAXOFF) is shared by reference -- the
 * cluster reference count is bumped -- rather than copied.
 * Panics if off/len are negative or run past the chain.
 * Returns the new chain, or 0 if len is 0 or an mbuf cannot
 * be allocated.
 */
struct mbuf *
m_copy(m, off, len)
	register struct mbuf *m;
	int off;
	register int len;
{
	register struct mbuf *n, **np;
	struct mbuf *top, *p;
COUNT(M_COPY);

	if (len == 0)
		return (0);
	if (off < 0 || len < 0)
		panic("m_copy");
	/* advance to the mbuf containing the first byte to copy */
	while (off > 0) {
		if (m == 0)
			panic("m_copy");
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	np = &top;
	top = 0;
	while (len > 0) {
		MGET(n, 1);
		*np = n;
		if (n == 0)
			goto nospace;
		if (m == 0)
			panic("m_copy");
		n->m_len = MIN(len, m->m_len - off);
		if (m->m_off > MMAXOFF) {
			/*
			 * Source data is in a cluster: point the copy's
			 * offset at the shared cluster data and take a
			 * reference instead of copying bytes.
			 */
			p = mtod(m, struct mbuf *);
			n->m_off = ((int)p - (int)n) + off;
			mclrefcnt[mtocl(p)]++;
		} else {
			n->m_off = MMINOFF;
			bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
			    (unsigned)n->m_len);
		}
		len -= n->m_len;
		off = 0;	/* only the first source mbuf is offset */
		m = m->m_next;
		np = &n->m_next;
	}
	return (top);
nospace:
	/* allocation failed part way: discard the partial copy */
	m_freem(top);
	return (0);
}
252 
253 m_cat(m, n)
254 	register struct mbuf *m, *n;
255 {
256 
257 	while (m->m_next)
258 		m = m->m_next;
259 	while (n)
260 		if (m->m_off + m->m_len + n->m_len <= MMAXOFF) {
261 			bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
262 			    (u_int)n->m_len);
263 			m->m_len += n->m_len;
264 			n = m_free(n);
265 		} else {
266 			m->m_next = n;
267 			m = n;
268 			n = m->m_next;
269 		}
270 }
271 
/*
 * Trim "len" bytes of data from the chain at mp: from the
 * front if len is positive, from the back if negative.
 * Trimmed mbufs keep their storage; only m_len (and m_off,
 * for head trims) is adjusted.
 */
m_adj(mp, len)
	struct mbuf *mp;
	register int len;
{
	register struct mbuf *m, *n;

COUNT(M_ADJ);
	if ((m = mp) == NULL)
		return;
	if (len >= 0) {
		/* trim from the head of the chain */
		while (m != NULL && len > 0) {
			if (m->m_len <= len) {
				/* this whole mbuf's data goes */
				len -= m->m_len;
				m->m_len = 0;
				m = m->m_next;
			} else {
				/* partial trim: slide the data offset forward */
				m->m_len -= len;
				m->m_off += len;
				break;
			}
		}
	} else {
		/* a 2 pass algorithm might be better */
		/*
		 * Trim from the tail: walk to the last mbuf holding
		 * data, empty it, and restart from the head until
		 * "len" bytes have been removed.
		 * NOTE(review): the outer loop exits at once if the
		 * first mbuf has m_len == 0 -- presumably chains never
		 * begin with an empty mbuf; verify against callers.
		 */
		len = -len;
		while (len > 0 && m->m_len != 0) {
			while (m != NULL && m->m_len != 0) {
				n = m;
				m = m->m_next;
			}
			if (n->m_len <= len) {
				len -= n->m_len;
				n->m_len = 0;
				m = mp;	/* restart scan from the head */
			} else {
				n->m_len -= len;
				break;
			}
		}
	}
}
312 
/*
 * Rearrange a chain so that len bytes are contiguous in the
 * first mbuf.  Not yet implemented: always fails, returning 0.
 */
/*ARGSUSED*/
m_pullup(m, len)
	struct mbuf *m;
	int len;
{

	return (0);
}
321