xref: /csrg-svn/sys/kern/uipc_mbuf.c (revision 5273)
1 /*	uipc_mbuf.c	1.23	81/12/20	*/
2 
3 #include "../h/param.h"
4 #include "../h/dir.h"
5 #include "../h/user.h"
6 #include "../h/proc.h"
7 #include "../h/pte.h"
8 #include "../h/cmap.h"
9 #include "../h/map.h"
10 #include "../h/mbuf.h"
11 #include "../net/in_systm.h"		/* XXX */
12 #include "../h/vm.h"
13 
14 mbinit()
15 {
16 
17 COUNT(MBINIT);
18 	if (m_reserve(32) == 0)
19 		goto bad;
20 	if (m_clalloc(4, MPG_MBUFS) == 0)
21 		goto bad;
22 	if (m_clalloc(32, MPG_CLUSTERS) == 0)
23 		goto bad;
24 	return;
25 bad:
26 	panic("mbinit");
27 }
28 
29 caddr_t
30 m_clalloc(ncl, how)
31 	register int ncl;
32 	int how;
33 {
34 	int npg, mbx;
35 	register struct mbuf *m;
36 	register int i;
37 	int s;
38 
39 COUNT(M_CLALLOC);
40 	npg = ncl * CLSIZE;
41 	mbx = rmalloc(mbmap, npg);
42 	if (mbx == 0)
43 		return (0);
44 	m = cltom(mbx / CLSIZE);
45 	if (memall(&Mbmap[mbx], ncl * CLSIZE, proc, CSYS) == 0)
46 		return (0);
47 	vmaccess(&Mbmap[mbx], (caddr_t)m, npg);
48 	switch (how) {
49 
50 	case MPG_CLUSTERS:
51 		s = splimp();
52 		for (i = 0; i < ncl; i++) {
53 			m->m_off = 0;
54 			m->m_next = mclfree;
55 			mclfree = m;
56 			m += CLBYTES / sizeof (*m);
57 			nmclfree++;
58 		}
59 		mbstat.m_clusters += ncl;
60 		splx(s);
61 		break;
62 
63 	case MPG_MBUFS:
64 		for (i = ncl * CLBYTES / sizeof (*m); i > 0; i--) {
65 			m->m_off = 0;
66 			m->m_free = 0;
67 			(void) m_free(m);
68 			m++;
69 		}
70 		mbstat.m_clusters += ncl;
71 		break;
72 	}
73 	return ((caddr_t)m);
74 }
75 
/*
 * Release n pages of mbuf space starting at addr.
 * Currently a no-op stub: allocated mbuf pages are never
 * returned to the system (see the "NEED SOME WAY TO RELEASE
 * SPACE" note later in this file).
 */
m_pgfree(addr, n)
	caddr_t addr;
	int n;
{

COUNT(M_PGFREE);
}
83 
84 m_expand()
85 {
86 
87 COUNT(M_EXPAND);
88 	if (mbstat.m_bufs >= mbstat.m_hiwat)
89 		return (0);
90 	if (m_clalloc(1, MPG_MBUFS) == 0)
91 		goto steal;
92 	return (1);
93 steal:
94 	/* should ask protocols to free code */
95 	return (0);
96 }
97 
98 /* NEED SOME WAY TO RELEASE SPACE */
99 
100 /*
101  * Space reservation routines
102  */
103 m_reserve(mbufs)
104 	int mbufs;
105 {
106 
107 	if (mbstat.m_lowat + (mbufs>>1) > (NMBCLUSTERS-32) * CLBYTES)
108 		return (0);
109 	mbstat.m_hiwat += mbufs;
110 	mbstat.m_lowat = mbstat.m_hiwat >> 1;
111 	return (1);
112 }
113 
/*
 * Give back previously reserved mbuf space: lower the
 * high-water mark by "mbufs" and re-derive the low-water
 * mark as half of the new high-water mark.
 */
m_release(mbufs)
	int mbufs;
{

	mbstat.m_hiwat -= mbufs;
	mbstat.m_lowat = mbstat.m_hiwat >> 1;
}
121 
122 /*
123  * Space allocation routines.
124  * These are also available as macros
125  * for critical paths.
126  */
/*
 * Allocate a single small mbuf; function form of the MGET()
 * macro for non-critical paths.  canwait is passed through to
 * MGET() -- presumably whether to sleep waiting for a free
 * mbuf; confirm against mbuf.h.  Returns 0 if no mbuf could
 * be obtained.
 */
struct mbuf *
m_get(canwait)
	int canwait;
{
	register struct mbuf *m;

COUNT(M_GET);
	MGET(m, canwait);
	return (m);
}
137 
138 struct mbuf *
139 m_getclr(canwait)
140 	int canwait;
141 {
142 	register struct mbuf *m;
143 
144 COUNT(M_GETCLR);
145 	m = m_get(canwait);
146 	if (m == 0)
147 		return (0);
148 	m->m_off = MMINOFF;
149 	bzero(mtod(m, caddr_t), MLEN);
150 	return (m);
151 }
152 
/*
 * Free the single mbuf m; function form of the MFREE() macro.
 * Returns the next mbuf in the chain (as captured by MFREE),
 * so callers can iterate:  n = m_free(n);
 */
struct mbuf *
m_free(m)
	struct mbuf *m;
{
	register struct mbuf *n;

COUNT(M_FREE);
	MFREE(m, n);
	return (n);
}
163 
/*
 * Called (via the MGET() macro) when the mbuf free list is
 * empty: try to expand the pool, then retry the allocation.
 * Returns the new mbuf, or NULL if no space could be found
 * (the failure is counted in mbstat.m_drops).
 */
/*ARGSUSED*/
struct mbuf *
m_more(type)
	int type;
{
	register struct mbuf *m;

COUNT(M_MORE);
	if (!m_expand()) {
		mbstat.m_drops++;
		return (NULL);
	}
/*
 * The MGET() expansion below would itself call m_more() on an
 * empty free list; redefine m_more here so that path panics
 * instead of recursing forever.
 */
#define m_more(x) (panic("m_more"), (struct mbuf *)0)
	MGET(m, type);
	return (m);
}
180 
181 m_freem(m)
182 	register struct mbuf *m;
183 {
184 	register struct mbuf *n;
185 	register int s;
186 
187 COUNT(M_FREEM);
188 	if (m == NULL)
189 		return;
190 	s = splimp();
191 	do {
192 		MFREE(m, n);
193 	} while (m = n);
194 	splx(s);
195 }
196 
197 /*
198  * Mbuffer utility routines.
199  */
/*
 * Make a copy of the mbuf chain starting "off" bytes from the
 * head of m and continuing for "len" bytes.  Data held in
 * cluster mbufs is not copied: the new mbuf is pointed into
 * the same cluster and the cluster's reference count is
 * bumped.  Returns the new chain; returns 0 if len is 0 or an
 * mbuf cannot be allocated; panics if off/len overrun the
 * source chain.
 */
struct mbuf *
m_copy(m, off, len)
	register struct mbuf *m;
	int off;
	register int len;
{
	register struct mbuf *n, **np;
	struct mbuf *top, *p;
COUNT(M_COPY);

	if (len == 0)
		return (0);
	if (off < 0 || len < 0)
		panic("m_copy");
	/* advance m to the mbuf containing offset off */
	while (off > 0) {
		if (m == 0)
			panic("m_copy");
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	np = &top;
	top = 0;
	while (len > 0) {
		MGET(n, 1);	/* 1 == canwait, presumably -- confirm */
		*np = n;
		if (n == 0)
			goto nospace;
		if (m == 0)
			panic("m_copy");
		n->m_len = MIN(len, m->m_len - off);
		if (m->m_off > MMAXOFF) {
			/* source data is in a cluster: share it by
			   reference rather than copying the bytes */
			p = mtod(m, struct mbuf *);
			n->m_off = ((int)p - (int)n) + off;
			mclrefcnt[mtocl(p)]++;
		} else {
			/* small mbuf: copy the data proper */
			n->m_off = MMINOFF;
			bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
			    (unsigned)n->m_len);
		}
		len -= n->m_len;
		off = 0;	/* the offset applies to the first mbuf only */
		m = m->m_next;
		np = &n->m_next;
	}
	return (top);
nospace:
	/* allocation failed part way: release what was built */
	m_freem(top);
	return (0);
}
251 
/*
 * Concatenate chain n onto the end of chain m.
 * Where an mbuf of n's data fits within the tail mbuf of m
 * (combined data staying inside a small mbuf, i.e. within
 * MMAXOFF), the data is compacted into m and the emptied mbuf
 * freed; otherwise the rest of n is linked on intact.
 * NOTE(review): no COUNT() here, unlike the other routines.
 */
m_cat(m, n)
	register struct mbuf *m, *n;
{

	/* find the tail of m */
	while (m->m_next)
		m = m->m_next;
	while (n)
		if (m->m_off + m->m_len + n->m_len <= MMAXOFF) {
			/* n's data fits after m's: copy it in and
			   free the now-empty mbuf */
			bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
			    (u_int)n->m_len);
			m->m_len += n->m_len;
			n = m_free(n);
		} else {
			/* cannot compact: link the mbuf on and advance */
			m->m_next = n;
			m = n;
			n = m->m_next;
		}
}
270 
/*
 * Trim len bytes of data from the mbuf chain mp.
 * len > 0 trims from the head of the chain; len < 0 trims
 * -len bytes from the tail.  Mbufs emptied by trimming are
 * left on the chain with m_len == 0, not freed.
 */
m_adj(mp, len)
	struct mbuf *mp;
	register int len;
{
	register struct mbuf *m, *n;

COUNT(M_ADJ);
	if ((m = mp) == NULL)
		return;
	if (len >= 0) {
		/* trim from head: consume whole mbufs, then advance
		   m_off within the final, partially-trimmed one */
		while (m != NULL && len > 0) {
			if (m->m_len <= len) {
				len -= m->m_len;
				m->m_len = 0;
				m = m->m_next;
			} else {
				m->m_len -= len;
				m->m_off += len;
				break;
			}
		}
	} else {
		/*
		 * Trim from tail: repeatedly walk to the last mbuf
		 * still holding data and shrink it -- O(chain) per
		 * mbuf emptied, hence the note below.
		 */
		/* a 2 pass algorithm might be better */
		len = -len;
		while (len > 0 && m->m_len != 0) {
			/* n ends as the last mbuf with data; the scan
			   stops at the first zero-length mbuf, which
			   assumes such mbufs occur only at the tail */
			while (m != NULL && m->m_len != 0) {
				n = m;
				m = m->m_next;
			}
			if (n->m_len <= len) {
				len -= n->m_len;
				n->m_len = 0;
				m = mp;		/* restart from the head */
			} else {
				n->m_len -= len;
				break;
			}
		}
	}
}
311 
/*
 * Rearrange an mbuf chain so that the first len bytes of data
 * are contiguous in the first mbuf.  Not yet implemented:
 * always returns 0 (failure).
 */
/*ARGSUSED*/
m_pullup(m, len)
	struct mbuf *m;
	int len;
{

	return (0);
}
320