xref: /csrg-svn/sys/kern/uipc_mbuf.c (revision 5253)
1 /*	uipc_mbuf.c	1.22	81/12/12	*/
2 
3 #include "../h/param.h"
4 #include "../h/dir.h"
5 #include "../h/user.h"
6 #include "../h/proc.h"
7 #include "../h/pte.h"
8 #include "../h/cmap.h"
9 #include "../h/map.h"
10 #include "../h/mbuf.h"
11 #include "../net/in_systm.h"		/* XXX */
12 #include "../h/vm.h"
13 
14 mbinit()
15 {
16 
17 COUNT(MBINIT);
18 	if (m_reserve(32) == 0)
19 		goto bad;
20 	if (m_clalloc(4, MPG_MBUFS) == 0)
21 		goto bad;
22 	if (m_clalloc(32, MPG_CLUSTERS) == 0)
23 		goto bad;
24 	return;
25 bad:
26 	panic("mbinit");
27 }
28 
29 caddr_t
30 m_clalloc(ncl, how)
31 	register int ncl;
32 	int how;
33 {
34 	int npg, mbx;
35 	register struct mbuf *m;
36 	register int i;
37 	int s;
38 
39 COUNT(M_CLALLOC);
40 	npg = ncl * CLSIZE;
41 	mbx = rmalloc(mbmap, npg);
42 	if (mbx == 0)
43 		return (0);
44 	m = cltom(mbx / CLSIZE);
45 	if (memall(&Mbmap[mbx], ncl * CLSIZE, proc, CSYS) == 0)
46 		return (0);
47 	vmaccess(&Mbmap[mbx], (caddr_t)m, npg);
48 	switch (how) {
49 
50 	case MPG_CLUSTERS:
51 		s = splimp();
52 		for (i = 0; i < ncl; i++) {
53 			m->m_off = 0;
54 			m->m_next = mclfree;
55 			mclfree = m;
56 			m += CLBYTES / sizeof (*m);
57 			nmclfree++;
58 		}
59 		mbstat.m_clusters += ncl;
60 		splx(s);
61 		break;
62 
63 	case MPG_MBUFS:
64 		for (i = ncl * CLBYTES / sizeof (*m); i > 0; i--) {
65 			m->m_off = 0;
66 			(void) m_free(m);
67 			m++;
68 		}
69 		mbstat.m_clusters += ncl;
70 		break;
71 	}
72 	return ((caddr_t)m);
73 }
74 
/*
 * Release n pages of mbuf memory starting at addr.
 * XXX unimplemented stub -- space handed to the mbuf system is
 * never returned (see "NEED SOME WAY TO RELEASE SPACE" below).
 */
m_pgfree(addr, n)
	caddr_t addr;
	int n;
{

COUNT(M_PGFREE);
}
82 
83 m_expand()
84 {
85 
86 COUNT(M_EXPAND);
87 	if (mbstat.m_bufs >= mbstat.m_hiwat)
88 		return (0);
89 	if (m_clalloc(1, MPG_MBUFS) == 0)
90 		goto steal;
91 	return (1);
92 steal:
93 	/* should ask protocols to free code */
94 	return (0);
95 }
96 
97 /* NEED SOME WAY TO RELEASE SPACE */
98 
99 /*
100  * Space reservation routines
101  */
102 m_reserve(mbufs)
103 	int mbufs;
104 {
105 
106 	if (mbstat.m_lowat + (mbufs>>1) > (NMBCLUSTERS-32) * CLBYTES)
107 		return (0);
108 	mbstat.m_hiwat += mbufs;
109 	mbstat.m_lowat = mbstat.m_hiwat >> 1;
110 	return (1);
111 }
112 
113 m_release(mbufs)
114 	int mbufs;
115 {
116 
117 	mbstat.m_hiwat -= mbufs;
118 	mbstat.m_lowat = mbstat.m_hiwat >> 1;
119 }
120 
121 /*
122  * Space allocation routines.
123  * These are also available as macros
124  * for critical paths.
125  */
/*
 * Allocate a single mbuf.  Function form of the MGET macro for
 * non-critical paths; `canwait' is interpreted by MGET
 * (presumably whether to block for space -- see mbuf.h).
 * Returns the mbuf, or 0 if none could be obtained.
 */
struct mbuf *
m_get(canwait)
	int canwait;
{
	register struct mbuf *m;

COUNT(M_GET);
	MGET(m, canwait);
	return (m);
}
136 
137 struct mbuf *
138 m_getclr(canwait)
139 	int canwait;
140 {
141 	register struct mbuf *m;
142 
143 COUNT(M_GETCLR);
144 	m = m_get(canwait);
145 	if (m == 0)
146 		return (0);
147 	m->m_off = MMINOFF;
148 	bzero(mtod(m, caddr_t), MLEN);
149 	return (m);
150 }
151 
/*
 * Free a single mbuf (function form of the MFREE macro) and
 * return the mbuf that followed it on the chain, so callers
 * can write m = m_free(m) while walking a chain.
 */
struct mbuf *
m_free(m)
	struct mbuf *m;
{
	register struct mbuf *n;

COUNT(M_FREE);
	MFREE(m, n);
	return (n);
}
162 
/*
 * Called when the mbuf free list is empty: try to expand the
 * pool via m_expand(), then retry the allocation with MGET.
 * Returns a free mbuf, or NULL if no space could be obtained
 * (counted in mbstat.m_drops).
 */
/*ARGSUSED*/
struct mbuf *
m_more(type)
	int type;
{
	register struct mbuf *m;

COUNT(M_MORE);
	if (!m_expand()) {
		mbstat.m_drops++;	/* record the failed allocation */
		return (NULL);
	}
/*
 * Redefine m_more for the remainder of this file so that the
 * MGET below panics rather than recursing back into this
 * routine if the free list is still empty after a successful
 * m_expand().  NOTE(review): relies on MGET expanding a call
 * to m_more on its empty-list path -- confirm against mbuf.h.
 */
#define m_more(x) (panic("m_more"), (struct mbuf *)0)
	MGET(m, type);
	return (m);
}
179 
180 m_freem(m)
181 	register struct mbuf *m;
182 {
183 	register struct mbuf *n;
184 	register int s;
185 
186 COUNT(M_FREEM);
187 	if (m == NULL)
188 		return;
189 	s = splimp();
190 	do {
191 		MFREE(m, n);
192 	} while (m = n);
193 	splx(s);
194 }
195 
196 /*
197  * Mbuffer utility routines.
198  */
/*
 * Make a copy of the mbuf chain starting `off' bytes into `m'
 * and continuing for `len' bytes.  Data held in clusters
 * (m_off > MMAXOFF) is shared by bumping the cluster reference
 * count rather than copied; small-mbuf data is copied with bcopy.
 * Returns the new chain, or 0 if len is 0 or allocation fails.
 * Panics on negative off/len or if the request runs past the
 * end of the chain.
 */
struct mbuf *
m_copy(m, off, len)
	register struct mbuf *m;
	int off;
	register int len;
{
	register struct mbuf *n, **np;
	struct mbuf *top, *p;
COUNT(M_COPY);

	if (len == 0)
		return (0);
	if (off < 0 || len < 0)
		panic("m_copy");
	/* advance to the mbuf containing the first byte to copy */
	while (off > 0) {
		if (m == 0)
			panic("m_copy");
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	np = &top;
	top = 0;
	while (len > 0) {
		MGET(n, 1);	/* 1: presumably "can wait" -- see MGET in mbuf.h */
		*np = n;
		if (n == 0)
			goto nospace;
		if (m == 0)
			panic("m_copy");	/* len ran past end of chain */
		n->m_len = MIN(len, m->m_len - off);
		if (m->m_off > MMAXOFF) {
			/*
			 * Source data lives in a cluster: point the copy
			 * at the same cluster and bump its refcount
			 * instead of copying the bytes.
			 */
			p = mtod(m, struct mbuf *);
			n->m_off = ((int)p - (int)n) + off;
			mclrefcnt[mtocl(p)]++;
		} else {
			/* ordinary mbuf data: physically copy it */
			n->m_off = MMINOFF;
			bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
			    (unsigned)n->m_len);
		}
		len -= n->m_len;
		off = 0;	/* only the first source mbuf has an offset */
		m = m->m_next;
		np = &n->m_next;
	}
	return (top);
nospace:
	m_freem(top);
	return (0);
}
250 
251 m_cat(m, n)
252 	register struct mbuf *m, *n;
253 {
254 
255 	while (m->m_next)
256 		m = m->m_next;
257 	while (n)
258 		if (m->m_off + m->m_len + n->m_len <= MMAXOFF) {
259 			bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
260 			    (u_int)n->m_len);
261 			m->m_len += n->m_len;
262 			n = m_free(n);
263 		} else {
264 			m->m_next = n;
265 			m = n;
266 			n = m->m_next;
267 		}
268 }
269 
/*
 * Trim `len' bytes of data from the chain at `mp': from the
 * front if len is positive, from the back if negative.
 * Only m_len/m_off are adjusted -- no mbufs are freed.
 */
m_adj(mp, len)
	struct mbuf *mp;
	register int len;
{
	register struct mbuf *m, *n;

COUNT(M_ADJ);
	if ((m = mp) == NULL)
		return;
	if (len >= 0) {
		/* trim from head: empty whole mbufs, then shave the rest */
		while (m != NULL && len > 0) {
			if (m->m_len <= len) {
				len -= m->m_len;
				m->m_len = 0;
				m = m->m_next;
			} else {
				m->m_len -= len;
				m->m_off += len;
				break;
			}
		}
	} else {
		/*
		 * Trim from tail: each pass re-walks from the front to
		 * the last mbuf before a zero-length one and shortens it.
		 * NOTE(review): the walk stops at the first zero-length
		 * mbuf, so data past an embedded empty mbuf would never
		 * be trimmed -- presumably chains here contain no such
		 * holes; confirm against callers.
		 */
		/* a 2 pass algorithm might be better */
		len = -len;
		while (len > 0 && m->m_len != 0) {
			while (m != NULL && m->m_len != 0) {
				n = m;
				m = m->m_next;
			}
			if (n->m_len <= len) {
				len -= n->m_len;
				n->m_len = 0;
				m = mp;		/* restart the walk from the front */
			} else {
				n->m_len -= len;
				break;
			}
		}
	}
}
310 
/*
 * Rearrange the chain so the first `len' bytes are contiguous
 * in the first mbuf.
 * XXX unimplemented stub: always returns 0 (failure).
 */
/*ARGSUSED*/
m_pullup(m, len)
	struct mbuf *m;
	int len;
{

	return (0);
}
319