xref: /dflybsd-src/sys/kern/uipc_mbuf.c (revision 6bc31f17c9c90db02ddbd88208e06c29ed0f1534)
1 /*
2  * Copyright (c) 2004 Jeffrey M. Hsu.  All rights reserved.
3  * Copyright (c) 2004 The DragonFly Project.  All rights reserved.
4  *
5  * This code is derived from software contributed to The DragonFly Project
6  * by Jeffrey M. Hsu.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. Neither the name of The DragonFly Project nor the names of its
17  *    contributors may be used to endorse or promote products derived
18  *    from this software without specific, prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
23  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
24  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
25  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
26  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
27  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
28  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
29  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
30  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31  * SUCH DAMAGE.
32  */
33 
34 /*
35  * Copyright (c) 2004 Jeffrey M. Hsu.  All rights reserved.
36  *
37  * License terms: all terms for the DragonFly license above plus the following:
38  *
39  * 4. All advertising materials mentioning features or use of this software
40  *    must display the following acknowledgement:
41  *
42  *	This product includes software developed by Jeffrey M. Hsu
43  *	for the DragonFly Project.
44  *
45  *    This requirement may be waived with permission from Jeffrey Hsu.
46  *    This requirement will sunset and may be removed on July 8 2005,
47  *    after which the standard DragonFly license (as shown above) will
48  *    apply.
49  */
50 
51 /*
52  * Copyright (c) 1982, 1986, 1988, 1991, 1993
53  *	The Regents of the University of California.  All rights reserved.
54  *
55  * Redistribution and use in source and binary forms, with or without
56  * modification, are permitted provided that the following conditions
57  * are met:
58  * 1. Redistributions of source code must retain the above copyright
59  *    notice, this list of conditions and the following disclaimer.
60  * 2. Redistributions in binary form must reproduce the above copyright
61  *    notice, this list of conditions and the following disclaimer in the
62  *    documentation and/or other materials provided with the distribution.
63  * 3. All advertising materials mentioning features or use of this software
64  *    must display the following acknowledgement:
65  *	This product includes software developed by the University of
66  *	California, Berkeley and its contributors.
67  * 4. Neither the name of the University nor the names of its contributors
68  *    may be used to endorse or promote products derived from this software
69  *    without specific prior written permission.
70  *
71  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
72  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
73  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
74  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
75  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
76  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
77  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
78  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
79  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
80  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
81  * SUCH DAMAGE.
82  *
83  * @(#)uipc_mbuf.c	8.2 (Berkeley) 1/4/94
84  * $FreeBSD: src/sys/kern/uipc_mbuf.c,v 1.51.2.24 2003/04/15 06:59:29 silby Exp $
85  * $DragonFly: src/sys/kern/uipc_mbuf.c,v 1.51 2005/06/09 18:26:22 dillon Exp $
86  */
87 
88 #include "opt_param.h"
89 #include "opt_ddb.h"
90 #include "opt_mbuf_stress_test.h"
91 #include <sys/param.h>
92 #include <sys/systm.h>
93 #include <sys/malloc.h>
94 #include <sys/mbuf.h>
95 #include <sys/kernel.h>
96 #include <sys/sysctl.h>
97 #include <sys/domain.h>
98 #include <sys/objcache.h>
99 #include <sys/protosw.h>
100 #include <sys/uio.h>
101 #include <sys/thread.h>
102 #include <sys/globaldata.h>
103 #include <sys/thread2.h>
104 
105 #include <vm/vm.h>
106 #include <vm/vm_kern.h>
107 #include <vm/vm_extern.h>
108 
109 #ifdef INVARIANTS
110 #include <machine/cpu.h>
111 #endif
112 
113 /*
114  * mbuf cluster meta-data
115  */
116 struct mbcluster {
117 	int32_t	mcl_refs;
118 	void	*mcl_data;
119 };
120 
121 static void mbinit(void *);
122 SYSINIT(mbuf, SI_SUB_MBUF, SI_ORDER_FIRST, mbinit, NULL);
123 
124 static u_long	mbtypes[MT_NTYPES];
125 
126 struct mbstat mbstat;
127 int	max_linkhdr;
128 int	max_protohdr;
129 int	max_hdr;
130 int	max_datalen;
131 int	m_defragpackets;
132 int	m_defragbytes;
133 int	m_defraguseless;
134 int	m_defragfailure;
135 #ifdef MBUF_STRESS_TEST
136 int	m_defragrandomfailures;
137 #endif
138 
139 struct objcache *mbuf_cache, *mbufphdr_cache;
140 struct objcache *mclmeta_cache;
141 struct objcache *mbufcluster_cache, *mbufphdrcluster_cache;
142 
143 int	nmbclusters;
144 int	nmbufs;
145 
146 SYSCTL_INT(_kern_ipc, KIPC_MAX_LINKHDR, max_linkhdr, CTLFLAG_RW,
147 	   &max_linkhdr, 0, "");
148 SYSCTL_INT(_kern_ipc, KIPC_MAX_PROTOHDR, max_protohdr, CTLFLAG_RW,
149 	   &max_protohdr, 0, "");
150 SYSCTL_INT(_kern_ipc, KIPC_MAX_HDR, max_hdr, CTLFLAG_RW, &max_hdr, 0, "");
151 SYSCTL_INT(_kern_ipc, KIPC_MAX_DATALEN, max_datalen, CTLFLAG_RW,
152 	   &max_datalen, 0, "");
153 SYSCTL_INT(_kern_ipc, OID_AUTO, mbuf_wait, CTLFLAG_RW,
154 	   &mbuf_wait, 0, "");
155 SYSCTL_STRUCT(_kern_ipc, KIPC_MBSTAT, mbstat, CTLFLAG_RW, &mbstat, mbstat, "");
156 SYSCTL_OPAQUE(_kern_ipc, OID_AUTO, mbtypes, CTLFLAG_RD, mbtypes,
157 	   sizeof(mbtypes), "LU", "");
158 SYSCTL_INT(_kern_ipc, KIPC_NMBCLUSTERS, nmbclusters, CTLFLAG_RW,
159 	   &nmbclusters, 0, "Maximum number of mbuf clusters available");
160 SYSCTL_INT(_kern_ipc, OID_AUTO, nmbufs, CTLFLAG_RW, &nmbufs, 0,
161 	   "Maximum number of mbufs available");
162 
163 SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragpackets, CTLFLAG_RD,
164 	   &m_defragpackets, 0, "");
165 SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragbytes, CTLFLAG_RD,
166 	   &m_defragbytes, 0, "");
167 SYSCTL_INT(_kern_ipc, OID_AUTO, m_defraguseless, CTLFLAG_RD,
168 	   &m_defraguseless, 0, "");
169 SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragfailure, CTLFLAG_RD,
170 	   &m_defragfailure, 0, "");
171 #ifdef MBUF_STRESS_TEST
172 SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragrandomfailures, CTLFLAG_RW,
173 	   &m_defragrandomfailures, 0, "");
174 #endif
175 
176 static MALLOC_DEFINE(M_MBUF, "mbuf", "mbuf");
177 static MALLOC_DEFINE(M_MBUFCL, "mbufcl", "mbufcl");
178 static MALLOC_DEFINE(M_MCLMETA, "mclmeta", "mclmeta");
179 
180 static void m_reclaim (void);
181 static void m_mclref(void *arg);
182 static void m_mclfree(void *arg);
183 
184 #ifndef NMBCLUSTERS
185 #define NMBCLUSTERS	(512 + maxusers * 16)
186 #endif
187 #ifndef NMBUFS
188 #define NMBUFS		(nmbclusters * 2)
189 #endif
190 
191 /*
192  * Perform sanity checks of tunables declared above.
193  */
194 static void
195 tunable_mbinit(void *dummy)
196 {
197 
198 	/*
199 	 * This has to be done before VM init.
200 	 */
201 	nmbclusters = NMBCLUSTERS;
202 	TUNABLE_INT_FETCH("kern.ipc.nmbclusters", &nmbclusters);
203 	nmbufs = NMBUFS;
204 	TUNABLE_INT_FETCH("kern.ipc.nmbufs", &nmbufs);
205 	/* Sanity checks */
206 	if (nmbufs < nmbclusters * 2)
207 		nmbufs = nmbclusters * 2;
208 
209 	return;
210 }
211 SYSINIT(tunable_mbinit, SI_SUB_TUNABLES, SI_ORDER_ANY, tunable_mbinit, NULL);
212 
213 /* "number of clusters of pages" */
214 #define NCL_INIT	1
215 
216 #define NMB_INIT	16
217 
218 /*
219  * The mbuf object cache only guarantees that m_next and m_nextpkt are
220  * NULL and that m_data points to the beginning of the data area.  In
221  * particular, m_len and m_pkthdr.len are uninitialized.  It is the
222  * responsibility of the caller to initialize those fields before use.
223  */
224 
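/*
 * Illustrative sketch of the resulting caller-side idiom (MT_DATA and
 * the error handling below are assumptions, not taken from this file):
 *
 *	struct mbuf *m;
 *
 *	m = m_get(MB_DONTWAIT, MT_DATA);
 *	if (m == NULL)
 *		return (ENOBUFS);
 *	m->m_len = 0;	(m_len is uninitialized until the caller sets it)
 */
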
225 static boolean_t __inline
226 mbuf_ctor(void *obj, void *private, int ocflags)
227 {
228 	struct mbuf *m = obj;
229 
230 	m->m_next = NULL;
231 	m->m_nextpkt = NULL;
232 	m->m_data = m->m_dat;
233 	m->m_flags = 0;
234 
235 	return (TRUE);
236 }
237 
238 /*
239  * Initialize the mbuf and the packet header fields.
240  */
241 static boolean_t
242 mbufphdr_ctor(void *obj, void *private, int ocflags)
243 {
244 	struct mbuf *m = obj;
245 
246 	m->m_next = NULL;
247 	m->m_nextpkt = NULL;
248 	m->m_data = m->m_pktdat;
249 	m->m_flags = M_PKTHDR | M_PHCACHE;
250 
251 	m->m_pkthdr.rcvif = NULL;	/* eliminate XXX JH */
252 	SLIST_INIT(&m->m_pkthdr.tags);
253 	m->m_pkthdr.csum_flags = 0;	/* eliminate XXX JH */
254 	m->m_pkthdr.fw_flags = 0;	/* eliminate XXX JH */
255 
256 	return (TRUE);
257 }
258 
259 /*
260  * An mbcluster object consists of a 2K (MCLBYTES) cluster and a refcount.
261  */
262 static boolean_t
263 mclmeta_ctor(void *obj, void *private, int ocflags)
264 {
265 	struct mbcluster *cl = obj;
266 	void *buf;
267 
268 	if (ocflags & M_NOWAIT)
269 		buf = malloc(MCLBYTES, M_MBUFCL, M_NOWAIT | M_ZERO);
270 	else
271 		buf = malloc(MCLBYTES, M_MBUFCL, M_INTWAIT | M_ZERO);
272 	if (buf == NULL)
273 		return (FALSE);
274 	cl->mcl_refs = 0;
275 	cl->mcl_data = buf;
276 	return (TRUE);
277 }
278 
279 static void
280 mclmeta_dtor(void *obj, void *private)
281 {
282 	struct mbcluster *mcl = obj;
283 
284 	KKASSERT(mcl->mcl_refs == 0);
285 	free(mcl->mcl_data, M_MBUFCL);
286 }
287 
288 static void
289 linkcluster(struct mbuf *m, struct mbcluster *cl)
290 {
291 	/*
292 	 * Add the cluster to the mbuf.  The caller will detect that the
293 	 * mbuf now has an attached cluster.
294 	 */
295 	m->m_ext.ext_arg = cl;
296 	m->m_ext.ext_buf = cl->mcl_data;
297 	m->m_ext.ext_ref = m_mclref;
298 	m->m_ext.ext_free = m_mclfree;
299 	m->m_ext.ext_size = MCLBYTES;
300 	++cl->mcl_refs;
301 
302 	m->m_data = m->m_ext.ext_buf;
303 	m->m_flags |= M_EXT | M_EXT_CLUSTER;
304 }
305 
306 static boolean_t
307 mbufphdrcluster_ctor(void *obj, void *private, int ocflags)
308 {
309 	struct mbuf *m = obj;
310 	struct mbcluster *cl;
311 
312 	mbufphdr_ctor(obj, private, ocflags);
313 	cl = objcache_get(mclmeta_cache, ocflags);
314 	if (cl == NULL)
315 		return (FALSE);
316 	m->m_flags |= M_CLCACHE;
317 	linkcluster(m, cl);
318 	return (TRUE);
319 }
320 
321 static boolean_t
322 mbufcluster_ctor(void *obj, void *private, int ocflags)
323 {
324 	struct mbuf *m = obj;
325 	struct mbcluster *cl;
326 
327 	mbuf_ctor(obj, private, ocflags);
328 	cl = objcache_get(mclmeta_cache, ocflags);
329 	if (cl == NULL)
330 		return (FALSE);
331 	m->m_flags |= M_CLCACHE;
332 	linkcluster(m, cl);
333 	return (TRUE);
334 }
335 
336 /*
337  * Used for both the cluster and cluster PHDR caches.
338  *
339  * The mbuf may have lost its cluster due to sharing; deal
340  * with that situation by checking M_EXT.
341  */
342 static void
343 mbufcluster_dtor(void *obj, void *private)
344 {
345 	struct mbuf *m = obj;
346 	struct mbcluster *mcl;
347 
348 	if (m->m_flags & M_EXT) {
349 		KKASSERT((m->m_flags & M_EXT_CLUSTER) != 0);
350 		mcl = m->m_ext.ext_arg;
351 		KKASSERT(mcl->mcl_refs == 1);
352 		mcl->mcl_refs = 0;
353 		objcache_put(mclmeta_cache, mcl);
354 	}
355 }
356 
357 struct objcache_malloc_args mbuf_malloc_args = { MSIZE, M_MBUF };
358 struct objcache_malloc_args mclmeta_malloc_args =
359 	{ sizeof(struct mbcluster), M_MCLMETA };
360 
361 /* ARGSUSED*/
362 static void
363 mbinit(void *dummy)
364 {
365 	mbstat.m_msize = MSIZE;
366 	mbstat.m_mclbytes = MCLBYTES;
367 	mbstat.m_minclsize = MINCLSIZE;
368 	mbstat.m_mlen = MLEN;
369 	mbstat.m_mhlen = MHLEN;
370 
371 	mbuf_cache = objcache_create("mbuf", nmbufs, 0,
372 	    mbuf_ctor, null_dtor, NULL,
373 	    objcache_malloc_alloc, objcache_malloc_free, &mbuf_malloc_args);
374 	mbufphdr_cache = objcache_create("mbuf pkt hdr", nmbufs, 64,
375 	    mbufphdr_ctor, null_dtor, NULL,
376 	    objcache_malloc_alloc, objcache_malloc_free, &mbuf_malloc_args);
377 	mclmeta_cache = objcache_create("cluster mbuf", nmbclusters, 0,
378 	    mclmeta_ctor, mclmeta_dtor, NULL,
379 	    objcache_malloc_alloc, objcache_malloc_free, &mclmeta_malloc_args);
380 	mbufcluster_cache = objcache_create("mbuf + cluster", nmbclusters, 0,
381 	    mbufcluster_ctor, mbufcluster_dtor, NULL,
382 	    objcache_malloc_alloc, objcache_malloc_free, &mbuf_malloc_args);
383 	mbufphdrcluster_cache = objcache_create("mbuf pkt hdr + cluster",
384 	    nmbclusters, 64, mbufphdrcluster_ctor, mbufcluster_dtor, NULL,
385 	    objcache_malloc_alloc, objcache_malloc_free, &mbuf_malloc_args);
386 	return;
387 }
388 
389 /*
390  * Return the number of references to this mbuf's data.  0 is returned
391  * if the mbuf is not M_EXT, a reference count is returned if it is
392  * M_EXT | M_EXT_CLUSTER, and 99 is returned if it is a special M_EXT.
393  */
394 int
395 m_sharecount(struct mbuf *m)
396 {
397 	switch (m->m_flags & (M_EXT | M_EXT_CLUSTER)) {
398 	case 0:
399 		return (0);
400 	case M_EXT:
401 		return (99);
402 	case M_EXT | M_EXT_CLUSTER:
403 		return (((struct mbcluster *)m->m_ext.ext_arg)->mcl_refs);
404 	}
405 	/* NOTREACHED */
406 	return (0);		/* to shut up compiler */
407 }
408 
409 /*
410  * change mbuf to new type
411  */
412 void
413 m_chtype(struct mbuf *m, int type)
414 {
415 	crit_enter();
416 	++mbtypes[type];
417 	--mbtypes[m->m_type];
418 	m->m_type = type;
419 	crit_exit();
420 }
421 
422 static void
423 m_reclaim(void)
424 {
425 	struct domain *dp;
426 	struct protosw *pr;
427 
428 	crit_enter();
429 	SLIST_FOREACH(dp, &domains, dom_next) {
430 		for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++) {
431 			if (pr->pr_drain)
432 				(*pr->pr_drain)();
433 		}
434 	}
435 	crit_exit();
436 	mbstat.m_drain++;
437 }
438 
439 static void __inline
440 updatestats(struct mbuf *m, int type)
441 {
442 	m->m_type = type;
443 
444 	crit_enter();
445 	++mbtypes[type];
446 	++mbstat.m_mbufs;
447 	crit_exit();
448 }
449 
450 /*
451  * Allocate an mbuf.
452  */
453 struct mbuf *
454 m_get(int how, int type)
455 {
456 	struct mbuf *m;
457 	int ntries = 0;
458 	int ocf = MBTOM(how);
459 
460 retryonce:
461 
462 	m = objcache_get(mbuf_cache, ocf);
463 
464 	if (m == NULL) {
465 		if ((how & MB_TRYWAIT) && ntries++ == 0) {
466 			struct objcache *reclaimlist[] = {
467 				mbufphdr_cache,
468 				mbufcluster_cache, mbufphdrcluster_cache
469 			};
470 			const int nreclaims = __arysize(reclaimlist);
471 
472 			if (!objcache_reclaimlist(reclaimlist, nreclaims, ocf))
473 				m_reclaim();
474 			goto retryonce;
475 		}
476 		return (NULL);
477 	}
478 
479 	updatestats(m, type);
480 	return (m);
481 }
482 
483 struct mbuf *
484 m_gethdr(int how, int type)
485 {
486 	struct mbuf *m;
487 	int ocf = MBTOM(how);
488 	int ntries = 0;
489 
490 retryonce:
491 
492 	m = objcache_get(mbufphdr_cache, ocf);
493 
494 	if (m == NULL) {
495 		if ((how & MB_TRYWAIT) && ntries++ == 0) {
496 			struct objcache *reclaimlist[] = {
497 				mbuf_cache,
498 				mbufcluster_cache, mbufphdrcluster_cache
499 			};
500 			const int nreclaims = __arysize(reclaimlist);
501 
502 			if (!objcache_reclaimlist(reclaimlist, nreclaims, ocf))
503 				m_reclaim();
504 			goto retryonce;
505 		}
506 		return (NULL);
507 	}
508 
509 	updatestats(m, type);
510 	return (m);
511 }
512 
513 /*
514  * Get an mbuf (not an mbuf cluster!) and zero it.
515  * Deprecated.
516  */
517 struct mbuf *
518 m_getclr(int how, int type)
519 {
520 	struct mbuf *m;
521 
522 	m = m_get(how, type);
523 	if (m != NULL)
524 		bzero(m->m_data, MLEN);
525 	return (m);
526 }
527 
528 /*
529  * Returns an mbuf with an attached cluster.
530  * Because many network drivers use this kind of buffer heavily, it is
531  * convenient to keep a small pool of free buffers of this kind.
532  * Even a small pool size such as 10 gives about a 10% improvement in the
533  * forwarding rate in a bridge or router.
534  */
535 struct mbuf *
536 m_getcl(int how, short type, int flags)
537 {
538 	struct mbuf *m;
539 	int ocflags = MBTOM(how);
540 	int ntries = 0;
541 
542 retryonce:
543 
544 	if (flags & M_PKTHDR)
545 		m = objcache_get(mbufphdrcluster_cache, ocflags);
546 	else
547 		m = objcache_get(mbufcluster_cache, ocflags);
548 
549 	if (m == NULL) {
550 		if ((how & MB_TRYWAIT) && ntries++ == 0) {
551 			struct objcache *reclaimlist[1];
552 
553 			if (flags & M_PKTHDR)
554 				reclaimlist[0] = mbufcluster_cache;
555 			else
556 				reclaimlist[0] = mbufphdrcluster_cache;
557 			if (!objcache_reclaimlist(reclaimlist, 1, ocflags))
558 				m_reclaim();
559 			goto retryonce;
560 		}
561 		return (NULL);
562 	}
563 
564 	m->m_type = type;
565 
566 	crit_enter();
567 	++mbtypes[type];
568 	++mbstat.m_clusters;
569 	crit_exit();
570 	return (m);
571 }
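
/*
 * Example of the intended use (illustrative; the receive-path framing
 * is an assumption):
 *
 *	struct mbuf *m;
 *
 *	m = m_getcl(MB_DONTWAIT, MT_DATA, M_PKTHDR);
 *	if (m == NULL)
 *		return;
 *	m->m_len = m->m_pkthdr.len = MCLBYTES;
 */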
572 
573 /*
574  * Allocate chain of requested length.
575  */
576 struct mbuf *
577 m_getc(int len, int how, int type)
578 {
579 	struct mbuf *n, *nfirst = NULL, **ntail = &nfirst;
580 	int nsize;
581 
582 	while (len > 0) {
583 		n = m_getl(len, how, type, 0, &nsize);
584 		if (n == NULL)
585 			goto failed;
586 		n->m_len = 0;
587 		*ntail = n;
588 		ntail = &n->m_next;
589 		len -= nsize;
590 	}
591 	return (nfirst);
592 
593 failed:
594 	m_freem(nfirst);
595 	return (NULL);
596 }
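
/*
 * Example (illustrative; "nbytes" is an assumed caller variable).
 * Every mbuf in the returned chain has m_len == 0, so the caller is
 * expected to fill the buffers and set the lengths afterwards:
 *
 *	struct mbuf *n;
 *
 *	n = m_getc(nbytes, MB_WAIT, MT_DATA);
 *	if (n == NULL)
 *		return (ENOBUFS);
 */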
597 
598 /*
599  * Allocate len-worth of mbufs and/or mbuf clusters (whatever fits best)
600  * and return a pointer to the head of the allocated chain. If m0 is
601  * non-null, then we assume that it is a single mbuf or an mbuf chain to
602  * which we want len bytes worth of mbufs and/or clusters attached, and so
603  * if we succeed in allocating it, we will just return a pointer to m0.
604  *
605  * If we happen to fail at any point during the allocation, we will free
606  * up everything we have already allocated and return NULL.
607  *
608  * Deprecated.  Use m_getc() and m_cat() instead.
609  */
610 struct mbuf *
611 m_getm(struct mbuf *m0, int len, int how, int type)
612 {
613 	struct mbuf *nfirst;
614 
615 	nfirst = m_getc(len, how, type);
616 
617 	if (m0 != NULL) {
618 		m_last(m0)->m_next = nfirst;
619 		return (m0);
620 	}
621 
622 	return (nfirst);
623 }
624 
625 /*
626  * Adds a cluster to a normal mbuf, M_EXT is set on success.
627  * Deprecated.  Use m_getcl() instead.
628  */
629 void
630 m_mclget(struct mbuf *m, int how)
631 {
632 	struct mbcluster *mcl;
633 
634 	KKASSERT((m->m_flags & M_EXT) == 0);
635 	mcl = objcache_get(mclmeta_cache, MBTOM(how));
636 	if (mcl != NULL) {
637 		linkcluster(m, mcl);
638 		crit_enter();
639 		++mbstat.m_clusters;
640 		/* leave the m_mbufs count intact for original mbuf */
641 		crit_exit();
642 	}
643 }
644 
645 static void
646 m_mclref(void *arg)
647 {
648 	struct mbcluster *mcl = arg;
649 
650 	atomic_add_int(&mcl->mcl_refs, 1);
651 }
652 
653 static void
654 m_mclfree(void *arg)
655 {
656 	struct mbcluster *mcl = arg;
657 
658 	/* XXX interrupt race.  Currently called from a critical section */
659 	if (mcl->mcl_refs > 1) {
660 		atomic_subtract_int(&mcl->mcl_refs, 1);
661 	} else {
662 		KKASSERT(mcl->mcl_refs == 1);
663 		mcl->mcl_refs = 0;
664 		objcache_put(mclmeta_cache, mcl);
665 	}
666 }
667 
668 extern void db_print_backtrace(void);
669 
670 /*
671  * Free a single mbuf and any associated external storage.  The successor,
672  * if any, is returned.
673  *
674  * We do need to check non-first mbufs for m_aux, since some existing
675  * code does not call M_PREPEND properly
676  * (for example, calls to bpf_mtap from drivers).
677  */
678 struct mbuf *
679 m_free(struct mbuf *m)
680 {
681 	struct mbuf *n;
682 
683 	KASSERT(m->m_type != MT_FREE, ("freeing free mbuf %p", m));
684 	--mbtypes[m->m_type];
685 
686 	n = m->m_next;
687 
688 	/*
689 	 * Make sure the mbuf is in constructed state before returning it
690 	 * to the objcache.
691 	 */
692 	m->m_next = NULL;
693 #ifdef notyet
694 	KKASSERT(m->m_nextpkt == NULL);
695 #else
696 	if (m->m_nextpkt != NULL) {
697 #ifdef DDB
698 		static int afewtimes = 10;
699 
700 		if (afewtimes-- > 0) {
701 			printf("mfree: m->m_nextpkt != NULL\n");
702 			db_print_backtrace();
703 		}
704 #endif
705 		m->m_nextpkt = NULL;
706 	}
707 #endif
708 	if (m->m_flags & M_PKTHDR) {
709 		m_tag_delete_chain(m);		/* eliminate XXX JH */
710 	}
711 
712 	m->m_flags &= (M_EXT | M_EXT_CLUSTER | M_CLCACHE | M_PHCACHE);
713 
714 	/*
715 	 * Clean the M_PKTHDR state so we can return the mbuf to its original
716 	 * cache.  This is based on the PHCACHE flag which tells us whether
717 	 * the mbuf was originally allocated out of a packet-header cache
718 	 * or a non-packet-header cache.
719 	 */
720 	if (m->m_flags & M_PHCACHE) {
721 		m->m_flags |= M_PKTHDR;
722 		m->m_pkthdr.rcvif = NULL;	/* eliminate XXX JH */
723 		m->m_pkthdr.csum_flags = 0;	/* eliminate XXX JH */
724 		m->m_pkthdr.fw_flags = 0;	/* eliminate XXX JH */
725 	}
726 
727 	/*
728 	 * Handle remaining flags combinations.  M_CLCACHE tells us whether
729 	 * the mbuf was originally allocated from a cluster cache or not,
730 	 * and is totally separate from whether the mbuf is currently
731 	 * associated with a cluster.
732 	 */
733 	crit_enter();
734 	switch(m->m_flags & (M_CLCACHE | M_EXT | M_EXT_CLUSTER)) {
735 	case M_CLCACHE | M_EXT | M_EXT_CLUSTER:
736 		/*
737 		 * mbuf+cluster cache case.  The mbuf was allocated from the
738 		 * combined mbuf_cluster cache and can be returned to the
739 		 * cache if the cluster hasn't been shared.
740 		 */
741 		if (m_sharecount(m) == 1) {
742 			/*
743 			 * The cluster has not been shared, we can just
744 			 * reset the data pointer and return the mbuf
745 			 * to the cluster cache.  Note that the reference
746 			 * count is left intact (it is still associated with
747 			 * an mbuf).
748 			 */
749 			m->m_data = m->m_ext.ext_buf;
750 			if (m->m_flags & M_PHCACHE)
751 				objcache_put(mbufphdrcluster_cache, m);
752 			else
753 				objcache_put(mbufcluster_cache, m);
754 		} else {
755 			/*
756 			 * Hell.  Someone else has a ref on this cluster,
757 			 * we have to disconnect it which means we can't
758 			 * put it back into the mbufcluster_cache, we
759 			 * have to destroy the mbuf.
760 			 *
761 			 * XXX we could try to connect another cluster to
762 			 * it.
763 			 */
764 			m->m_ext.ext_free(m->m_ext.ext_arg);
765 			m->m_flags &= ~(M_EXT | M_EXT_CLUSTER);
766 			if (m->m_flags & M_PHCACHE)
767 				objcache_dtor(mbufphdrcluster_cache, m);
768 			else
769 				objcache_dtor(mbufcluster_cache, m);
770 		}
771 		--mbstat.m_clusters;
772 		break;
773 	case M_EXT | M_EXT_CLUSTER:
774 		/*
775 		 * Normal cluster associated with an mbuf that was allocated
776  * from the normal mbuf pool rather than the cluster pool.
777  * The cluster has to be independently disassociated from the
778 		 * mbuf.
779 		 */
780 		--mbstat.m_clusters;
781 		/* fall through */
782 	case M_EXT:
783 		/*
784 		 * Normal cluster association case, disconnect the cluster from
785 		 * the mbuf.  The cluster may or may not be custom.
786 		 */
787 		m->m_ext.ext_free(m->m_ext.ext_arg);
788 		m->m_flags &= ~(M_EXT | M_EXT_CLUSTER);
789 		/* fall through */
790 	case 0:
791 		/*
792 		 * return the mbuf to the mbuf cache.
793 		 */
794 		if (m->m_flags & M_PHCACHE) {
795 			m->m_data = m->m_pktdat;
796 			objcache_put(mbufphdr_cache, m);
797 		} else {
798 			m->m_data = m->m_dat;
799 			objcache_put(mbuf_cache, m);
800 		}
801 		--mbstat.m_mbufs;
802 		break;
803 	default:
804 		if (!panicstr)
805 			panic("bad mbuf flags %p %08x", m, m->m_flags);
806 		break;
807 	}
808 	crit_exit();
809 	return (n);
810 }
811 
812 void
813 m_freem(struct mbuf *m)
814 {
815 	crit_enter();
816 	while (m)
817 		m = m_free(m);
818 	crit_exit();
819 }
820 
821 /*
822  * mbuf utility routines
823  */
824 
825 /*
826  * Lesser-used path for M_PREPEND: allocate new mbuf to prepend to chain and
827  * copy junk along.
828  */
829 struct mbuf *
830 m_prepend(struct mbuf *m, int len, int how)
831 {
832 	struct mbuf *mn;
833 
834 	if (m->m_flags & M_PKTHDR)
835 	    mn = m_gethdr(how, m->m_type);
836 	else
837 	    mn = m_get(how, m->m_type);
838 	if (mn == NULL) {
839 		m_freem(m);
840 		return (NULL);
841 	}
842 	if (m->m_flags & M_PKTHDR)
843 		M_MOVE_PKTHDR(mn, m);
844 	mn->m_next = m;
845 	m = mn;
846 	if (len < MHLEN)
847 		MH_ALIGN(m, len);
848 	m->m_len = len;
849 	return (m);
850 }
851 
852 /*
853  * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
854  * continuing for "len" bytes.  If len is M_COPYALL, copy to end of mbuf.
855  * The wait parameter is a choice of MB_WAIT/MB_DONTWAIT from the caller.
856  * Note that the copy is read-only, because clusters are not copied,
857  * only their reference counts are incremented.
858  */
859 struct mbuf *
860 m_copym(const struct mbuf *m, int off0, int len, int wait)
861 {
862 	struct mbuf *n, **np;
863 	int off = off0;
864 	struct mbuf *top;
865 	int copyhdr = 0;
866 
867 	KASSERT(off >= 0, ("m_copym, negative off %d", off));
868 	KASSERT(len >= 0, ("m_copym, negative len %d", len));
869 	if (off == 0 && m->m_flags & M_PKTHDR)
870 		copyhdr = 1;
871 	while (off > 0) {
872 		KASSERT(m != NULL, ("m_copym, offset > size of mbuf chain"));
873 		if (off < m->m_len)
874 			break;
875 		off -= m->m_len;
876 		m = m->m_next;
877 	}
878 	np = &top;
879 	top = NULL;
880 	while (len > 0) {
881 		if (m == NULL) {
882 			KASSERT(len == M_COPYALL,
883 			    ("m_copym, length > size of mbuf chain"));
884 			break;
885 		}
886 		/*
887 		 * Because we are sharing any cluster attachment below,
888 		 * be sure to get an mbuf that does not have a cluster
889 		 * associated with it.
890 		 */
891 		if (copyhdr)
892 			n = m_gethdr(wait, m->m_type);
893 		else
894 			n = m_get(wait, m->m_type);
895 		*np = n;
896 		if (n == NULL)
897 			goto nospace;
898 		if (copyhdr) {
899 			if (!m_dup_pkthdr(n, m, wait))
900 				goto nospace;
901 			if (len == M_COPYALL)
902 				n->m_pkthdr.len -= off0;
903 			else
904 				n->m_pkthdr.len = len;
905 			copyhdr = 0;
906 		}
907 		n->m_len = min(len, m->m_len - off);
908 		if (m->m_flags & M_EXT) {
909 			KKASSERT((n->m_flags & M_EXT) == 0);
910 			n->m_data = m->m_data + off;
911 			m->m_ext.ext_ref(m->m_ext.ext_arg);
912 			n->m_ext = m->m_ext;
913 			n->m_flags |= m->m_flags & (M_EXT | M_EXT_CLUSTER);
914 		} else {
915 			bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
916 			    (unsigned)n->m_len);
917 		}
918 		if (len != M_COPYALL)
919 			len -= n->m_len;
920 		off = 0;
921 		m = m->m_next;
922 		np = &n->m_next;
923 	}
924 	if (top == NULL)
925 		mbstat.m_mcfail++;
926 	return (top);
927 nospace:
928 	m_freem(top);
929 	mbstat.m_mcfail++;
930 	return (NULL);
931 }
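
/*
 * Example (illustrative): take a reference-counted, read-only copy of
 * a whole packet, e.g. before passing the original downstream:
 *
 *	struct mbuf *copy;
 *
 *	copy = m_copym(m, 0, M_COPYALL, MB_DONTWAIT);
 *
 * On failure this returns NULL and the original chain is untouched;
 * the copy must not be written to, since any clusters are shared.
 */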
932 
933 /*
934  * Copy an entire packet, including header (which must be present).
935  * An optimization of the common case `m_copym(m, 0, M_COPYALL, how)'.
936  * Note that the copy is read-only, because clusters are not copied,
937  * only their reference counts are incremented.
938  * Preserve alignment of the first mbuf so if the creator has left
939  * some room at the beginning (e.g. for inserting protocol headers)
940  * the copies also have the room available.
941  */
942 struct mbuf *
943 m_copypacket(struct mbuf *m, int how)
944 {
945 	struct mbuf *top, *n, *o;
946 
947 	n = m_gethdr(how, m->m_type);
948 	top = n;
949 	if (!n)
950 		goto nospace;
951 
952 	if (!m_dup_pkthdr(n, m, how))
953 		goto nospace;
954 	n->m_len = m->m_len;
955 	if (m->m_flags & M_EXT) {
956 		KKASSERT((n->m_flags & M_EXT) == 0);
957 		n->m_data = m->m_data;
958 		m->m_ext.ext_ref(m->m_ext.ext_arg);
959 		n->m_ext = m->m_ext;
960 		n->m_flags |= m->m_flags & (M_EXT | M_EXT_CLUSTER);
961 	} else {
962 		n->m_data = n->m_pktdat + (m->m_data - m->m_pktdat);
963 		bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
964 	}
965 
966 	m = m->m_next;
967 	while (m) {
968 		o = m_get(how, m->m_type);
969 		if (!o)
970 			goto nospace;
971 
972 		n->m_next = o;
973 		n = n->m_next;
974 
975 		n->m_len = m->m_len;
976 		if (m->m_flags & M_EXT) {
977 			KKASSERT((n->m_flags & M_EXT) == 0);
978 			n->m_data = m->m_data;
979 			m->m_ext.ext_ref(m->m_ext.ext_arg);
980 			n->m_ext = m->m_ext;
981 			n->m_flags |= m->m_flags & (M_EXT | M_EXT_CLUSTER);
982 		} else {
983 			bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
984 		}
985 
986 		m = m->m_next;
987 	}
988 	return top;
989 nospace:
990 	m_freem(top);
991 	mbstat.m_mcfail++;
992 	return (NULL);
993 }
994 
995 /*
996  * Copy data from an mbuf chain starting "off" bytes from the beginning,
997  * continuing for "len" bytes, into the indicated buffer.
998  */
999 void
1000 m_copydata(const struct mbuf *m, int off, int len, caddr_t cp)
1001 {
1002 	unsigned count;
1003 
1004 	KASSERT(off >= 0, ("m_copydata, negative off %d", off));
1005 	KASSERT(len >= 0, ("m_copydata, negative len %d", len));
1006 	while (off > 0) {
1007 		KASSERT(m != NULL, ("m_copydata, offset > size of mbuf chain"));
1008 		if (off < m->m_len)
1009 			break;
1010 		off -= m->m_len;
1011 		m = m->m_next;
1012 	}
1013 	while (len > 0) {
1014 		KASSERT(m != NULL, ("m_copydata, length > size of mbuf chain"));
1015 		count = min(m->m_len - off, len);
1016 		bcopy(mtod(m, caddr_t) + off, cp, count);
1017 		len -= count;
1018 		cp += count;
1019 		off = 0;
1020 		m = m->m_next;
1021 	}
1022 }
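
/*
 * Example (illustrative; struct tcphdr and the offset "hoff" are
 * assumptions): linearize a header into a local buffer without
 * modifying the chain:
 *
 *	struct tcphdr th;
 *
 *	m_copydata(m, hoff, sizeof(th), (caddr_t)&th);
 */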
1023 
1024 /*
1025  * Copy a packet header mbuf chain into a completely new chain, including
1026  * copying any mbuf clusters.  Use this instead of m_copypacket() when
1027  * you need a writable copy of an mbuf chain.
1028  */
1029 struct mbuf *
1030 m_dup(struct mbuf *m, int how)
1031 {
1032 	struct mbuf **p, *top = NULL;
1033 	int remain, moff, nsize;
1034 
1035 	/* Sanity check */
1036 	if (m == NULL)
1037 		return (NULL);
1038 	KASSERT((m->m_flags & M_PKTHDR) != 0, ("%s: !PKTHDR", __func__));
1039 
1040 	/* While there's more data, get a new mbuf, tack it on, and fill it */
1041 	remain = m->m_pkthdr.len;
1042 	moff = 0;
1043 	p = &top;
1044 	while (remain > 0 || top == NULL) {	/* allow m->m_pkthdr.len == 0 */
1045 		struct mbuf *n;
1046 
1047 		/* Get the next new mbuf */
1048 		n = m_getl(remain, how, m->m_type, top == NULL ? M_PKTHDR : 0,
1049 			   &nsize);
1050 		if (n == NULL)
1051 			goto nospace;
1052 		if (top == NULL)
1053 			if (!m_dup_pkthdr(n, m, how))
1054 				goto nospace0;
1055 
1056 		/* Link it into the new chain */
1057 		*p = n;
1058 		p = &n->m_next;
1059 
1060 		/* Copy data from original mbuf(s) into new mbuf */
1061 		n->m_len = 0;
1062 		while (n->m_len < nsize && m != NULL) {
1063 			int chunk = min(nsize - n->m_len, m->m_len - moff);
1064 
1065 			bcopy(m->m_data + moff, n->m_data + n->m_len, chunk);
1066 			moff += chunk;
1067 			n->m_len += chunk;
1068 			remain -= chunk;
1069 			if (moff == m->m_len) {
1070 				m = m->m_next;
1071 				moff = 0;
1072 			}
1073 		}
1074 
1075 		/* Check correct total mbuf length */
1076 		KASSERT((remain > 0 && m != NULL) || (remain == 0 && m == NULL),
1077 			("%s: bogus m_pkthdr.len", __func__));
1078 	}
1079 	return (top);
1080 
1081 nospace:
1082 	m_freem(top);
1083 nospace0:
1084 	mbstat.m_mcfail++;
1085 	return (NULL);
1086 }
1087 
1088 /*
1089  * Concatenate mbuf chain n to m.
1090  * Both chains must be of the same type (e.g. MT_DATA).
1091  * Any m_pkthdr is not updated.
1092  */
1093 void
1094 m_cat(struct mbuf *m, struct mbuf *n)
1095 {
1096 	m = m_last(m);
1097 	while (n) {
1098 		if (m->m_flags & M_EXT ||
1099 		    m->m_data + m->m_len + n->m_len >= &m->m_dat[MLEN]) {
1100 			/* just join the two chains */
1101 			m->m_next = n;
1102 			return;
1103 		}
1104 		/* splat the data from one into the other */
1105 		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
1106 		    (u_int)n->m_len);
1107 		m->m_len += n->m_len;
1108 		n = m_free(n);
1109 	}
1110 }
1111 
1112 void
1113 m_adj(struct mbuf *mp, int req_len)
1114 {
1115 	int len = req_len;
1116 	struct mbuf *m;
1117 	int count;
1118 
1119 	if ((m = mp) == NULL)
1120 		return;
1121 	if (len >= 0) {
1122 		/*
1123 		 * Trim from head.
1124 		 */
1125 		while (m != NULL && len > 0) {
1126 			if (m->m_len <= len) {
1127 				len -= m->m_len;
1128 				m->m_len = 0;
1129 				m = m->m_next;
1130 			} else {
1131 				m->m_len -= len;
1132 				m->m_data += len;
1133 				len = 0;
1134 			}
1135 		}
1136 		m = mp;
1137 		if (mp->m_flags & M_PKTHDR)
1138 			m->m_pkthdr.len -= (req_len - len);
1139 	} else {
1140 		/*
1141 		 * Trim from tail.  Scan the mbuf chain,
1142 		 * calculating its length and finding the last mbuf.
1143 		 * If the adjustment only affects this mbuf, then just
1144 		 * adjust and return.  Otherwise, rescan and truncate
1145 		 * after the remaining size.
1146 		 */
1147 		len = -len;
1148 		count = 0;
1149 		for (;;) {
1150 			count += m->m_len;
1151 		if (m->m_next == NULL)
1152 				break;
1153 			m = m->m_next;
1154 		}
1155 		if (m->m_len >= len) {
1156 			m->m_len -= len;
1157 			if (mp->m_flags & M_PKTHDR)
1158 				mp->m_pkthdr.len -= len;
1159 			return;
1160 		}
1161 		count -= len;
1162 		if (count < 0)
1163 			count = 0;
1164 		/*
1165 		 * Correct length for chain is "count".
1166 		 * Find the mbuf with last data, adjust its length,
1167 		 * and toss data from remaining mbufs on chain.
1168 		 */
1169 		m = mp;
1170 		if (m->m_flags & M_PKTHDR)
1171 			m->m_pkthdr.len = count;
1172 		for (; m; m = m->m_next) {
1173 			if (m->m_len >= count) {
1174 				m->m_len = count;
1175 				break;
1176 			}
1177 			count -= m->m_len;
1178 		}
1179 		while (m->m_next)
1180 			(m = m->m_next)->m_len = 0;
1181 	}
1182 }
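
/*
 * Example (illustrative; ETHER_HDR_LEN and ETHER_CRC_LEN come from
 * <net/ethernet.h> and are assumptions about the caller): a positive
 * length trims from the head, a negative one from the tail:
 *
 *	m_adj(m, ETHER_HDR_LEN);	trim the link header from the front
 *	m_adj(m, -ETHER_CRC_LEN);	trim the CRC from the back
 */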
1183 
1184 /*
1185  * Rearrange an mbuf chain so that len bytes are contiguous
1186  * and in the data area of an mbuf (so that mtod will work for a structure
1187  * of size len).  Returns the resulting mbuf chain on success, frees it and
1188  * returns NULL on failure.  If there is room, it will add up to
1189  * max_protohdr-len extra bytes to the contiguous region in an attempt to
1190  * avoid being called next time.
1191  */
1192 struct mbuf *
1193 m_pullup(struct mbuf *n, int len)
1194 {
1195 	struct mbuf *m;
1196 	int count;
1197 	int space;
1198 
1199 	/*
1200 	 * If first mbuf has no cluster, and has room for len bytes
1201 	 * without shifting current data, pullup into it,
1202 	 * otherwise allocate a new mbuf to prepend to the chain.
1203 	 */
1204 	if (!(n->m_flags & M_EXT) &&
1205 	    n->m_data + len < &n->m_dat[MLEN] &&
1206 	    n->m_next) {
1207 		if (n->m_len >= len)
1208 			return (n);
1209 		m = n;
1210 		n = n->m_next;
1211 		len -= m->m_len;
1212 	} else {
1213 		if (len > MHLEN)
1214 			goto bad;
1215 		if (n->m_flags & M_PKTHDR)
1216 			m = m_gethdr(MB_DONTWAIT, n->m_type);
1217 		else
1218 			m = m_get(MB_DONTWAIT, n->m_type);
1219 		if (m == NULL)
1220 			goto bad;
1221 		m->m_len = 0;
1222 		if (n->m_flags & M_PKTHDR)
1223 			M_MOVE_PKTHDR(m, n);
1224 	}
1225 	space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
1226 	do {
1227 		count = min(min(max(len, max_protohdr), space), n->m_len);
1228 		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
1229 		  (unsigned)count);
1230 		len -= count;
1231 		m->m_len += count;
1232 		n->m_len -= count;
1233 		space -= count;
1234 		if (n->m_len)
1235 			n->m_data += count;
1236 		else
1237 			n = m_free(n);
1238 	} while (len > 0 && n);
1239 	if (len > 0) {
1240 		m_free(m);
1241 		goto bad;
1242 	}
1243 	m->m_next = n;
1244 	return (m);
1245 bad:
1246 	m_freem(n);
1247 	mbstat.m_mpfail++;
1248 	return (NULL);
1249 }
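
/*
 * Example of the usual protocol-input idiom (illustrative; struct ip
 * from <netinet/ip.h> is an assumption).  Note that on failure the
 * chain has already been freed:
 *
 *	struct ip *ip;
 *
 *	if (m->m_len < sizeof(struct ip)) {
 *		m = m_pullup(m, sizeof(struct ip));
 *		if (m == NULL)
 *			return;
 *	}
 *	ip = mtod(m, struct ip *);
 */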
1250 
1251 /*
1252  * Partition an mbuf chain in two pieces, returning the tail --
1253  * all but the first len0 bytes.  In case of failure, it returns NULL and
1254  * attempts to restore the chain to its original state.
1255  *
1256  * Note that the resulting mbufs might be read-only, because the new
1257  * mbuf can end up sharing an mbuf cluster with the original mbuf if
1258  * the "breaking point" happens to lie within a cluster mbuf. Use the
1259  * M_WRITABLE() macro to check for this case.
1260  */
1261 struct mbuf *
1262 m_split(struct mbuf *m0, int len0, int wait)
1263 {
1264 	struct mbuf *m, *n;
1265 	unsigned len = len0, remain;
1266 
1267 	for (m = m0; m && len > m->m_len; m = m->m_next)
1268 		len -= m->m_len;
1269 	if (m == NULL)
1270 		return (NULL);
1271 	remain = m->m_len - len;
1272 	if (m0->m_flags & M_PKTHDR) {
1273 		n = m_gethdr(wait, m0->m_type);
1274 		if (n == NULL)
1275 			return (NULL);
1276 		n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
1277 		n->m_pkthdr.len = m0->m_pkthdr.len - len0;
1278 		m0->m_pkthdr.len = len0;
1279 		if (m->m_flags & M_EXT)
1280 			goto extpacket;
1281 		if (remain > MHLEN) {
1282 			/* m can't be the lead packet */
1283 			MH_ALIGN(n, 0);
1284 			n->m_next = m_split(m, len, wait);
1285 			if (n->m_next == NULL) {
1286 				m_free(n);
1287 				return (NULL);
1288 			} else {
1289 				n->m_len = 0;
1290 				return (n);
1291 			}
1292 		} else
1293 			MH_ALIGN(n, remain);
1294 	} else if (remain == 0) {
1295 		n = m->m_next;
1296 		m->m_next = NULL;
1297 		return (n);
1298 	} else {
1299 		n = m_get(wait, m->m_type);
1300 		if (n == NULL)
1301 			return (NULL);
1302 		M_ALIGN(n, remain);
1303 	}
1304 extpacket:
1305 	if (m->m_flags & M_EXT) {
1306 		KKASSERT((n->m_flags & M_EXT) == 0);
1307 		n->m_data = m->m_data + len;
1308 		m->m_ext.ext_ref(m->m_ext.ext_arg);
1309 		n->m_ext = m->m_ext;
1310 		n->m_flags |= m->m_flags & (M_EXT | M_EXT_CLUSTER);
1311 	} else {
1312 		bcopy(mtod(m, caddr_t) + len, mtod(n, caddr_t), remain);
1313 	}
1314 	n->m_len = remain;
1315 	m->m_len = len;
1316 	n->m_next = m->m_next;
1317 	m->m_next = NULL;
1318 	return (n);
1319 }
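
/*
 * Example (illustrative; "reclen" is an assumed record length): split
 * one record off the front of a chain, keeping the remainder:
 *
 *	struct mbuf *rest;
 *
 *	rest = m_split(m, reclen, MB_WAIT);
 *
 * On failure this returns NULL with m restored; on success m holds the
 * first reclen bytes and rest holds everything after them.
 */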
1320 
1321 /*
1322  * Routine to copy from device local memory into mbufs.
1323  * Note: "offset" is ill-defined and always passed as 0, so ignore it.
1324  */
1325 struct mbuf *
1326 m_devget(char *buf, int len, int offset, struct ifnet *ifp,
1327     void (*copy)(volatile const void *from, volatile void *to, size_t length))
1328 {
1329 	struct mbuf *m, *mfirst = NULL, **mtail;
1330 	int nsize, flags;
1331 
1332 	if (copy == NULL)
1333 		copy = bcopy;
1334 	mtail = &mfirst;
1335 	flags = M_PKTHDR;
1336 
1337 	while (len > 0) {
1338 		m = m_getl(len, MB_DONTWAIT, MT_DATA, flags, &nsize);
1339 		if (m == NULL) {
1340 			m_freem(mfirst);
1341 			return (NULL);
1342 		}
1343 		m->m_len = min(len, nsize);
1344 
1345 		if (flags & M_PKTHDR) {
1346 			if (len + max_linkhdr <= nsize)
1347 				m->m_data += max_linkhdr;
1348 			m->m_pkthdr.rcvif = ifp;
1349 			m->m_pkthdr.len = len;
1350 			flags = 0;
1351 		}
1352 
1353 		copy(buf, m->m_data, (unsigned)m->m_len);
1354 		buf += m->m_len;
1355 		len -= m->m_len;
1356 		*mtail = m;
1357 		mtail = &m->m_next;
1358 	}
1359 
1360 	return (mfirst);
1361 }
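
/*
 * Example (illustrative; sc, rxbuf, framelen and ac_if are assumed
 * driver-local names): copy a received frame out of device memory,
 * passing NULL to get the default bcopy:
 *
 *	m = m_devget(sc->rxbuf, framelen, 0, &sc->arpcom.ac_if, NULL);
 */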
1362 
1363 /*
1364  * Copy data from a buffer back into the indicated mbuf chain,
1365  * starting "off" bytes from the beginning, extending the mbuf
1366  * chain if necessary.
1367  */
1368 void
1369 m_copyback(struct mbuf *m0, int off, int len, caddr_t cp)
1370 {
1371 	int mlen;
1372 	struct mbuf *m = m0, *n;
1373 	int totlen = 0;
1374 
1375 	if (m0 == NULL)
1376 		return;
1377 	while (off > (mlen = m->m_len)) {
1378 		off -= mlen;
1379 		totlen += mlen;
1380 		if (m->m_next == NULL) {
1381 			n = m_getclr(MB_DONTWAIT, m->m_type);
1382 			if (n == NULL)
1383 				goto out;
1384 			n->m_len = min(MLEN, len + off);
1385 			m->m_next = n;
1386 		}
1387 		m = m->m_next;
1388 	}
1389 	while (len > 0) {
1390 		mlen = min(m->m_len - off, len);
1391 		bcopy(cp, off + mtod(m, caddr_t), (unsigned)mlen);
1392 		cp += mlen;
1393 		len -= mlen;
1394 		mlen += off;
1395 		off = 0;
1396 		totlen += mlen;
1397 		if (len == 0)
1398 			break;
1399 		if (m->m_next == NULL) {
1400 			n = m_get(MB_DONTWAIT, m->m_type);
1401 			if (n == NULL)
1402 				break;
1403 			n->m_len = min(MLEN, len);
1404 			m->m_next = n;
1405 		}
1406 		m = m->m_next;
1407 	}
1408 out:	if (((m = m0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen))
1409 		m->m_pkthdr.len = totlen;
1410 }
1411 
1412 void
1413 m_print(const struct mbuf *m)
1414 {
1415 	int len;
1416 	const struct mbuf *m2;
1417 
1418 	len = m->m_pkthdr.len;
1419 	m2 = m;
1420 	while (len) {
1421 		printf("%p %*D\n", m2, m2->m_len, (u_char *)m2->m_data, "-");
1422 		len -= m2->m_len;
1423 		m2 = m2->m_next;
1424 	}
1425 	return;
1426 }
1427 
1428 /*
1429  * "Move" mbuf pkthdr from "from" to "to".
1430  * "from" must have M_PKTHDR set, and "to" must be empty.
1431  */
1432 void
1433 m_move_pkthdr(struct mbuf *to, struct mbuf *from)
1434 {
1435 	KASSERT((to->m_flags & M_PKTHDR), ("m_move_pkthdr: not packet header"));
1436 
1437 	to->m_flags |= from->m_flags & M_COPYFLAGS;
1438 	to->m_pkthdr = from->m_pkthdr;		/* especially tags */
1439 	SLIST_INIT(&from->m_pkthdr.tags);	/* purge tags from src */
1440 }
1441 
1442 /*
1443  * Duplicate "from"'s mbuf pkthdr in "to".
1444  * "from" must have M_PKTHDR set, and "to" must be empty.
1445  * In particular, this does a deep copy of the packet tags.
1446  */
1447 int
1448 m_dup_pkthdr(struct mbuf *to, const struct mbuf *from, int how)
1449 {
1450 	KASSERT((to->m_flags & M_PKTHDR), ("m_dup_pkthdr: not packet header"));
1451 
1452 	to->m_flags = (from->m_flags & M_COPYFLAGS) |
1453 		      (to->m_flags & ~M_COPYFLAGS);
1454 	to->m_pkthdr = from->m_pkthdr;
1455 	SLIST_INIT(&to->m_pkthdr.tags);
1456 	return (m_tag_copy_chain(to, from, how));
1457 }
1458 
1459 /*
1460  * Defragment an mbuf chain, returning the shortest possible
1461  * chain of mbufs and clusters.  If allocation fails and
1462  * this cannot be completed, NULL will be returned, but
1463  * the passed in chain will be unchanged.  Upon success,
1464  * the original chain will be freed, and the new chain
1465  * will be returned.
1466  *
1467  * If a non-packet-header mbuf is passed in, the original
1468  * mbuf chain is returned unharmed.
1469  *
1470  * m_defrag_nofree doesn't free the passed in mbuf.
1471  */
1472 struct mbuf *
1473 m_defrag(struct mbuf *m0, int how)
1474 {
1475 	struct mbuf *m_new;
1476 
1477 	if ((m_new = m_defrag_nofree(m0, how)) == NULL)
1478 		return (NULL);
1479 	if (m_new != m0)
1480 		m_freem(m0);
1481 	return (m_new);
1482 }
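
/*
 * Example (illustrative): a transmit path whose DMA engine has a small
 * scatter/gather limit might compact a long chain first:
 *
 *	struct mbuf *m2;
 *
 *	m2 = m_defrag(m, MB_DONTWAIT);
 *	if (m2 == NULL) {
 *		m_freem(m);	(the original chain is unchanged on failure)
 *		return;
 *	}
 *	m = m2;			(on success the original was already freed)
 */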
1483 
1484 struct mbuf *
1485 m_defrag_nofree(struct mbuf *m0, int how)
1486 {
1487 	struct mbuf	*m_new = NULL, *m_final = NULL;
1488 	int		progress = 0, length, nsize;
1489 
1490 	if (!(m0->m_flags & M_PKTHDR))
1491 		return (m0);
1492 
1493 #ifdef MBUF_STRESS_TEST
1494 	if (m_defragrandomfailures) {
1495 		int temp = arc4random() & 0xff;
1496 		if (temp == 0xba)
1497 			goto nospace;
1498 	}
1499 #endif
1500 
1501 	m_final = m_getl(m0->m_pkthdr.len, how, MT_DATA, M_PKTHDR, &nsize);
1502 	if (m_final == NULL)
1503 		goto nospace;
1504 	m_final->m_len = 0;	/* in case m0->m_pkthdr.len is zero */
1505 
1506 	if (!m_dup_pkthdr(m_final, m0, how))
1507 		goto nospace;
1508 
1509 	m_new = m_final;
1510 
1511 	while (progress < m0->m_pkthdr.len) {
1512 		length = m0->m_pkthdr.len - progress;
1513 		if (length > MCLBYTES)
1514 			length = MCLBYTES;
1515 
1516 		if (m_new == NULL) {
1517 			m_new = m_getl(length, how, MT_DATA, 0, &nsize);
1518 			if (m_new == NULL)
1519 				goto nospace;
1520 		}
1521 
1522 		m_copydata(m0, progress, length, mtod(m_new, caddr_t));
1523 		progress += length;
1524 		m_new->m_len = length;
1525 		if (m_new != m_final)
1526 			m_cat(m_final, m_new);
1527 		m_new = NULL;
1528 	}
1529 	if (m0->m_next == NULL)
1530 		m_defraguseless++;
1531 	m_defragpackets++;
1532 	m_defragbytes += m_final->m_pkthdr.len;
1533 	return (m_final);
1534 nospace:
1535 	m_defragfailure++;
1536 	if (m_new)
1537 		m_free(m_new);
1538 	m_freem(m_final);
1539 	return (NULL);
1540 }
1541 
1542 /*
1543  * Move data from uio into mbufs.
1544  */
1545 struct mbuf *
1546 m_uiomove(struct uio *uio)
1547 {
1548 	struct mbuf *m;			/* current working mbuf */
1549 	struct mbuf *head = NULL;	/* result mbuf chain */
1550 	struct mbuf **mp = &head;
1551 	int resid = uio->uio_resid, nsize, flags = M_PKTHDR, error;
1552 
1553 	do {
1554 		m = m_getl(resid, MB_WAIT, MT_DATA, flags, &nsize);
1555 		if (flags) {
1556 			m->m_pkthdr.len = 0;
1557 			/* Leave room for protocol headers. */
1558 			if (resid < MHLEN)
1559 				MH_ALIGN(m, resid);
1560 			flags = 0;
1561 		}
1562 		m->m_len = min(nsize, resid);
1563 		error = uiomove(mtod(m, caddr_t), m->m_len, uio);
1564 		if (error) {
1565 			m_free(m);
1566 			goto failed;
1567 		}
1568 		*mp = m;
1569 		mp = &m->m_next;
1570 		head->m_pkthdr.len += m->m_len;
1571 		resid -= m->m_len;
1572 	} while (resid > 0);
1573 
1574 	return (head);
1575 
1576 failed:
1577 	m_freem(head);
1578 	return (NULL);
1579 }
1580 
1581 struct mbuf *
1582 m_last(struct mbuf *m)
1583 {
1584 	while (m->m_next)
1585 		m = m->m_next;
1586 	return (m);
1587 }
1588 
1589 /*
1590  * Return the number of bytes in an mbuf chain.
1591  * If lastm is not NULL, also return the last mbuf.
1592  */
1593 u_int
1594 m_lengthm(struct mbuf *m, struct mbuf **lastm)
1595 {
1596 	u_int len = 0;
1597 	struct mbuf *prev = m;
1598 
1599 	while (m) {
1600 		len += m->m_len;
1601 		prev = m;
1602 		m = m->m_next;
1603 	}
1604 	if (lastm != NULL)
1605 		*lastm = prev;
1606 	return (len);
1607 }
1608 
1609 /*
1610  * Like m_lengthm(), except also keep track of mbuf usage.
1611  */
1612 u_int
1613 m_countm(struct mbuf *m, struct mbuf **lastm, u_int *pmbcnt)
1614 {
1615 	u_int len = 0, mbcnt = 0;
1616 	struct mbuf *prev = m;
1617 
1618 	while (m) {
1619 		len += m->m_len;
1620 		mbcnt += MSIZE;
1621 		if (m->m_flags & M_EXT)
1622 			mbcnt += m->m_ext.ext_size;
1623 		prev = m;
1624 		m = m->m_next;
1625 	}
1626 	if (lastm != NULL)
1627 		*lastm = prev;
1628 	*pmbcnt = mbcnt;
1629 	return (len);
1630 }
1631