1 /*
2  * Copyright (c) 2004 Jeffrey M. Hsu.  All rights reserved.
3  * Copyright (c) 2004 The DragonFly Project.  All rights reserved.
4  *
5  * This code is derived from software contributed to The DragonFly Project
6  * by Jeffrey M. Hsu.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. Neither the name of The DragonFly Project nor the names of its
17  *    contributors may be used to endorse or promote products derived
18  *    from this software without specific, prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
23  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
24  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
25  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
26  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
27  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
28  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
29  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
30  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31  * SUCH DAMAGE.
32  */
33 
34 /*
35  * Copyright (c) 2004 Jeffrey M. Hsu.  All rights reserved.
36  *
37  * License terms: all terms for the DragonFly license above plus the following:
38  *
39  * 4. All advertising materials mentioning features or use of this software
40  *    must display the following acknowledgement:
41  *
42  *	This product includes software developed by Jeffrey M. Hsu
43  *	for the DragonFly Project.
44  *
45  *    This requirement may be waived with permission from Jeffrey Hsu.
46  *    This requirement will sunset and may be removed on July 8 2005,
47  *    after which the standard DragonFly license (as shown above) will
48  *    apply.
49  */
50 
51 /*
52  * Copyright (c) 1982, 1986, 1988, 1991, 1993
53  *	The Regents of the University of California.  All rights reserved.
54  *
55  * Redistribution and use in source and binary forms, with or without
56  * modification, are permitted provided that the following conditions
57  * are met:
58  * 1. Redistributions of source code must retain the above copyright
59  *    notice, this list of conditions and the following disclaimer.
60  * 2. Redistributions in binary form must reproduce the above copyright
61  *    notice, this list of conditions and the following disclaimer in the
62  *    documentation and/or other materials provided with the distribution.
63  * 3. All advertising materials mentioning features or use of this software
64  *    must display the following acknowledgement:
65  *	This product includes software developed by the University of
66  *	California, Berkeley and its contributors.
67  * 4. Neither the name of the University nor the names of its contributors
68  *    may be used to endorse or promote products derived from this software
69  *    without specific prior written permission.
70  *
71  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
72  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
73  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
74  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
75  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
76  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
77  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
78  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
79  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
80  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
81  * SUCH DAMAGE.
82  *
83  * @(#)uipc_mbuf.c	8.2 (Berkeley) 1/4/94
84  * $FreeBSD: src/sys/kern/uipc_mbuf.c,v 1.51.2.24 2003/04/15 06:59:29 silby Exp $
85  * $DragonFly: src/sys/kern/uipc_mbuf.c,v 1.53 2005/11/25 17:52:53 dillon Exp $
86  */
87 
88 #include "opt_param.h"
89 #include "opt_ddb.h"
90 #include "opt_mbuf_stress_test.h"
91 #include <sys/param.h>
92 #include <sys/systm.h>
93 #include <sys/malloc.h>
94 #include <sys/mbuf.h>
95 #include <sys/kernel.h>
96 #include <sys/sysctl.h>
97 #include <sys/domain.h>
98 #include <sys/objcache.h>
99 #include <sys/protosw.h>
100 #include <sys/uio.h>
101 #include <sys/thread.h>
102 #include <sys/globaldata.h>
103 #include <sys/thread2.h>
104 
105 #include <vm/vm.h>
106 #include <vm/vm_kern.h>
107 #include <vm/vm_extern.h>
108 
109 #ifdef INVARIANTS
110 #include <machine/cpu.h>
111 #endif
112 
113 /*
114  * mbuf cluster meta-data
115  */
116 struct mbcluster {
117 	int32_t	mcl_refs;
118 	void	*mcl_data;
119 };
120 
121 static void mbinit(void *);
122 SYSINIT(mbuf, SI_SUB_MBUF, SI_ORDER_FIRST, mbinit, NULL)
123 
124 static u_long	mbtypes[MT_NTYPES];
125 
126 struct mbstat mbstat;
127 int	max_linkhdr;
128 int	max_protohdr;
129 int	max_hdr;
130 int	max_datalen;
131 int	m_defragpackets;
132 int	m_defragbytes;
133 int	m_defraguseless;
134 int	m_defragfailure;
135 #ifdef MBUF_STRESS_TEST
136 int	m_defragrandomfailures;
137 #endif
138 
139 struct objcache *mbuf_cache, *mbufphdr_cache;
140 struct objcache *mclmeta_cache;
141 struct objcache *mbufcluster_cache, *mbufphdrcluster_cache;
142 
143 int	nmbclusters;
144 int	nmbufs;
145 
146 SYSCTL_INT(_kern_ipc, KIPC_MAX_LINKHDR, max_linkhdr, CTLFLAG_RW,
147 	   &max_linkhdr, 0, "");
148 SYSCTL_INT(_kern_ipc, KIPC_MAX_PROTOHDR, max_protohdr, CTLFLAG_RW,
149 	   &max_protohdr, 0, "");
150 SYSCTL_INT(_kern_ipc, KIPC_MAX_HDR, max_hdr, CTLFLAG_RW, &max_hdr, 0, "");
151 SYSCTL_INT(_kern_ipc, KIPC_MAX_DATALEN, max_datalen, CTLFLAG_RW,
152 	   &max_datalen, 0, "");
153 SYSCTL_INT(_kern_ipc, OID_AUTO, mbuf_wait, CTLFLAG_RW,
154 	   &mbuf_wait, 0, "");
155 SYSCTL_STRUCT(_kern_ipc, KIPC_MBSTAT, mbstat, CTLFLAG_RW, &mbstat, mbstat, "");
156 SYSCTL_OPAQUE(_kern_ipc, OID_AUTO, mbtypes, CTLFLAG_RD, mbtypes,
157 	   sizeof(mbtypes), "LU", "");
158 SYSCTL_INT(_kern_ipc, KIPC_NMBCLUSTERS, nmbclusters, CTLFLAG_RW,
159 	   &nmbclusters, 0, "Maximum number of mbuf clusters available");
160 SYSCTL_INT(_kern_ipc, OID_AUTO, nmbufs, CTLFLAG_RW, &nmbufs, 0,
161 	   "Maximum number of mbufs available");
162 
163 SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragpackets, CTLFLAG_RD,
164 	   &m_defragpackets, 0, "");
165 SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragbytes, CTLFLAG_RD,
166 	   &m_defragbytes, 0, "");
167 SYSCTL_INT(_kern_ipc, OID_AUTO, m_defraguseless, CTLFLAG_RD,
168 	   &m_defraguseless, 0, "");
169 SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragfailure, CTLFLAG_RD,
170 	   &m_defragfailure, 0, "");
171 #ifdef MBUF_STRESS_TEST
172 SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragrandomfailures, CTLFLAG_RW,
173 	   &m_defragrandomfailures, 0, "");
174 #endif
175 
176 static MALLOC_DEFINE(M_MBUF, "mbuf", "mbuf");
177 static MALLOC_DEFINE(M_MBUFCL, "mbufcl", "mbufcl");
178 static MALLOC_DEFINE(M_MCLMETA, "mclmeta", "mclmeta");
179 
180 static void m_reclaim (void);
181 static void m_mclref(void *arg);
182 static void m_mclfree(void *arg);
183 
184 #ifndef NMBCLUSTERS
185 #define NMBCLUSTERS	(512 + maxusers * 16)
186 #endif
187 #ifndef NMBUFS
188 #define NMBUFS		(nmbclusters * 2)
189 #endif
190 
191 /*
192  * Perform sanity checks of tunables declared above.
193  */
194 static void
195 tunable_mbinit(void *dummy)
196 {
197 
198 	/*
199 	 * This has to be done before VM init.
200 	 */
201 	nmbclusters = NMBCLUSTERS;
202 	TUNABLE_INT_FETCH("kern.ipc.nmbclusters", &nmbclusters);
203 	nmbufs = NMBUFS;
204 	TUNABLE_INT_FETCH("kern.ipc.nmbufs", &nmbufs);
205 	/* Sanity checks */
206 	if (nmbufs < nmbclusters * 2)
207 		nmbufs = nmbclusters * 2;
208 
209 	return;
210 }
211 SYSINIT(tunable_mbinit, SI_SUB_TUNABLES, SI_ORDER_ANY, tunable_mbinit, NULL);
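
/*
 * For example, both limits can be set as loader tunables before boot
 * (a sketch; the values are illustrative only, not recommendations):
 *
 *	# /boot/loader.conf
 *	kern.ipc.nmbclusters="32768"
 *	kern.ipc.nmbufs="65536"
 *
 * tunable_mbinit() will still raise nmbufs to 2 * nmbclusters if the
 * requested value is smaller.
 */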
212 
213 /* "number of clusters of pages" */
214 #define NCL_INIT	1
215 
216 #define NMB_INIT	16
217 
218 /*
219  * The mbuf object cache only guarantees that m_next and m_nextpkt are
220  * NULL and that m_data points to the beginning of the data area.  In
221  * particular, m_len and m_pkthdr.len are uninitialized.  It is the
222  * responsibility of the caller to initialize those fields before use.
223  */
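
/*
 * For example, a caller allocating a raw mbuf must set m_len itself,
 * since the cache leaves it uninitialized (a sketch, with minimal
 * error handling):
 *
 *	struct mbuf *m;
 *
 *	m = m_get(MB_DONTWAIT, MT_DATA);
 *	if (m == NULL)
 *		return (ENOBUFS);
 *	m->m_len = 0;
 */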
224 
225 static boolean_t __inline
226 mbuf_ctor(void *obj, void *private, int ocflags)
227 {
228 	struct mbuf *m = obj;
229 
230 	m->m_next = NULL;
231 	m->m_nextpkt = NULL;
232 	m->m_data = m->m_dat;
233 	m->m_flags = 0;
234 
235 	return (TRUE);
236 }
237 
238 /*
239  * Initialize the mbuf and the packet header fields.
240  */
241 static boolean_t
242 mbufphdr_ctor(void *obj, void *private, int ocflags)
243 {
244 	struct mbuf *m = obj;
245 
246 	m->m_next = NULL;
247 	m->m_nextpkt = NULL;
248 	m->m_data = m->m_pktdat;
249 	m->m_flags = M_PKTHDR | M_PHCACHE;
250 
251 	m->m_pkthdr.rcvif = NULL;	/* eliminate XXX JH */
252 	SLIST_INIT(&m->m_pkthdr.tags);
253 	m->m_pkthdr.csum_flags = 0;	/* eliminate XXX JH */
254 	m->m_pkthdr.fw_flags = 0;	/* eliminate XXX JH */
255 
256 	return (TRUE);
257 }
258 
259 /*
260  * An mbcluster object consists of a 2K (MCLBYTES) cluster and a refcount.
261  */
262 static boolean_t
263 mclmeta_ctor(void *obj, void *private, int ocflags)
264 {
265 	struct mbcluster *cl = obj;
266 	void *buf;
267 
268 	if (ocflags & M_NOWAIT)
269 		buf = malloc(MCLBYTES, M_MBUFCL, M_NOWAIT | M_ZERO);
270 	else
271 		buf = malloc(MCLBYTES, M_MBUFCL, M_INTWAIT | M_ZERO);
272 	if (buf == NULL)
273 		return (FALSE);
274 	cl->mcl_refs = 0;
275 	cl->mcl_data = buf;
276 	return (TRUE);
277 }
278 
279 static void
280 mclmeta_dtor(void *obj, void *private)
281 {
282 	struct mbcluster *mcl = obj;
283 
284 	KKASSERT(mcl->mcl_refs == 0);
285 	free(mcl->mcl_data, M_MBUFCL);
286 }
287 
288 static void
289 linkcluster(struct mbuf *m, struct mbcluster *cl)
290 {
291 	/*
292 	 * Add the cluster to the mbuf.  The caller will detect that the
293 	 * mbuf now has an attached cluster.
294 	 */
295 	m->m_ext.ext_arg = cl;
296 	m->m_ext.ext_buf = cl->mcl_data;
297 	m->m_ext.ext_ref = m_mclref;
298 	m->m_ext.ext_free = m_mclfree;
299 	m->m_ext.ext_size = MCLBYTES;
300 	++cl->mcl_refs;
301 
302 	m->m_data = m->m_ext.ext_buf;
303 	m->m_flags |= M_EXT | M_EXT_CLUSTER;
304 }
305 
306 static boolean_t
307 mbufphdrcluster_ctor(void *obj, void *private, int ocflags)
308 {
309 	struct mbuf *m = obj;
310 	struct mbcluster *cl;
311 
312 	mbufphdr_ctor(obj, private, ocflags);
313 	cl = objcache_get(mclmeta_cache, ocflags);
314 	if (cl == NULL)
315 		return (FALSE);
316 	m->m_flags |= M_CLCACHE;
317 	linkcluster(m, cl);
318 	return (TRUE);
319 }
320 
321 static boolean_t
322 mbufcluster_ctor(void *obj, void *private, int ocflags)
323 {
324 	struct mbuf *m = obj;
325 	struct mbcluster *cl;
326 
327 	mbuf_ctor(obj, private, ocflags);
328 	cl = objcache_get(mclmeta_cache, ocflags);
329 	if (cl == NULL)
330 		return (FALSE);
331 	m->m_flags |= M_CLCACHE;
332 	linkcluster(m, cl);
333 	return (TRUE);
334 }
335 
336 /*
337  * Used for both the cluster and cluster PHDR caches.
338  *
339  * The mbuf may have lost its cluster due to sharing; deal
340  * with that situation by checking M_EXT.
341  */
342 static void
343 mbufcluster_dtor(void *obj, void *private)
344 {
345 	struct mbuf *m = obj;
346 	struct mbcluster *mcl;
347 
348 	if (m->m_flags & M_EXT) {
349 		KKASSERT((m->m_flags & M_EXT_CLUSTER) != 0);
350 		mcl = m->m_ext.ext_arg;
351 		KKASSERT(mcl->mcl_refs == 1);
352 		mcl->mcl_refs = 0;
353 		objcache_put(mclmeta_cache, mcl);
354 	}
355 }
356 
357 struct objcache_malloc_args mbuf_malloc_args = { MSIZE, M_MBUF };
358 struct objcache_malloc_args mclmeta_malloc_args =
359 	{ sizeof(struct mbcluster), M_MCLMETA };
360 
361 /* ARGSUSED*/
362 static void
363 mbinit(void *dummy)
364 {
365 	mbstat.m_msize = MSIZE;
366 	mbstat.m_mclbytes = MCLBYTES;
367 	mbstat.m_minclsize = MINCLSIZE;
368 	mbstat.m_mlen = MLEN;
369 	mbstat.m_mhlen = MHLEN;
370 
371 	mbuf_cache = objcache_create("mbuf", nmbufs, 0,
372 	    mbuf_ctor, null_dtor, NULL,
373 	    objcache_malloc_alloc, objcache_malloc_free, &mbuf_malloc_args);
374 	mbufphdr_cache = objcache_create("mbuf pkt hdr", nmbufs, 64,
375 	    mbufphdr_ctor, null_dtor, NULL,
376 	    objcache_malloc_alloc, objcache_malloc_free, &mbuf_malloc_args);
377 	mclmeta_cache = objcache_create("cluster mbuf", nmbclusters, 0,
378 	    mclmeta_ctor, mclmeta_dtor, NULL,
379 	    objcache_malloc_alloc, objcache_malloc_free, &mclmeta_malloc_args);
380 	mbufcluster_cache = objcache_create("mbuf + cluster", nmbclusters, 0,
381 	    mbufcluster_ctor, mbufcluster_dtor, NULL,
382 	    objcache_malloc_alloc, objcache_malloc_free, &mbuf_malloc_args);
383 	mbufphdrcluster_cache = objcache_create("mbuf pkt hdr + cluster",
384 	    nmbclusters, 64, mbufphdrcluster_ctor, mbufcluster_dtor, NULL,
385 	    objcache_malloc_alloc, objcache_malloc_free, &mbuf_malloc_args);
386 	return;
387 }
388 
389 /*
390  * Return the number of references to this mbuf's data.  0 is returned
391  * if the mbuf is not M_EXT, a reference count is returned if it is
392  * M_EXT | M_EXT_CLUSTER, and 99 is returned if it is a special M_EXT.
393  */
394 int
395 m_sharecount(struct mbuf *m)
396 {
397 	switch (m->m_flags & (M_EXT | M_EXT_CLUSTER)) {
398 	case 0:
399 		return (0);
400 	case M_EXT:
401 		return (99);
402 	case M_EXT | M_EXT_CLUSTER:
403 		return (((struct mbcluster *)m->m_ext.ext_arg)->mcl_refs);
404 	}
405 	/* NOTREACHED */
406 	return (0);		/* to shut up compiler */
407 }
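
/*
 * A typical use is deciding whether mbuf data may be modified in place
 * (a sketch; note that the 99 returned for a special M_EXT also fails
 * the test, which is the safe outcome):
 *
 *	int writable = !(m->m_flags & M_EXT) || m_sharecount(m) == 1;
 *
 * See m_dup() below for taking a writable copy when the test fails.
 */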
408 
409 /*
410  * Change the mbuf to a new type, adjusting the per-type statistics.
411  */
412 void
413 m_chtype(struct mbuf *m, int type)
414 {
415 	crit_enter();
416 	++mbtypes[type];
417 	--mbtypes[m->m_type];
418 	m->m_type = type;
419 	crit_exit();
420 }
421 
422 static void
423 m_reclaim(void)
424 {
425 	struct domain *dp;
426 	struct protosw *pr;
427 
428 	crit_enter();
429 	SLIST_FOREACH(dp, &domains, dom_next) {
430 		for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++) {
431 			if (pr->pr_drain)
432 				(*pr->pr_drain)();
433 		}
434 	}
435 	crit_exit();
436 	mbstat.m_drain++;
437 }
438 
439 static void __inline
440 updatestats(struct mbuf *m, int type)
441 {
442 	m->m_type = type;
443 
444 	crit_enter();
445 	++mbtypes[type];
446 	++mbstat.m_mbufs;
447 	crit_exit();
448 }
449 
450 /*
451  * Allocate an mbuf.
452  */
453 struct mbuf *
454 m_get(int how, int type)
455 {
456 	struct mbuf *m;
457 	int ntries = 0;
458 	int ocf = MBTOM(how);
459 
460 retryonce:
461 
462 	m = objcache_get(mbuf_cache, ocf);
463 
464 	if (m == NULL) {
465 		if ((how & MB_TRYWAIT) && ntries++ == 0) {
466 			struct objcache *reclaimlist[] = {
467 				mbufphdr_cache,
468 				mbufcluster_cache, mbufphdrcluster_cache
469 			};
470 			const int nreclaims = __arysize(reclaimlist);
471 
472 			if (!objcache_reclaimlist(reclaimlist, nreclaims, ocf))
473 				m_reclaim();
474 			goto retryonce;
475 		}
476 		return (NULL);
477 	}
478 
479 	updatestats(m, type);
480 	return (m);
481 }
482 
483 struct mbuf *
484 m_gethdr(int how, int type)
485 {
486 	struct mbuf *m;
487 	int ocf = MBTOM(how);
488 	int ntries = 0;
489 
490 retryonce:
491 
492 	m = objcache_get(mbufphdr_cache, ocf);
493 
494 	if (m == NULL) {
495 		if ((how & MB_TRYWAIT) && ntries++ == 0) {
496 			struct objcache *reclaimlist[] = {
497 				mbuf_cache,
498 				mbufcluster_cache, mbufphdrcluster_cache
499 			};
500 			const int nreclaims = __arysize(reclaimlist);
501 
502 			if (!objcache_reclaimlist(reclaimlist, nreclaims, ocf))
503 				m_reclaim();
504 			goto retryonce;
505 		}
506 		return (NULL);
507 	}
508 
509 	updatestats(m, type);
510 	return (m);
511 }
512 
513 /*
514  * Get an mbuf (not an mbuf cluster!) and zero it.
515  * Deprecated.
516  */
517 struct mbuf *
518 m_getclr(int how, int type)
519 {
520 	struct mbuf *m;
521 
522 	m = m_get(how, type);
523 	if (m != NULL)
524 		bzero(m->m_data, MLEN);
525 	return (m);
526 }
527 
528 /*
529  * Returns an mbuf with an attached cluster.
530  * Because many network drivers use this kind of buffer heavily, it is
531  * convenient to keep a small pool of free buffers of this kind.
532  * Even a small pool size such as 10 gives about a 10% improvement in
533  * the forwarding rate of a bridge or router.
534  */
535 struct mbuf *
536 m_getcl(int how, short type, int flags)
537 {
538 	struct mbuf *m;
539 	int ocflags = MBTOM(how);
540 	int ntries = 0;
541 
542 retryonce:
543 
544 	if (flags & M_PKTHDR)
545 		m = objcache_get(mbufphdrcluster_cache, ocflags);
546 	else
547 		m = objcache_get(mbufcluster_cache, ocflags);
548 
549 	if (m == NULL) {
550 		if ((how & MB_TRYWAIT) && ntries++ == 0) {
551 			struct objcache *reclaimlist[1];
552 
553 			if (flags & M_PKTHDR)
554 				reclaimlist[0] = mbufcluster_cache;
555 			else
556 				reclaimlist[0] = mbufphdrcluster_cache;
557 			if (!objcache_reclaimlist(reclaimlist, 1, ocflags))
558 				m_reclaim();
559 			goto retryonce;
560 		}
561 		return (NULL);
562 	}
563 
564 	m->m_type = type;
565 
566 	crit_enter();
567 	++mbtypes[type];
568 	++mbstat.m_clusters;
569 	crit_exit();
570 	return (m);
571 }
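
/*
 * Typical usage, requesting a packet header mbuf with an attached
 * cluster in one allocation (a sketch):
 *
 *	struct mbuf *m;
 *
 *	m = m_getcl(MB_DONTWAIT, MT_DATA, M_PKTHDR);
 *	if (m == NULL)
 *		return (ENOBUFS);
 *	m->m_len = m->m_pkthdr.len = MCLBYTES;
 *
 * As with m_get(), m_len and m_pkthdr.len are left uninitialized by
 * the cache and must be set by the caller.
 */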
572 
573 /*
574  * Allocate chain of requested length.
575  */
576 struct mbuf *
577 m_getc(int len, int how, int type)
578 {
579 	struct mbuf *n, *nfirst = NULL, **ntail = &nfirst;
580 	int nsize;
581 
582 	while (len > 0) {
583 		n = m_getl(len, how, type, 0, &nsize);
584 		if (n == NULL)
585 			goto failed;
586 		n->m_len = 0;
587 		*ntail = n;
588 		ntail = &n->m_next;
589 		len -= nsize;
590 	}
591 	return (nfirst);
592 
593 failed:
594 	m_freem(nfirst);
595 	return (NULL);
596 }
597 
598 /*
599  * Allocate len-worth of mbufs and/or mbuf clusters (whatever fits best)
600  * and return a pointer to the head of the allocated chain.  If m0 is
601  * non-NULL, we assume it is a single mbuf or an mbuf chain to which we
602  * want len bytes worth of mbufs and/or clusters attached; on success
603  * we simply return a pointer to m0.
604  *
605  * If we happen to fail at any point during the allocation, we will free
606  * up everything we have already allocated and return NULL.
607  *
608  * Deprecated.  Use m_getc() and m_cat() instead.
609  */
610 struct mbuf *
611 m_getm(struct mbuf *m0, int len, int how, int type)
612 {
613 	struct mbuf *nfirst;
614 
615 	nfirst = m_getc(len, how, type);
616 
617 	if (m0 != NULL) {
618 		m_last(m0)->m_next = nfirst;
619 		return (m0);
620 	}
621 
622 	return (nfirst);
623 }
624 
625 /*
626  * Add a cluster to a normal mbuf; M_EXT is set on success.
627  * Deprecated.  Use m_getcl() instead.
628  */
629 void
630 m_mclget(struct mbuf *m, int how)
631 {
632 	struct mbcluster *mcl;
633 
634 	KKASSERT((m->m_flags & M_EXT) == 0);
635 	mcl = objcache_get(mclmeta_cache, MBTOM(how));
636 	if (mcl != NULL) {
637 		linkcluster(m, mcl);
638 		crit_enter();
639 		++mbstat.m_clusters;
640 		/* leave the m_mbufs count intact for original mbuf */
641 		crit_exit();
642 	}
643 }
644 
645 static void
646 m_mclref(void *arg)
647 {
648 	struct mbcluster *mcl = arg;
649 
650 	atomic_add_int(&mcl->mcl_refs, 1);
651 }
652 
653 static void
654 m_mclfree(void *arg)
655 {
656 	struct mbcluster *mcl = arg;
657 
658 	/* XXX interrupt race.  Currently called from a critical section */
659 	if (mcl->mcl_refs > 1) {
660 		atomic_subtract_int(&mcl->mcl_refs, 1);
661 	} else {
662 		KKASSERT(mcl->mcl_refs == 1);
663 		mcl->mcl_refs = 0;
664 		objcache_put(mclmeta_cache, mcl);
665 	}
666 }
667 
668 extern void db_print_backtrace(void);
669 
670 /*
671  * Free a single mbuf and any associated external storage.  The successor,
672  * if any, is returned.
673  *
674  * We do need to check non-first mbufs for m_aux, since some existing
675  * code does not call M_PREPEND properly
676  * (for example, calls to bpf_mtap from drivers).
677  */
678 struct mbuf *
679 m_free(struct mbuf *m)
680 {
681 	struct mbuf *n;
682 
683 	KASSERT(m->m_type != MT_FREE, ("freeing free mbuf %p", m));
684 	--mbtypes[m->m_type];
685 
686 	n = m->m_next;
687 
688 	/*
689 	 * Make sure the mbuf is in constructed state before returning it
690 	 * to the objcache.
691 	 */
692 	m->m_next = NULL;
693 #ifdef notyet
694 	KKASSERT(m->m_nextpkt == NULL);
695 #else
696 	if (m->m_nextpkt != NULL) {
697 #ifdef DDB
698 		static int afewtimes = 10;
699 
700 		if (afewtimes-- > 0) {
701 			printf("mfree: m->m_nextpkt != NULL\n");
702 			db_print_backtrace();
703 		}
704 #endif
705 		m->m_nextpkt = NULL;
706 	}
707 #endif
708 	if (m->m_flags & M_PKTHDR) {
709 		m_tag_delete_chain(m);		/* eliminate XXX JH */
710 	}
711 
712 	m->m_flags &= (M_EXT | M_EXT_CLUSTER | M_CLCACHE | M_PHCACHE);
713 
714 	/*
715 	 * Clean the M_PKTHDR state so we can return the mbuf to its original
716 	 * cache.  This is based on the PHCACHE flag which tells us whether
717 	 * the mbuf was originally allocated out of a packet-header cache
718 	 * or a non-packet-header cache.
719 	 */
720 	if (m->m_flags & M_PHCACHE) {
721 		m->m_flags |= M_PKTHDR;
722 		m->m_pkthdr.rcvif = NULL;	/* eliminate XXX JH */
723 		m->m_pkthdr.csum_flags = 0;	/* eliminate XXX JH */
724 		m->m_pkthdr.fw_flags = 0;	/* eliminate XXX JH */
725 		SLIST_INIT(&m->m_pkthdr.tags);
726 	}
727 
728 	/*
729 	 * Handle remaining flags combinations.  M_CLCACHE tells us whether
730 	 * the mbuf was originally allocated from a cluster cache or not,
731 	 * and is totally separate from whether the mbuf is currently
732 	 * associated with a cluster.
733 	 */
734 	crit_enter();
735 	switch (m->m_flags & (M_CLCACHE | M_EXT | M_EXT_CLUSTER)) {
736 	case M_CLCACHE | M_EXT | M_EXT_CLUSTER:
737 		/*
738 		 * mbuf+cluster cache case.  The mbuf was allocated from the
739 		 * combined mbuf_cluster cache and can be returned to the
740 		 * cache if the cluster hasn't been shared.
741 		 */
742 		if (m_sharecount(m) == 1) {
743 			/*
744 			 * The cluster has not been shared, we can just
745 			 * reset the data pointer and return the mbuf
746 			 * to the cluster cache.  Note that the reference
747 			 * count is left intact (it is still associated with
748 			 * an mbuf).
749 			 */
750 			m->m_data = m->m_ext.ext_buf;
751 			if (m->m_flags & M_PHCACHE)
752 				objcache_put(mbufphdrcluster_cache, m);
753 			else
754 				objcache_put(mbufcluster_cache, m);
755 			--mbstat.m_clusters;
756 		} else {
757 			/*
758 			 * Hell.  Someone else has a ref on this cluster,
759 			 * we have to disconnect it which means we can't
760 			 * put it back into the mbufcluster_cache, we
761 			 * have to destroy the mbuf.
762 			 *
763 			 * Other mbuf references to the cluster will typically
764 			 * be M_EXT | M_EXT_CLUSTER but without M_CLCACHE.
765 			 *
766 			 * XXX we could try to connect another cluster to
767 			 * it.
768 			 */
769 			m->m_ext.ext_free(m->m_ext.ext_arg);
770 			m->m_flags &= ~(M_EXT | M_EXT_CLUSTER);
771 			if (m->m_flags & M_PHCACHE)
772 				objcache_dtor(mbufphdrcluster_cache, m);
773 			else
774 				objcache_dtor(mbufcluster_cache, m);
775 		}
776 		break;
777 	case M_EXT | M_EXT_CLUSTER:
778 		/*
779 		 * Normal cluster associated with an mbuf that was allocated
780 		 * from the normal mbuf pool rather than the cluster pool.
781 		 * The cluster has to be independently disassociated from the
782 		 * mbuf.
783 		 */
784 		if (m_sharecount(m) == 1)
785 			--mbstat.m_clusters;
786 		/* fall through */
787 	case M_EXT:
788 		/*
789 		 * Normal cluster association case, disconnect the cluster from
790 		 * the mbuf.  The cluster may or may not be custom.
791 		 */
792 		m->m_ext.ext_free(m->m_ext.ext_arg);
793 		m->m_flags &= ~(M_EXT | M_EXT_CLUSTER);
794 		/* fall through */
795 	case 0:
796 		/*
797 		 * return the mbuf to the mbuf cache.
798 		 */
799 		if (m->m_flags & M_PHCACHE) {
800 			m->m_data = m->m_pktdat;
801 			objcache_put(mbufphdr_cache, m);
802 		} else {
803 			m->m_data = m->m_dat;
804 			objcache_put(mbuf_cache, m);
805 		}
806 		--mbstat.m_mbufs;
807 		break;
808 	default:
809 		if (!panicstr)
810 			panic("bad mbuf flags %p %08x\n", m, m->m_flags);
811 		break;
812 	}
813 	crit_exit();
814 	return (n);
815 }
816 
817 void
818 m_freem(struct mbuf *m)
819 {
820 	crit_enter();
821 	while (m)
822 		m = m_free(m);
823 	crit_exit();
824 }
825 
826 /*
827  * mbuf utility routines
828  */
829 
830 /*
831  * Lesser-used path for M_PREPEND: allocate new mbuf to prepend to chain and
832  * copy junk along.
833  */
834 struct mbuf *
835 m_prepend(struct mbuf *m, int len, int how)
836 {
837 	struct mbuf *mn;
838 
839 	if (m->m_flags & M_PKTHDR)
840 	    mn = m_gethdr(how, m->m_type);
841 	else
842 	    mn = m_get(how, m->m_type);
843 	if (mn == NULL) {
844 		m_freem(m);
845 		return (NULL);
846 	}
847 	if (m->m_flags & M_PKTHDR)
848 		M_MOVE_PKTHDR(mn, m);
849 	mn->m_next = m;
850 	m = mn;
851 	if (len < MHLEN)
852 		MH_ALIGN(m, len);
853 	m->m_len = len;
854 	return (m);
855 }
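
/*
 * Callers normally go through the M_PREPEND() macro, which uses the
 * leading space in the first mbuf when available and only falls back
 * to this routine otherwise (a sketch; "hdrlen" is the caller's
 * header size):
 *
 *	M_PREPEND(m, hdrlen, MB_DONTWAIT);
 *	if (m == NULL)
 *		return (ENOBUFS);
 *
 * On success mtod(m, caddr_t) points at hdrlen bytes of fresh space
 * for the caller to fill in.
 */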
856 
857 /*
858  * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
859  * continuing for "len" bytes.  If len is M_COPYALL, copy to end of mbuf.
860  * The wait parameter is a choice of MB_WAIT/MB_DONTWAIT from caller.
861  * Note that the copy is read-only, because clusters are not copied,
862  * only their reference counts are incremented.
863  */
864 struct mbuf *
865 m_copym(const struct mbuf *m, int off0, int len, int wait)
866 {
867 	struct mbuf *n, **np;
868 	int off = off0;
869 	struct mbuf *top;
870 	int copyhdr = 0;
871 
872 	KASSERT(off >= 0, ("m_copym, negative off %d", off));
873 	KASSERT(len >= 0, ("m_copym, negative len %d", len));
874 	if (off == 0 && m->m_flags & M_PKTHDR)
875 		copyhdr = 1;
876 	while (off > 0) {
877 		KASSERT(m != NULL, ("m_copym, offset > size of mbuf chain"));
878 		if (off < m->m_len)
879 			break;
880 		off -= m->m_len;
881 		m = m->m_next;
882 	}
883 	np = &top;
884 	top = NULL;
885 	while (len > 0) {
886 		if (m == NULL) {
887 			KASSERT(len == M_COPYALL,
888 			    ("m_copym, length > size of mbuf chain"));
889 			break;
890 		}
891 		/*
892 		 * Because we are sharing any cluster attachment below,
893 		 * be sure to get an mbuf that does not have a cluster
894 		 * associated with it.
895 		 */
896 		if (copyhdr)
897 			n = m_gethdr(wait, m->m_type);
898 		else
899 			n = m_get(wait, m->m_type);
900 		*np = n;
901 		if (n == NULL)
902 			goto nospace;
903 		if (copyhdr) {
904 			if (!m_dup_pkthdr(n, m, wait))
905 				goto nospace;
906 			if (len == M_COPYALL)
907 				n->m_pkthdr.len -= off0;
908 			else
909 				n->m_pkthdr.len = len;
910 			copyhdr = 0;
911 		}
912 		n->m_len = min(len, m->m_len - off);
913 		if (m->m_flags & M_EXT) {
914 			KKASSERT((n->m_flags & M_EXT) == 0);
915 			n->m_data = m->m_data + off;
916 			m->m_ext.ext_ref(m->m_ext.ext_arg);
917 			n->m_ext = m->m_ext;
918 			n->m_flags |= m->m_flags & (M_EXT | M_EXT_CLUSTER);
919 		} else {
920 			bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
921 			    (unsigned)n->m_len);
922 		}
923 		if (len != M_COPYALL)
924 			len -= n->m_len;
925 		off = 0;
926 		m = m->m_next;
927 		np = &n->m_next;
928 	}
929 	if (top == NULL)
930 		mbstat.m_mcfail++;
931 	return (top);
932 nospace:
933 	m_freem(top);
934 	mbstat.m_mcfail++;
935 	return (NULL);
936 }
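
/*
 * Example (a sketch): take a read-only reference copy of an entire
 * packet, e.g. for retransmission, leaving the original untouched:
 *
 *	struct mbuf *n;
 *
 *	n = m_copym(m, 0, M_COPYALL, MB_DONTWAIT);
 *	if (n == NULL)
 *		return (ENOBUFS);
 *
 * Because any clusters are shared, n must not be written to; use
 * m_dup() when a writable copy is required.
 */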
937 
938 /*
939  * Copy an entire packet, including header (which must be present).
940  * An optimization of the common case `m_copym(m, 0, M_COPYALL, how)'.
941  * Note that the copy is read-only, because clusters are not copied,
942  * only their reference counts are incremented.
943  * Preserve alignment of the first mbuf so if the creator has left
944  * some room at the beginning (e.g. for inserting protocol headers)
945  * the copies also have the room available.
946  */
947 struct mbuf *
948 m_copypacket(struct mbuf *m, int how)
949 {
950 	struct mbuf *top, *n, *o;
951 
952 	n = m_gethdr(how, m->m_type);
953 	top = n;
954 	if (!n)
955 		goto nospace;
956 
957 	if (!m_dup_pkthdr(n, m, how))
958 		goto nospace;
959 	n->m_len = m->m_len;
960 	if (m->m_flags & M_EXT) {
961 		KKASSERT((n->m_flags & M_EXT) == 0);
962 		n->m_data = m->m_data;
963 		m->m_ext.ext_ref(m->m_ext.ext_arg);
964 		n->m_ext = m->m_ext;
965 		n->m_flags |= m->m_flags & (M_EXT | M_EXT_CLUSTER);
966 	} else {
967 		n->m_data = n->m_pktdat + (m->m_data - m->m_pktdat);
968 		bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
969 	}
970 
971 	m = m->m_next;
972 	while (m) {
973 		o = m_get(how, m->m_type);
974 		if (!o)
975 			goto nospace;
976 
977 		n->m_next = o;
978 		n = n->m_next;
979 
980 		n->m_len = m->m_len;
981 		if (m->m_flags & M_EXT) {
982 			KKASSERT((n->m_flags & M_EXT) == 0);
983 			n->m_data = m->m_data;
984 			m->m_ext.ext_ref(m->m_ext.ext_arg);
985 			n->m_ext = m->m_ext;
986 			n->m_flags |= m->m_flags & (M_EXT | M_EXT_CLUSTER);
987 		} else {
988 			bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
989 		}
990 
991 		m = m->m_next;
992 	}
993 	return top;
994 nospace:
995 	m_freem(top);
996 	mbstat.m_mcfail++;
997 	return (NULL);
998 }
999 
1000 /*
1001  * Copy data from an mbuf chain starting "off" bytes from the beginning,
1002  * continuing for "len" bytes, into the indicated buffer.
1003  */
1004 void
1005 m_copydata(const struct mbuf *m, int off, int len, caddr_t cp)
1006 {
1007 	unsigned count;
1008 
1009 	KASSERT(off >= 0, ("m_copydata, negative off %d", off));
1010 	KASSERT(len >= 0, ("m_copydata, negative len %d", len));
1011 	while (off > 0) {
1012 		KASSERT(m != NULL, ("m_copydata, offset > size of mbuf chain"));
1013 		if (off < m->m_len)
1014 			break;
1015 		off -= m->m_len;
1016 		m = m->m_next;
1017 	}
1018 	while (len > 0) {
1019 		KASSERT(m != NULL, ("m_copydata, length > size of mbuf chain"));
1020 		count = min(m->m_len - off, len);
1021 		bcopy(mtod(m, caddr_t) + off, cp, count);
1022 		len -= count;
1023 		cp += count;
1024 		off = 0;
1025 		m = m->m_next;
1026 	}
1027 }
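
/*
 * Example (a sketch): linearize a small header into a local buffer
 * without touching the chain; struct ip is assumed to come from
 * <netinet/ip.h>, and the caller must have verified that the chain
 * holds at least that many bytes:
 *
 *	struct ip iph;
 *
 *	m_copydata(m, 0, sizeof(iph), (caddr_t)&iph);
 */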
1028 
1029 /*
1030  * Copy a packet header mbuf chain into a completely new chain, including
1031  * copying any mbuf clusters.  Use this instead of m_copypacket() when
1032  * you need a writable copy of an mbuf chain.
1033  */
1034 struct mbuf *
1035 m_dup(struct mbuf *m, int how)
1036 {
1037 	struct mbuf **p, *top = NULL;
1038 	int remain, moff, nsize;
1039 
1040 	/* Sanity check */
1041 	if (m == NULL)
1042 		return (NULL);
1043 	KASSERT((m->m_flags & M_PKTHDR) != 0, ("%s: !PKTHDR", __func__));
1044 
1045 	/* While there's more data, get a new mbuf, tack it on, and fill it */
1046 	remain = m->m_pkthdr.len;
1047 	moff = 0;
1048 	p = &top;
1049 	while (remain > 0 || top == NULL) {	/* allow m->m_pkthdr.len == 0 */
1050 		struct mbuf *n;
1051 
1052 		/* Get the next new mbuf */
1053 		n = m_getl(remain, how, m->m_type, top == NULL ? M_PKTHDR : 0,
1054 			   &nsize);
1055 		if (n == NULL)
1056 			goto nospace;
1057 		if (top == NULL)
1058 			if (!m_dup_pkthdr(n, m, how))
1059 				goto nospace0;
1060 
1061 		/* Link it into the new chain */
1062 		*p = n;
1063 		p = &n->m_next;
1064 
1065 		/* Copy data from original mbuf(s) into new mbuf */
1066 		n->m_len = 0;
1067 		while (n->m_len < nsize && m != NULL) {
1068 			int chunk = min(nsize - n->m_len, m->m_len - moff);
1069 
1070 			bcopy(m->m_data + moff, n->m_data + n->m_len, chunk);
1071 			moff += chunk;
1072 			n->m_len += chunk;
1073 			remain -= chunk;
1074 			if (moff == m->m_len) {
1075 				m = m->m_next;
1076 				moff = 0;
1077 			}
1078 		}
1079 
1080 		/* Check correct total mbuf length */
1081 		KASSERT((remain > 0 && m != NULL) || (remain == 0 && m == NULL),
1082 			("%s: bogus m_pkthdr.len", __func__));
1083 	}
1084 	return (top);
1085 
1086 nospace:
1087 	m_freem(top);
1088 nospace0:
1089 	mbstat.m_mcfail++;
1090 	return (NULL);
1091 }
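
/*
 * Example (a sketch): obtain a writable copy and release the original,
 * the usual pattern when a chain may be cluster-shared:
 *
 *	struct mbuf *w;
 *
 *	w = m_dup(m, MB_DONTWAIT);
 *	if (w == NULL)
 *		return (ENOBUFS);
 *	m_freem(m);
 *	m = w;
 */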
1092 
1093 /*
1094  * Concatenate mbuf chain n to m.
1095  * Both chains must be of the same type (e.g. MT_DATA).
1096  * Any m_pkthdr is not updated.
1097  */
1098 void
1099 m_cat(struct mbuf *m, struct mbuf *n)
1100 {
1101 	m = m_last(m);
1102 	while (n) {
1103 		if (m->m_flags & M_EXT ||
1104 		    m->m_data + m->m_len + n->m_len >= &m->m_dat[MLEN]) {
1105 			/* just join the two chains */
1106 			m->m_next = n;
1107 			return;
1108 		}
1109 		/* splat the data from one into the other */
1110 		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
1111 		    (u_int)n->m_len);
1112 		m->m_len += n->m_len;
1113 		n = m_free(n);
1114 	}
1115 }
1116 
1117 void
1118 m_adj(struct mbuf *mp, int req_len)
1119 {
1120 	int len = req_len;
1121 	struct mbuf *m;
1122 	int count;
1123 
1124 	if ((m = mp) == NULL)
1125 		return;
1126 	if (len >= 0) {
1127 		/*
1128 		 * Trim from head.
1129 		 */
1130 		while (m != NULL && len > 0) {
1131 			if (m->m_len <= len) {
1132 				len -= m->m_len;
1133 				m->m_len = 0;
1134 				m = m->m_next;
1135 			} else {
1136 				m->m_len -= len;
1137 				m->m_data += len;
1138 				len = 0;
1139 			}
1140 		}
1141 		m = mp;
1142 		if (mp->m_flags & M_PKTHDR)
1143 			m->m_pkthdr.len -= (req_len - len);
1144 	} else {
1145 		/*
1146 		 * Trim from tail.  Scan the mbuf chain,
1147 		 * calculating its length and finding the last mbuf.
1148 		 * If the adjustment only affects this mbuf, then just
1149 		 * adjust and return.  Otherwise, rescan and truncate
1150 		 * after the remaining size.
1151 		 */
1152 		len = -len;
1153 		count = 0;
1154 		for (;;) {
1155 			count += m->m_len;
1156 			if (m->m_next == NULL)
1157 				break;
1158 			m = m->m_next;
1159 		}
1160 		if (m->m_len >= len) {
1161 			m->m_len -= len;
1162 			if (mp->m_flags & M_PKTHDR)
1163 				mp->m_pkthdr.len -= len;
1164 			return;
1165 		}
1166 		count -= len;
1167 		if (count < 0)
1168 			count = 0;
1169 		/*
1170 		 * Correct length for chain is "count".
1171 		 * Find the mbuf with last data, adjust its length,
1172 		 * and toss data from remaining mbufs on chain.
1173 		 */
1174 		m = mp;
1175 		if (m->m_flags & M_PKTHDR)
1176 			m->m_pkthdr.len = count;
1177 		for (; m; m = m->m_next) {
1178 			if (m->m_len >= count) {
1179 				m->m_len = count;
1180 				break;
1181 			}
1182 			count -= m->m_len;
1183 		}
1184 		while (m->m_next)
1185 			(m = m->m_next)->m_len = 0;
1186 	}
1187 }
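
/*
 * Example (a sketch): strip a link-level header from the front of a
 * packet and a 4-byte trailer from the end; ETHER_HDR_LEN is assumed
 * to come from <net/ethernet.h>.  Positive lengths trim from the
 * head, negative lengths from the tail:
 *
 *	m_adj(m, ETHER_HDR_LEN);
 *	m_adj(m, -4);
 */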
1188 
1189 /*
1190  * Rearrange an mbuf chain so that len bytes are contiguous
1191  * and in the data area of an mbuf (so that mtod will work for a structure
1192  * of size len).  Returns the resulting mbuf chain on success; frees the
1193  * chain and returns NULL on failure.  If there is room, it will add up to
1194  * max_protohdr-len extra bytes to the contiguous region in an attempt to
1195  * avoid being called next time.
1196  */
1197 struct mbuf *
1198 m_pullup(struct mbuf *n, int len)
1199 {
1200 	struct mbuf *m;
1201 	int count;
1202 	int space;
1203 
1204 	/*
1205 	 * If first mbuf has no cluster, and has room for len bytes
1206 	 * without shifting current data, pullup into it,
1207 	 * otherwise allocate a new mbuf to prepend to the chain.
1208 	 */
1209 	if (!(n->m_flags & M_EXT) &&
1210 	    n->m_data + len < &n->m_dat[MLEN] &&
1211 	    n->m_next) {
1212 		if (n->m_len >= len)
1213 			return (n);
1214 		m = n;
1215 		n = n->m_next;
1216 		len -= m->m_len;
1217 	} else {
1218 		if (len > MHLEN)
1219 			goto bad;
1220 		if (n->m_flags & M_PKTHDR)
1221 			m = m_gethdr(MB_DONTWAIT, n->m_type);
1222 		else
1223 			m = m_get(MB_DONTWAIT, n->m_type);
1224 		if (m == NULL)
1225 			goto bad;
1226 		m->m_len = 0;
1227 		if (n->m_flags & M_PKTHDR)
1228 			M_MOVE_PKTHDR(m, n);
1229 	}
1230 	space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
1231 	do {
1232 		count = min(min(max(len, max_protohdr), space), n->m_len);
1233 		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
1234 		  (unsigned)count);
1235 		len -= count;
1236 		m->m_len += count;
1237 		n->m_len -= count;
1238 		space -= count;
1239 		if (n->m_len)
1240 			n->m_data += count;
1241 		else
1242 			n = m_free(n);
1243 	} while (len > 0 && n);
1244 	if (len > 0) {
1245 		m_free(m);
1246 		goto bad;
1247 	}
1248 	m->m_next = n;
1249 	return (m);
1250 bad:
1251 	m_freem(n);
1252 	mbstat.m_mpfail++;
1253 	return (NULL);
1254 }
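
/*
 * The canonical use is making a protocol header contiguous before
 * casting with mtod() (a sketch; struct ip is assumed to come from
 * <netinet/ip.h>):
 *
 *	if (m->m_len < sizeof(struct ip) &&
 *	    (m = m_pullup(m, sizeof(struct ip))) == NULL)
 *		return;
 *	ip = mtod(m, struct ip *);
 *
 * Remember that on failure the chain has already been freed.
 */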
1255 
1256 /*
1257  * Partition an mbuf chain in two pieces, returning the tail --
1258  * all but the first len0 bytes.  In case of failure, it returns NULL and
1259  * attempts to restore the chain to its original state.
1260  *
1261  * Note that the resulting mbufs might be read-only, because the new
1262  * mbuf can end up sharing an mbuf cluster with the original mbuf if
1263  * the "breaking point" happens to lie within a cluster mbuf. Use the
1264  * M_WRITABLE() macro to check for this case.
1265  */
1266 struct mbuf *
1267 m_split(struct mbuf *m0, int len0, int wait)
1268 {
1269 	struct mbuf *m, *n;
1270 	unsigned len = len0, remain;
1271 
1272 	for (m = m0; m && len > m->m_len; m = m->m_next)
1273 		len -= m->m_len;
1274 	if (m == NULL)
1275 		return (NULL);
1276 	remain = m->m_len - len;
1277 	if (m0->m_flags & M_PKTHDR) {
1278 		n = m_gethdr(wait, m0->m_type);
1279 		if (n == NULL)
1280 			return (NULL);
1281 		n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
1282 		n->m_pkthdr.len = m0->m_pkthdr.len - len0;
1283 		m0->m_pkthdr.len = len0;
1284 		if (m->m_flags & M_EXT)
1285 			goto extpacket;
1286 		if (remain > MHLEN) {
1287 			/* m can't be the lead packet */
1288 			MH_ALIGN(n, 0);
1289 			n->m_next = m_split(m, len, wait);
1290 			if (n->m_next == NULL) {
1291 				m_free(n);
1292 				return (NULL);
1293 			} else {
1294 				n->m_len = 0;
1295 				return (n);
1296 			}
1297 		} else
1298 			MH_ALIGN(n, remain);
1299 	} else if (remain == 0) {
1300 		n = m->m_next;
1301 		m->m_next = NULL;
1302 		return (n);
1303 	} else {
1304 		n = m_get(wait, m->m_type);
1305 		if (n == NULL)
1306 			return (NULL);
1307 		M_ALIGN(n, remain);
1308 	}
1309 extpacket:
1310 	if (m->m_flags & M_EXT) {
1311 		KKASSERT((n->m_flags & M_EXT) == 0);
1312 		n->m_data = m->m_data + len;
1313 		m->m_ext.ext_ref(m->m_ext.ext_arg);
1314 		n->m_ext = m->m_ext;
1315 		n->m_flags |= m->m_flags & (M_EXT | M_EXT_CLUSTER);
1316 	} else {
1317 		bcopy(mtod(m, caddr_t) + len, mtod(n, caddr_t), remain);
1318 	}
1319 	n->m_len = remain;
1320 	m->m_len = len;
1321 	n->m_next = m->m_next;
1322 	m->m_next = NULL;
1323 	return (n);
1324 }
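
/*
 * Example (a sketch): split a record off the front of a chain,
 * keeping the first "reclen" bytes in m and taking the remainder:
 *
 *	struct mbuf *tail;
 *
 *	tail = m_split(m, reclen, MB_DONTWAIT);
 *	if (tail == NULL)
 *		return (ENOBUFS);
 *
 * On failure the original chain is left intact.  On success the two
 * pieces may share a cluster, so check M_WRITABLE() before modifying
 * either piece in place.
 */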
1325 
1326 /*
1327  * Routine to copy from device local memory into mbufs.
1328  * Note: "offset" is ill-defined and is always passed as 0, so ignore it.
1329  */
1330 struct mbuf *
1331 m_devget(char *buf, int len, int offset, struct ifnet *ifp,
1332     void (*copy)(volatile const void *from, volatile void *to, size_t length))
1333 {
1334 	struct mbuf *m, *mfirst = NULL, **mtail;
1335 	int nsize, flags;
1336 
1337 	if (copy == NULL)
1338 		copy = bcopy;
1339 	mtail = &mfirst;
1340 	flags = M_PKTHDR;
1341 
1342 	while (len > 0) {
1343 		m = m_getl(len, MB_DONTWAIT, MT_DATA, flags, &nsize);
1344 		if (m == NULL) {
1345 			m_freem(mfirst);
1346 			return (NULL);
1347 		}
1348 		m->m_len = min(len, nsize);
1349 
1350 		if (flags & M_PKTHDR) {
1351 			if (len + max_linkhdr <= nsize)
1352 				m->m_data += max_linkhdr;
1353 			m->m_pkthdr.rcvif = ifp;
1354 			m->m_pkthdr.len = len;
1355 			flags = 0;
1356 		}
1357 
1358 		copy(buf, m->m_data, (unsigned)m->m_len);
1359 		buf += m->m_len;
1360 		len -= m->m_len;
1361 		*mtail = m;
1362 		mtail = &m->m_next;
1363 	}
1364 
1365 	return (mfirst);
1366 }
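
/*
 * Example (a sketch): a receive path copying a frame out of device
 * local memory; "sc", "rx_buf" and "framelen" are hypothetical
 * driver state:
 *
 *	m = m_devget(sc->rx_buf, framelen, 0, ifp, NULL);
 *	if (m == NULL) {
 *		ifp->if_ierrors++;
 *		return;
 *	}
 *
 * Passing NULL for the copy function selects plain bcopy().
 */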
1367 
1368 /*
1369  * Copy data from a buffer back into the indicated mbuf chain,
1370  * starting "off" bytes from the beginning, extending the mbuf
1371  * chain if necessary.
1372  */
1373 void
1374 m_copyback(struct mbuf *m0, int off, int len, caddr_t cp)
1375 {
1376 	int mlen;
1377 	struct mbuf *m = m0, *n;
1378 	int totlen = 0;
1379 
1380 	if (m0 == NULL)
1381 		return;
1382 	while (off > (mlen = m->m_len)) {
1383 		off -= mlen;
1384 		totlen += mlen;
1385 		if (m->m_next == NULL) {
1386 			n = m_getclr(MB_DONTWAIT, m->m_type);
1387 			if (n == NULL)
1388 				goto out;
1389 			n->m_len = min(MLEN, len + off);
1390 			m->m_next = n;
1391 		}
1392 		m = m->m_next;
1393 	}
1394 	while (len > 0) {
1395 		mlen = min(m->m_len - off, len);
1396 		bcopy(cp, off + mtod(m, caddr_t), (unsigned)mlen);
1397 		cp += mlen;
1398 		len -= mlen;
1399 		mlen += off;
1400 		off = 0;
1401 		totlen += mlen;
1402 		if (len == 0)
1403 			break;
1404 		if (m->m_next == NULL) {
1405 			n = m_get(MB_DONTWAIT, m->m_type);
1406 			if (n == NULL)
1407 				break;
1408 			n->m_len = min(MLEN, len);
1409 			m->m_next = n;
1410 		}
1411 		m = m->m_next;
1412 	}
1413 out:	if (((m = m0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen))
1414 		m->m_pkthdr.len = totlen;
1415 }
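
/*
 * Example (a sketch): overwrite a 16-bit field at offset "off" in an
 * existing packet:
 *
 *	u_int16_t v = htons(newval);
 *
 *	m_copyback(m, off, sizeof(v), (caddr_t)&v);
 *
 * Note that the MB_DONTWAIT allocations used to extend the chain can
 * fail silently, so callers that rely on extension should verify
 * m_pkthdr.len afterwards.
 */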
1416 
1417 void
1418 m_print(const struct mbuf *m)
1419 {
1420 	int len;
1421 	const struct mbuf *m2;
1422 
1423 	len = m->m_pkthdr.len;
1424 	m2 = m;
1425 	while (len) {
1426 		printf("%p %*D\n", m2, m2->m_len, (u_char *)m2->m_data, "-");
1427 		len -= m2->m_len;
1428 		m2 = m2->m_next;
1429 	}
1430 	return;
1431 }
1432 
1433 /*
1434  * "Move" mbuf pkthdr from "from" to "to".
1435  * "from" must have M_PKTHDR set, and "to" must be empty.
1436  */
1437 void
1438 m_move_pkthdr(struct mbuf *to, struct mbuf *from)
1439 {
1440 	KASSERT((to->m_flags & M_PKTHDR), ("m_move_pkthdr: not packet header"));
1441 
1442 	to->m_flags |= from->m_flags & M_COPYFLAGS;
1443 	to->m_pkthdr = from->m_pkthdr;		/* especially tags */
1444 	SLIST_INIT(&from->m_pkthdr.tags);	/* purge tags from src */
1445 }
1446 
1447 /*
1448  * Duplicate "from"'s mbuf pkthdr in "to".
1449  * "from" must have M_PKTHDR set, and "to" must be empty.
1450  * In particular, this does a deep copy of the packet tags.
1451  */
1452 int
1453 m_dup_pkthdr(struct mbuf *to, const struct mbuf *from, int how)
1454 {
1455 	KASSERT((to->m_flags & M_PKTHDR), ("m_dup_pkthdr: not packet header"));
1456 
1457 	to->m_flags = (from->m_flags & M_COPYFLAGS) |
1458 		      (to->m_flags & ~M_COPYFLAGS);
1459 	to->m_pkthdr = from->m_pkthdr;
1460 	SLIST_INIT(&to->m_pkthdr.tags);
1461 	return (m_tag_copy_chain(to, from, how));
1462 }
1463 
1464 /*
1465  * Defragment an mbuf chain, returning the shortest possible
1466  * chain of mbufs and clusters.  If allocation fails and
1467  * this cannot be completed, NULL will be returned, but
1468  * the passed in chain will be unchanged.  Upon success,
1469  * the original chain will be freed, and the new chain
1470  * will be returned.
1471  *
1472  * If a non-packet-header mbuf is passed in, the original
1473  * mbuf chain will be returned unharmed.
1474  *
1475  * m_defrag_nofree doesn't free the passed in mbuf.
1476  */
1477 struct mbuf *
1478 m_defrag(struct mbuf *m0, int how)
1479 {
1480 	struct mbuf *m_new;
1481 
1482 	if ((m_new = m_defrag_nofree(m0, how)) == NULL)
1483 		return (NULL);
1484 	if (m_new != m0)
1485 		m_freem(m0);
1486 	return (m_new);
1487 }
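
/*
 * Example (a sketch): a transmit path collapsing a long chain before
 * DMA mapping; the segment-count test is hypothetical:
 *
 *	if (too_many_segments(m)) {
 *		struct mbuf *d;
 *
 *		d = m_defrag(m, MB_DONTWAIT);
 *		if (d == NULL) {
 *			m_freem(m);
 *			return (ENOBUFS);
 *		}
 *		m = d;
 *	}
 *
 * Note that m_defrag() frees the original chain only on success; on
 * failure the caller still owns it.
 */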
1488 
1489 struct mbuf *
1490 m_defrag_nofree(struct mbuf *m0, int how)
1491 {
1492 	struct mbuf	*m_new = NULL, *m_final = NULL;
1493 	int		progress = 0, length, nsize;
1494 
1495 	if (!(m0->m_flags & M_PKTHDR))
1496 		return (m0);
1497 
1498 #ifdef MBUF_STRESS_TEST
1499 	if (m_defragrandomfailures) {
1500 		int temp = arc4random() & 0xff;
1501 		if (temp == 0xba)
1502 			goto nospace;
1503 	}
1504 #endif
1505 
1506 	m_final = m_getl(m0->m_pkthdr.len, how, MT_DATA, M_PKTHDR, &nsize);
1507 	if (m_final == NULL)
1508 		goto nospace;
1509 	m_final->m_len = 0;	/* in case m0->m_pkthdr.len is zero */
1510 
1511 	if (!m_dup_pkthdr(m_final, m0, how))
1512 		goto nospace;
1513 
1514 	m_new = m_final;
1515 
1516 	while (progress < m0->m_pkthdr.len) {
1517 		length = m0->m_pkthdr.len - progress;
1518 		if (length > MCLBYTES)
1519 			length = MCLBYTES;
1520 
1521 		if (m_new == NULL) {
1522 			m_new = m_getl(length, how, MT_DATA, 0, &nsize);
1523 			if (m_new == NULL)
1524 				goto nospace;
1525 		}
1526 
1527 		m_copydata(m0, progress, length, mtod(m_new, caddr_t));
1528 		progress += length;
1529 		m_new->m_len = length;
1530 		if (m_new != m_final)
1531 			m_cat(m_final, m_new);
1532 		m_new = NULL;
1533 	}
1534 	if (m0->m_next == NULL)
1535 		m_defraguseless++;
1536 	m_defragpackets++;
1537 	m_defragbytes += m_final->m_pkthdr.len;
1538 	return (m_final);
1539 nospace:
1540 	m_defragfailure++;
1541 	if (m_new)
1542 		m_free(m_new);
1543 	m_freem(m_final);
1544 	return (NULL);
1545 }
1546 
1547 /*
1548  * Move data from uio into mbufs.
1549  */
1550 struct mbuf *
1551 m_uiomove(struct uio *uio)
1552 {
1553 	struct mbuf *m;			/* current working mbuf */
1554 	struct mbuf *head = NULL;	/* result mbuf chain */
1555 	struct mbuf **mp = &head;
1556 	int resid = uio->uio_resid, nsize, flags = M_PKTHDR, error;
1557 
1558 	do {
1559 		m = m_getl(resid, MB_WAIT, MT_DATA, flags, &nsize);
1560 		if (flags) {
1561 			m->m_pkthdr.len = 0;
1562 			/* Leave room for protocol headers. */
1563 			if (resid < MHLEN)
1564 				MH_ALIGN(m, resid);
1565 			flags = 0;
1566 		}
1567 		m->m_len = min(nsize, resid);
1568 		error = uiomove(mtod(m, caddr_t), m->m_len, uio);
1569 		if (error) {
1570 			m_free(m);
1571 			goto failed;
1572 		}
1573 		*mp = m;
1574 		mp = &m->m_next;
1575 		head->m_pkthdr.len += m->m_len;
1576 		resid -= m->m_len;
1577 	} while (resid > 0);
1578 
1579 	return (head);
1580 
1581 failed:
1582 	m_freem(head);
1583 	return (NULL);
1584 }
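
/*
 * Example (a sketch): turning a write(2)-style request into a packet
 * chain, with "uio" supplied by the caller (e.g. a sosend() path):
 *
 *	struct mbuf *m;
 *
 *	m = m_uiomove(uio);
 *	if (m == NULL)
 *		return (ENOBUFS);
 *
 * On success the result is a single packet whose m_pkthdr.len matches
 * the number of bytes consumed from the uio.
 */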
1585 
1586 struct mbuf *
1587 m_last(struct mbuf *m)
1588 {
1589 	while (m->m_next)
1590 		m = m->m_next;
1591 	return (m);
1592 }
1593 
1594 /*
1595  * Return the number of bytes in an mbuf chain.
1596  * If lastm is not NULL, also return the last mbuf.
1597  */
1598 u_int
1599 m_lengthm(struct mbuf *m, struct mbuf **lastm)
1600 {
1601 	u_int len = 0;
1602 	struct mbuf *prev = m;
1603 
1604 	while (m) {
1605 		len += m->m_len;
1606 		prev = m;
1607 		m = m->m_next;
1608 	}
1609 	if (lastm != NULL)
1610 		*lastm = prev;
1611 	return (len);
1612 }
1613 
1614 /*
1615  * Like m_lengthm(), except also keep track of mbuf usage.
1616  */
1617 u_int
1618 m_countm(struct mbuf *m, struct mbuf **lastm, u_int *pmbcnt)
1619 {
1620 	u_int len = 0, mbcnt = 0;
1621 	struct mbuf *prev = m;
1622 
1623 	while (m) {
1624 		len += m->m_len;
1625 		mbcnt += MSIZE;
1626 		if (m->m_flags & M_EXT)
1627 			mbcnt += m->m_ext.ext_size;
1628 		prev = m;
1629 		m = m->m_next;
1630 	}
1631 	if (lastm != NULL)
1632 		*lastm = prev;
1633 	*pmbcnt = mbcnt;
1634 	return (len);
1635 }
1636