xref: /netbsd-src/sys/kern/subr_pool.c (revision a536ee5124e62c9a0051a252f7833dc8f50f44c9)
1 /*	$NetBSD: subr_pool.c,v 1.198 2012/08/28 15:52:19 christos Exp $	*/
2 
3 /*-
4  * Copyright (c) 1997, 1999, 2000, 2002, 2007, 2008, 2010
5  *     The NetBSD Foundation, Inc.
6  * All rights reserved.
7  *
8  * This code is derived from software contributed to The NetBSD Foundation
9  * by Paul Kranenburg; by Jason R. Thorpe of the Numerical Aerospace
10  * Simulation Facility, NASA Ames Research Center, and by Andrew Doran.
11  *
12  * Redistribution and use in source and binary forms, with or without
13  * modification, are permitted provided that the following conditions
14  * are met:
15  * 1. Redistributions of source code must retain the above copyright
16  *    notice, this list of conditions and the following disclaimer.
17  * 2. Redistributions in binary form must reproduce the above copyright
18  *    notice, this list of conditions and the following disclaimer in the
19  *    documentation and/or other materials provided with the distribution.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
22  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
23  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
24  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
25  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31  * POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include <sys/cdefs.h>
35 __KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.198 2012/08/28 15:52:19 christos Exp $");
36 
37 #include "opt_ddb.h"
38 #include "opt_lockdebug.h"
39 
40 #include <sys/param.h>
41 #include <sys/systm.h>
42 #include <sys/bitops.h>
43 #include <sys/proc.h>
44 #include <sys/errno.h>
45 #include <sys/kernel.h>
46 #include <sys/vmem.h>
47 #include <sys/pool.h>
48 #include <sys/syslog.h>
49 #include <sys/debug.h>
50 #include <sys/lockdebug.h>
51 #include <sys/xcall.h>
52 #include <sys/cpu.h>
53 #include <sys/atomic.h>
54 
55 #include <uvm/uvm_extern.h>
56 
57 /*
58  * Pool resource management utility.
59  *
60  * Memory is allocated in pages which are split into pieces according to
61  * the pool item size. Each page is kept on one of three lists in the
62  * pool structure: `pr_emptypages', `pr_fullpages' and `pr_partpages',
63  * for empty, full and partially-full pages respectively. The individual
64  * pool items are on a linked list headed by `ph_itemlist' in each page
65  * header. The memory for building the page list is either taken from
66  * the allocated pages themselves (for small pool items) or taken from
67  * an internal pool of page headers (`phpool').
68  */
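
/*
 * Minimal usage sketch (illustrative only; the "foo" names are
 * hypothetical and not part of this file):
 *
 *	static struct pool foo_pool;
 *
 *	pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0,
 *	    "foopl", NULL, IPL_NONE);
 *
 *	struct foo *f = pool_get(&foo_pool, PR_WAITOK);
 *	...
 *	pool_put(&foo_pool, f);
 *	pool_destroy(&foo_pool);
 */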
69 
70 /* List of all pools */
71 static TAILQ_HEAD(, pool) pool_head = TAILQ_HEAD_INITIALIZER(pool_head);
72 
73 /* Private pool for page header structures */
74 #define	PHPOOL_MAX	8
75 static struct pool phpool[PHPOOL_MAX];
76 #define	PHPOOL_FREELIST_NELEM(idx) \
77 	(((idx) == 0) ? 0 : BITMAP_SIZE * (1 << (idx)))
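
/*
 * Worked example (illustrative): with 32-bit bitmap words
 * (BITMAP_SIZE == 32), idx 0 selects the list-based page header, and
 * idx 1..7 cover 64, 128, 256, ..., 4096 items per page.
 */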
78 
79 #ifdef POOL_SUBPAGE
80 /* Pool of subpages for use by normal pools. */
81 static struct pool psppool;
82 #endif
83 
84 static void *pool_page_alloc_meta(struct pool *, int);
85 static void pool_page_free_meta(struct pool *, void *);
86 
87 /* allocator for pool metadata */
88 struct pool_allocator pool_allocator_meta = {
89 	.pa_alloc = pool_page_alloc_meta,
90 	.pa_free = pool_page_free_meta,
91 	.pa_pagesz = 0
92 };
93 
94 /* # of seconds to retain an idle page after last use */
95 int pool_inactive_time = 10;
96 
97 /* Next candidate for drainage (see pool_drain()) */
98 static struct pool	*drainpp;
99 
100 /* This lock protects both pool_head and drainpp. */
101 static kmutex_t pool_head_lock;
102 static kcondvar_t pool_busy;
103 
104 /* This lock protects initialization of a potentially shared pool allocator */
105 static kmutex_t pool_allocator_lock;
106 
107 typedef uint32_t pool_item_bitmap_t;
108 #define	BITMAP_SIZE	(CHAR_BIT * sizeof(pool_item_bitmap_t))
109 #define	BITMAP_MASK	(BITMAP_SIZE - 1)
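
/*
 * Worked example (illustrative): with BITMAP_SIZE == 32, item index 37
 * maps to bitmap word 37 / 32 == 1 and bit 37 & 31 == 5, i.e. the mask
 * (1 << 5) within ph_bitmap[1].
 */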
110 
111 struct pool_item_header {
112 	/* Page headers */
113 	LIST_ENTRY(pool_item_header)
114 				ph_pagelist;	/* pool page list */
115 	SPLAY_ENTRY(pool_item_header)
116 				ph_node;	/* Off-page page headers */
117 	void *			ph_page;	/* this page's address */
118 	uint32_t		ph_time;	/* last referenced */
119 	uint16_t		ph_nmissing;	/* # of chunks in use */
120 	uint16_t		ph_off;		/* start offset in page */
121 	union {
122 		/* !PR_NOTOUCH */
123 		struct {
124 			LIST_HEAD(, pool_item)
125 				phu_itemlist;	/* chunk list for this page */
126 		} phu_normal;
127 		/* PR_NOTOUCH */
128 		struct {
129 			pool_item_bitmap_t phu_bitmap[1];
130 		} phu_notouch;
131 	} ph_u;
132 };
133 #define	ph_itemlist	ph_u.phu_normal.phu_itemlist
134 #define	ph_bitmap	ph_u.phu_notouch.phu_bitmap
135 
136 struct pool_item {
137 #ifdef DIAGNOSTIC
138 	u_int pi_magic;
139 #endif
140 #define	PI_MAGIC 0xdeaddeadU
141 	/* Other entries use only this list entry */
142 	LIST_ENTRY(pool_item)	pi_list;
143 };
144 
145 #define	POOL_NEEDS_CATCHUP(pp)						\
146 	((pp)->pr_nitems < (pp)->pr_minitems)
147 
148 /*
149  * Pool cache management.
150  *
151  * Pool caches provide a way for constructed objects to be cached by the
152  * pool subsystem.  This can lead to performance improvements by avoiding
153  * needless object construction/destruction; destruction is deferred until
154  * absolutely necessary.
155  *
156  * Caches are grouped into cache groups.  Each cache group references up
157  * to PCG_NUMOBJECTS constructed objects.  When a cache allocates an
158  * object from the pool, it calls the object's constructor and places it
159  * into a cache group.  When a cache group frees an object back to the
160  * pool, it first calls the object's destructor.  This allows the object
161  * to persist in constructed form while freed to the cache.
162  *
163  * The pool references each cache, so that when a pool is drained by the
164  * pagedaemon, it can drain each individual cache as well.  Each time a
165  * cache is drained, the most idle cache group is freed to the pool in
166  * its entirety.
167  *
168  * Pool caches are laid on top of pools.  By layering them, we can avoid
169  * the complexity of cache management for pools which would not benefit
170  * from it.
171  */
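
/*
 * Minimal pool_cache usage sketch (illustrative only; the "foo" names
 * are hypothetical):
 *
 *	pool_cache_t foo_cache;
 *
 *	foo_cache = pool_cache_init(sizeof(struct foo), coherency_unit,
 *	    0, 0, "foocache", NULL, IPL_NONE, foo_ctor, foo_dtor, NULL);
 *
 *	struct foo *f = pool_cache_get(foo_cache, PR_WAITOK);
 *	...
 *	pool_cache_put(foo_cache, f);
 */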
172 
173 static struct pool pcg_normal_pool;
174 static struct pool pcg_large_pool;
175 static struct pool cache_pool;
176 static struct pool cache_cpu_pool;
177 
178 pool_cache_t pnbuf_cache;	/* pathname buffer cache */
179 
180 /* List of all caches. */
181 TAILQ_HEAD(,pool_cache) pool_cache_head =
182     TAILQ_HEAD_INITIALIZER(pool_cache_head);
183 
184 int pool_cache_disable;		/* global disable for caching */
185 static const pcg_t pcg_dummy;	/* zero sized: always empty, yet always full */
186 
187 static bool	pool_cache_put_slow(pool_cache_cpu_t *, int,
188 				    void *);
189 static bool	pool_cache_get_slow(pool_cache_cpu_t *, int,
190 				    void **, paddr_t *, int);
191 static void	pool_cache_cpu_init1(struct cpu_info *, pool_cache_t);
192 static void	pool_cache_invalidate_groups(pool_cache_t, pcg_t *);
193 static void	pool_cache_invalidate_cpu(pool_cache_t, u_int);
194 static void	pool_cache_transfer(pool_cache_t);
195 
196 static int	pool_catchup(struct pool *);
197 static void	pool_prime_page(struct pool *, void *,
198 		    struct pool_item_header *);
199 static void	pool_update_curpage(struct pool *);
200 
201 static int	pool_grow(struct pool *, int);
202 static void	*pool_allocator_alloc(struct pool *, int);
203 static void	pool_allocator_free(struct pool *, void *);
204 
205 static void pool_print_pagelist(struct pool *, struct pool_pagelist *,
206 	void (*)(const char *, ...));
207 static void pool_print1(struct pool *, const char *,
208 	void (*)(const char *, ...));
209 
210 static int pool_chk_page(struct pool *, const char *,
211 			 struct pool_item_header *);
212 
213 static inline unsigned int
214 pr_item_notouch_index(const struct pool *pp, const struct pool_item_header *ph,
215     const void *v)
216 {
217 	const char *cp = v;
218 	unsigned int idx;
219 
220 	KASSERT(pp->pr_roflags & PR_NOTOUCH);
221 	idx = (cp - (char *)ph->ph_page - ph->ph_off) / pp->pr_size;
222 	KASSERT(idx < pp->pr_itemsperpage);
223 	return idx;
224 }
225 
226 static inline void
227 pr_item_notouch_put(const struct pool *pp, struct pool_item_header *ph,
228     void *obj)
229 {
230 	unsigned int idx = pr_item_notouch_index(pp, ph, obj);
231 	pool_item_bitmap_t *bitmap = ph->ph_bitmap + (idx / BITMAP_SIZE);
232 	pool_item_bitmap_t mask = 1 << (idx & BITMAP_MASK);
233 
234 	KASSERT((*bitmap & mask) == 0);
235 	*bitmap |= mask;
236 }
237 
238 static inline void *
239 pr_item_notouch_get(const struct pool *pp, struct pool_item_header *ph)
240 {
241 	pool_item_bitmap_t *bitmap = ph->ph_bitmap;
242 	unsigned int idx;
243 	int i;
244 
245 	for (i = 0; ; i++) {
246 		int bit;
247 
248 		KASSERT((i * BITMAP_SIZE) < pp->pr_itemsperpage);
249 		bit = ffs32(bitmap[i]);
250 		if (bit) {
251 			pool_item_bitmap_t mask;
252 
253 			bit--;
254 			idx = (i * BITMAP_SIZE) + bit;
255 			mask = 1 << bit;
256 			KASSERT((bitmap[i] & mask) != 0);
257 			bitmap[i] &= ~mask;
258 			break;
259 		}
260 	}
261 	KASSERT(idx < pp->pr_itemsperpage);
262 	return (char *)ph->ph_page + ph->ph_off + idx * pp->pr_size;
263 }
264 
265 static inline void
266 pr_item_notouch_init(const struct pool *pp, struct pool_item_header *ph)
267 {
268 	pool_item_bitmap_t *bitmap = ph->ph_bitmap;
269 	const int n = howmany(pp->pr_itemsperpage, BITMAP_SIZE);
270 	int i;
271 
272 	for (i = 0; i < n; i++) {
273 		bitmap[i] = (pool_item_bitmap_t)-1;
274 	}
275 }
276 
277 static inline int
278 phtree_compare(struct pool_item_header *a, struct pool_item_header *b)
279 {
280 
281 	/*
282 	 * We consider a pool_item_header with a smaller ph_page to be bigger.
283 	 * (This unnatural ordering is for the benefit of pr_find_pagehead_noalign().)
284 	 */
285 
286 	if (a->ph_page < b->ph_page)
287 		return (1);
288 	else if (a->ph_page > b->ph_page)
289 		return (-1);
290 	else
291 		return (0);
292 }
293 
294 SPLAY_PROTOTYPE(phtree, pool_item_header, ph_node, phtree_compare);
295 SPLAY_GENERATE(phtree, pool_item_header, ph_node, phtree_compare);
296 
297 static inline struct pool_item_header *
298 pr_find_pagehead_noalign(struct pool *pp, void *v)
299 {
300 	struct pool_item_header *ph, tmp;
301 
302 	tmp.ph_page = (void *)(uintptr_t)v;
303 	ph = SPLAY_FIND(phtree, &pp->pr_phtree, &tmp);
304 	if (ph == NULL) {
305 		ph = SPLAY_ROOT(&pp->pr_phtree);
306 		if (ph != NULL && phtree_compare(&tmp, ph) >= 0) {
307 			ph = SPLAY_NEXT(phtree, &pp->pr_phtree, ph);
308 		}
309 		KASSERT(ph == NULL || phtree_compare(&tmp, ph) < 0);
310 	}
311 
312 	return ph;
313 }
314 
315 /*
316  * Return the pool page header based on item address.
317  */
318 static inline struct pool_item_header *
319 pr_find_pagehead(struct pool *pp, void *v)
320 {
321 	struct pool_item_header *ph, tmp;
322 
323 	if ((pp->pr_roflags & PR_NOALIGN) != 0) {
324 		ph = pr_find_pagehead_noalign(pp, v);
325 	} else {
326 		void *page =
327 		    (void *)((uintptr_t)v & pp->pr_alloc->pa_pagemask);
328 
329 		if ((pp->pr_roflags & PR_PHINPAGE) != 0) {
330 			ph = (struct pool_item_header *)((char *)page + pp->pr_phoffset);
331 		} else {
332 			tmp.ph_page = page;
333 			ph = SPLAY_FIND(phtree, &pp->pr_phtree, &tmp);
334 		}
335 	}
336 
337 	KASSERT(ph == NULL || ((pp->pr_roflags & PR_PHINPAGE) != 0) ||
338 	    ((char *)ph->ph_page <= (char *)v &&
339 	    (char *)v < (char *)ph->ph_page + pp->pr_alloc->pa_pagesz));
340 	return ph;
341 }
342 
343 static void
344 pr_pagelist_free(struct pool *pp, struct pool_pagelist *pq)
345 {
346 	struct pool_item_header *ph;
347 
348 	while ((ph = LIST_FIRST(pq)) != NULL) {
349 		LIST_REMOVE(ph, ph_pagelist);
350 		pool_allocator_free(pp, ph->ph_page);
351 		if ((pp->pr_roflags & PR_PHINPAGE) == 0)
352 			pool_put(pp->pr_phpool, ph);
353 	}
354 }
355 
356 /*
357  * Remove a page from the pool.
358  */
359 static inline void
360 pr_rmpage(struct pool *pp, struct pool_item_header *ph,
361      struct pool_pagelist *pq)
362 {
363 
364 	KASSERT(mutex_owned(&pp->pr_lock));
365 
366 	/*
367 	 * If the page was idle, decrement the idle page count.
368 	 */
369 	if (ph->ph_nmissing == 0) {
370 #ifdef DIAGNOSTIC
371 		if (pp->pr_nidle == 0)
372 			panic("pr_rmpage: nidle inconsistent");
373 		if (pp->pr_nitems < pp->pr_itemsperpage)
374 			panic("pr_rmpage: nitems inconsistent");
375 #endif
376 		pp->pr_nidle--;
377 	}
378 
379 	pp->pr_nitems -= pp->pr_itemsperpage;
380 
381 	/*
382 	 * Unlink the page from the pool and queue it for release.
383 	 */
384 	LIST_REMOVE(ph, ph_pagelist);
385 	if ((pp->pr_roflags & PR_PHINPAGE) == 0)
386 		SPLAY_REMOVE(phtree, &pp->pr_phtree, ph);
387 	LIST_INSERT_HEAD(pq, ph, ph_pagelist);
388 
389 	pp->pr_npages--;
390 	pp->pr_npagefree++;
391 
392 	pool_update_curpage(pp);
393 }
394 
395 /*
396  * Initialize all the pools listed in the "pools" link set.
397  */
398 void
399 pool_subsystem_init(void)
400 {
401 	size_t size;
402 	int idx;
403 
404 	mutex_init(&pool_head_lock, MUTEX_DEFAULT, IPL_NONE);
405 	mutex_init(&pool_allocator_lock, MUTEX_DEFAULT, IPL_NONE);
406 	cv_init(&pool_busy, "poolbusy");
407 
408 	/*
409 	 * Initialize private page header pool and cache magazine pool if we
410 	 * haven't done so yet.
411 	 */
412 	for (idx = 0; idx < PHPOOL_MAX; idx++) {
413 		static char phpool_names[PHPOOL_MAX][6+1+6+1];
414 		int nelem;
415 		size_t sz;
416 
417 		nelem = PHPOOL_FREELIST_NELEM(idx);
418 		snprintf(phpool_names[idx], sizeof(phpool_names[idx]),
419 		    "phpool-%d", nelem);
420 		sz = sizeof(struct pool_item_header);
421 		if (nelem) {
422 			sz = offsetof(struct pool_item_header,
423 			    ph_bitmap[howmany(nelem, BITMAP_SIZE)]);
424 		}
425 		pool_init(&phpool[idx], sz, 0, 0, 0,
426 		    phpool_names[idx], &pool_allocator_meta, IPL_VM);
427 	}
428 #ifdef POOL_SUBPAGE
429 	pool_init(&psppool, POOL_SUBPAGE, POOL_SUBPAGE, 0,
430 	    PR_RECURSIVE, "psppool", &pool_allocator_meta, IPL_VM);
431 #endif
432 
433 	size = sizeof(pcg_t) +
434 	    (PCG_NOBJECTS_NORMAL - 1) * sizeof(pcgpair_t);
435 	pool_init(&pcg_normal_pool, size, coherency_unit, 0, 0,
436 	    "pcgnormal", &pool_allocator_meta, IPL_VM);
437 
438 	size = sizeof(pcg_t) +
439 	    (PCG_NOBJECTS_LARGE - 1) * sizeof(pcgpair_t);
440 	pool_init(&pcg_large_pool, size, coherency_unit, 0, 0,
441 	    "pcglarge", &pool_allocator_meta, IPL_VM);
442 
443 	pool_init(&cache_pool, sizeof(struct pool_cache), coherency_unit,
444 	    0, 0, "pcache", &pool_allocator_meta, IPL_NONE);
445 
446 	pool_init(&cache_cpu_pool, sizeof(pool_cache_cpu_t), coherency_unit,
447 	    0, 0, "pcachecpu", &pool_allocator_meta, IPL_NONE);
448 }
449 
450 /*
451  * Initialize the given pool resource structure.
452  *
453  * We export this routine to allow other kernel parts to declare
454  * static pools that must be initialized before kmem(9) is available.
455  */
456 void
457 pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags,
458     const char *wchan, struct pool_allocator *palloc, int ipl)
459 {
460 	struct pool *pp1;
461 	size_t trysize, phsize;
462 	int off, slack;
463 
464 #ifdef DEBUG
465 	if (__predict_true(!cold))
466 		mutex_enter(&pool_head_lock);
467 	/*
468 	 * Check that the pool hasn't already been initialised and
469 	 * added to the list of all pools.
470 	 */
471 	TAILQ_FOREACH(pp1, &pool_head, pr_poollist) {
472 		if (pp == pp1)
473 			panic("pool_init: pool %s already initialised",
474 			    wchan);
475 	}
476 	if (__predict_true(!cold))
477 		mutex_exit(&pool_head_lock);
478 #endif
479 
480 	if (palloc == NULL)
481 		palloc = &pool_allocator_kmem;
482 #ifdef POOL_SUBPAGE
483 	if (size > palloc->pa_pagesz) {
484 		if (palloc == &pool_allocator_kmem)
485 			palloc = &pool_allocator_kmem_fullpage;
486 		else if (palloc == &pool_allocator_nointr)
487 			palloc = &pool_allocator_nointr_fullpage;
488 	}
489 #endif /* POOL_SUBPAGE */
490 	if (!cold)
491 		mutex_enter(&pool_allocator_lock);
492 	if (palloc->pa_refcnt++ == 0) {
493 		if (palloc->pa_pagesz == 0)
494 			palloc->pa_pagesz = PAGE_SIZE;
495 
496 		TAILQ_INIT(&palloc->pa_list);
497 
498 		mutex_init(&palloc->pa_lock, MUTEX_DEFAULT, IPL_VM);
499 		palloc->pa_pagemask = ~(palloc->pa_pagesz - 1);
500 		palloc->pa_pageshift = ffs(palloc->pa_pagesz) - 1;
501 	}
502 	if (!cold)
503 		mutex_exit(&pool_allocator_lock);
504 
505 	if (align == 0)
506 		align = ALIGN(1);
507 
508 	if ((flags & PR_NOTOUCH) == 0 && size < sizeof(struct pool_item))
509 		size = sizeof(struct pool_item);
510 
511 	size = roundup(size, align);
512 #ifdef DIAGNOSTIC
513 	if (size > palloc->pa_pagesz)
514 		panic("pool_init: pool item size (%zu) too large", size);
515 #endif
516 
517 	/*
518 	 * Initialize the pool structure.
519 	 */
520 	LIST_INIT(&pp->pr_emptypages);
521 	LIST_INIT(&pp->pr_fullpages);
522 	LIST_INIT(&pp->pr_partpages);
523 	pp->pr_cache = NULL;
524 	pp->pr_curpage = NULL;
525 	pp->pr_npages = 0;
526 	pp->pr_minitems = 0;
527 	pp->pr_minpages = 0;
528 	pp->pr_maxpages = UINT_MAX;
529 	pp->pr_roflags = flags;
530 	pp->pr_flags = 0;
531 	pp->pr_size = size;
532 	pp->pr_align = align;
533 	pp->pr_wchan = wchan;
534 	pp->pr_alloc = palloc;
535 	pp->pr_nitems = 0;
536 	pp->pr_nout = 0;
537 	pp->pr_hardlimit = UINT_MAX;
538 	pp->pr_hardlimit_warning = NULL;
539 	pp->pr_hardlimit_ratecap.tv_sec = 0;
540 	pp->pr_hardlimit_ratecap.tv_usec = 0;
541 	pp->pr_hardlimit_warning_last.tv_sec = 0;
542 	pp->pr_hardlimit_warning_last.tv_usec = 0;
543 	pp->pr_drain_hook = NULL;
544 	pp->pr_drain_hook_arg = NULL;
545 	pp->pr_freecheck = NULL;
546 
547 	/*
548 	 * Decide whether to put the page header off-page, to avoid wasting
549 	 * too large a part of the page on the header, or for too big an item.
550 	 * Off-page page headers are kept in a splay tree, so we can match
551 	 * a returned item with its header based on the page address.
552 	 * We use 1/16 of the page size and about 8 times the page-header
553 	 * size as the thresholds (XXX: tune)
554 	 *
555 	 * However, we'll put the header into the page if we can put
556 	 * it without wasting any items.
557 	 *
558 	 * Silently enforce `0 <= ioff < align'.
559 	 */
560 	pp->pr_itemoffset = ioff %= align;
561 	/* See the comment below about reserved bytes. */
562 	trysize = palloc->pa_pagesz - ((align - ioff) % align);
563 	phsize = ALIGN(sizeof(struct pool_item_header));
564 	if ((pp->pr_roflags & (PR_NOTOUCH | PR_NOALIGN)) == 0 &&
565 	    (pp->pr_size < MIN(palloc->pa_pagesz / 16, phsize << 3) ||
566 	    trysize / pp->pr_size == (trysize - phsize) / pp->pr_size)) {
567 		/* Use the end of the page for the page header */
568 		pp->pr_roflags |= PR_PHINPAGE;
569 		pp->pr_phoffset = off = palloc->pa_pagesz - phsize;
570 	} else {
571 		/* The page header will be taken from our page header pool */
572 		pp->pr_phoffset = 0;
573 		off = palloc->pa_pagesz;
574 		SPLAY_INIT(&pp->pr_phtree);
575 	}
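
	/*
	 * Worked example of the threshold above (illustrative, assuming
	 * 4 KB pages and a 64-byte aligned page header): items smaller
	 * than MIN(4096 / 16, 64 * 8) == 256 bytes keep the header
	 * in-page, as does any item size for which giving up phsize
	 * bytes at the end of the page costs no items.
	 */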
576 
577 	/*
578 	 * Alignment is to take place at `ioff' within the item. This means
579 	 * we must reserve up to `align - 1' bytes on the page to allow
580 	 * appropriate positioning of each item.
581 	 */
582 	pp->pr_itemsperpage = (off - ((align - ioff) % align)) / pp->pr_size;
583 	KASSERT(pp->pr_itemsperpage != 0);
584 	if ((pp->pr_roflags & PR_NOTOUCH)) {
585 		int idx;
586 
587 		for (idx = 0; pp->pr_itemsperpage > PHPOOL_FREELIST_NELEM(idx);
588 		    idx++) {
589 			/* nothing */
590 		}
591 		if (idx >= PHPOOL_MAX) {
592 			/*
593 			 * If you see this panic, consider tweaking
594 			 * PHPOOL_MAX and PHPOOL_FREELIST_NELEM.
595 			 */
596 			panic("%s: too large itemsperpage(%d) for PR_NOTOUCH",
597 			    pp->pr_wchan, pp->pr_itemsperpage);
598 		}
599 		pp->pr_phpool = &phpool[idx];
600 	} else if ((pp->pr_roflags & PR_PHINPAGE) == 0) {
601 		pp->pr_phpool = &phpool[0];
602 	}
603 #if defined(DIAGNOSTIC)
604 	else {
605 		pp->pr_phpool = NULL;
606 	}
607 #endif
608 
609 	/*
610 	 * Use the slack between the chunks and the page header
611 	 * for "cache coloring".
612 	 */
613 	slack = off - pp->pr_itemsperpage * pp->pr_size;
614 	pp->pr_maxcolor = (slack / align) * align;
615 	pp->pr_curcolor = 0;
616 
617 	pp->pr_nget = 0;
618 	pp->pr_nfail = 0;
619 	pp->pr_nput = 0;
620 	pp->pr_npagealloc = 0;
621 	pp->pr_npagefree = 0;
622 	pp->pr_hiwat = 0;
623 	pp->pr_nidle = 0;
624 	pp->pr_refcnt = 0;
625 
626 	mutex_init(&pp->pr_lock, MUTEX_DEFAULT, ipl);
627 	cv_init(&pp->pr_cv, wchan);
628 	pp->pr_ipl = ipl;
629 
630 	/* Insert into the list of all pools. */
631 	if (!cold)
632 		mutex_enter(&pool_head_lock);
633 	TAILQ_FOREACH(pp1, &pool_head, pr_poollist) {
634 		if (strcmp(pp1->pr_wchan, pp->pr_wchan) > 0)
635 			break;
636 	}
637 	if (pp1 == NULL)
638 		TAILQ_INSERT_TAIL(&pool_head, pp, pr_poollist);
639 	else
640 		TAILQ_INSERT_BEFORE(pp1, pp, pr_poollist);
641 	if (!cold)
642 		mutex_exit(&pool_head_lock);
643 
644 	/* Insert this into the list of pools using this allocator. */
645 	if (!cold)
646 		mutex_enter(&palloc->pa_lock);
647 	TAILQ_INSERT_TAIL(&palloc->pa_list, pp, pr_alloc_list);
648 	if (!cold)
649 		mutex_exit(&palloc->pa_lock);
650 }
651 
652 /*
653  * De-commission a pool resource.
654  */
655 void
656 pool_destroy(struct pool *pp)
657 {
658 	struct pool_pagelist pq;
659 	struct pool_item_header *ph;
660 
661 	/* Remove from global pool list */
662 	mutex_enter(&pool_head_lock);
663 	while (pp->pr_refcnt != 0)
664 		cv_wait(&pool_busy, &pool_head_lock);
665 	TAILQ_REMOVE(&pool_head, pp, pr_poollist);
666 	if (drainpp == pp)
667 		drainpp = NULL;
668 	mutex_exit(&pool_head_lock);
669 
670 	/* Remove this pool from its allocator's list of pools. */
671 	mutex_enter(&pp->pr_alloc->pa_lock);
672 	TAILQ_REMOVE(&pp->pr_alloc->pa_list, pp, pr_alloc_list);
673 	mutex_exit(&pp->pr_alloc->pa_lock);
674 
675 	mutex_enter(&pool_allocator_lock);
676 	if (--pp->pr_alloc->pa_refcnt == 0)
677 		mutex_destroy(&pp->pr_alloc->pa_lock);
678 	mutex_exit(&pool_allocator_lock);
679 
680 	mutex_enter(&pp->pr_lock);
681 
682 	KASSERT(pp->pr_cache == NULL);
683 
684 #ifdef DIAGNOSTIC
685 	if (pp->pr_nout != 0) {
686 		panic("pool_destroy: pool busy: still out: %u",
687 		    pp->pr_nout);
688 	}
689 #endif
690 
691 	KASSERT(LIST_EMPTY(&pp->pr_fullpages));
692 	KASSERT(LIST_EMPTY(&pp->pr_partpages));
693 
694 	/* Remove all pages */
695 	LIST_INIT(&pq);
696 	while ((ph = LIST_FIRST(&pp->pr_emptypages)) != NULL)
697 		pr_rmpage(pp, ph, &pq);
698 
699 	mutex_exit(&pp->pr_lock);
700 
701 	pr_pagelist_free(pp, &pq);
702 	cv_destroy(&pp->pr_cv);
703 	mutex_destroy(&pp->pr_lock);
704 }
705 
706 void
707 pool_set_drain_hook(struct pool *pp, void (*fn)(void *, int), void *arg)
708 {
709 
710 	/* XXX no locking -- must be used just after pool_init() */
711 #ifdef DIAGNOSTIC
712 	if (pp->pr_drain_hook != NULL)
713 		panic("pool_set_drain_hook(%s): already set", pp->pr_wchan);
714 #endif
715 	pp->pr_drain_hook = fn;
716 	pp->pr_drain_hook_arg = arg;
717 }
718 
719 static struct pool_item_header *
720 pool_alloc_item_header(struct pool *pp, void *storage, int flags)
721 {
722 	struct pool_item_header *ph;
723 
724 	if ((pp->pr_roflags & PR_PHINPAGE) != 0)
725 		ph = (struct pool_item_header *) ((char *)storage + pp->pr_phoffset);
726 	else
727 		ph = pool_get(pp->pr_phpool, flags);
728 
729 	return (ph);
730 }
731 
732 /*
733  * Grab an item from the pool.
734  */
735 void *
736 pool_get(struct pool *pp, int flags)
737 {
738 	struct pool_item *pi;
739 	struct pool_item_header *ph;
740 	void *v;
741 
742 #ifdef DIAGNOSTIC
743 	if (pp->pr_itemsperpage == 0)
744 		panic("pool_get: pool '%s': pr_itemsperpage is zero, "
745 		    "pool not initialized?", pp->pr_wchan);
746 	if ((cpu_intr_p() || cpu_softintr_p()) && pp->pr_ipl == IPL_NONE &&
747 	    !cold && panicstr == NULL)
748 		panic("pool '%s' is IPL_NONE, but called from "
749 		    "interrupt context\n", pp->pr_wchan);
750 #endif
751 	if (flags & PR_WAITOK) {
752 		ASSERT_SLEEPABLE();
753 	}
754 
755 	mutex_enter(&pp->pr_lock);
756  startover:
757 	/*
758 	 * Check to see if we've reached the hard limit.  If we have,
759 	 * and we can wait, then wait until an item has been returned to
760 	 * the pool.
761 	 */
762 #ifdef DIAGNOSTIC
763 	if (__predict_false(pp->pr_nout > pp->pr_hardlimit)) {
764 		mutex_exit(&pp->pr_lock);
765 		panic("pool_get: %s: crossed hard limit", pp->pr_wchan);
766 	}
767 #endif
768 	if (__predict_false(pp->pr_nout == pp->pr_hardlimit)) {
769 		if (pp->pr_drain_hook != NULL) {
770 			/*
771 			 * Since the drain hook is going to free things
772 			 * back to the pool, unlock, call the hook, re-lock,
773 			 * and check the hardlimit condition again.
774 			 */
775 			mutex_exit(&pp->pr_lock);
776 			(*pp->pr_drain_hook)(pp->pr_drain_hook_arg, flags);
777 			mutex_enter(&pp->pr_lock);
778 			if (pp->pr_nout < pp->pr_hardlimit)
779 				goto startover;
780 		}
781 
782 		if ((flags & PR_WAITOK) && !(flags & PR_LIMITFAIL)) {
783 			/*
784 			 * XXX: A warning isn't logged in this case.  Should
785 			 * it be?
786 			 */
787 			pp->pr_flags |= PR_WANTED;
788 			cv_wait(&pp->pr_cv, &pp->pr_lock);
789 			goto startover;
790 		}
791 
792 		/*
793 		 * Log a message that the hard limit has been hit.
794 		 */
795 		if (pp->pr_hardlimit_warning != NULL &&
796 		    ratecheck(&pp->pr_hardlimit_warning_last,
797 			      &pp->pr_hardlimit_ratecap))
798 			log(LOG_ERR, "%s\n", pp->pr_hardlimit_warning);
799 
800 		pp->pr_nfail++;
801 
802 		mutex_exit(&pp->pr_lock);
803 		return (NULL);
804 	}
805 
806 	/*
807 	 * The convention we use is that if `curpage' is not NULL, then
808 	 * it points at a non-empty bucket. In particular, `curpage'
809 	 * never points at a page header which has PR_PHINPAGE set and
810 	 * has no items in its bucket.
811 	 */
812 	if ((ph = pp->pr_curpage) == NULL) {
813 		int error;
814 
815 #ifdef DIAGNOSTIC
816 		if (pp->pr_nitems != 0) {
817 			mutex_exit(&pp->pr_lock);
818 			printf("pool_get: %s: curpage NULL, nitems %u\n",
819 			    pp->pr_wchan, pp->pr_nitems);
820 			panic("pool_get: nitems inconsistent");
821 		}
822 #endif
823 
824 		/*
825 		 * Call the back-end page allocator for more memory.
826 		 * Release the pool lock, as the back-end page allocator
827 		 * may block.
828 		 */
829 		error = pool_grow(pp, flags);
830 		if (error != 0) {
831 			/*
832 			 * We were unable to allocate a page or item
833 			 * header, but we released the lock during
834 			 * allocation, so perhaps items were freed
835 			 * back to the pool.  Check for this case.
836 			 */
837 			if (pp->pr_curpage != NULL)
838 				goto startover;
839 
840 			pp->pr_nfail++;
841 			mutex_exit(&pp->pr_lock);
842 			return (NULL);
843 		}
844 
845 		/* Start the allocation process over. */
846 		goto startover;
847 	}
848 	if (pp->pr_roflags & PR_NOTOUCH) {
849 #ifdef DIAGNOSTIC
850 		if (__predict_false(ph->ph_nmissing == pp->pr_itemsperpage)) {
851 			mutex_exit(&pp->pr_lock);
852 			panic("pool_get: %s: page empty", pp->pr_wchan);
853 		}
854 #endif
855 		v = pr_item_notouch_get(pp, ph);
856 	} else {
857 		v = pi = LIST_FIRST(&ph->ph_itemlist);
858 		if (__predict_false(v == NULL)) {
859 			mutex_exit(&pp->pr_lock);
860 			panic("pool_get: %s: page empty", pp->pr_wchan);
861 		}
862 #ifdef DIAGNOSTIC
863 		if (__predict_false(pp->pr_nitems == 0)) {
864 			mutex_exit(&pp->pr_lock);
865 			printf("pool_get: %s: items on itemlist, nitems %u\n",
866 			    pp->pr_wchan, pp->pr_nitems);
867 			panic("pool_get: nitems inconsistent");
868 		}
869 #endif
870 
871 #ifdef DIAGNOSTIC
872 		if (__predict_false(pi->pi_magic != PI_MAGIC)) {
873 			panic("pool_get(%s): free list modified: "
874 			    "magic=%x; page %p; item addr %p\n",
875 			    pp->pr_wchan, pi->pi_magic, ph->ph_page, pi);
876 		}
877 #endif
878 
879 		/*
880 		 * Remove from item list.
881 		 */
882 		LIST_REMOVE(pi, pi_list);
883 	}
884 	pp->pr_nitems--;
885 	pp->pr_nout++;
886 	if (ph->ph_nmissing == 0) {
887 #ifdef DIAGNOSTIC
888 		if (__predict_false(pp->pr_nidle == 0))
889 			panic("pool_get: nidle inconsistent");
890 #endif
891 		pp->pr_nidle--;
892 
893 		/*
894 		 * This page was previously empty.  Move it to the list of
895 		 * partially-full pages.  This page is already curpage.
896 		 */
897 		LIST_REMOVE(ph, ph_pagelist);
898 		LIST_INSERT_HEAD(&pp->pr_partpages, ph, ph_pagelist);
899 	}
900 	ph->ph_nmissing++;
901 	if (ph->ph_nmissing == pp->pr_itemsperpage) {
902 #ifdef DIAGNOSTIC
903 		if (__predict_false((pp->pr_roflags & PR_NOTOUCH) == 0 &&
904 		    !LIST_EMPTY(&ph->ph_itemlist))) {
905 			mutex_exit(&pp->pr_lock);
906 			panic("pool_get: %s: nmissing inconsistent",
907 			    pp->pr_wchan);
908 		}
909 #endif
910 		/*
911 		 * This page is now full.  Move it to the full list
912 		 * and select a new current page.
913 		 */
914 		LIST_REMOVE(ph, ph_pagelist);
915 		LIST_INSERT_HEAD(&pp->pr_fullpages, ph, ph_pagelist);
916 		pool_update_curpage(pp);
917 	}
918 
919 	pp->pr_nget++;
920 
921 	/*
922 	 * If we have a low water mark and we are now below that low
923 	 * water mark, add more items to the pool.
924 	 */
925 	if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
926 		/*
927 		 * XXX: Should we log a warning?  Should we set up a timeout
928 		 * to try again in a second or so?  The latter could break
929 		 * a caller's assumptions about interrupt protection, etc.
930 		 */
931 	}
932 
933 	mutex_exit(&pp->pr_lock);
934 	KASSERT((((vaddr_t)v + pp->pr_itemoffset) & (pp->pr_align - 1)) == 0);
935 	FREECHECK_OUT(&pp->pr_freecheck, v);
936 	return (v);
937 }
938 
939 /*
940  * Internal version of pool_put().  Pool is already locked/entered.
941  */
942 static void
943 pool_do_put(struct pool *pp, void *v, struct pool_pagelist *pq)
944 {
945 	struct pool_item *pi = v;
946 	struct pool_item_header *ph;
947 
948 	KASSERT(mutex_owned(&pp->pr_lock));
949 	FREECHECK_IN(&pp->pr_freecheck, v);
950 	LOCKDEBUG_MEM_CHECK(v, pp->pr_size);
951 
952 #ifdef DIAGNOSTIC
953 	if (__predict_false(pp->pr_nout == 0)) {
954 		printf("pool %s: putting with none out\n",
955 		    pp->pr_wchan);
956 		panic("pool_put");
957 	}
958 #endif
959 
960 	if (__predict_false((ph = pr_find_pagehead(pp, v)) == NULL)) {
961 		panic("pool_put: %s: page header missing", pp->pr_wchan);
962 	}
963 
964 	/*
965 	 * Return to item list.
966 	 */
967 	if (pp->pr_roflags & PR_NOTOUCH) {
968 		pr_item_notouch_put(pp, ph, v);
969 	} else {
970 #ifdef DIAGNOSTIC
971 		pi->pi_magic = PI_MAGIC;
972 #endif
973 #ifdef DEBUG
974 		{
975 			int i, *ip = v;
976 
977 			for (i = 0; i < pp->pr_size / sizeof(int); i++) {
978 				*ip++ = PI_MAGIC;
979 			}
980 		}
981 #endif
982 
983 		LIST_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
984 	}
985 	KDASSERT(ph->ph_nmissing != 0);
986 	ph->ph_nmissing--;
987 	pp->pr_nput++;
988 	pp->pr_nitems++;
989 	pp->pr_nout--;
990 
991 	/* Cancel "pool empty" condition if it exists */
992 	if (pp->pr_curpage == NULL)
993 		pp->pr_curpage = ph;
994 
995 	if (pp->pr_flags & PR_WANTED) {
996 		pp->pr_flags &= ~PR_WANTED;
997 		cv_broadcast(&pp->pr_cv);
998 	}
999 
1000 	/*
1001 	 * If this page is now empty, do one of two things:
1002 	 *
1003 	 *	(1) If we have more pages than the page high water mark,
1004 	 *	    free the page back to the system.  ONLY CONSIDER
1005 	 *	    FREEING BACK A PAGE IF WE HAVE MORE THAN OUR MINIMUM PAGE
1006 	 *	    CLAIM.
1007 	 *
1008 	 *	(2) Otherwise, move the page to the empty page list.
1009 	 *
1010 	 * Either way, select a new current page (so we use a partially-full
1011 	 * page if one is available).
1012 	 */
1013 	if (ph->ph_nmissing == 0) {
1014 		pp->pr_nidle++;
1015 		if (pp->pr_npages > pp->pr_minpages &&
1016 		    pp->pr_npages > pp->pr_maxpages) {
1017 			pr_rmpage(pp, ph, pq);
1018 		} else {
1019 			LIST_REMOVE(ph, ph_pagelist);
1020 			LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist);
1021 
1022 			/*
1023 			 * Update the timestamp on the page.  A page must
1024 			 * be idle for some period of time before it can
1025 			 * be reclaimed by the pagedaemon.  This minimizes
1026 			 * ping-pong'ing for memory.
1027 			 *
1028 			 * note for 64-bit time_t: truncating to 32-bit is not
1029 			 * a problem for our usage.
1030 			 */
1031 			ph->ph_time = time_uptime;
1032 		}
1033 		pool_update_curpage(pp);
1034 	}
1035 
1036 	/*
1037 	 * If the page was previously completely full, move it to the
1038 	 * partially-full list and make it the current page.  The next
1039 	 * allocation will get the item from this page, instead of
1040 	 * further fragmenting the pool.
1041 	 */
1042 	else if (ph->ph_nmissing == (pp->pr_itemsperpage - 1)) {
1043 		LIST_REMOVE(ph, ph_pagelist);
1044 		LIST_INSERT_HEAD(&pp->pr_partpages, ph, ph_pagelist);
1045 		pp->pr_curpage = ph;
1046 	}
1047 }
1048 
1049 void
1050 pool_put(struct pool *pp, void *v)
1051 {
1052 	struct pool_pagelist pq;
1053 
1054 	LIST_INIT(&pq);
1055 
1056 	mutex_enter(&pp->pr_lock);
1057 	pool_do_put(pp, v, &pq);
1058 	mutex_exit(&pp->pr_lock);
1059 
1060 	pr_pagelist_free(pp, &pq);
1061 }
1062 
1063 /*
1064  * pool_grow: grow a pool by a page.
1065  *
1066  * => called with pool locked.
1067  * => unlock and relock the pool.
1068  * => return with pool locked.
1069  */
1070 
1071 static int
1072 pool_grow(struct pool *pp, int flags)
1073 {
1074 	struct pool_item_header *ph = NULL;
1075 	char *cp;
1076 
1077 	mutex_exit(&pp->pr_lock);
1078 	cp = pool_allocator_alloc(pp, flags);
1079 	if (__predict_true(cp != NULL)) {
1080 		ph = pool_alloc_item_header(pp, cp, flags);
1081 	}
1082 	if (__predict_false(cp == NULL || ph == NULL)) {
1083 		if (cp != NULL) {
1084 			pool_allocator_free(pp, cp);
1085 		}
1086 		mutex_enter(&pp->pr_lock);
1087 		return ENOMEM;
1088 	}
1089 
1090 	mutex_enter(&pp->pr_lock);
1091 	pool_prime_page(pp, cp, ph);
1092 	pp->pr_npagealloc++;
1093 	return 0;
1094 }
1095 
1096 /*
1097  * Add N items to the pool.
1098  */
1099 int
1100 pool_prime(struct pool *pp, int n)
1101 {
1102 	int newpages;
1103 	int error = 0;
1104 
1105 	mutex_enter(&pp->pr_lock);
1106 
1107 	newpages = roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1108 
1109 	while (newpages-- > 0) {
1110 		error = pool_grow(pp, PR_NOWAIT);
1111 		if (error) {
1112 			break;
1113 		}
1114 		pp->pr_minpages++;
1115 	}
1116 
1117 	if (pp->pr_minpages >= pp->pr_maxpages)
1118 		pp->pr_maxpages = pp->pr_minpages + 1;	/* XXX */
1119 
1120 	mutex_exit(&pp->pr_lock);
1121 	return error;
1122 }
1123 
1124 /*
1125  * Add a page worth of items to the pool.
1126  *
1127  * Note, we must be called with the pool descriptor LOCKED.
1128  */
1129 static void
1130 pool_prime_page(struct pool *pp, void *storage, struct pool_item_header *ph)
1131 {
1132 	struct pool_item *pi;
1133 	void *cp = storage;
1134 	const unsigned int align = pp->pr_align;
1135 	const unsigned int ioff = pp->pr_itemoffset;
1136 	int n;
1137 
1138 	KASSERT(mutex_owned(&pp->pr_lock));
1139 
1140 #ifdef DIAGNOSTIC
1141 	if ((pp->pr_roflags & PR_NOALIGN) == 0 &&
1142 	    ((uintptr_t)cp & (pp->pr_alloc->pa_pagesz - 1)) != 0)
1143 		panic("pool_prime_page: %s: unaligned page", pp->pr_wchan);
1144 #endif
1145 
1146 	/*
1147 	 * Insert page header.
1148 	 */
1149 	LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist);
1150 	LIST_INIT(&ph->ph_itemlist);
1151 	ph->ph_page = storage;
1152 	ph->ph_nmissing = 0;
1153 	ph->ph_time = time_uptime;
1154 	if ((pp->pr_roflags & PR_PHINPAGE) == 0)
1155 		SPLAY_INSERT(phtree, &pp->pr_phtree, ph);
1156 
1157 	pp->pr_nidle++;
1158 
1159 	/*
1160 	 * Color this page.
1161 	 */
1162 	ph->ph_off = pp->pr_curcolor;
1163 	cp = (char *)cp + ph->ph_off;
1164 	if ((pp->pr_curcolor += align) > pp->pr_maxcolor)
1165 		pp->pr_curcolor = 0;
1166 
1167 	/*
1168 	 * Adjust storage to apply alignment to `pr_itemoffset' in each item.
1169 	 */
1170 	if (ioff != 0)
1171 		cp = (char *)cp + align - ioff;
1172 
1173 	KASSERT((((vaddr_t)cp + ioff) & (align - 1)) == 0);
1174 
1175 	/*
1176 	 * Insert remaining chunks on the bucket list.
1177 	 */
1178 	n = pp->pr_itemsperpage;
1179 	pp->pr_nitems += n;
1180 
1181 	if (pp->pr_roflags & PR_NOTOUCH) {
1182 		pr_item_notouch_init(pp, ph);
1183 	} else {
1184 		while (n--) {
1185 			pi = (struct pool_item *)cp;
1186 
1187 			KASSERT(((((vaddr_t)pi) + ioff) & (align - 1)) == 0);
1188 
1189 			/* Insert on page list */
1190 			LIST_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
1191 #ifdef DIAGNOSTIC
1192 			pi->pi_magic = PI_MAGIC;
1193 #endif
1194 			cp = (char *)cp + pp->pr_size;
1195 
1196 			KASSERT((((vaddr_t)cp + ioff) & (align - 1)) == 0);
1197 		}
1198 	}
1199 
1200 	/*
1201 	 * If the pool was depleted, point at the new page.
1202 	 */
1203 	if (pp->pr_curpage == NULL)
1204 		pp->pr_curpage = ph;
1205 
1206 	if (++pp->pr_npages > pp->pr_hiwat)
1207 		pp->pr_hiwat = pp->pr_npages;
1208 }
1209 
1210 /*
1211  * Used by pool_get() when nitems drops below the low water mark; grows
1212  * the pool until pr_nitems catches up with the low water mark.
1213  *
1214  * Note 1, we never wait for memory here, we let the caller decide what to do.
1215  *
1216  * Note 2, we must be called with the pool already locked, and we return
1217  * with it locked.
1218  */
1219 static int
1220 pool_catchup(struct pool *pp)
1221 {
1222 	int error = 0;
1223 
1224 	while (POOL_NEEDS_CATCHUP(pp)) {
1225 		error = pool_grow(pp, PR_NOWAIT);
1226 		if (error) {
1227 			break;
1228 		}
1229 	}
1230 	return error;
1231 }
1232 
1233 static void
1234 pool_update_curpage(struct pool *pp)
1235 {
1236 
1237 	pp->pr_curpage = LIST_FIRST(&pp->pr_partpages);
1238 	if (pp->pr_curpage == NULL) {
1239 		pp->pr_curpage = LIST_FIRST(&pp->pr_emptypages);
1240 	}
1241 	KASSERT((pp->pr_curpage == NULL && pp->pr_nitems == 0) ||
1242 	    (pp->pr_curpage != NULL && pp->pr_nitems > 0));
1243 }
1244 
1245 void
1246 pool_setlowat(struct pool *pp, int n)
1247 {
1248 
1249 	mutex_enter(&pp->pr_lock);
1250 
1251 	pp->pr_minitems = n;
1252 	pp->pr_minpages = (n == 0)
1253 		? 0
1254 		: roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1255 
1256 	/* Make sure we're caught up with the newly-set low water mark. */
1257 	if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
1258 		/*
1259 		 * XXX: Should we log a warning?  Should we set up a timeout
1260 		 * to try again in a second or so?  The latter could break
1261 		 * a caller's assumptions about interrupt protection, etc.
1262 		 */
1263 	}
1264 
1265 	mutex_exit(&pp->pr_lock);
1266 }
1267 
1268 void
1269 pool_sethiwat(struct pool *pp, int n)
1270 {
1271 
1272 	mutex_enter(&pp->pr_lock);
1273 
1274 	pp->pr_maxpages = (n == 0)
1275 		? 0
1276 		: roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1277 
1278 	mutex_exit(&pp->pr_lock);
1279 }
1280 
1281 void
1282 pool_sethardlimit(struct pool *pp, int n, const char *warnmess, int ratecap)
1283 {
1284 
1285 	mutex_enter(&pp->pr_lock);
1286 
1287 	pp->pr_hardlimit = n;
1288 	pp->pr_hardlimit_warning = warnmess;
1289 	pp->pr_hardlimit_ratecap.tv_sec = ratecap;
1290 	pp->pr_hardlimit_warning_last.tv_sec = 0;
1291 	pp->pr_hardlimit_warning_last.tv_usec = 0;
1292 
1293 	/*
1294 	 * In-line version of pool_sethiwat(), because we don't want to
1295 	 * release the lock.
1296 	 */
1297 	pp->pr_maxpages = (n == 0)
1298 		? 0
1299 		: roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1300 
1301 	mutex_exit(&pp->pr_lock);
1302 }
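
/*
 * Illustrative tuning sketch (hypothetical pool and values):
 *
 *	pool_setlowat(&foo_pool, 16);
 *	pool_sethiwat(&foo_pool, 1024);
 *	pool_sethardlimit(&foo_pool, 2048,
 *	    "WARNING: foo_pool hard limit reached", 60);
 */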
1303 
1304 /*
1305  * Release all complete pages that have not been used recently.
1306  *
1307  * Must not be called from interrupt context.
1308  */
1309 int
1310 pool_reclaim(struct pool *pp)
1311 {
1312 	struct pool_item_header *ph, *phnext;
1313 	struct pool_pagelist pq;
1314 	uint32_t curtime;
1315 	bool klock;
1316 	int rv;
1317 
1318 	KASSERT(!cpu_intr_p() && !cpu_softintr_p());
1319 
1320 	if (pp->pr_drain_hook != NULL) {
1321 		/*
1322 		 * The drain hook must be called with the pool unlocked.
1323 		 */
1324 		(*pp->pr_drain_hook)(pp->pr_drain_hook_arg, PR_NOWAIT);
1325 	}
1326 
1327 	/*
1328 	 * XXXSMP Because we do not want to cause non-MPSAFE code
1329 	 * to block.
1330 	 */
1331 	if (pp->pr_ipl == IPL_SOFTNET || pp->pr_ipl == IPL_SOFTCLOCK ||
1332 	    pp->pr_ipl == IPL_SOFTSERIAL) {
1333 		KERNEL_LOCK(1, NULL);
1334 		klock = true;
1335 	} else
1336 		klock = false;
1337 
1338 	/* Reclaim items from the pool's cache (if any). */
1339 	if (pp->pr_cache != NULL)
1340 		pool_cache_invalidate(pp->pr_cache);
1341 
1342 	if (mutex_tryenter(&pp->pr_lock) == 0) {
1343 		if (klock) {
1344 			KERNEL_UNLOCK_ONE(NULL);
1345 		}
1346 		return (0);
1347 	}
1348 
1349 	LIST_INIT(&pq);
1350 
1351 	curtime = time_uptime;
1352 
1353 	for (ph = LIST_FIRST(&pp->pr_emptypages); ph != NULL; ph = phnext) {
1354 		phnext = LIST_NEXT(ph, ph_pagelist);
1355 
1356 		/* Check our minimum page claim */
1357 		if (pp->pr_npages <= pp->pr_minpages)
1358 			break;
1359 
1360 		KASSERT(ph->ph_nmissing == 0);
1361 		if (curtime - ph->ph_time < pool_inactive_time)
1362 			continue;
1363 
1364 		/*
1365 		 * If freeing this page would put us below
1366 		 * the low water mark, stop now.
1367 		 */
1368 		if ((pp->pr_nitems - pp->pr_itemsperpage) <
1369 		    pp->pr_minitems)
1370 			break;
1371 
1372 		pr_rmpage(pp, ph, &pq);
1373 	}
1374 
1375 	mutex_exit(&pp->pr_lock);
1376 
1377 	if (LIST_EMPTY(&pq))
1378 		rv = 0;
1379 	else {
1380 		pr_pagelist_free(pp, &pq);
1381 		rv = 1;
1382 	}
1383 
1384 	if (klock) {
1385 		KERNEL_UNLOCK_ONE(NULL);
1386 	}
1387 
1388 	return (rv);
1389 }
1390 
1391 /*
1392  * Drain pools, one at a time.  The drained pool is returned in *ppp.
1393  *
1394  * Note, must never be called from interrupt context.
1395  */
1396 bool
1397 pool_drain(struct pool **ppp)
1398 {
1399 	bool reclaimed;
1400 	struct pool *pp;
1401 
1402 	KASSERT(!TAILQ_EMPTY(&pool_head));
1403 
1404 	pp = NULL;
1405 
1406 	/* Find next pool to drain, and add a reference. */
1407 	mutex_enter(&pool_head_lock);
1408 	do {
1409 		if (drainpp == NULL) {
1410 			drainpp = TAILQ_FIRST(&pool_head);
1411 		}
1412 		if (drainpp != NULL) {
1413 			pp = drainpp;
1414 			drainpp = TAILQ_NEXT(pp, pr_poollist);
1415 		}
1416 		/*
1417 		 * Skip completely idle pools.  We depend on at least
1418 		 * one pool in the system being active.
1419 		 */
1420 	} while (pp == NULL || pp->pr_npages == 0);
1421 	pp->pr_refcnt++;
1422 	mutex_exit(&pool_head_lock);
1423 
1424 	/* Drain the cache (if any) and the pool. */
1425 	reclaimed = pool_reclaim(pp);
1426 
1427 	/* Finally, unlock the pool. */
1428 	mutex_enter(&pool_head_lock);
1429 	pp->pr_refcnt--;
1430 	cv_broadcast(&pool_busy);
1431 	mutex_exit(&pool_head_lock);
1432 
1433 	if (ppp != NULL)
1434 		*ppp = pp;
1435 
1436 	return reclaimed;
1437 }
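
/*
 * Illustrative caller sketch (roughly how the pagedaemon might cycle
 * through pools; not part of this file):
 *
 *	struct pool *pp = NULL;
 *	bool reclaimed = pool_drain(&pp);
 *	if (!reclaimed)
 *		;	/* pp's pool had nothing idle to release */
 */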
1438 
1439 /*
1440  * Diagnostic helpers.
1441  */
1442 
1443 void
1444 pool_printall(const char *modif, void (*pr)(const char *, ...))
1445 {
1446 	struct pool *pp;
1447 
1448 	TAILQ_FOREACH(pp, &pool_head, pr_poollist) {
1449 		pool_printit(pp, modif, pr);
1450 	}
1451 }
1452 
1453 void
1454 pool_printit(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
1455 {
1456 
1457 	if (pp == NULL) {
1458 		(*pr)("Must specify a pool to print.\n");
1459 		return;
1460 	}
1461 
1462 	pool_print1(pp, modif, pr);
1463 }
1464 
1465 static void
1466 pool_print_pagelist(struct pool *pp, struct pool_pagelist *pl,
1467     void (*pr)(const char *, ...))
1468 {
1469 	struct pool_item_header *ph;
1470 #ifdef DIAGNOSTIC
1471 	struct pool_item *pi;
1472 #endif
1473 
1474 	LIST_FOREACH(ph, pl, ph_pagelist) {
1475 		(*pr)("\t\tpage %p, nmissing %d, time %" PRIu32 "\n",
1476 		    ph->ph_page, ph->ph_nmissing, ph->ph_time);
1477 #ifdef DIAGNOSTIC
1478 		if (!(pp->pr_roflags & PR_NOTOUCH)) {
1479 			LIST_FOREACH(pi, &ph->ph_itemlist, pi_list) {
1480 				if (pi->pi_magic != PI_MAGIC) {
1481 					(*pr)("\t\t\titem %p, magic 0x%x\n",
1482 					    pi, pi->pi_magic);
1483 				}
1484 			}
1485 		}
1486 #endif
1487 	}
1488 }
1489 
1490 static void
1491 pool_print1(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
1492 {
1493 	struct pool_item_header *ph;
1494 	pool_cache_t pc;
1495 	pcg_t *pcg;
1496 	pool_cache_cpu_t *cc;
1497 	uint64_t cpuhit, cpumiss;
1498 	int i, print_log = 0, print_pagelist = 0, print_cache = 0;
1499 	char c;
1500 
1501 	while ((c = *modif++) != '\0') {
1502 		if (c == 'l')
1503 			print_log = 1;
1504 		if (c == 'p')
1505 			print_pagelist = 1;
1506 		if (c == 'c')
1507 			print_cache = 1;
1508 	}
1509 
1510 	if ((pc = pp->pr_cache) != NULL) {
1511 		(*pr)("POOL CACHE");
1512 	} else {
1513 		(*pr)("POOL");
1514 	}
1515 
1516 	(*pr)(" %s: size %u, align %u, ioff %u, roflags 0x%08x\n",
1517 	    pp->pr_wchan, pp->pr_size, pp->pr_align, pp->pr_itemoffset,
1518 	    pp->pr_roflags);
1519 	(*pr)("\talloc %p\n", pp->pr_alloc);
1520 	(*pr)("\tminitems %u, minpages %u, maxpages %u, npages %u\n",
1521 	    pp->pr_minitems, pp->pr_minpages, pp->pr_maxpages, pp->pr_npages);
1522 	(*pr)("\titemsperpage %u, nitems %u, nout %u, hardlimit %u\n",
1523 	    pp->pr_itemsperpage, pp->pr_nitems, pp->pr_nout, pp->pr_hardlimit);
1524 
1525 	(*pr)("\tnget %lu, nfail %lu, nput %lu\n",
1526 	    pp->pr_nget, pp->pr_nfail, pp->pr_nput);
1527 	(*pr)("\tnpagealloc %lu, npagefree %lu, hiwat %u, nidle %lu\n",
1528 	    pp->pr_npagealloc, pp->pr_npagefree, pp->pr_hiwat, pp->pr_nidle);
1529 
1530 	if (print_pagelist == 0)
1531 		goto skip_pagelist;
1532 
1533 	if ((ph = LIST_FIRST(&pp->pr_emptypages)) != NULL)
1534 		(*pr)("\n\tempty page list:\n");
1535 	pool_print_pagelist(pp, &pp->pr_emptypages, pr);
1536 	if ((ph = LIST_FIRST(&pp->pr_fullpages)) != NULL)
1537 		(*pr)("\n\tfull page list:\n");
1538 	pool_print_pagelist(pp, &pp->pr_fullpages, pr);
1539 	if ((ph = LIST_FIRST(&pp->pr_partpages)) != NULL)
1540 		(*pr)("\n\tpartial-page list:\n");
1541 	pool_print_pagelist(pp, &pp->pr_partpages, pr);
1542 
1543 	if (pp->pr_curpage == NULL)
1544 		(*pr)("\tno current page\n");
1545 	else
1546 		(*pr)("\tcurpage %p\n", pp->pr_curpage->ph_page);
1547 
1548  skip_pagelist:
1549 	if (print_log == 0)
1550 		goto skip_log;
1551 
1552 	(*pr)("\n");
1553 
1554  skip_log:
1555 
1556 #define PR_GROUPLIST(pcg)						\
1557 	(*pr)("\t\tgroup %p: avail %d\n", pcg, pcg->pcg_avail);		\
1558 	for (i = 0; i < pcg->pcg_size; i++) {				\
1559 		if (pcg->pcg_objects[i].pcgo_pa !=			\
1560 		    POOL_PADDR_INVALID) {				\
1561 			(*pr)("\t\t\t%p, 0x%llx\n",			\
1562 			    pcg->pcg_objects[i].pcgo_va,		\
1563 			    (unsigned long long)			\
1564 			    pcg->pcg_objects[i].pcgo_pa);		\
1565 		} else {						\
1566 			(*pr)("\t\t\t%p\n",				\
1567 			    pcg->pcg_objects[i].pcgo_va);		\
1568 		}							\
1569 	}
1570 
1571 	if (pc != NULL) {
1572 		cpuhit = 0;
1573 		cpumiss = 0;
1574 		for (i = 0; i < __arraycount(pc->pc_cpus); i++) {
1575 			if ((cc = pc->pc_cpus[i]) == NULL)
1576 				continue;
1577 			cpuhit += cc->cc_hits;
1578 			cpumiss += cc->cc_misses;
1579 		}
1580 		(*pr)("\tcpu layer hits %llu misses %llu\n", cpuhit, cpumiss);
1581 		(*pr)("\tcache layer hits %llu misses %llu\n",
1582 		    pc->pc_hits, pc->pc_misses);
1583 		(*pr)("\tcache layer entry uncontended %llu contended %llu\n",
1584 		    pc->pc_hits + pc->pc_misses - pc->pc_contended,
1585 		    pc->pc_contended);
1586 		(*pr)("\tcache layer empty groups %u full groups %u\n",
1587 		    pc->pc_nempty, pc->pc_nfull);
1588 		if (print_cache) {
1589 			(*pr)("\tfull cache groups:\n");
1590 			for (pcg = pc->pc_fullgroups; pcg != NULL;
1591 			    pcg = pcg->pcg_next) {
1592 				PR_GROUPLIST(pcg);
1593 			}
1594 			(*pr)("\tempty cache groups:\n");
1595 			for (pcg = pc->pc_emptygroups; pcg != NULL;
1596 			    pcg = pcg->pcg_next) {
1597 				PR_GROUPLIST(pcg);
1598 			}
1599 		}
1600 	}
1601 #undef PR_GROUPLIST
1602 }
1603 
1604 static int
1605 pool_chk_page(struct pool *pp, const char *label, struct pool_item_header *ph)
1606 {
1607 	struct pool_item *pi;
1608 	void *page;
1609 	int n;
1610 
1611 	if ((pp->pr_roflags & PR_NOALIGN) == 0) {
1612 		page = (void *)((uintptr_t)ph & pp->pr_alloc->pa_pagemask);
1613 		if (page != ph->ph_page &&
1614 		    (pp->pr_roflags & PR_PHINPAGE) != 0) {
1615 			if (label != NULL)
1616 				printf("%s: ", label);
1617 			printf("pool(%p:%s): page inconsistency: page %p;"
1618 			       " at page head addr %p (p %p)\n", pp,
1619 				pp->pr_wchan, ph->ph_page,
1620 				ph, page);
1621 			return 1;
1622 		}
1623 	}
1624 
1625 	if ((pp->pr_roflags & PR_NOTOUCH) != 0)
1626 		return 0;
1627 
1628 	for (pi = LIST_FIRST(&ph->ph_itemlist), n = 0;
1629 	     pi != NULL;
1630 	     pi = LIST_NEXT(pi,pi_list), n++) {
1631 
1632 #ifdef DIAGNOSTIC
1633 		if (pi->pi_magic != PI_MAGIC) {
1634 			if (label != NULL)
1635 				printf("%s: ", label);
1636 			printf("pool(%s): free list modified: magic=%x;"
1637 			       " page %p; item ordinal %d; addr %p\n",
1638 				pp->pr_wchan, pi->pi_magic, ph->ph_page,
1639 				n, pi);
1640 			panic("pool");
1641 		}
1642 #endif
1643 		if ((pp->pr_roflags & PR_NOALIGN) != 0) {
1644 			continue;
1645 		}
1646 		page = (void *)((uintptr_t)pi & pp->pr_alloc->pa_pagemask);
1647 		if (page == ph->ph_page)
1648 			continue;
1649 
1650 		if (label != NULL)
1651 			printf("%s: ", label);
1652 		printf("pool(%p:%s): page inconsistency: page %p;"
1653 		       " item ordinal %d; addr %p (p %p)\n", pp,
1654 			pp->pr_wchan, ph->ph_page,
1655 			n, pi, page);
1656 		return 1;
1657 	}
1658 	return 0;
1659 }
1660 
1661 
1662 int
1663 pool_chk(struct pool *pp, const char *label)
1664 {
1665 	struct pool_item_header *ph;
1666 	int r = 0;
1667 
1668 	mutex_enter(&pp->pr_lock);
1669 	LIST_FOREACH(ph, &pp->pr_emptypages, ph_pagelist) {
1670 		r = pool_chk_page(pp, label, ph);
1671 		if (r) {
1672 			goto out;
1673 		}
1674 	}
1675 	LIST_FOREACH(ph, &pp->pr_fullpages, ph_pagelist) {
1676 		r = pool_chk_page(pp, label, ph);
1677 		if (r) {
1678 			goto out;
1679 		}
1680 	}
1681 	LIST_FOREACH(ph, &pp->pr_partpages, ph_pagelist) {
1682 		r = pool_chk_page(pp, label, ph);
1683 		if (r) {
1684 			goto out;
1685 		}
1686 	}
1687 
1688 out:
1689 	mutex_exit(&pp->pr_lock);
1690 	return (r);
1691 }
1692 
1693 /*
1694  * pool_cache_init:
1695  *
1696  *	Initialize a pool cache.
1697  */
1698 pool_cache_t
1699 pool_cache_init(size_t size, u_int align, u_int align_offset, u_int flags,
1700     const char *wchan, struct pool_allocator *palloc, int ipl,
1701     int (*ctor)(void *, void *, int), void (*dtor)(void *, void *), void *arg)
1702 {
1703 	pool_cache_t pc;
1704 
1705 	pc = pool_get(&cache_pool, PR_WAITOK);
1706 	if (pc == NULL)
1707 		return NULL;
1708 
1709 	pool_cache_bootstrap(pc, size, align, align_offset, flags, wchan,
1710 	   palloc, ipl, ctor, dtor, arg);
1711 
1712 	return pc;
1713 }
1714 
1715 /*
1716  * pool_cache_bootstrap:
1717  *
1718  *	Kernel-private version of pool_cache_init().  The caller
1719  *	provides initial storage.
1720  */
1721 void
1722 pool_cache_bootstrap(pool_cache_t pc, size_t size, u_int align,
1723     u_int align_offset, u_int flags, const char *wchan,
1724     struct pool_allocator *palloc, int ipl,
1725     int (*ctor)(void *, void *, int), void (*dtor)(void *, void *),
1726     void *arg)
1727 {
1728 	CPU_INFO_ITERATOR cii;
1729 	pool_cache_t pc1;
1730 	struct cpu_info *ci;
1731 	struct pool *pp;
1732 
1733 	pp = &pc->pc_pool;
1734 	if (palloc == NULL && ipl == IPL_NONE)
1735 		palloc = &pool_allocator_nointr;
1736 	pool_init(pp, size, align, align_offset, flags, wchan, palloc, ipl);
1737 	mutex_init(&pc->pc_lock, MUTEX_DEFAULT, ipl);
1738 
1739 	if (ctor == NULL) {
1740 		ctor = (int (*)(void *, void *, int))nullop;
1741 	}
1742 	if (dtor == NULL) {
1743 		dtor = (void (*)(void *, void *))nullop;
1744 	}
1745 
1746 	pc->pc_emptygroups = NULL;
1747 	pc->pc_fullgroups = NULL;
1748 	pc->pc_partgroups = NULL;
1749 	pc->pc_ctor = ctor;
1750 	pc->pc_dtor = dtor;
1751 	pc->pc_arg  = arg;
1752 	pc->pc_hits  = 0;
1753 	pc->pc_misses = 0;
1754 	pc->pc_nempty = 0;
1755 	pc->pc_npart = 0;
1756 	pc->pc_nfull = 0;
1757 	pc->pc_contended = 0;
1758 	pc->pc_refcnt = 0;
1759 	pc->pc_freecheck = NULL;
1760 
1761 	if ((flags & PR_LARGECACHE) != 0) {
1762 		pc->pc_pcgsize = PCG_NOBJECTS_LARGE;
1763 		pc->pc_pcgpool = &pcg_large_pool;
1764 	} else {
1765 		pc->pc_pcgsize = PCG_NOBJECTS_NORMAL;
1766 		pc->pc_pcgpool = &pcg_normal_pool;
1767 	}
1768 
1769 	/* Allocate per-CPU caches. */
1770 	memset(pc->pc_cpus, 0, sizeof(pc->pc_cpus));
1771 	pc->pc_ncpu = 0;
1772 	if (ncpu < 2) {
1773 		/* XXX For sparc: boot CPU is not attached yet. */
1774 		pool_cache_cpu_init1(curcpu(), pc);
1775 	} else {
1776 		for (CPU_INFO_FOREACH(cii, ci)) {
1777 			pool_cache_cpu_init1(ci, pc);
1778 		}
1779 	}
1780 
1781 	/* Add to list of all pools. */
1782 	if (__predict_true(!cold))
1783 		mutex_enter(&pool_head_lock);
1784 	TAILQ_FOREACH(pc1, &pool_cache_head, pc_cachelist) {
1785 		if (strcmp(pc1->pc_pool.pr_wchan, pc->pc_pool.pr_wchan) > 0)
1786 			break;
1787 	}
1788 	if (pc1 == NULL)
1789 		TAILQ_INSERT_TAIL(&pool_cache_head, pc, pc_cachelist);
1790 	else
1791 		TAILQ_INSERT_BEFORE(pc1, pc, pc_cachelist);
1792 	if (__predict_true(!cold))
1793 		mutex_exit(&pool_head_lock);
1794 
1795 	membar_sync();
1796 	pp->pr_cache = pc;
1797 }
1798 
1799 /*
1800  * pool_cache_destroy:
1801  *
1802  *	Destroy a pool cache.
1803  */
1804 void
1805 pool_cache_destroy(pool_cache_t pc)
1806 {
1807 
1808 	pool_cache_bootstrap_destroy(pc);
1809 	pool_put(&cache_pool, pc);
1810 }
1811 
1812 /*
1813  * pool_cache_bootstrap_destroy:
1814  *
1815  *	Destroy a pool cache.
1816  */
1817 void
1818 pool_cache_bootstrap_destroy(pool_cache_t pc)
1819 {
1820 	struct pool *pp = &pc->pc_pool;
1821 	u_int i;
1822 
1823 	/* Remove it from the global list. */
1824 	mutex_enter(&pool_head_lock);
1825 	while (pc->pc_refcnt != 0)
1826 		cv_wait(&pool_busy, &pool_head_lock);
1827 	TAILQ_REMOVE(&pool_cache_head, pc, pc_cachelist);
1828 	mutex_exit(&pool_head_lock);
1829 
1830 	/* First, invalidate the entire cache. */
1831 	pool_cache_invalidate(pc);
1832 
1833 	/* Disassociate it from the pool. */
1834 	mutex_enter(&pp->pr_lock);
1835 	pp->pr_cache = NULL;
1836 	mutex_exit(&pp->pr_lock);
1837 
1838 	/* Destroy per-CPU data */
1839 	for (i = 0; i < __arraycount(pc->pc_cpus); i++)
1840 		pool_cache_invalidate_cpu(pc, i);
1841 
1842 	/* Finally, destroy it. */
1843 	mutex_destroy(&pc->pc_lock);
1844 	pool_destroy(pp);
1845 }
1846 
1847 /*
1848  * pool_cache_cpu_init1:
1849  *
1850  *	Called for each pool_cache whenever a new CPU is attached.
1851  */
1852 static void
1853 pool_cache_cpu_init1(struct cpu_info *ci, pool_cache_t pc)
1854 {
1855 	pool_cache_cpu_t *cc;
1856 	int index;
1857 
1858 	index = ci->ci_index;
1859 
1860 	KASSERT(index < __arraycount(pc->pc_cpus));
1861 
1862 	if ((cc = pc->pc_cpus[index]) != NULL) {
1863 		KASSERT(cc->cc_cpuindex == index);
1864 		return;
1865 	}
1866 
1867 	/*
1868 	 * The first CPU is 'free'.  This needs to be the case for
1869 	 * bootstrap - we may not be able to allocate yet.
1870 	 */
1871 	if (pc->pc_ncpu == 0) {
1872 		cc = &pc->pc_cpu0;
1873 		pc->pc_ncpu = 1;
1874 	} else {
1875 		mutex_enter(&pc->pc_lock);
1876 		pc->pc_ncpu++;
1877 		mutex_exit(&pc->pc_lock);
1878 		cc = pool_get(&cache_cpu_pool, PR_WAITOK);
1879 	}
1880 
1881 	cc->cc_ipl = pc->pc_pool.pr_ipl;
1882 	cc->cc_iplcookie = makeiplcookie(cc->cc_ipl);
1883 	cc->cc_cache = pc;
1884 	cc->cc_cpuindex = index;
1885 	cc->cc_hits = 0;
1886 	cc->cc_misses = 0;
1887 	cc->cc_current = __UNCONST(&pcg_dummy);
1888 	cc->cc_previous = __UNCONST(&pcg_dummy);
1889 
1890 	pc->pc_cpus[index] = cc;
1891 }
1892 
1893 /*
1894  * pool_cache_cpu_init:
1895  *
1896  *	Called whenever a new CPU is attached.
1897  */
1898 void
1899 pool_cache_cpu_init(struct cpu_info *ci)
1900 {
1901 	pool_cache_t pc;
1902 
1903 	mutex_enter(&pool_head_lock);
1904 	TAILQ_FOREACH(pc, &pool_cache_head, pc_cachelist) {
1905 		pc->pc_refcnt++;
1906 		mutex_exit(&pool_head_lock);
1907 
1908 		pool_cache_cpu_init1(ci, pc);
1909 
1910 		mutex_enter(&pool_head_lock);
1911 		pc->pc_refcnt--;
1912 		cv_broadcast(&pool_busy);
1913 	}
1914 	mutex_exit(&pool_head_lock);
1915 }
1916 
1917 /*
1918  * pool_cache_reclaim:
1919  *
1920  *	Reclaim memory from a pool cache.
1921  */
1922 bool
1923 pool_cache_reclaim(pool_cache_t pc)
1924 {
1925 
1926 	return pool_reclaim(&pc->pc_pool);
1927 }
1928 
1929 static void
1930 pool_cache_destruct_object1(pool_cache_t pc, void *object)
1931 {
1932 
1933 	(*pc->pc_dtor)(pc->pc_arg, object);
1934 	pool_put(&pc->pc_pool, object);
1935 }
1936 
1937 /*
1938  * pool_cache_destruct_object:
1939  *
1940  *	Force destruction of an object and its release back into
1941  *	the pool.
1942  */
1943 void
1944 pool_cache_destruct_object(pool_cache_t pc, void *object)
1945 {
1946 
1947 	FREECHECK_IN(&pc->pc_freecheck, object);
1948 
1949 	pool_cache_destruct_object1(pc, object);
1950 }
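
/*
 * Example (illustrative): a caller that knows an object's constructed
 * state has gone stale can bypass the cache layer entirely:
 *
 *	pool_cache_destruct_object(foo_cache, f);
 *
 * rather than pool_cache_put(), which would keep the constructed
 * object around for reuse.
 */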
1951 
1952 /*
1953  * pool_cache_invalidate_groups:
1954  *
1955  *	Invalidate a chain of groups and destruct all objects.
1956  */
1957 static void
1958 pool_cache_invalidate_groups(pool_cache_t pc, pcg_t *pcg)
1959 {
1960 	void *object;
1961 	pcg_t *next;
1962 	int i;
1963 
1964 	for (; pcg != NULL; pcg = next) {
1965 		next = pcg->pcg_next;
1966 
1967 		for (i = 0; i < pcg->pcg_avail; i++) {
1968 			object = pcg->pcg_objects[i].pcgo_va;
1969 			pool_cache_destruct_object1(pc, object);
1970 		}
1971 
1972 		if (pcg->pcg_size == PCG_NOBJECTS_LARGE) {
1973 			pool_put(&pcg_large_pool, pcg);
1974 		} else {
1975 			KASSERT(pcg->pcg_size == PCG_NOBJECTS_NORMAL);
1976 			pool_put(&pcg_normal_pool, pcg);
1977 		}
1978 	}
1979 }
1980 
1981 /*
1982  * pool_cache_invalidate:
1983  *
1984  *	Invalidate a pool cache (destruct and release all of the
1985  *	cached objects).  Does not reclaim objects from the pool.
1986  *
 *	Note: for pool caches that provide constructed objects, it is
 *	assumed that another level of synchronization exists between
 *	the input to the constructor and the cache invalidation.
1990  *
1991  *	Invalidation is a costly process and should not be called from
1992  *	interrupt context.
1993  */
1994 void
1995 pool_cache_invalidate(pool_cache_t pc)
1996 {
1997 	uint64_t where;
1998 	pcg_t *full, *empty, *part;
1999 
2000 	KASSERT(!cpu_intr_p() && !cpu_softintr_p());
2001 
2002 	if (ncpu < 2 || !mp_online) {
2003 		/*
2004 		 * We might be called early enough in the boot process
2005 		 * for the CPU data structures to not be fully initialized.
		 * In this case, transfer the content of the local CPU's
		 * cache back into the global cache, as only this CPU is
		 * currently running.
2009 		 */
2010 		pool_cache_transfer(pc);
2011 	} else {
2012 		/*
		 * Signal all CPUs that they must transfer their local
		 * cache back to the global pool, then wait for the xcall
		 * to complete.
2016 		 */
2017 		where = xc_broadcast(0, (xcfunc_t)pool_cache_transfer,
2018 		    pc, NULL);
2019 		xc_wait(where);
2020 	}
2021 
2022 	/* Empty pool caches, then invalidate objects */
2023 	mutex_enter(&pc->pc_lock);
2024 	full = pc->pc_fullgroups;
2025 	empty = pc->pc_emptygroups;
2026 	part = pc->pc_partgroups;
2027 	pc->pc_fullgroups = NULL;
2028 	pc->pc_emptygroups = NULL;
2029 	pc->pc_partgroups = NULL;
2030 	pc->pc_nfull = 0;
2031 	pc->pc_nempty = 0;
2032 	pc->pc_npart = 0;
2033 	mutex_exit(&pc->pc_lock);
2034 
2035 	pool_cache_invalidate_groups(pc, full);
2036 	pool_cache_invalidate_groups(pc, empty);
2037 	pool_cache_invalidate_groups(pc, part);
2038 }
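
/*
 * Example (illustrative): if the state captured by the constructor
 * becomes stale, flushing the cache forces future gets to construct
 * fresh objects:
 *
 *	pool_cache_invalidate(foo_cache);
 *
 * pool_cache_bootstrap_destroy() performs this step internally, so an
 * explicit call is only needed while the cache remains in use.
 */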
2039 
2040 /*
2041  * pool_cache_invalidate_cpu:
2042  *
 *	Invalidate all CPU-bound cached objects in the pool cache, the
 *	CPU being identified by its associated index.
 *	It is the caller's responsibility to ensure that no operation is
 *	taking place on this pool cache while doing this invalidation.
 *	WARNING: as no inter-CPU locking is enforced, trying to
 *	invalidate pool cached objects from a CPU different from the one
 *	currently running may result in undefined behaviour.
2050  */
2051 static void
2052 pool_cache_invalidate_cpu(pool_cache_t pc, u_int index)
2053 {
2054 	pool_cache_cpu_t *cc;
2055 	pcg_t *pcg;
2056 
2057 	if ((cc = pc->pc_cpus[index]) == NULL)
2058 		return;
2059 
2060 	if ((pcg = cc->cc_current) != &pcg_dummy) {
2061 		pcg->pcg_next = NULL;
2062 		pool_cache_invalidate_groups(pc, pcg);
2063 	}
2064 	if ((pcg = cc->cc_previous) != &pcg_dummy) {
2065 		pcg->pcg_next = NULL;
2066 		pool_cache_invalidate_groups(pc, pcg);
2067 	}
2068 	if (cc != &pc->pc_cpu0)
		pool_put(&cache_cpu_pool, cc);
}
2072 
2073 void
2074 pool_cache_set_drain_hook(pool_cache_t pc, void (*fn)(void *, int), void *arg)
2075 {
2076 
2077 	pool_set_drain_hook(&pc->pc_pool, fn, arg);
2078 }
2079 
2080 void
2081 pool_cache_setlowat(pool_cache_t pc, int n)
2082 {
2083 
2084 	pool_setlowat(&pc->pc_pool, n);
2085 }
2086 
2087 void
2088 pool_cache_sethiwat(pool_cache_t pc, int n)
2089 {
2090 
2091 	pool_sethiwat(&pc->pc_pool, n);
2092 }
2093 
2094 void
2095 pool_cache_sethardlimit(pool_cache_t pc, int n, const char *warnmess, int ratecap)
2096 {
2097 
2098 	pool_sethardlimit(&pc->pc_pool, n, warnmess, ratecap);
2099 }
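
/*
 * Example (illustrative): the wrappers above tune the underlying
 * pool; the figures here are arbitrary.
 *
 *	pool_cache_setlowat(foo_cache, 16);
 *	pool_cache_sethiwat(foo_cache, 1024);
 *	pool_cache_sethardlimit(foo_cache, 4096,
 *	    "foo: hard limit reached", 60);
 *
 * The hard limit caps the pool at 4096 items and logs the warning
 * message at most once every 60 seconds.
 */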
2100 
2101 static bool __noinline
2102 pool_cache_get_slow(pool_cache_cpu_t *cc, int s, void **objectp,
2103 		    paddr_t *pap, int flags)
2104 {
2105 	pcg_t *pcg, *cur;
2106 	uint64_t ncsw;
2107 	pool_cache_t pc;
2108 	void *object;
2109 
2110 	KASSERT(cc->cc_current->pcg_avail == 0);
2111 	KASSERT(cc->cc_previous->pcg_avail == 0);
2112 
2113 	pc = cc->cc_cache;
2114 	cc->cc_misses++;
2115 
2116 	/*
	 * Nothing was available locally.  Try to grab a group
2118 	 * from the cache.
2119 	 */
2120 	if (__predict_false(!mutex_tryenter(&pc->pc_lock))) {
2121 		ncsw = curlwp->l_ncsw;
2122 		mutex_enter(&pc->pc_lock);
2123 		pc->pc_contended++;
2124 
2125 		/*
2126 		 * If we context switched while locking, then
2127 		 * our view of the per-CPU data is invalid:
2128 		 * retry.
2129 		 */
2130 		if (curlwp->l_ncsw != ncsw) {
2131 			mutex_exit(&pc->pc_lock);
2132 			return true;
2133 		}
2134 	}
2135 
2136 	if (__predict_true((pcg = pc->pc_fullgroups) != NULL)) {
2137 		/*
2138 		 * If there's a full group, release our empty
2139 		 * group back to the cache.  Install the full
2140 		 * group as cc_current and return.
2141 		 */
2142 		if (__predict_true((cur = cc->cc_current) != &pcg_dummy)) {
2143 			KASSERT(cur->pcg_avail == 0);
2144 			cur->pcg_next = pc->pc_emptygroups;
2145 			pc->pc_emptygroups = cur;
2146 			pc->pc_nempty++;
2147 		}
2148 		KASSERT(pcg->pcg_avail == pcg->pcg_size);
2149 		cc->cc_current = pcg;
2150 		pc->pc_fullgroups = pcg->pcg_next;
2151 		pc->pc_hits++;
2152 		pc->pc_nfull--;
2153 		mutex_exit(&pc->pc_lock);
2154 		return true;
2155 	}
2156 
2157 	/*
	 * Nothing available locally or in the cache.  Take the slow
2159 	 * path: fetch a new object from the pool and construct
2160 	 * it.
2161 	 */
2162 	pc->pc_misses++;
2163 	mutex_exit(&pc->pc_lock);
2164 	splx(s);
2165 
2166 	object = pool_get(&pc->pc_pool, flags);
2167 	*objectp = object;
2168 	if (__predict_false(object == NULL))
2169 		return false;
2170 
2171 	if (__predict_false((*pc->pc_ctor)(pc->pc_arg, object, flags) != 0)) {
2172 		pool_put(&pc->pc_pool, object);
2173 		*objectp = NULL;
2174 		return false;
2175 	}
2176 
2177 	KASSERT((((vaddr_t)object + pc->pc_pool.pr_itemoffset) &
2178 	    (pc->pc_pool.pr_align - 1)) == 0);
2179 
2180 	if (pap != NULL) {
2181 #ifdef POOL_VTOPHYS
2182 		*pap = POOL_VTOPHYS(object);
2183 #else
2184 		*pap = POOL_PADDR_INVALID;
2185 #endif
2186 	}
2187 
2188 	FREECHECK_OUT(&pc->pc_freecheck, object);
2189 	return false;
2190 }
2191 
2192 /*
2193  * pool_cache_get{,_paddr}:
2194  *
2195  *	Get an object from a pool cache (optionally returning
2196  *	the physical address of the object).
2197  */
2198 void *
2199 pool_cache_get_paddr(pool_cache_t pc, int flags, paddr_t *pap)
2200 {
2201 	pool_cache_cpu_t *cc;
2202 	pcg_t *pcg;
2203 	void *object;
2204 	int s;
2205 
2206 	KASSERTMSG((!cpu_intr_p() && !cpu_softintr_p()) ||
2207 	    (pc->pc_pool.pr_ipl != IPL_NONE || cold || panicstr != NULL),
2208 	    "pool '%s' is IPL_NONE, but called from interrupt context\n",
2209 	    pc->pc_pool.pr_wchan);
2210 
2211 	if (flags & PR_WAITOK) {
2212 		ASSERT_SLEEPABLE();
2213 	}
2214 
2215 	/* Lock out interrupts and disable preemption. */
2216 	s = splvm();
2217 	while (/* CONSTCOND */ true) {
		/* Try to allocate an object from the current group. */
2219 		cc = pc->pc_cpus[curcpu()->ci_index];
2220 		KASSERT(cc->cc_cache == pc);
		pcg = cc->cc_current;
2222 		if (__predict_true(pcg->pcg_avail > 0)) {
2223 			object = pcg->pcg_objects[--pcg->pcg_avail].pcgo_va;
2224 			if (__predict_false(pap != NULL))
2225 				*pap = pcg->pcg_objects[pcg->pcg_avail].pcgo_pa;
2226 #if defined(DIAGNOSTIC)
2227 			pcg->pcg_objects[pcg->pcg_avail].pcgo_va = NULL;
2228 			KASSERT(pcg->pcg_avail < pcg->pcg_size);
2229 			KASSERT(object != NULL);
2230 #endif
2231 			cc->cc_hits++;
2232 			splx(s);
2233 			FREECHECK_OUT(&pc->pc_freecheck, object);
2234 			return object;
2235 		}
2236 
2237 		/*
2238 		 * That failed.  If the previous group isn't empty, swap
2239 		 * it with the current group and allocate from there.
2240 		 */
2241 		pcg = cc->cc_previous;
2242 		if (__predict_true(pcg->pcg_avail > 0)) {
2243 			cc->cc_previous = cc->cc_current;
2244 			cc->cc_current = pcg;
2245 			continue;
2246 		}
2247 
2248 		/*
2249 		 * Can't allocate from either group: try the slow path.
2250 		 * If get_slow() allocated an object for us, or if
2251 		 * no more objects are available, it will return false.
2252 		 * Otherwise, we need to retry.
2253 		 */
2254 		if (!pool_cache_get_slow(cc, s, &object, pap, flags))
2255 			break;
2256 	}
2257 
2258 	return object;
2259 }
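
/*
 * Example (illustrative): a driver needing the physical address of an
 * object, e.g. to hand a buffer to a device, can use the _paddr
 * variant directly; pool_cache_get() passes NULL for pap.
 *
 *	paddr_t pa;
 *	void *buf = pool_cache_get_paddr(foo_cache, PR_NOWAIT, &pa);
 *	if (buf != NULL)
 *		... program the device with pa ...
 */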
2260 
2261 static bool __noinline
2262 pool_cache_put_slow(pool_cache_cpu_t *cc, int s, void *object)
2263 {
2264 	pcg_t *pcg, *cur;
2265 	uint64_t ncsw;
2266 	pool_cache_t pc;
2267 
2268 	KASSERT(cc->cc_current->pcg_avail == cc->cc_current->pcg_size);
2269 	KASSERT(cc->cc_previous->pcg_avail == cc->cc_previous->pcg_size);
2270 
2271 	pc = cc->cc_cache;
2272 	pcg = NULL;
2273 	cc->cc_misses++;
2274 
2275 	/*
2276 	 * If there are no empty groups in the cache then allocate one
2277 	 * while still unlocked.
2278 	 */
2279 	if (__predict_false(pc->pc_emptygroups == NULL)) {
2280 		if (__predict_true(!pool_cache_disable)) {
2281 			pcg = pool_get(pc->pc_pcgpool, PR_NOWAIT);
2282 		}
2283 		if (__predict_true(pcg != NULL)) {
2284 			pcg->pcg_avail = 0;
2285 			pcg->pcg_size = pc->pc_pcgsize;
2286 		}
2287 	}
2288 
2289 	/* Lock the cache. */
2290 	if (__predict_false(!mutex_tryenter(&pc->pc_lock))) {
2291 		ncsw = curlwp->l_ncsw;
2292 		mutex_enter(&pc->pc_lock);
2293 		pc->pc_contended++;
2294 
2295 		/*
2296 		 * If we context switched while locking, then our view of
2297 		 * the per-CPU data is invalid: retry.
2298 		 */
2299 		if (__predict_false(curlwp->l_ncsw != ncsw)) {
2300 			mutex_exit(&pc->pc_lock);
2301 			if (pcg != NULL) {
2302 				pool_put(pc->pc_pcgpool, pcg);
2303 			}
2304 			return true;
2305 		}
2306 	}
2307 
	/* If we didn't allocate an empty group, take one from the cache. */
2309 	if (pcg == NULL && pc->pc_emptygroups != NULL) {
2310 		pcg = pc->pc_emptygroups;
2311 		pc->pc_emptygroups = pcg->pcg_next;
2312 		pc->pc_nempty--;
2313 	}
2314 
2315 	/*
	 * If there's an empty group, release our full group back
	 * to the cache.  Install the empty group on the local CPU
	 * and return.
2319 	 */
2320 	if (pcg != NULL) {
2321 		KASSERT(pcg->pcg_avail == 0);
2322 		if (__predict_false(cc->cc_previous == &pcg_dummy)) {
2323 			cc->cc_previous = pcg;
2324 		} else {
2325 			cur = cc->cc_current;
2326 			if (__predict_true(cur != &pcg_dummy)) {
2327 				KASSERT(cur->pcg_avail == cur->pcg_size);
2328 				cur->pcg_next = pc->pc_fullgroups;
2329 				pc->pc_fullgroups = cur;
2330 				pc->pc_nfull++;
2331 			}
2332 			cc->cc_current = pcg;
2333 		}
2334 		pc->pc_hits++;
2335 		mutex_exit(&pc->pc_lock);
2336 		return true;
2337 	}
2338 
2339 	/*
	 * No empty group was available locally or in the cache, and
	 * we failed to allocate one.  Take the slow path and destroy
	 * the object here and now.
2343 	 */
2344 	pc->pc_misses++;
2345 	mutex_exit(&pc->pc_lock);
2346 	splx(s);
2347 	pool_cache_destruct_object(pc, object);
2348 
2349 	return false;
2350 }
2351 
2352 /*
2353  * pool_cache_put{,_paddr}:
2354  *
2355  *	Put an object back to the pool cache (optionally caching the
2356  *	physical address of the object).
2357  */
2358 void
2359 pool_cache_put_paddr(pool_cache_t pc, void *object, paddr_t pa)
2360 {
2361 	pool_cache_cpu_t *cc;
2362 	pcg_t *pcg;
2363 	int s;
2364 
2365 	KASSERT(object != NULL);
2366 	FREECHECK_IN(&pc->pc_freecheck, object);
2367 
2368 	/* Lock out interrupts and disable preemption. */
2369 	s = splvm();
2370 	while (/* CONSTCOND */ true) {
2371 		/* If the current group isn't full, release it there. */
2372 		cc = pc->pc_cpus[curcpu()->ci_index];
2373 		KASSERT(cc->cc_cache == pc);
		pcg = cc->cc_current;
2375 		if (__predict_true(pcg->pcg_avail < pcg->pcg_size)) {
2376 			pcg->pcg_objects[pcg->pcg_avail].pcgo_va = object;
2377 			pcg->pcg_objects[pcg->pcg_avail].pcgo_pa = pa;
2378 			pcg->pcg_avail++;
2379 			cc->cc_hits++;
2380 			splx(s);
2381 			return;
2382 		}
2383 
2384 		/*
2385 		 * That failed.  If the previous group isn't full, swap
2386 		 * it with the current group and try again.
2387 		 */
2388 		pcg = cc->cc_previous;
2389 		if (__predict_true(pcg->pcg_avail < pcg->pcg_size)) {
2390 			cc->cc_previous = cc->cc_current;
2391 			cc->cc_current = pcg;
2392 			continue;
2393 		}
2394 
2395 		/*
2396 		 * Can't free to either group: try the slow path.
2397 		 * If put_slow() releases the object for us, it
2398 		 * will return false.  Otherwise we need to retry.
2399 		 */
2400 		if (!pool_cache_put_slow(cc, s, object))
2401 			break;
2402 	}
2403 }
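
/*
 * Note: the common pool_cache_put() entry point is a wrapper in
 * sys/pool.h that calls pool_cache_put_paddr() with
 * POOL_PADDR_INVALID as the physical address.
 */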
2404 
2405 /*
2406  * pool_cache_transfer:
2407  *
2408  *	Transfer objects from the per-CPU cache to the global cache.
 *	Runs within a cross-call thread, or is called directly when
 *	only one CPU is running.
2410  */
2411 static void
2412 pool_cache_transfer(pool_cache_t pc)
2413 {
2414 	pool_cache_cpu_t *cc;
2415 	pcg_t *prev, *cur, **list;
2416 	int s;
2417 
2418 	s = splvm();
2419 	mutex_enter(&pc->pc_lock);
2420 	cc = pc->pc_cpus[curcpu()->ci_index];
2421 	cur = cc->cc_current;
2422 	cc->cc_current = __UNCONST(&pcg_dummy);
2423 	prev = cc->cc_previous;
2424 	cc->cc_previous = __UNCONST(&pcg_dummy);
2425 	if (cur != &pcg_dummy) {
2426 		if (cur->pcg_avail == cur->pcg_size) {
2427 			list = &pc->pc_fullgroups;
2428 			pc->pc_nfull++;
2429 		} else if (cur->pcg_avail == 0) {
2430 			list = &pc->pc_emptygroups;
2431 			pc->pc_nempty++;
2432 		} else {
2433 			list = &pc->pc_partgroups;
2434 			pc->pc_npart++;
2435 		}
2436 		cur->pcg_next = *list;
2437 		*list = cur;
2438 	}
2439 	if (prev != &pcg_dummy) {
2440 		if (prev->pcg_avail == prev->pcg_size) {
2441 			list = &pc->pc_fullgroups;
2442 			pc->pc_nfull++;
2443 		} else if (prev->pcg_avail == 0) {
2444 			list = &pc->pc_emptygroups;
2445 			pc->pc_nempty++;
2446 		} else {
2447 			list = &pc->pc_partgroups;
2448 			pc->pc_npart++;
2449 		}
2450 		prev->pcg_next = *list;
2451 		*list = prev;
2452 	}
2453 	mutex_exit(&pc->pc_lock);
2454 	splx(s);
2455 }
2456 
2457 /*
2458  * Pool backend allocators.
2459  *
2460  * Each pool has a backend allocator that handles allocation, deallocation,
2461  * and any additional draining that might be needed.
2462  *
2463  * We provide two standard allocators:
2464  *
2465  *	pool_allocator_kmem - the default when no allocator is specified
2466  *
2467  *	pool_allocator_nointr - used for pools that will not be accessed
2468  *	in interrupt context.
2469  */
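
/*
 * Example (illustrative sketch): a pool may also supply a custom
 * allocator by filling in a struct pool_allocator and passing it to
 * pool_init().  The foo names are hypothetical.
 *
 *	static struct pool foo_pool;
 *
 *	static struct pool_allocator foo_allocator = {
 *		.pa_alloc = foo_page_alloc,
 *		.pa_free = foo_page_free,
 *		.pa_pagesz = 0,		(0 selects the default page size)
 *	};
 *
 *	pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0, "foopl",
 *	    &foo_allocator, IPL_NONE);
 */
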
2470 void	*pool_page_alloc(struct pool *, int);
2471 void	pool_page_free(struct pool *, void *);
2472 
2473 #ifdef POOL_SUBPAGE
2474 struct pool_allocator pool_allocator_kmem_fullpage = {
2475 	.pa_alloc = pool_page_alloc,
2476 	.pa_free = pool_page_free,
2477 	.pa_pagesz = 0
2478 };
2479 #else
2480 struct pool_allocator pool_allocator_kmem = {
2481 	.pa_alloc = pool_page_alloc,
2482 	.pa_free = pool_page_free,
2483 	.pa_pagesz = 0
2484 };
2485 #endif
2486 
2487 #ifdef POOL_SUBPAGE
2488 struct pool_allocator pool_allocator_nointr_fullpage = {
2489 	.pa_alloc = pool_page_alloc,
2490 	.pa_free = pool_page_free,
2491 	.pa_pagesz = 0
2492 };
2493 #else
2494 struct pool_allocator pool_allocator_nointr = {
2495 	.pa_alloc = pool_page_alloc,
2496 	.pa_free = pool_page_free,
2497 	.pa_pagesz = 0
2498 };
2499 #endif
2500 
2501 #ifdef POOL_SUBPAGE
2502 void	*pool_subpage_alloc(struct pool *, int);
2503 void	pool_subpage_free(struct pool *, void *);
2504 
2505 struct pool_allocator pool_allocator_kmem = {
2506 	.pa_alloc = pool_subpage_alloc,
2507 	.pa_free = pool_subpage_free,
2508 	.pa_pagesz = POOL_SUBPAGE
2509 };
2510 
2511 struct pool_allocator pool_allocator_nointr = {
2512 	.pa_alloc = pool_subpage_alloc,
2513 	.pa_free = pool_subpage_free,
2514 	.pa_pagesz = POOL_SUBPAGE
2515 };
2516 #endif /* POOL_SUBPAGE */
2517 
2518 static void *
2519 pool_allocator_alloc(struct pool *pp, int flags)
2520 {
2521 	struct pool_allocator *pa = pp->pr_alloc;
2522 	void *res;
2523 
2524 	res = (*pa->pa_alloc)(pp, flags);
2525 	if (res == NULL && (flags & PR_WAITOK) == 0) {
2526 		/*
2527 		 * We only run the drain hook here if PR_NOWAIT.
2528 		 * In other cases, the hook will be run in
2529 		 * pool_reclaim().
2530 		 */
2531 		if (pp->pr_drain_hook != NULL) {
2532 			(*pp->pr_drain_hook)(pp->pr_drain_hook_arg, flags);
2533 			res = (*pa->pa_alloc)(pp, flags);
2534 		}
2535 	}
2536 	return res;
2537 }
2538 
2539 static void
2540 pool_allocator_free(struct pool *pp, void *v)
2541 {
2542 	struct pool_allocator *pa = pp->pr_alloc;
2543 
2544 	(*pa->pa_free)(pp, v);
2545 }
2546 
2547 void *
2548 pool_page_alloc(struct pool *pp, int flags)
2549 {
	const vm_flag_t vflags = (flags & PR_WAITOK) ? VM_SLEEP : VM_NOSLEEP;
2551 	vmem_addr_t va;
2552 	int ret;
2553 
2554 	ret = uvm_km_kmem_alloc(kmem_va_arena, pp->pr_alloc->pa_pagesz,
2555 	    vflags | VM_INSTANTFIT, &va);
2556 
2557 	return ret ? NULL : (void *)va;
2558 }
2559 
2560 void
2561 pool_page_free(struct pool *pp, void *v)
2562 {
2563 
2564 	uvm_km_kmem_free(kmem_va_arena, (vaddr_t)v, pp->pr_alloc->pa_pagesz);
2565 }
2566 
2567 static void *
2568 pool_page_alloc_meta(struct pool *pp, int flags)
2569 {
	const vm_flag_t vflags = (flags & PR_WAITOK) ? VM_SLEEP : VM_NOSLEEP;
2571 	vmem_addr_t va;
2572 	int ret;
2573 
2574 	ret = vmem_alloc(kmem_meta_arena, pp->pr_alloc->pa_pagesz,
2575 	    vflags | VM_INSTANTFIT, &va);
2576 
2577 	return ret ? NULL : (void *)va;
2578 }
2579 
2580 static void
2581 pool_page_free_meta(struct pool *pp, void *v)
2582 {
2583 
2584 	vmem_free(kmem_meta_arena, (vmem_addr_t)v, pp->pr_alloc->pa_pagesz);
2585 }
2586 
2587 #ifdef POOL_SUBPAGE
2588 /* Sub-page allocator, for machines with large hardware pages. */
2589 void *
2590 pool_subpage_alloc(struct pool *pp, int flags)
2591 {
2592 	return pool_get(&psppool, flags);
2593 }
2594 
2595 void
2596 pool_subpage_free(struct pool *pp, void *v)
2597 {
2598 	pool_put(&psppool, v);
2599 }
2600 
2601 #endif /* POOL_SUBPAGE */
2602 
2603 #if defined(DDB)
2604 static bool
2605 pool_in_page(struct pool *pp, struct pool_item_header *ph, uintptr_t addr)
2606 {
2607 
2608 	return (uintptr_t)ph->ph_page <= addr &&
2609 	    addr < (uintptr_t)ph->ph_page + pp->pr_alloc->pa_pagesz;
2610 }
2611 
2612 static bool
2613 pool_in_item(struct pool *pp, void *item, uintptr_t addr)
2614 {
2615 
2616 	return (uintptr_t)item <= addr && addr < (uintptr_t)item + pp->pr_size;
2617 }
2618 
2619 static bool
2620 pool_in_cg(struct pool *pp, struct pool_cache_group *pcg, uintptr_t addr)
2621 {
2622 	int i;
2623 
2624 	if (pcg == NULL) {
2625 		return false;
2626 	}
2627 	for (i = 0; i < pcg->pcg_avail; i++) {
2628 		if (pool_in_item(pp, pcg->pcg_objects[i].pcgo_va, addr)) {
2629 			return true;
2630 		}
2631 	}
2632 	return false;
2633 }
2634 
2635 static bool
2636 pool_allocated(struct pool *pp, struct pool_item_header *ph, uintptr_t addr)
2637 {
2638 
2639 	if ((pp->pr_roflags & PR_NOTOUCH) != 0) {
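		/*
		 * In ph_bitmap a set bit marks a free item, so the
		 * item covering this address is allocated iff its bit
		 * is clear.
		 */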
2640 		unsigned int idx = pr_item_notouch_index(pp, ph, (void *)addr);
2641 		pool_item_bitmap_t *bitmap =
2642 		    ph->ph_bitmap + (idx / BITMAP_SIZE);
2643 		pool_item_bitmap_t mask = 1 << (idx & BITMAP_MASK);
2644 
2645 		return (*bitmap & mask) == 0;
2646 	} else {
2647 		struct pool_item *pi;
2648 
2649 		LIST_FOREACH(pi, &ph->ph_itemlist, pi_list) {
2650 			if (pool_in_item(pp, pi, addr)) {
2651 				return false;
2652 			}
2653 		}
2654 		return true;
2655 	}
2656 }
2657 
2658 void
2659 pool_whatis(uintptr_t addr, void (*pr)(const char *, ...))
2660 {
2661 	struct pool *pp;
2662 
2663 	TAILQ_FOREACH(pp, &pool_head, pr_poollist) {
2664 		struct pool_item_header *ph;
2665 		uintptr_t item;
2666 		bool allocated = true;
2667 		bool incache = false;
2668 		bool incpucache = false;
2669 		char cpucachestr[32];
2670 
2671 		if ((pp->pr_roflags & PR_PHINPAGE) != 0) {
2672 			LIST_FOREACH(ph, &pp->pr_fullpages, ph_pagelist) {
2673 				if (pool_in_page(pp, ph, addr)) {
2674 					goto found;
2675 				}
2676 			}
2677 			LIST_FOREACH(ph, &pp->pr_partpages, ph_pagelist) {
2678 				if (pool_in_page(pp, ph, addr)) {
2679 					allocated =
2680 					    pool_allocated(pp, ph, addr);
2681 					goto found;
2682 				}
2683 			}
2684 			LIST_FOREACH(ph, &pp->pr_emptypages, ph_pagelist) {
2685 				if (pool_in_page(pp, ph, addr)) {
2686 					allocated = false;
2687 					goto found;
2688 				}
2689 			}
2690 			continue;
2691 		} else {
2692 			ph = pr_find_pagehead_noalign(pp, (void *)addr);
2693 			if (ph == NULL || !pool_in_page(pp, ph, addr)) {
2694 				continue;
2695 			}
2696 			allocated = pool_allocated(pp, ph, addr);
2697 		}
2698 found:
2699 		if (allocated && pp->pr_cache) {
2700 			pool_cache_t pc = pp->pr_cache;
2701 			struct pool_cache_group *pcg;
2702 			int i;
2703 
2704 			for (pcg = pc->pc_fullgroups; pcg != NULL;
2705 			    pcg = pcg->pcg_next) {
2706 				if (pool_in_cg(pp, pcg, addr)) {
2707 					incache = true;
2708 					goto print;
2709 				}
2710 			}
2711 			for (i = 0; i < __arraycount(pc->pc_cpus); i++) {
2712 				pool_cache_cpu_t *cc;
2713 
2714 				if ((cc = pc->pc_cpus[i]) == NULL) {
2715 					continue;
2716 				}
2717 				if (pool_in_cg(pp, cc->cc_current, addr) ||
2718 				    pool_in_cg(pp, cc->cc_previous, addr)) {
2719 					struct cpu_info *ci =
2720 					    cpu_lookup(i);
2721 
2722 					incpucache = true;
2723 					snprintf(cpucachestr,
2724 					    sizeof(cpucachestr),
2725 					    "cached by CPU %u",
2726 					    ci->ci_index);
2727 					goto print;
2728 				}
2729 			}
2730 		}
2731 print:
2732 		item = (uintptr_t)ph->ph_page + ph->ph_off;
2733 		item = item + rounddown(addr - item, pp->pr_size);
2734 		(*pr)("%p is %p+%zu in POOL '%s' (%s)\n",
		    (void *)addr, (void *)item, (size_t)(addr - item),
2736 		    pp->pr_wchan,
2737 		    incpucache ? cpucachestr :
2738 		    incache ? "cached" : allocated ? "allocated" : "free");
2739 	}
2740 }
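
/*
 * Example (illustrative) ddb interaction; addresses and pool names
 * depend entirely on the running system:
 *
 *	db> whatis 0xffff800012345678
 *	0xffff800012345678 is 0xffff800012345600+120 in POOL 'foopl' (allocated)
 */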
2741 #endif /* defined(DDB) */
2742