xref: /netbsd-src/sys/kern/subr_pool.c (revision aad9773e38ed2370a628a6416e098f9008fc10a7)
1 /*	$NetBSD: subr_pool.c,v 1.203 2014/06/13 19:09:07 joerg Exp $	*/
2 
3 /*-
4  * Copyright (c) 1997, 1999, 2000, 2002, 2007, 2008, 2010, 2014
5  *     The NetBSD Foundation, Inc.
6  * All rights reserved.
7  *
8  * This code is derived from software contributed to The NetBSD Foundation
9  * by Paul Kranenburg; by Jason R. Thorpe of the Numerical Aerospace
10  * Simulation Facility, NASA Ames Research Center, and by Andrew Doran.
11  *
12  * Redistribution and use in source and binary forms, with or without
13  * modification, are permitted provided that the following conditions
14  * are met:
15  * 1. Redistributions of source code must retain the above copyright
16  *    notice, this list of conditions and the following disclaimer.
17  * 2. Redistributions in binary form must reproduce the above copyright
18  *    notice, this list of conditions and the following disclaimer in the
19  *    documentation and/or other materials provided with the distribution.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
22  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
23  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
24  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
25  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31  * POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include <sys/cdefs.h>
35 __KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.203 2014/06/13 19:09:07 joerg Exp $");
36 
37 #include "opt_ddb.h"
38 #include "opt_lockdebug.h"
39 
40 #include <sys/param.h>
41 #include <sys/systm.h>
42 #include <sys/sysctl.h>
43 #include <sys/bitops.h>
44 #include <sys/proc.h>
45 #include <sys/errno.h>
46 #include <sys/kernel.h>
47 #include <sys/vmem.h>
48 #include <sys/pool.h>
49 #include <sys/syslog.h>
50 #include <sys/debug.h>
51 #include <sys/lockdebug.h>
52 #include <sys/xcall.h>
53 #include <sys/cpu.h>
54 #include <sys/atomic.h>
55 
56 #include <uvm/uvm_extern.h>
57 
58 /*
59  * Pool resource management utility.
60  *
61  * Memory is allocated in pages which are split into pieces according to
62  * the pool item size. Each page is kept on one of three lists in the
63  * pool structure: `pr_emptypages', `pr_fullpages' and `pr_partpages',
64  * for empty, full and partially-full pages respectively. The individual
65  * pool items are on a linked list headed by `ph_itemlist' in each page
66  * header. The memory for building the page list is either taken from
67  * the allocated pages themselves (for small pool items) or taken from
68  * an internal pool of page headers (`phpool').
69  */
70 
71 /* List of all pools. Non static as needed by 'vmstat -i' */
72 TAILQ_HEAD(, pool) pool_head = TAILQ_HEAD_INITIALIZER(pool_head);
73 
74 /* Private pool for page header structures */
75 #define	PHPOOL_MAX	8
76 static struct pool phpool[PHPOOL_MAX];
77 #define	PHPOOL_FREELIST_NELEM(idx) \
78 	(((idx) == 0) ? 0 : BITMAP_SIZE * (1 << (idx)))
79 
80 #ifdef POOL_SUBPAGE
81 /* Pool of subpages for use by normal pools. */
82 static struct pool psppool;
83 #endif
84 
85 static void *pool_page_alloc_meta(struct pool *, int);
86 static void pool_page_free_meta(struct pool *, void *);
87 
88 /* allocator for pool metadata */
89 struct pool_allocator pool_allocator_meta = {
90 	.pa_alloc = pool_page_alloc_meta,
91 	.pa_free = pool_page_free_meta,
92 	.pa_pagesz = 0
93 };
94 
95 /* # of seconds to retain page after last use */
96 int pool_inactive_time = 10;
97 
98 /* Next candidate for drainage (see pool_drain()) */
99 static struct pool	*drainpp;
100 
101 /* This lock protects both pool_head and drainpp. */
102 static kmutex_t pool_head_lock;
103 static kcondvar_t pool_busy;
104 
105 /* This lock protects initialization of a potentially shared pool allocator */
106 static kmutex_t pool_allocator_lock;
107 
108 typedef uint32_t pool_item_bitmap_t;
109 #define	BITMAP_SIZE	(CHAR_BIT * sizeof(pool_item_bitmap_t))
110 #define	BITMAP_MASK	(BITMAP_SIZE - 1)
111 
112 struct pool_item_header {
113 	/* Page headers */
114 	LIST_ENTRY(pool_item_header)
115 				ph_pagelist;	/* pool page list */
116 	SPLAY_ENTRY(pool_item_header)
117 				ph_node;	/* Off-page page headers */
118 	void *			ph_page;	/* this page's address */
119 	uint32_t		ph_time;	/* last referenced */
120 	uint16_t		ph_nmissing;	/* # of chunks in use */
121 	uint16_t		ph_off;		/* start offset in page */
122 	union {
123 		/* !PR_NOTOUCH */
124 		struct {
125 			LIST_HEAD(, pool_item)
126 				phu_itemlist;	/* chunk list for this page */
127 		} phu_normal;
128 		/* PR_NOTOUCH */
129 		struct {
130 			pool_item_bitmap_t phu_bitmap[1];
131 		} phu_notouch;
132 	} ph_u;
133 };
134 #define	ph_itemlist	ph_u.phu_normal.phu_itemlist
135 #define	ph_bitmap	ph_u.phu_notouch.phu_bitmap
136 
137 struct pool_item {
138 #ifdef DIAGNOSTIC
139 	u_int pi_magic;
140 #endif
141 #define	PI_MAGIC 0xdeaddeadU
142 	/* Other entries use only this list entry */
143 	LIST_ENTRY(pool_item)	pi_list;
144 };
145 
146 #define	POOL_NEEDS_CATCHUP(pp)						\
147 	((pp)->pr_nitems < (pp)->pr_minitems)
148 
149 /*
150  * Pool cache management.
151  *
152  * Pool caches provide a way for constructed objects to be cached by the
153  * pool subsystem.  This can lead to performance improvements by avoiding
154  * needless object construction/destruction; it is deferred until absolutely
155  * necessary.
156  *
157  * Caches are grouped into cache groups.  Each cache group references up
158  * to PCG_NUMOBJECTS constructed objects.  When a cache allocates an
159  * object from the pool, it calls the object's constructor and places it
160  * into a cache group.  When a cache group frees an object back to the
161  * pool, it first calls the object's destructor.  This allows the object
162  * to persist in constructed form while freed to the cache.
163  *
164  * The pool references each cache, so that when a pool is drained by the
165  * pagedaemon, it can drain each individual cache as well.  Each time a
166  * cache is drained, the most idle cache group is freed to the pool in
167  * its entirety.
168  *
169  * Pool caches are layed on top of pools.  By layering them, we can avoid
170  * the complexity of cache management for pools which would not benefit
171  * from it.
172  */
173 
174 static struct pool pcg_normal_pool;
175 static struct pool pcg_large_pool;
176 static struct pool cache_pool;
177 static struct pool cache_cpu_pool;
178 
179 pool_cache_t pnbuf_cache;	/* pathname buffer cache */
180 
181 /* List of all caches. */
182 TAILQ_HEAD(,pool_cache) pool_cache_head =
183     TAILQ_HEAD_INITIALIZER(pool_cache_head);
184 
185 int pool_cache_disable;		/* global disable for caching */
186 static const pcg_t pcg_dummy;	/* zero sized: always empty, yet always full */
187 
188 static bool	pool_cache_put_slow(pool_cache_cpu_t *, int,
189 				    void *);
190 static bool	pool_cache_get_slow(pool_cache_cpu_t *, int,
191 				    void **, paddr_t *, int);
192 static void	pool_cache_cpu_init1(struct cpu_info *, pool_cache_t);
193 static void	pool_cache_invalidate_groups(pool_cache_t, pcg_t *);
194 static void	pool_cache_invalidate_cpu(pool_cache_t, u_int);
195 static void	pool_cache_transfer(pool_cache_t);
196 
197 static int	pool_catchup(struct pool *);
198 static void	pool_prime_page(struct pool *, void *,
199 		    struct pool_item_header *);
200 static void	pool_update_curpage(struct pool *);
201 
202 static int	pool_grow(struct pool *, int);
203 static void	*pool_allocator_alloc(struct pool *, int);
204 static void	pool_allocator_free(struct pool *, void *);
205 
206 static void pool_print_pagelist(struct pool *, struct pool_pagelist *,
207 	void (*)(const char *, ...) __printflike(1, 2));
208 static void pool_print1(struct pool *, const char *,
209 	void (*)(const char *, ...) __printflike(1, 2));
210 
211 static int pool_chk_page(struct pool *, const char *,
212 			 struct pool_item_header *);
213 
214 static inline unsigned int
215 pr_item_notouch_index(const struct pool *pp, const struct pool_item_header *ph,
216     const void *v)
217 {
218 	const char *cp = v;
219 	unsigned int idx;
220 
221 	KASSERT(pp->pr_roflags & PR_NOTOUCH);
222 	idx = (cp - (char *)ph->ph_page - ph->ph_off) / pp->pr_size;
223 	KASSERT(idx < pp->pr_itemsperpage);
224 	return idx;
225 }
226 
227 static inline void
228 pr_item_notouch_put(const struct pool *pp, struct pool_item_header *ph,
229     void *obj)
230 {
231 	unsigned int idx = pr_item_notouch_index(pp, ph, obj);
232 	pool_item_bitmap_t *bitmap = ph->ph_bitmap + (idx / BITMAP_SIZE);
233 	pool_item_bitmap_t mask = 1 << (idx & BITMAP_MASK);
234 
235 	KASSERT((*bitmap & mask) == 0);
236 	*bitmap |= mask;
237 }
238 
239 static inline void *
240 pr_item_notouch_get(const struct pool *pp, struct pool_item_header *ph)
241 {
242 	pool_item_bitmap_t *bitmap = ph->ph_bitmap;
243 	unsigned int idx;
244 	int i;
245 
246 	for (i = 0; ; i++) {
247 		int bit;
248 
249 		KASSERT((i * BITMAP_SIZE) < pp->pr_itemsperpage);
250 		bit = ffs32(bitmap[i]);
251 		if (bit) {
252 			pool_item_bitmap_t mask;
253 
254 			bit--;
255 			idx = (i * BITMAP_SIZE) + bit;
256 			mask = 1 << bit;
257 			KASSERT((bitmap[i] & mask) != 0);
258 			bitmap[i] &= ~mask;
259 			break;
260 		}
261 	}
262 	KASSERT(idx < pp->pr_itemsperpage);
263 	return (char *)ph->ph_page + ph->ph_off + idx * pp->pr_size;
264 }
265 
266 static inline void
267 pr_item_notouch_init(const struct pool *pp, struct pool_item_header *ph)
268 {
269 	pool_item_bitmap_t *bitmap = ph->ph_bitmap;
270 	const int n = howmany(pp->pr_itemsperpage, BITMAP_SIZE);
271 	int i;
272 
273 	for (i = 0; i < n; i++) {
274 		bitmap[i] = (pool_item_bitmap_t)-1;
275 	}
276 }
277 
278 static inline int
279 phtree_compare(struct pool_item_header *a, struct pool_item_header *b)
280 {
281 
282 	/*
283 	 * we consider pool_item_header with smaller ph_page bigger.
284 	 * (this unnatural ordering is for the benefit of pr_find_pagehead.)
285 	 */
286 
287 	if (a->ph_page < b->ph_page)
288 		return (1);
289 	else if (a->ph_page > b->ph_page)
290 		return (-1);
291 	else
292 		return (0);
293 }
294 
295 SPLAY_PROTOTYPE(phtree, pool_item_header, ph_node, phtree_compare);
296 SPLAY_GENERATE(phtree, pool_item_header, ph_node, phtree_compare);
297 
298 static inline struct pool_item_header *
299 pr_find_pagehead_noalign(struct pool *pp, void *v)
300 {
301 	struct pool_item_header *ph, tmp;
302 
303 	tmp.ph_page = (void *)(uintptr_t)v;
304 	ph = SPLAY_FIND(phtree, &pp->pr_phtree, &tmp);
305 	if (ph == NULL) {
306 		ph = SPLAY_ROOT(&pp->pr_phtree);
307 		if (ph != NULL && phtree_compare(&tmp, ph) >= 0) {
308 			ph = SPLAY_NEXT(phtree, &pp->pr_phtree, ph);
309 		}
310 		KASSERT(ph == NULL || phtree_compare(&tmp, ph) < 0);
311 	}
312 
313 	return ph;
314 }
315 
316 /*
317  * Return the pool page header based on item address.
318  */
319 static inline struct pool_item_header *
320 pr_find_pagehead(struct pool *pp, void *v)
321 {
322 	struct pool_item_header *ph, tmp;
323 
324 	if ((pp->pr_roflags & PR_NOALIGN) != 0) {
325 		ph = pr_find_pagehead_noalign(pp, v);
326 	} else {
327 		void *page =
328 		    (void *)((uintptr_t)v & pp->pr_alloc->pa_pagemask);
329 
330 		if ((pp->pr_roflags & PR_PHINPAGE) != 0) {
331 			ph = (struct pool_item_header *)((char *)page + pp->pr_phoffset);
332 		} else {
333 			tmp.ph_page = page;
334 			ph = SPLAY_FIND(phtree, &pp->pr_phtree, &tmp);
335 		}
336 	}
337 
338 	KASSERT(ph == NULL || ((pp->pr_roflags & PR_PHINPAGE) != 0) ||
339 	    ((char *)ph->ph_page <= (char *)v &&
340 	    (char *)v < (char *)ph->ph_page + pp->pr_alloc->pa_pagesz));
341 	return ph;
342 }
343 
344 static void
345 pr_pagelist_free(struct pool *pp, struct pool_pagelist *pq)
346 {
347 	struct pool_item_header *ph;
348 
349 	while ((ph = LIST_FIRST(pq)) != NULL) {
350 		LIST_REMOVE(ph, ph_pagelist);
351 		pool_allocator_free(pp, ph->ph_page);
352 		if ((pp->pr_roflags & PR_PHINPAGE) == 0)
353 			pool_put(pp->pr_phpool, ph);
354 	}
355 }
356 
357 /*
358  * Remove a page from the pool.
359  */
360 static inline void
361 pr_rmpage(struct pool *pp, struct pool_item_header *ph,
362      struct pool_pagelist *pq)
363 {
364 
365 	KASSERT(mutex_owned(&pp->pr_lock));
366 
367 	/*
368 	 * If the page was idle, decrement the idle page count.
369 	 */
370 	if (ph->ph_nmissing == 0) {
371 #ifdef DIAGNOSTIC
372 		if (pp->pr_nidle == 0)
373 			panic("pr_rmpage: nidle inconsistent");
374 		if (pp->pr_nitems < pp->pr_itemsperpage)
375 			panic("pr_rmpage: nitems inconsistent");
376 #endif
377 		pp->pr_nidle--;
378 	}
379 
380 	pp->pr_nitems -= pp->pr_itemsperpage;
381 
382 	/*
383 	 * Unlink the page from the pool and queue it for release.
384 	 */
385 	LIST_REMOVE(ph, ph_pagelist);
386 	if ((pp->pr_roflags & PR_PHINPAGE) == 0)
387 		SPLAY_REMOVE(phtree, &pp->pr_phtree, ph);
388 	LIST_INSERT_HEAD(pq, ph, ph_pagelist);
389 
390 	pp->pr_npages--;
391 	pp->pr_npagefree++;
392 
393 	pool_update_curpage(pp);
394 }
395 
396 /*
397  * Initialize all the pools listed in the "pools" link set.
398  */
399 void
400 pool_subsystem_init(void)
401 {
402 	size_t size;
403 	int idx;
404 
405 	mutex_init(&pool_head_lock, MUTEX_DEFAULT, IPL_NONE);
406 	mutex_init(&pool_allocator_lock, MUTEX_DEFAULT, IPL_NONE);
407 	cv_init(&pool_busy, "poolbusy");
408 
409 	/*
410 	 * Initialize private page header pool and cache magazine pool if we
411 	 * haven't done so yet.
412 	 */
413 	for (idx = 0; idx < PHPOOL_MAX; idx++) {
414 		static char phpool_names[PHPOOL_MAX][6+1+6+1];
415 		int nelem;
416 		size_t sz;
417 
418 		nelem = PHPOOL_FREELIST_NELEM(idx);
419 		snprintf(phpool_names[idx], sizeof(phpool_names[idx]),
420 		    "phpool-%d", nelem);
421 		sz = sizeof(struct pool_item_header);
422 		if (nelem) {
423 			sz = offsetof(struct pool_item_header,
424 			    ph_bitmap[howmany(nelem, BITMAP_SIZE)]);
425 		}
426 		pool_init(&phpool[idx], sz, 0, 0, 0,
427 		    phpool_names[idx], &pool_allocator_meta, IPL_VM);
428 	}
429 #ifdef POOL_SUBPAGE
430 	pool_init(&psppool, POOL_SUBPAGE, POOL_SUBPAGE, 0,
431 	    PR_RECURSIVE, "psppool", &pool_allocator_meta, IPL_VM);
432 #endif
433 
434 	size = sizeof(pcg_t) +
435 	    (PCG_NOBJECTS_NORMAL - 1) * sizeof(pcgpair_t);
436 	pool_init(&pcg_normal_pool, size, coherency_unit, 0, 0,
437 	    "pcgnormal", &pool_allocator_meta, IPL_VM);
438 
439 	size = sizeof(pcg_t) +
440 	    (PCG_NOBJECTS_LARGE - 1) * sizeof(pcgpair_t);
441 	pool_init(&pcg_large_pool, size, coherency_unit, 0, 0,
442 	    "pcglarge", &pool_allocator_meta, IPL_VM);
443 
444 	pool_init(&cache_pool, sizeof(struct pool_cache), coherency_unit,
445 	    0, 0, "pcache", &pool_allocator_meta, IPL_NONE);
446 
447 	pool_init(&cache_cpu_pool, sizeof(pool_cache_cpu_t), coherency_unit,
448 	    0, 0, "pcachecpu", &pool_allocator_meta, IPL_NONE);
449 }
450 
451 /*
452  * Initialize the given pool resource structure.
453  *
454  * We export this routine to allow other kernel parts to declare
455  * static pools that must be initialized before kmem(9) is available.
456  */
457 void
458 pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags,
459     const char *wchan, struct pool_allocator *palloc, int ipl)
460 {
461 	struct pool *pp1;
462 	size_t trysize, phsize;
463 	int off, slack;
464 
465 #ifdef DEBUG
466 	if (__predict_true(!cold))
467 		mutex_enter(&pool_head_lock);
468 	/*
469 	 * Check that the pool hasn't already been initialised and
470 	 * added to the list of all pools.
471 	 */
472 	TAILQ_FOREACH(pp1, &pool_head, pr_poollist) {
473 		if (pp == pp1)
474 			panic("pool_init: pool %s already initialised",
475 			    wchan);
476 	}
477 	if (__predict_true(!cold))
478 		mutex_exit(&pool_head_lock);
479 #endif
480 
481 	if (palloc == NULL)
482 		palloc = &pool_allocator_kmem;
483 #ifdef POOL_SUBPAGE
484 	if (size > palloc->pa_pagesz) {
485 		if (palloc == &pool_allocator_kmem)
486 			palloc = &pool_allocator_kmem_fullpage;
487 		else if (palloc == &pool_allocator_nointr)
488 			palloc = &pool_allocator_nointr_fullpage;
489 	}
490 #endif /* POOL_SUBPAGE */
491 	if (!cold)
492 		mutex_enter(&pool_allocator_lock);
493 	if (palloc->pa_refcnt++ == 0) {
494 		if (palloc->pa_pagesz == 0)
495 			palloc->pa_pagesz = PAGE_SIZE;
496 
497 		TAILQ_INIT(&palloc->pa_list);
498 
499 		mutex_init(&palloc->pa_lock, MUTEX_DEFAULT, IPL_VM);
500 		palloc->pa_pagemask = ~(palloc->pa_pagesz - 1);
501 		palloc->pa_pageshift = ffs(palloc->pa_pagesz) - 1;
502 	}
503 	if (!cold)
504 		mutex_exit(&pool_allocator_lock);
505 
506 	if (align == 0)
507 		align = ALIGN(1);
508 
509 	if ((flags & PR_NOTOUCH) == 0 && size < sizeof(struct pool_item))
510 		size = sizeof(struct pool_item);
511 
512 	size = roundup(size, align);
513 #ifdef DIAGNOSTIC
514 	if (size > palloc->pa_pagesz)
515 		panic("pool_init: pool item size (%zu) too large", size);
516 #endif
517 
518 	/*
519 	 * Initialize the pool structure.
520 	 */
521 	LIST_INIT(&pp->pr_emptypages);
522 	LIST_INIT(&pp->pr_fullpages);
523 	LIST_INIT(&pp->pr_partpages);
524 	pp->pr_cache = NULL;
525 	pp->pr_curpage = NULL;
526 	pp->pr_npages = 0;
527 	pp->pr_minitems = 0;
528 	pp->pr_minpages = 0;
529 	pp->pr_maxpages = UINT_MAX;
530 	pp->pr_roflags = flags;
531 	pp->pr_flags = 0;
532 	pp->pr_size = size;
533 	pp->pr_align = align;
534 	pp->pr_wchan = wchan;
535 	pp->pr_alloc = palloc;
536 	pp->pr_nitems = 0;
537 	pp->pr_nout = 0;
538 	pp->pr_hardlimit = UINT_MAX;
539 	pp->pr_hardlimit_warning = NULL;
540 	pp->pr_hardlimit_ratecap.tv_sec = 0;
541 	pp->pr_hardlimit_ratecap.tv_usec = 0;
542 	pp->pr_hardlimit_warning_last.tv_sec = 0;
543 	pp->pr_hardlimit_warning_last.tv_usec = 0;
544 	pp->pr_drain_hook = NULL;
545 	pp->pr_drain_hook_arg = NULL;
546 	pp->pr_freecheck = NULL;
547 
548 	/*
549 	 * Decide whether to put the page header off page to avoid
550 	 * wasting too large a part of the page or too big item.
551 	 * Off-page page headers go on a hash table, so we can match
552 	 * a returned item with its header based on the page address.
553 	 * We use 1/16 of the page size and about 8 times of the item
554 	 * size as the threshold (XXX: tune)
555 	 *
556 	 * However, we'll put the header into the page if we can put
557 	 * it without wasting any items.
558 	 *
559 	 * Silently enforce `0 <= ioff < align'.
560 	 */
561 	pp->pr_itemoffset = ioff %= align;
562 	/* See the comment below about reserved bytes. */
563 	trysize = palloc->pa_pagesz - ((align - ioff) % align);
564 	phsize = ALIGN(sizeof(struct pool_item_header));
565 	if (pp->pr_roflags & PR_PHINPAGE ||
566 	    ((pp->pr_roflags & (PR_NOTOUCH | PR_NOALIGN)) == 0 &&
567 	    (pp->pr_size < MIN(palloc->pa_pagesz / 16, phsize << 3) ||
568 	    trysize / pp->pr_size == (trysize - phsize) / pp->pr_size))) {
569 		/* Use the end of the page for the page header */
570 		pp->pr_roflags |= PR_PHINPAGE;
571 		pp->pr_phoffset = off = palloc->pa_pagesz - phsize;
572 	} else {
573 		/* The page header will be taken from our page header pool */
574 		pp->pr_phoffset = 0;
575 		off = palloc->pa_pagesz;
576 		SPLAY_INIT(&pp->pr_phtree);
577 	}
578 
579 	/*
580 	 * Alignment is to take place at `ioff' within the item. This means
581 	 * we must reserve up to `align - 1' bytes on the page to allow
582 	 * appropriate positioning of each item.
583 	 */
584 	pp->pr_itemsperpage = (off - ((align - ioff) % align)) / pp->pr_size;
585 	KASSERT(pp->pr_itemsperpage != 0);
586 	if ((pp->pr_roflags & PR_NOTOUCH)) {
587 		int idx;
588 
589 		for (idx = 0; pp->pr_itemsperpage > PHPOOL_FREELIST_NELEM(idx);
590 		    idx++) {
591 			/* nothing */
592 		}
593 		if (idx >= PHPOOL_MAX) {
594 			/*
595 			 * if you see this panic, consider to tweak
596 			 * PHPOOL_MAX and PHPOOL_FREELIST_NELEM.
597 			 */
598 			panic("%s: too large itemsperpage(%d) for PR_NOTOUCH",
599 			    pp->pr_wchan, pp->pr_itemsperpage);
600 		}
601 		pp->pr_phpool = &phpool[idx];
602 	} else if ((pp->pr_roflags & PR_PHINPAGE) == 0) {
603 		pp->pr_phpool = &phpool[0];
604 	}
605 #if defined(DIAGNOSTIC)
606 	else {
607 		pp->pr_phpool = NULL;
608 	}
609 #endif
610 
611 	/*
612 	 * Use the slack between the chunks and the page header
613 	 * for "cache coloring".
614 	 */
615 	slack = off - pp->pr_itemsperpage * pp->pr_size;
616 	pp->pr_maxcolor = (slack / align) * align;
617 	pp->pr_curcolor = 0;
618 
619 	pp->pr_nget = 0;
620 	pp->pr_nfail = 0;
621 	pp->pr_nput = 0;
622 	pp->pr_npagealloc = 0;
623 	pp->pr_npagefree = 0;
624 	pp->pr_hiwat = 0;
625 	pp->pr_nidle = 0;
626 	pp->pr_refcnt = 0;
627 
628 	mutex_init(&pp->pr_lock, MUTEX_DEFAULT, ipl);
629 	cv_init(&pp->pr_cv, wchan);
630 	pp->pr_ipl = ipl;
631 
632 	/* Insert into the list of all pools. */
633 	if (!cold)
634 		mutex_enter(&pool_head_lock);
635 	TAILQ_FOREACH(pp1, &pool_head, pr_poollist) {
636 		if (strcmp(pp1->pr_wchan, pp->pr_wchan) > 0)
637 			break;
638 	}
639 	if (pp1 == NULL)
640 		TAILQ_INSERT_TAIL(&pool_head, pp, pr_poollist);
641 	else
642 		TAILQ_INSERT_BEFORE(pp1, pp, pr_poollist);
643 	if (!cold)
644 		mutex_exit(&pool_head_lock);
645 
646 	/* Insert this into the list of pools using this allocator. */
647 	if (!cold)
648 		mutex_enter(&palloc->pa_lock);
649 	TAILQ_INSERT_TAIL(&palloc->pa_list, pp, pr_alloc_list);
650 	if (!cold)
651 		mutex_exit(&palloc->pa_lock);
652 }
653 
654 /*
655  * De-commision a pool resource.
656  */
657 void
658 pool_destroy(struct pool *pp)
659 {
660 	struct pool_pagelist pq;
661 	struct pool_item_header *ph;
662 
663 	/* Remove from global pool list */
664 	mutex_enter(&pool_head_lock);
665 	while (pp->pr_refcnt != 0)
666 		cv_wait(&pool_busy, &pool_head_lock);
667 	TAILQ_REMOVE(&pool_head, pp, pr_poollist);
668 	if (drainpp == pp)
669 		drainpp = NULL;
670 	mutex_exit(&pool_head_lock);
671 
672 	/* Remove this pool from its allocator's list of pools. */
673 	mutex_enter(&pp->pr_alloc->pa_lock);
674 	TAILQ_REMOVE(&pp->pr_alloc->pa_list, pp, pr_alloc_list);
675 	mutex_exit(&pp->pr_alloc->pa_lock);
676 
677 	mutex_enter(&pool_allocator_lock);
678 	if (--pp->pr_alloc->pa_refcnt == 0)
679 		mutex_destroy(&pp->pr_alloc->pa_lock);
680 	mutex_exit(&pool_allocator_lock);
681 
682 	mutex_enter(&pp->pr_lock);
683 
684 	KASSERT(pp->pr_cache == NULL);
685 
686 #ifdef DIAGNOSTIC
687 	if (pp->pr_nout != 0) {
688 		panic("pool_destroy: pool busy: still out: %u",
689 		    pp->pr_nout);
690 	}
691 #endif
692 
693 	KASSERT(LIST_EMPTY(&pp->pr_fullpages));
694 	KASSERT(LIST_EMPTY(&pp->pr_partpages));
695 
696 	/* Remove all pages */
697 	LIST_INIT(&pq);
698 	while ((ph = LIST_FIRST(&pp->pr_emptypages)) != NULL)
699 		pr_rmpage(pp, ph, &pq);
700 
701 	mutex_exit(&pp->pr_lock);
702 
703 	pr_pagelist_free(pp, &pq);
704 	cv_destroy(&pp->pr_cv);
705 	mutex_destroy(&pp->pr_lock);
706 }
707 
708 void
709 pool_set_drain_hook(struct pool *pp, void (*fn)(void *, int), void *arg)
710 {
711 
712 	/* XXX no locking -- must be used just after pool_init() */
713 #ifdef DIAGNOSTIC
714 	if (pp->pr_drain_hook != NULL)
715 		panic("pool_set_drain_hook(%s): already set", pp->pr_wchan);
716 #endif
717 	pp->pr_drain_hook = fn;
718 	pp->pr_drain_hook_arg = arg;
719 }
720 
721 static struct pool_item_header *
722 pool_alloc_item_header(struct pool *pp, void *storage, int flags)
723 {
724 	struct pool_item_header *ph;
725 
726 	if ((pp->pr_roflags & PR_PHINPAGE) != 0)
727 		ph = (struct pool_item_header *) ((char *)storage + pp->pr_phoffset);
728 	else
729 		ph = pool_get(pp->pr_phpool, flags);
730 
731 	return (ph);
732 }
733 
734 /*
735  * Grab an item from the pool.
736  */
737 void *
738 pool_get(struct pool *pp, int flags)
739 {
740 	struct pool_item *pi;
741 	struct pool_item_header *ph;
742 	void *v;
743 
744 #ifdef DIAGNOSTIC
745 	if (pp->pr_itemsperpage == 0)
746 		panic("pool_get: pool '%s': pr_itemsperpage is zero, "
747 		    "pool not initialized?", pp->pr_wchan);
748 	if ((cpu_intr_p() || cpu_softintr_p()) && pp->pr_ipl == IPL_NONE &&
749 	    !cold && panicstr == NULL)
750 		panic("pool '%s' is IPL_NONE, but called from "
751 		    "interrupt context\n", pp->pr_wchan);
752 #endif
753 	if (flags & PR_WAITOK) {
754 		ASSERT_SLEEPABLE();
755 	}
756 
757 	mutex_enter(&pp->pr_lock);
758  startover:
759 	/*
760 	 * Check to see if we've reached the hard limit.  If we have,
761 	 * and we can wait, then wait until an item has been returned to
762 	 * the pool.
763 	 */
764 #ifdef DIAGNOSTIC
765 	if (__predict_false(pp->pr_nout > pp->pr_hardlimit)) {
766 		mutex_exit(&pp->pr_lock);
767 		panic("pool_get: %s: crossed hard limit", pp->pr_wchan);
768 	}
769 #endif
770 	if (__predict_false(pp->pr_nout == pp->pr_hardlimit)) {
771 		if (pp->pr_drain_hook != NULL) {
772 			/*
773 			 * Since the drain hook is going to free things
774 			 * back to the pool, unlock, call the hook, re-lock,
775 			 * and check the hardlimit condition again.
776 			 */
777 			mutex_exit(&pp->pr_lock);
778 			(*pp->pr_drain_hook)(pp->pr_drain_hook_arg, flags);
779 			mutex_enter(&pp->pr_lock);
780 			if (pp->pr_nout < pp->pr_hardlimit)
781 				goto startover;
782 		}
783 
784 		if ((flags & PR_WAITOK) && !(flags & PR_LIMITFAIL)) {
785 			/*
786 			 * XXX: A warning isn't logged in this case.  Should
787 			 * it be?
788 			 */
789 			pp->pr_flags |= PR_WANTED;
790 			cv_wait(&pp->pr_cv, &pp->pr_lock);
791 			goto startover;
792 		}
793 
794 		/*
795 		 * Log a message that the hard limit has been hit.
796 		 */
797 		if (pp->pr_hardlimit_warning != NULL &&
798 		    ratecheck(&pp->pr_hardlimit_warning_last,
799 			      &pp->pr_hardlimit_ratecap))
800 			log(LOG_ERR, "%s\n", pp->pr_hardlimit_warning);
801 
802 		pp->pr_nfail++;
803 
804 		mutex_exit(&pp->pr_lock);
805 		return (NULL);
806 	}
807 
808 	/*
809 	 * The convention we use is that if `curpage' is not NULL, then
810 	 * it points at a non-empty bucket. In particular, `curpage'
811 	 * never points at a page header which has PR_PHINPAGE set and
812 	 * has no items in its bucket.
813 	 */
814 	if ((ph = pp->pr_curpage) == NULL) {
815 		int error;
816 
817 #ifdef DIAGNOSTIC
818 		if (pp->pr_nitems != 0) {
819 			mutex_exit(&pp->pr_lock);
820 			printf("pool_get: %s: curpage NULL, nitems %u\n",
821 			    pp->pr_wchan, pp->pr_nitems);
822 			panic("pool_get: nitems inconsistent");
823 		}
824 #endif
825 
826 		/*
827 		 * Call the back-end page allocator for more memory.
828 		 * Release the pool lock, as the back-end page allocator
829 		 * may block.
830 		 */
831 		error = pool_grow(pp, flags);
832 		if (error != 0) {
833 			/*
834 			 * We were unable to allocate a page or item
835 			 * header, but we released the lock during
836 			 * allocation, so perhaps items were freed
837 			 * back to the pool.  Check for this case.
838 			 */
839 			if (pp->pr_curpage != NULL)
840 				goto startover;
841 
842 			pp->pr_nfail++;
843 			mutex_exit(&pp->pr_lock);
844 			return (NULL);
845 		}
846 
847 		/* Start the allocation process over. */
848 		goto startover;
849 	}
850 	if (pp->pr_roflags & PR_NOTOUCH) {
851 #ifdef DIAGNOSTIC
852 		if (__predict_false(ph->ph_nmissing == pp->pr_itemsperpage)) {
853 			mutex_exit(&pp->pr_lock);
854 			panic("pool_get: %s: page empty", pp->pr_wchan);
855 		}
856 #endif
857 		v = pr_item_notouch_get(pp, ph);
858 	} else {
859 		v = pi = LIST_FIRST(&ph->ph_itemlist);
860 		if (__predict_false(v == NULL)) {
861 			mutex_exit(&pp->pr_lock);
862 			panic("pool_get: %s: page empty", pp->pr_wchan);
863 		}
864 #ifdef DIAGNOSTIC
865 		if (__predict_false(pp->pr_nitems == 0)) {
866 			mutex_exit(&pp->pr_lock);
867 			printf("pool_get: %s: items on itemlist, nitems %u\n",
868 			    pp->pr_wchan, pp->pr_nitems);
869 			panic("pool_get: nitems inconsistent");
870 		}
871 #endif
872 
873 #ifdef DIAGNOSTIC
874 		if (__predict_false(pi->pi_magic != PI_MAGIC)) {
875 			panic("pool_get(%s): free list modified: "
876 			    "magic=%x; page %p; item addr %p\n",
877 			    pp->pr_wchan, pi->pi_magic, ph->ph_page, pi);
878 		}
879 #endif
880 
881 		/*
882 		 * Remove from item list.
883 		 */
884 		LIST_REMOVE(pi, pi_list);
885 	}
886 	pp->pr_nitems--;
887 	pp->pr_nout++;
888 	if (ph->ph_nmissing == 0) {
889 #ifdef DIAGNOSTIC
890 		if (__predict_false(pp->pr_nidle == 0))
891 			panic("pool_get: nidle inconsistent");
892 #endif
893 		pp->pr_nidle--;
894 
895 		/*
896 		 * This page was previously empty.  Move it to the list of
897 		 * partially-full pages.  This page is already curpage.
898 		 */
899 		LIST_REMOVE(ph, ph_pagelist);
900 		LIST_INSERT_HEAD(&pp->pr_partpages, ph, ph_pagelist);
901 	}
902 	ph->ph_nmissing++;
903 	if (ph->ph_nmissing == pp->pr_itemsperpage) {
904 #ifdef DIAGNOSTIC
905 		if (__predict_false((pp->pr_roflags & PR_NOTOUCH) == 0 &&
906 		    !LIST_EMPTY(&ph->ph_itemlist))) {
907 			mutex_exit(&pp->pr_lock);
908 			panic("pool_get: %s: nmissing inconsistent",
909 			    pp->pr_wchan);
910 		}
911 #endif
912 		/*
913 		 * This page is now full.  Move it to the full list
914 		 * and select a new current page.
915 		 */
916 		LIST_REMOVE(ph, ph_pagelist);
917 		LIST_INSERT_HEAD(&pp->pr_fullpages, ph, ph_pagelist);
918 		pool_update_curpage(pp);
919 	}
920 
921 	pp->pr_nget++;
922 
923 	/*
924 	 * If we have a low water mark and we are now below that low
925 	 * water mark, add more items to the pool.
926 	 */
927 	if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
928 		/*
929 		 * XXX: Should we log a warning?  Should we set up a timeout
930 		 * to try again in a second or so?  The latter could break
931 		 * a caller's assumptions about interrupt protection, etc.
932 		 */
933 	}
934 
935 	mutex_exit(&pp->pr_lock);
936 	KASSERT((((vaddr_t)v + pp->pr_itemoffset) & (pp->pr_align - 1)) == 0);
937 	FREECHECK_OUT(&pp->pr_freecheck, v);
938 	return (v);
939 }
940 
941 /*
942  * Internal version of pool_put().  Pool is already locked/entered.
943  */
944 static void
945 pool_do_put(struct pool *pp, void *v, struct pool_pagelist *pq)
946 {
947 	struct pool_item *pi = v;
948 	struct pool_item_header *ph;
949 
950 	KASSERT(mutex_owned(&pp->pr_lock));
951 	FREECHECK_IN(&pp->pr_freecheck, v);
952 	LOCKDEBUG_MEM_CHECK(v, pp->pr_size);
953 
954 #ifdef DIAGNOSTIC
955 	if (__predict_false(pp->pr_nout == 0)) {
956 		printf("pool %s: putting with none out\n",
957 		    pp->pr_wchan);
958 		panic("pool_put");
959 	}
960 #endif
961 
962 	if (__predict_false((ph = pr_find_pagehead(pp, v)) == NULL)) {
963 		panic("pool_put: %s: page header missing", pp->pr_wchan);
964 	}
965 
966 	/*
967 	 * Return to item list.
968 	 */
969 	if (pp->pr_roflags & PR_NOTOUCH) {
970 		pr_item_notouch_put(pp, ph, v);
971 	} else {
972 #ifdef DIAGNOSTIC
973 		pi->pi_magic = PI_MAGIC;
974 #endif
975 #ifdef DEBUG
976 		{
977 			int i, *ip = v;
978 
979 			for (i = 0; i < pp->pr_size / sizeof(int); i++) {
980 				*ip++ = PI_MAGIC;
981 			}
982 		}
983 #endif
984 
985 		LIST_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
986 	}
987 	KDASSERT(ph->ph_nmissing != 0);
988 	ph->ph_nmissing--;
989 	pp->pr_nput++;
990 	pp->pr_nitems++;
991 	pp->pr_nout--;
992 
993 	/* Cancel "pool empty" condition if it exists */
994 	if (pp->pr_curpage == NULL)
995 		pp->pr_curpage = ph;
996 
997 	if (pp->pr_flags & PR_WANTED) {
998 		pp->pr_flags &= ~PR_WANTED;
999 		cv_broadcast(&pp->pr_cv);
1000 	}
1001 
1002 	/*
1003 	 * If this page is now empty, do one of two things:
1004 	 *
1005 	 *	(1) If we have more pages than the page high water mark,
1006 	 *	    free the page back to the system.  ONLY CONSIDER
1007 	 *	    FREEING BACK A PAGE IF WE HAVE MORE THAN OUR MINIMUM PAGE
1008 	 *	    CLAIM.
1009 	 *
1010 	 *	(2) Otherwise, move the page to the empty page list.
1011 	 *
1012 	 * Either way, select a new current page (so we use a partially-full
1013 	 * page if one is available).
1014 	 */
1015 	if (ph->ph_nmissing == 0) {
1016 		pp->pr_nidle++;
1017 		if (pp->pr_npages > pp->pr_minpages &&
1018 		    pp->pr_npages > pp->pr_maxpages) {
1019 			pr_rmpage(pp, ph, pq);
1020 		} else {
1021 			LIST_REMOVE(ph, ph_pagelist);
1022 			LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist);
1023 
1024 			/*
1025 			 * Update the timestamp on the page.  A page must
1026 			 * be idle for some period of time before it can
1027 			 * be reclaimed by the pagedaemon.  This minimizes
1028 			 * ping-pong'ing for memory.
1029 			 *
1030 			 * note for 64-bit time_t: truncating to 32-bit is not
1031 			 * a problem for our usage.
1032 			 */
1033 			ph->ph_time = time_uptime;
1034 		}
1035 		pool_update_curpage(pp);
1036 	}
1037 
1038 	/*
1039 	 * If the page was previously completely full, move it to the
1040 	 * partially-full list and make it the current page.  The next
1041 	 * allocation will get the item from this page, instead of
1042 	 * further fragmenting the pool.
1043 	 */
1044 	else if (ph->ph_nmissing == (pp->pr_itemsperpage - 1)) {
1045 		LIST_REMOVE(ph, ph_pagelist);
1046 		LIST_INSERT_HEAD(&pp->pr_partpages, ph, ph_pagelist);
1047 		pp->pr_curpage = ph;
1048 	}
1049 }
1050 
1051 void
1052 pool_put(struct pool *pp, void *v)
1053 {
1054 	struct pool_pagelist pq;
1055 
1056 	LIST_INIT(&pq);
1057 
1058 	mutex_enter(&pp->pr_lock);
1059 	pool_do_put(pp, v, &pq);
1060 	mutex_exit(&pp->pr_lock);
1061 
1062 	pr_pagelist_free(pp, &pq);
1063 }
1064 
1065 /*
1066  * pool_grow: grow a pool by a page.
1067  *
1068  * => called with pool locked.
1069  * => unlock and relock the pool.
1070  * => return with pool locked.
1071  */
1072 
1073 static int
1074 pool_grow(struct pool *pp, int flags)
1075 {
1076 	struct pool_item_header *ph = NULL;
1077 	char *cp;
1078 
1079 	mutex_exit(&pp->pr_lock);
1080 	cp = pool_allocator_alloc(pp, flags);
1081 	if (__predict_true(cp != NULL)) {
1082 		ph = pool_alloc_item_header(pp, cp, flags);
1083 	}
1084 	if (__predict_false(cp == NULL || ph == NULL)) {
1085 		if (cp != NULL) {
1086 			pool_allocator_free(pp, cp);
1087 		}
1088 		mutex_enter(&pp->pr_lock);
1089 		return ENOMEM;
1090 	}
1091 
1092 	mutex_enter(&pp->pr_lock);
1093 	pool_prime_page(pp, cp, ph);
1094 	pp->pr_npagealloc++;
1095 	return 0;
1096 }
1097 
1098 /*
1099  * Add N items to the pool.
1100  */
1101 int
1102 pool_prime(struct pool *pp, int n)
1103 {
1104 	int newpages;
1105 	int error = 0;
1106 
1107 	mutex_enter(&pp->pr_lock);
1108 
1109 	newpages = roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1110 
1111 	while (newpages-- > 0) {
1112 		error = pool_grow(pp, PR_NOWAIT);
1113 		if (error) {
1114 			break;
1115 		}
1116 		pp->pr_minpages++;
1117 	}
1118 
1119 	if (pp->pr_minpages >= pp->pr_maxpages)
1120 		pp->pr_maxpages = pp->pr_minpages + 1;	/* XXX */
1121 
1122 	mutex_exit(&pp->pr_lock);
1123 	return error;
1124 }
1125 
1126 /*
1127  * Add a page worth of items to the pool.
1128  *
1129  * Note, we must be called with the pool descriptor LOCKED.
1130  */
1131 static void
1132 pool_prime_page(struct pool *pp, void *storage, struct pool_item_header *ph)
1133 {
1134 	struct pool_item *pi;
1135 	void *cp = storage;
1136 	const unsigned int align = pp->pr_align;
1137 	const unsigned int ioff = pp->pr_itemoffset;
1138 	int n;
1139 
1140 	KASSERT(mutex_owned(&pp->pr_lock));
1141 
1142 #ifdef DIAGNOSTIC
1143 	if ((pp->pr_roflags & PR_NOALIGN) == 0 &&
1144 	    ((uintptr_t)cp & (pp->pr_alloc->pa_pagesz - 1)) != 0)
1145 		panic("pool_prime_page: %s: unaligned page", pp->pr_wchan);
1146 #endif
1147 
1148 	/*
1149 	 * Insert page header.
1150 	 */
1151 	LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist);
1152 	LIST_INIT(&ph->ph_itemlist);
1153 	ph->ph_page = storage;
1154 	ph->ph_nmissing = 0;
1155 	ph->ph_time = time_uptime;
1156 	if ((pp->pr_roflags & PR_PHINPAGE) == 0)
1157 		SPLAY_INSERT(phtree, &pp->pr_phtree, ph);
1158 
1159 	pp->pr_nidle++;
1160 
1161 	/*
1162 	 * Color this page.
1163 	 */
1164 	ph->ph_off = pp->pr_curcolor;
1165 	cp = (char *)cp + ph->ph_off;
1166 	if ((pp->pr_curcolor += align) > pp->pr_maxcolor)
1167 		pp->pr_curcolor = 0;
1168 
1169 	/*
1170 	 * Adjust storage to apply aligment to `pr_itemoffset' in each item.
1171 	 */
1172 	if (ioff != 0)
1173 		cp = (char *)cp + align - ioff;
1174 
1175 	KASSERT((((vaddr_t)cp + ioff) & (align - 1)) == 0);
1176 
1177 	/*
1178 	 * Insert remaining chunks on the bucket list.
1179 	 */
1180 	n = pp->pr_itemsperpage;
1181 	pp->pr_nitems += n;
1182 
1183 	if (pp->pr_roflags & PR_NOTOUCH) {
1184 		pr_item_notouch_init(pp, ph);
1185 	} else {
1186 		while (n--) {
1187 			pi = (struct pool_item *)cp;
1188 
1189 			KASSERT(((((vaddr_t)pi) + ioff) & (align - 1)) == 0);
1190 
1191 			/* Insert on page list */
1192 			LIST_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
1193 #ifdef DIAGNOSTIC
1194 			pi->pi_magic = PI_MAGIC;
1195 #endif
1196 			cp = (char *)cp + pp->pr_size;
1197 
1198 			KASSERT((((vaddr_t)cp + ioff) & (align - 1)) == 0);
1199 		}
1200 	}
1201 
1202 	/*
1203 	 * If the pool was depleted, point at the new page.
1204 	 */
1205 	if (pp->pr_curpage == NULL)
1206 		pp->pr_curpage = ph;
1207 
1208 	if (++pp->pr_npages > pp->pr_hiwat)
1209 		pp->pr_hiwat = pp->pr_npages;
1210 }
1211 
1212 /*
1213  * Used by pool_get() when nitems drops below the low water mark.  This
1214  * is used to catch up pr_nitems with the low water mark.
1215  *
1216  * Note 1, we never wait for memory here, we let the caller decide what to do.
1217  *
1218  * Note 2, we must be called with the pool already locked, and we return
1219  * with it locked.
1220  */
1221 static int
1222 pool_catchup(struct pool *pp)
1223 {
1224 	int error = 0;
1225 
1226 	while (POOL_NEEDS_CATCHUP(pp)) {
1227 		error = pool_grow(pp, PR_NOWAIT);
1228 		if (error) {
1229 			break;
1230 		}
1231 	}
1232 	return error;
1233 }
1234 
1235 static void
1236 pool_update_curpage(struct pool *pp)
1237 {
1238 
1239 	pp->pr_curpage = LIST_FIRST(&pp->pr_partpages);
1240 	if (pp->pr_curpage == NULL) {
1241 		pp->pr_curpage = LIST_FIRST(&pp->pr_emptypages);
1242 	}
1243 	KASSERT((pp->pr_curpage == NULL && pp->pr_nitems == 0) ||
1244 	    (pp->pr_curpage != NULL && pp->pr_nitems > 0));
1245 }
1246 
1247 void
1248 pool_setlowat(struct pool *pp, int n)
1249 {
1250 
1251 	mutex_enter(&pp->pr_lock);
1252 
1253 	pp->pr_minitems = n;
1254 	pp->pr_minpages = (n == 0)
1255 		? 0
1256 		: roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1257 
1258 	/* Make sure we're caught up with the newly-set low water mark. */
1259 	if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
1260 		/*
1261 		 * XXX: Should we log a warning?  Should we set up a timeout
1262 		 * to try again in a second or so?  The latter could break
1263 		 * a caller's assumptions about interrupt protection, etc.
1264 		 */
1265 	}
1266 
1267 	mutex_exit(&pp->pr_lock);
1268 }
1269 
1270 void
1271 pool_sethiwat(struct pool *pp, int n)
1272 {
1273 
1274 	mutex_enter(&pp->pr_lock);
1275 
1276 	pp->pr_maxpages = (n == 0)
1277 		? 0
1278 		: roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1279 
1280 	mutex_exit(&pp->pr_lock);
1281 }
1282 
1283 void
1284 pool_sethardlimit(struct pool *pp, int n, const char *warnmess, int ratecap)
1285 {
1286 
1287 	mutex_enter(&pp->pr_lock);
1288 
1289 	pp->pr_hardlimit = n;
1290 	pp->pr_hardlimit_warning = warnmess;
1291 	pp->pr_hardlimit_ratecap.tv_sec = ratecap;
1292 	pp->pr_hardlimit_warning_last.tv_sec = 0;
1293 	pp->pr_hardlimit_warning_last.tv_usec = 0;
1294 
1295 	/*
1296 	 * In-line version of pool_sethiwat(), because we don't want to
1297 	 * release the lock.
1298 	 */
1299 	pp->pr_maxpages = (n == 0)
1300 		? 0
1301 		: roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1302 
1303 	mutex_exit(&pp->pr_lock);
1304 }
1305 
1306 /*
1307  * Release all complete pages that have not been used recently.
1308  *
1309  * Must not be called from interrupt context.
1310  */
1311 int
1312 pool_reclaim(struct pool *pp)
1313 {
1314 	struct pool_item_header *ph, *phnext;
1315 	struct pool_pagelist pq;
1316 	uint32_t curtime;
1317 	bool klock;
1318 	int rv;
1319 
1320 	KASSERT(!cpu_intr_p() && !cpu_softintr_p());
1321 
1322 	if (pp->pr_drain_hook != NULL) {
1323 		/*
1324 		 * The drain hook must be called with the pool unlocked.
1325 		 */
1326 		(*pp->pr_drain_hook)(pp->pr_drain_hook_arg, PR_NOWAIT);
1327 	}
1328 
1329 	/*
1330 	 * XXXSMP Because we do not want to cause non-MPSAFE code
1331 	 * to block.
1332 	 */
1333 	if (pp->pr_ipl == IPL_SOFTNET || pp->pr_ipl == IPL_SOFTCLOCK ||
1334 	    pp->pr_ipl == IPL_SOFTSERIAL) {
1335 		KERNEL_LOCK(1, NULL);
1336 		klock = true;
1337 	} else
1338 		klock = false;
1339 
1340 	/* Reclaim items from the pool's cache (if any). */
1341 	if (pp->pr_cache != NULL)
1342 		pool_cache_invalidate(pp->pr_cache);
1343 
1344 	if (mutex_tryenter(&pp->pr_lock) == 0) {
1345 		if (klock) {
1346 			KERNEL_UNLOCK_ONE(NULL);
1347 		}
1348 		return (0);
1349 	}
1350 
1351 	LIST_INIT(&pq);
1352 
1353 	curtime = time_uptime;
1354 
1355 	for (ph = LIST_FIRST(&pp->pr_emptypages); ph != NULL; ph = phnext) {
1356 		phnext = LIST_NEXT(ph, ph_pagelist);
1357 
1358 		/* Check our minimum page claim */
1359 		if (pp->pr_npages <= pp->pr_minpages)
1360 			break;
1361 
1362 		KASSERT(ph->ph_nmissing == 0);
1363 		if (curtime - ph->ph_time < pool_inactive_time)
1364 			continue;
1365 
1366 		/*
1367 		 * If freeing this page would put us below
1368 		 * the low water mark, stop now.
1369 		 */
1370 		if ((pp->pr_nitems - pp->pr_itemsperpage) <
1371 		    pp->pr_minitems)
1372 			break;
1373 
1374 		pr_rmpage(pp, ph, &pq);
1375 	}
1376 
1377 	mutex_exit(&pp->pr_lock);
1378 
1379 	if (LIST_EMPTY(&pq))
1380 		rv = 0;
1381 	else {
1382 		pr_pagelist_free(pp, &pq);
1383 		rv = 1;
1384 	}
1385 
1386 	if (klock) {
1387 		KERNEL_UNLOCK_ONE(NULL);
1388 	}
1389 
1390 	return (rv);
1391 }
1392 
1393 /*
1394  * Drain pools, one at a time. The drained pool is returned within ppp.
1395  *
1396  * Note, must never be called from interrupt context.
1397  */
1398 bool
1399 pool_drain(struct pool **ppp)
1400 {
1401 	bool reclaimed;
1402 	struct pool *pp;
1403 
1404 	KASSERT(!TAILQ_EMPTY(&pool_head));
1405 
1406 	pp = NULL;
1407 
1408 	/* Find next pool to drain, and add a reference. */
1409 	mutex_enter(&pool_head_lock);
1410 	do {
1411 		if (drainpp == NULL) {
1412 			drainpp = TAILQ_FIRST(&pool_head);
1413 		}
1414 		if (drainpp != NULL) {
1415 			pp = drainpp;
1416 			drainpp = TAILQ_NEXT(pp, pr_poollist);
1417 		}
1418 		/*
1419 		 * Skip completely idle pools.  We depend on at least
1420 		 * one pool in the system being active.
1421 		 */
1422 	} while (pp == NULL || pp->pr_npages == 0);
1423 	pp->pr_refcnt++;
1424 	mutex_exit(&pool_head_lock);
1425 
1426 	/* Drain the cache (if any) and pool.. */
1427 	reclaimed = pool_reclaim(pp);
1428 
1429 	/* Finally, unlock the pool. */
1430 	mutex_enter(&pool_head_lock);
1431 	pp->pr_refcnt--;
1432 	cv_broadcast(&pool_busy);
1433 	mutex_exit(&pool_head_lock);
1434 
1435 	if (ppp != NULL)
1436 		*ppp = pp;
1437 
1438 	return reclaimed;
1439 }
1440 
1441 /*
1442  * Diagnostic helpers.
1443  */
1444 
1445 void
1446 pool_printall(const char *modif, void (*pr)(const char *, ...))
1447 {
1448 	struct pool *pp;
1449 
1450 	TAILQ_FOREACH(pp, &pool_head, pr_poollist) {
1451 		pool_printit(pp, modif, pr);
1452 	}
1453 }
1454 
1455 void
1456 pool_printit(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
1457 {
1458 
1459 	if (pp == NULL) {
1460 		(*pr)("Must specify a pool to print.\n");
1461 		return;
1462 	}
1463 
1464 	pool_print1(pp, modif, pr);
1465 }
1466 
1467 static void
1468 pool_print_pagelist(struct pool *pp, struct pool_pagelist *pl,
1469     void (*pr)(const char *, ...))
1470 {
1471 	struct pool_item_header *ph;
1472 #ifdef DIAGNOSTIC
1473 	struct pool_item *pi;
1474 #endif
1475 
1476 	LIST_FOREACH(ph, pl, ph_pagelist) {
1477 		(*pr)("\t\tpage %p, nmissing %d, time %" PRIu32 "\n",
1478 		    ph->ph_page, ph->ph_nmissing, ph->ph_time);
1479 #ifdef DIAGNOSTIC
1480 		if (!(pp->pr_roflags & PR_NOTOUCH)) {
1481 			LIST_FOREACH(pi, &ph->ph_itemlist, pi_list) {
1482 				if (pi->pi_magic != PI_MAGIC) {
1483 					(*pr)("\t\t\titem %p, magic 0x%x\n",
1484 					    pi, pi->pi_magic);
1485 				}
1486 			}
1487 		}
1488 #endif
1489 	}
1490 }
1491 
1492 static void
1493 pool_print1(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
1494 {
1495 	struct pool_item_header *ph;
1496 	pool_cache_t pc;
1497 	pcg_t *pcg;
1498 	pool_cache_cpu_t *cc;
1499 	uint64_t cpuhit, cpumiss;
1500 	int i, print_log = 0, print_pagelist = 0, print_cache = 0;
1501 	char c;
1502 
1503 	while ((c = *modif++) != '\0') {
1504 		if (c == 'l')
1505 			print_log = 1;
1506 		if (c == 'p')
1507 			print_pagelist = 1;
1508 		if (c == 'c')
1509 			print_cache = 1;
1510 	}
1511 
1512 	if ((pc = pp->pr_cache) != NULL) {
1513 		(*pr)("POOL CACHE");
1514 	} else {
1515 		(*pr)("POOL");
1516 	}
1517 
1518 	(*pr)(" %s: size %u, align %u, ioff %u, roflags 0x%08x\n",
1519 	    pp->pr_wchan, pp->pr_size, pp->pr_align, pp->pr_itemoffset,
1520 	    pp->pr_roflags);
1521 	(*pr)("\talloc %p\n", pp->pr_alloc);
1522 	(*pr)("\tminitems %u, minpages %u, maxpages %u, npages %u\n",
1523 	    pp->pr_minitems, pp->pr_minpages, pp->pr_maxpages, pp->pr_npages);
1524 	(*pr)("\titemsperpage %u, nitems %u, nout %u, hardlimit %u\n",
1525 	    pp->pr_itemsperpage, pp->pr_nitems, pp->pr_nout, pp->pr_hardlimit);
1526 
1527 	(*pr)("\tnget %lu, nfail %lu, nput %lu\n",
1528 	    pp->pr_nget, pp->pr_nfail, pp->pr_nput);
1529 	(*pr)("\tnpagealloc %lu, npagefree %lu, hiwat %u, nidle %lu\n",
1530 	    pp->pr_npagealloc, pp->pr_npagefree, pp->pr_hiwat, pp->pr_nidle);
1531 
1532 	if (print_pagelist == 0)
1533 		goto skip_pagelist;
1534 
1535 	if ((ph = LIST_FIRST(&pp->pr_emptypages)) != NULL)
1536 		(*pr)("\n\tempty page list:\n");
1537 	pool_print_pagelist(pp, &pp->pr_emptypages, pr);
1538 	if ((ph = LIST_FIRST(&pp->pr_fullpages)) != NULL)
1539 		(*pr)("\n\tfull page list:\n");
1540 	pool_print_pagelist(pp, &pp->pr_fullpages, pr);
1541 	if ((ph = LIST_FIRST(&pp->pr_partpages)) != NULL)
1542 		(*pr)("\n\tpartial-page list:\n");
1543 	pool_print_pagelist(pp, &pp->pr_partpages, pr);
1544 
1545 	if (pp->pr_curpage == NULL)
1546 		(*pr)("\tno current page\n");
1547 	else
1548 		(*pr)("\tcurpage %p\n", pp->pr_curpage->ph_page);
1549 
1550  skip_pagelist:
1551 	if (print_log == 0)
1552 		goto skip_log;
1553 
1554 	(*pr)("\n");
1555 
1556  skip_log:
1557 
1558 #define PR_GROUPLIST(pcg)						\
1559 	(*pr)("\t\tgroup %p: avail %d\n", pcg, pcg->pcg_avail);		\
1560 	for (i = 0; i < pcg->pcg_size; i++) {				\
1561 		if (pcg->pcg_objects[i].pcgo_pa !=			\
1562 		    POOL_PADDR_INVALID) {				\
1563 			(*pr)("\t\t\t%p, 0x%llx\n",			\
1564 			    pcg->pcg_objects[i].pcgo_va,		\
1565 			    (unsigned long long)			\
1566 			    pcg->pcg_objects[i].pcgo_pa);		\
1567 		} else {						\
1568 			(*pr)("\t\t\t%p\n",				\
1569 			    pcg->pcg_objects[i].pcgo_va);		\
1570 		}							\
1571 	}
1572 
1573 	if (pc != NULL) {
1574 		cpuhit = 0;
1575 		cpumiss = 0;
1576 		for (i = 0; i < __arraycount(pc->pc_cpus); i++) {
1577 			if ((cc = pc->pc_cpus[i]) == NULL)
1578 				continue;
1579 			cpuhit += cc->cc_hits;
1580 			cpumiss += cc->cc_misses;
1581 		}
1582 		(*pr)("\tcpu layer hits %llu misses %llu\n", cpuhit, cpumiss);
1583 		(*pr)("\tcache layer hits %llu misses %llu\n",
1584 		    pc->pc_hits, pc->pc_misses);
1585 		(*pr)("\tcache layer entry uncontended %llu contended %llu\n",
1586 		    pc->pc_hits + pc->pc_misses - pc->pc_contended,
1587 		    pc->pc_contended);
1588 		(*pr)("\tcache layer empty groups %u full groups %u\n",
1589 		    pc->pc_nempty, pc->pc_nfull);
1590 		if (print_cache) {
1591 			(*pr)("\tfull cache groups:\n");
1592 			for (pcg = pc->pc_fullgroups; pcg != NULL;
1593 			    pcg = pcg->pcg_next) {
1594 				PR_GROUPLIST(pcg);
1595 			}
1596 			(*pr)("\tempty cache groups:\n");
1597 			for (pcg = pc->pc_emptygroups; pcg != NULL;
1598 			    pcg = pcg->pcg_next) {
1599 				PR_GROUPLIST(pcg);
1600 			}
1601 		}
1602 	}
1603 #undef PR_GROUPLIST
1604 }
1605 
1606 static int
1607 pool_chk_page(struct pool *pp, const char *label, struct pool_item_header *ph)
1608 {
1609 	struct pool_item *pi;
1610 	void *page;
1611 	int n;
1612 
1613 	if ((pp->pr_roflags & PR_NOALIGN) == 0) {
1614 		page = (void *)((uintptr_t)ph & pp->pr_alloc->pa_pagemask);
1615 		if (page != ph->ph_page &&
1616 		    (pp->pr_roflags & PR_PHINPAGE) != 0) {
1617 			if (label != NULL)
1618 				printf("%s: ", label);
1619 			printf("pool(%p:%s): page inconsistency: page %p;"
1620 			       " at page head addr %p (p %p)\n", pp,
1621 				pp->pr_wchan, ph->ph_page,
1622 				ph, page);
1623 			return 1;
1624 		}
1625 	}
1626 
1627 	if ((pp->pr_roflags & PR_NOTOUCH) != 0)
1628 		return 0;
1629 
1630 	for (pi = LIST_FIRST(&ph->ph_itemlist), n = 0;
1631 	     pi != NULL;
1632 	     pi = LIST_NEXT(pi,pi_list), n++) {
1633 
1634 #ifdef DIAGNOSTIC
1635 		if (pi->pi_magic != PI_MAGIC) {
1636 			if (label != NULL)
1637 				printf("%s: ", label);
1638 			printf("pool(%s): free list modified: magic=%x;"
1639 			       " page %p; item ordinal %d; addr %p\n",
1640 				pp->pr_wchan, pi->pi_magic, ph->ph_page,
1641 				n, pi);
1642 			panic("pool");
1643 		}
1644 #endif
1645 		if ((pp->pr_roflags & PR_NOALIGN) != 0) {
1646 			continue;
1647 		}
1648 		page = (void *)((uintptr_t)pi & pp->pr_alloc->pa_pagemask);
1649 		if (page == ph->ph_page)
1650 			continue;
1651 
1652 		if (label != NULL)
1653 			printf("%s: ", label);
1654 		printf("pool(%p:%s): page inconsistency: page %p;"
1655 		       " item ordinal %d; addr %p (p %p)\n", pp,
1656 			pp->pr_wchan, ph->ph_page,
1657 			n, pi, page);
1658 		return 1;
1659 	}
1660 	return 0;
1661 }
1662 
1663 
1664 int
1665 pool_chk(struct pool *pp, const char *label)
1666 {
1667 	struct pool_item_header *ph;
1668 	int r = 0;
1669 
1670 	mutex_enter(&pp->pr_lock);
1671 	LIST_FOREACH(ph, &pp->pr_emptypages, ph_pagelist) {
1672 		r = pool_chk_page(pp, label, ph);
1673 		if (r) {
1674 			goto out;
1675 		}
1676 	}
1677 	LIST_FOREACH(ph, &pp->pr_fullpages, ph_pagelist) {
1678 		r = pool_chk_page(pp, label, ph);
1679 		if (r) {
1680 			goto out;
1681 		}
1682 	}
1683 	LIST_FOREACH(ph, &pp->pr_partpages, ph_pagelist) {
1684 		r = pool_chk_page(pp, label, ph);
1685 		if (r) {
1686 			goto out;
1687 		}
1688 	}
1689 
1690 out:
1691 	mutex_exit(&pp->pr_lock);
1692 	return (r);
1693 }
1694 
1695 /*
1696  * pool_cache_init:
1697  *
1698  *	Initialize a pool cache.
1699  */
1700 pool_cache_t
1701 pool_cache_init(size_t size, u_int align, u_int align_offset, u_int flags,
1702     const char *wchan, struct pool_allocator *palloc, int ipl,
1703     int (*ctor)(void *, void *, int), void (*dtor)(void *, void *), void *arg)
1704 {
1705 	pool_cache_t pc;
1706 
1707 	pc = pool_get(&cache_pool, PR_WAITOK);
1708 	if (pc == NULL)
1709 		return NULL;
1710 
1711 	pool_cache_bootstrap(pc, size, align, align_offset, flags, wchan,
1712 	   palloc, ipl, ctor, dtor, arg);
1713 
1714 	return pc;
1715 }
1716 
1717 /*
1718  * pool_cache_bootstrap:
1719  *
1720  *	Kernel-private version of pool_cache_init().  The caller
1721  *	provides initial storage.
1722  */
1723 void
1724 pool_cache_bootstrap(pool_cache_t pc, size_t size, u_int align,
1725     u_int align_offset, u_int flags, const char *wchan,
1726     struct pool_allocator *palloc, int ipl,
1727     int (*ctor)(void *, void *, int), void (*dtor)(void *, void *),
1728     void *arg)
1729 {
1730 	CPU_INFO_ITERATOR cii;
1731 	pool_cache_t pc1;
1732 	struct cpu_info *ci;
1733 	struct pool *pp;
1734 
1735 	pp = &pc->pc_pool;
1736 	if (palloc == NULL && ipl == IPL_NONE)
1737 		palloc = &pool_allocator_nointr;
1738 	pool_init(pp, size, align, align_offset, flags, wchan, palloc, ipl);
1739 	mutex_init(&pc->pc_lock, MUTEX_DEFAULT, ipl);
1740 
1741 	if (ctor == NULL) {
1742 		ctor = (int (*)(void *, void *, int))nullop;
1743 	}
1744 	if (dtor == NULL) {
1745 		dtor = (void (*)(void *, void *))nullop;
1746 	}
1747 
1748 	pc->pc_emptygroups = NULL;
1749 	pc->pc_fullgroups = NULL;
1750 	pc->pc_partgroups = NULL;
1751 	pc->pc_ctor = ctor;
1752 	pc->pc_dtor = dtor;
1753 	pc->pc_arg  = arg;
1754 	pc->pc_hits  = 0;
1755 	pc->pc_misses = 0;
1756 	pc->pc_nempty = 0;
1757 	pc->pc_npart = 0;
1758 	pc->pc_nfull = 0;
1759 	pc->pc_contended = 0;
1760 	pc->pc_refcnt = 0;
1761 	pc->pc_freecheck = NULL;
1762 
1763 	if ((flags & PR_LARGECACHE) != 0) {
1764 		pc->pc_pcgsize = PCG_NOBJECTS_LARGE;
1765 		pc->pc_pcgpool = &pcg_large_pool;
1766 	} else {
1767 		pc->pc_pcgsize = PCG_NOBJECTS_NORMAL;
1768 		pc->pc_pcgpool = &pcg_normal_pool;
1769 	}
1770 
1771 	/* Allocate per-CPU caches. */
1772 	memset(pc->pc_cpus, 0, sizeof(pc->pc_cpus));
1773 	pc->pc_ncpu = 0;
1774 	if (ncpu < 2) {
1775 		/* XXX For sparc: boot CPU is not attached yet. */
1776 		pool_cache_cpu_init1(curcpu(), pc);
1777 	} else {
1778 		for (CPU_INFO_FOREACH(cii, ci)) {
1779 			pool_cache_cpu_init1(ci, pc);
1780 		}
1781 	}
1782 
1783 	/* Add to list of all pools. */
1784 	if (__predict_true(!cold))
1785 		mutex_enter(&pool_head_lock);
1786 	TAILQ_FOREACH(pc1, &pool_cache_head, pc_cachelist) {
1787 		if (strcmp(pc1->pc_pool.pr_wchan, pc->pc_pool.pr_wchan) > 0)
1788 			break;
1789 	}
1790 	if (pc1 == NULL)
1791 		TAILQ_INSERT_TAIL(&pool_cache_head, pc, pc_cachelist);
1792 	else
1793 		TAILQ_INSERT_BEFORE(pc1, pc, pc_cachelist);
1794 	if (__predict_true(!cold))
1795 		mutex_exit(&pool_head_lock);
1796 
1797 	membar_sync();
1798 	pp->pr_cache = pc;
1799 }
1800 
1801 /*
1802  * pool_cache_destroy:
1803  *
1804  *	Destroy a pool cache.
1805  */
1806 void
1807 pool_cache_destroy(pool_cache_t pc)
1808 {
1809 
1810 	pool_cache_bootstrap_destroy(pc);
1811 	pool_put(&cache_pool, pc);
1812 }
1813 
1814 /*
1815  * pool_cache_bootstrap_destroy:
1816  *
1817  *	Destroy a pool cache.
1818  */
1819 void
1820 pool_cache_bootstrap_destroy(pool_cache_t pc)
1821 {
1822 	struct pool *pp = &pc->pc_pool;
1823 	u_int i;
1824 
1825 	/* Remove it from the global list. */
1826 	mutex_enter(&pool_head_lock);
1827 	while (pc->pc_refcnt != 0)
1828 		cv_wait(&pool_busy, &pool_head_lock);
1829 	TAILQ_REMOVE(&pool_cache_head, pc, pc_cachelist);
1830 	mutex_exit(&pool_head_lock);
1831 
1832 	/* First, invalidate the entire cache. */
1833 	pool_cache_invalidate(pc);
1834 
1835 	/* Disassociate it from the pool. */
1836 	mutex_enter(&pp->pr_lock);
1837 	pp->pr_cache = NULL;
1838 	mutex_exit(&pp->pr_lock);
1839 
1840 	/* Destroy per-CPU data */
1841 	for (i = 0; i < __arraycount(pc->pc_cpus); i++)
1842 		pool_cache_invalidate_cpu(pc, i);
1843 
1844 	/* Finally, destroy it. */
1845 	mutex_destroy(&pc->pc_lock);
1846 	pool_destroy(pp);
1847 }
1848 
1849 /*
1850  * pool_cache_cpu_init1:
1851  *
1852  *	Called for each pool_cache whenever a new CPU is attached.
1853  */
1854 static void
1855 pool_cache_cpu_init1(struct cpu_info *ci, pool_cache_t pc)
1856 {
1857 	pool_cache_cpu_t *cc;
1858 	int index;
1859 
1860 	index = ci->ci_index;
1861 
1862 	KASSERT(index < __arraycount(pc->pc_cpus));
1863 
1864 	if ((cc = pc->pc_cpus[index]) != NULL) {
1865 		KASSERT(cc->cc_cpuindex == index);
1866 		return;
1867 	}
1868 
1869 	/*
1870 	 * The first CPU is 'free'.  This needs to be the case for
1871 	 * bootstrap - we may not be able to allocate yet.
1872 	 */
1873 	if (pc->pc_ncpu == 0) {
1874 		cc = &pc->pc_cpu0;
1875 		pc->pc_ncpu = 1;
1876 	} else {
1877 		mutex_enter(&pc->pc_lock);
1878 		pc->pc_ncpu++;
1879 		mutex_exit(&pc->pc_lock);
1880 		cc = pool_get(&cache_cpu_pool, PR_WAITOK);
1881 	}
1882 
1883 	cc->cc_ipl = pc->pc_pool.pr_ipl;
1884 	cc->cc_iplcookie = makeiplcookie(cc->cc_ipl);
1885 	cc->cc_cache = pc;
1886 	cc->cc_cpuindex = index;
1887 	cc->cc_hits = 0;
1888 	cc->cc_misses = 0;
1889 	cc->cc_current = __UNCONST(&pcg_dummy);
1890 	cc->cc_previous = __UNCONST(&pcg_dummy);
1891 
1892 	pc->pc_cpus[index] = cc;
1893 }
1894 
1895 /*
1896  * pool_cache_cpu_init:
1897  *
1898  *	Called whenever a new CPU is attached.
1899  */
1900 void
1901 pool_cache_cpu_init(struct cpu_info *ci)
1902 {
1903 	pool_cache_t pc;
1904 
1905 	mutex_enter(&pool_head_lock);
1906 	TAILQ_FOREACH(pc, &pool_cache_head, pc_cachelist) {
1907 		pc->pc_refcnt++;
1908 		mutex_exit(&pool_head_lock);
1909 
1910 		pool_cache_cpu_init1(ci, pc);
1911 
1912 		mutex_enter(&pool_head_lock);
1913 		pc->pc_refcnt--;
1914 		cv_broadcast(&pool_busy);
1915 	}
1916 	mutex_exit(&pool_head_lock);
1917 }
1918 
1919 /*
1920  * pool_cache_reclaim:
1921  *
1922  *	Reclaim memory from a pool cache.
1923  */
1924 bool
1925 pool_cache_reclaim(pool_cache_t pc)
1926 {
1927 
1928 	return pool_reclaim(&pc->pc_pool);
1929 }
1930 
1931 static void
1932 pool_cache_destruct_object1(pool_cache_t pc, void *object)
1933 {
1934 
1935 	(*pc->pc_dtor)(pc->pc_arg, object);
1936 	pool_put(&pc->pc_pool, object);
1937 }
1938 
1939 /*
1940  * pool_cache_destruct_object:
1941  *
1942  *	Force destruction of an object and its release back into
1943  *	the pool.
1944  */
1945 void
1946 pool_cache_destruct_object(pool_cache_t pc, void *object)
1947 {
1948 
1949 	FREECHECK_IN(&pc->pc_freecheck, object);
1950 
1951 	pool_cache_destruct_object1(pc, object);
1952 }
1953 
1954 /*
1955  * pool_cache_invalidate_groups:
1956  *
1957  *	Invalidate a chain of groups and destruct all objects.
1958  */
1959 static void
1960 pool_cache_invalidate_groups(pool_cache_t pc, pcg_t *pcg)
1961 {
1962 	void *object;
1963 	pcg_t *next;
1964 	int i;
1965 
1966 	for (; pcg != NULL; pcg = next) {
1967 		next = pcg->pcg_next;
1968 
1969 		for (i = 0; i < pcg->pcg_avail; i++) {
1970 			object = pcg->pcg_objects[i].pcgo_va;
1971 			pool_cache_destruct_object1(pc, object);
1972 		}
1973 
1974 		if (pcg->pcg_size == PCG_NOBJECTS_LARGE) {
1975 			pool_put(&pcg_large_pool, pcg);
1976 		} else {
1977 			KASSERT(pcg->pcg_size == PCG_NOBJECTS_NORMAL);
1978 			pool_put(&pcg_normal_pool, pcg);
1979 		}
1980 	}
1981 }
1982 
1983 /*
1984  * pool_cache_invalidate:
1985  *
1986  *	Invalidate a pool cache (destruct and release all of the
1987  *	cached objects).  Does not reclaim objects from the pool.
1988  *
1989  *	Note: For pool caches that provide constructed objects, there
1990  *	is an assumption that another level of synchronization is occurring
1991  *	between the input to the constructor and the cache invalidation.
1992  *
1993  *	Invalidation is a costly process and should not be called from
1994  *	interrupt context.
1995  */
1996 void
1997 pool_cache_invalidate(pool_cache_t pc)
1998 {
1999 	uint64_t where;
2000 	pcg_t *full, *empty, *part;
2001 
2002 	KASSERT(!cpu_intr_p() && !cpu_softintr_p());
2003 
2004 	if (ncpu < 2 || !mp_online) {
2005 		/*
2006 		 * We might be called early enough in the boot process
2007 		 * for the CPU data structures to not be fully initialized.
2008 		 * In this case, transfer the content of the local CPU's
2009 		 * cache back into global cache as only this CPU is currently
2010 		 * running.
2011 		 */
2012 		pool_cache_transfer(pc);
2013 	} else {
2014 		/*
2015 		 * Signal all CPUs that they must transfer their local
2016 		 * cache back to the global pool then wait for the xcall to
2017 		 * complete.
2018 		 */
2019 		where = xc_broadcast(0, (xcfunc_t)pool_cache_transfer,
2020 		    pc, NULL);
2021 		xc_wait(where);
2022 	}
2023 
2024 	/* Empty pool caches, then invalidate objects */
2025 	mutex_enter(&pc->pc_lock);
2026 	full = pc->pc_fullgroups;
2027 	empty = pc->pc_emptygroups;
2028 	part = pc->pc_partgroups;
2029 	pc->pc_fullgroups = NULL;
2030 	pc->pc_emptygroups = NULL;
2031 	pc->pc_partgroups = NULL;
2032 	pc->pc_nfull = 0;
2033 	pc->pc_nempty = 0;
2034 	pc->pc_npart = 0;
2035 	mutex_exit(&pc->pc_lock);
2036 
2037 	pool_cache_invalidate_groups(pc, full);
2038 	pool_cache_invalidate_groups(pc, empty);
2039 	pool_cache_invalidate_groups(pc, part);
2040 }
2041 
2042 /*
2043  * pool_cache_invalidate_cpu:
2044  *
2045  *	Invalidate all CPU-bound cached objects in pool cache, the CPU being
2046  *	identified by its associated index.
2047  *	It is caller's responsibility to ensure that no operation is
2048  *	taking place on this pool cache while doing this invalidation.
2049  *	WARNING: as no inter-CPU locking is enforced, trying to invalidate
2050  *	pool cached objects from a CPU different from the one currently running
2051  *	may result in an undefined behaviour.
2052  */
2053 static void
2054 pool_cache_invalidate_cpu(pool_cache_t pc, u_int index)
2055 {
2056 	pool_cache_cpu_t *cc;
2057 	pcg_t *pcg;
2058 
2059 	if ((cc = pc->pc_cpus[index]) == NULL)
2060 		return;
2061 
2062 	if ((pcg = cc->cc_current) != &pcg_dummy) {
2063 		pcg->pcg_next = NULL;
2064 		pool_cache_invalidate_groups(pc, pcg);
2065 	}
2066 	if ((pcg = cc->cc_previous) != &pcg_dummy) {
2067 		pcg->pcg_next = NULL;
2068 		pool_cache_invalidate_groups(pc, pcg);
2069 	}
2070 	if (cc != &pc->pc_cpu0)
2071 		pool_put(&cache_cpu_pool, cc);
2072 
2073 }
2074 
2075 void
2076 pool_cache_set_drain_hook(pool_cache_t pc, void (*fn)(void *, int), void *arg)
2077 {
2078 
2079 	pool_set_drain_hook(&pc->pc_pool, fn, arg);
2080 }
2081 
2082 void
2083 pool_cache_setlowat(pool_cache_t pc, int n)
2084 {
2085 
2086 	pool_setlowat(&pc->pc_pool, n);
2087 }
2088 
2089 void
2090 pool_cache_sethiwat(pool_cache_t pc, int n)
2091 {
2092 
2093 	pool_sethiwat(&pc->pc_pool, n);
2094 }
2095 
2096 void
2097 pool_cache_sethardlimit(pool_cache_t pc, int n, const char *warnmess, int ratecap)
2098 {
2099 
2100 	pool_sethardlimit(&pc->pc_pool, n, warnmess, ratecap);
2101 }
2102 
2103 static bool __noinline
2104 pool_cache_get_slow(pool_cache_cpu_t *cc, int s, void **objectp,
2105 		    paddr_t *pap, int flags)
2106 {
2107 	pcg_t *pcg, *cur;
2108 	uint64_t ncsw;
2109 	pool_cache_t pc;
2110 	void *object;
2111 
2112 	KASSERT(cc->cc_current->pcg_avail == 0);
2113 	KASSERT(cc->cc_previous->pcg_avail == 0);
2114 
2115 	pc = cc->cc_cache;
2116 	cc->cc_misses++;
2117 
2118 	/*
2119 	 * Nothing was available locally.  Try and grab a group
2120 	 * from the cache.
2121 	 */
2122 	if (__predict_false(!mutex_tryenter(&pc->pc_lock))) {
2123 		ncsw = curlwp->l_ncsw;
2124 		mutex_enter(&pc->pc_lock);
2125 		pc->pc_contended++;
2126 
2127 		/*
2128 		 * If we context switched while locking, then
2129 		 * our view of the per-CPU data is invalid:
2130 		 * retry.
2131 		 */
2132 		if (curlwp->l_ncsw != ncsw) {
2133 			mutex_exit(&pc->pc_lock);
2134 			return true;
2135 		}
2136 	}
2137 
2138 	if (__predict_true((pcg = pc->pc_fullgroups) != NULL)) {
2139 		/*
2140 		 * If there's a full group, release our empty
2141 		 * group back to the cache.  Install the full
2142 		 * group as cc_current and return.
2143 		 */
2144 		if (__predict_true((cur = cc->cc_current) != &pcg_dummy)) {
2145 			KASSERT(cur->pcg_avail == 0);
2146 			cur->pcg_next = pc->pc_emptygroups;
2147 			pc->pc_emptygroups = cur;
2148 			pc->pc_nempty++;
2149 		}
2150 		KASSERT(pcg->pcg_avail == pcg->pcg_size);
2151 		cc->cc_current = pcg;
2152 		pc->pc_fullgroups = pcg->pcg_next;
2153 		pc->pc_hits++;
2154 		pc->pc_nfull--;
2155 		mutex_exit(&pc->pc_lock);
2156 		return true;
2157 	}
2158 
2159 	/*
2160 	 * Nothing available locally or in cache.  Take the slow
2161 	 * path: fetch a new object from the pool and construct
2162 	 * it.
2163 	 */
2164 	pc->pc_misses++;
2165 	mutex_exit(&pc->pc_lock);
2166 	splx(s);
2167 
2168 	object = pool_get(&pc->pc_pool, flags);
2169 	*objectp = object;
2170 	if (__predict_false(object == NULL))
2171 		return false;
2172 
2173 	if (__predict_false((*pc->pc_ctor)(pc->pc_arg, object, flags) != 0)) {
2174 		pool_put(&pc->pc_pool, object);
2175 		*objectp = NULL;
2176 		return false;
2177 	}
2178 
2179 	KASSERT((((vaddr_t)object + pc->pc_pool.pr_itemoffset) &
2180 	    (pc->pc_pool.pr_align - 1)) == 0);
2181 
2182 	if (pap != NULL) {
2183 #ifdef POOL_VTOPHYS
2184 		*pap = POOL_VTOPHYS(object);
2185 #else
2186 		*pap = POOL_PADDR_INVALID;
2187 #endif
2188 	}
2189 
2190 	FREECHECK_OUT(&pc->pc_freecheck, object);
2191 	return false;
2192 }
2193 
2194 /*
2195  * pool_cache_get{,_paddr}:
2196  *
2197  *	Get an object from a pool cache (optionally returning
2198  *	the physical address of the object).
2199  */
2200 void *
2201 pool_cache_get_paddr(pool_cache_t pc, int flags, paddr_t *pap)
2202 {
2203 	pool_cache_cpu_t *cc;
2204 	pcg_t *pcg;
2205 	void *object;
2206 	int s;
2207 
2208 	KASSERTMSG((!cpu_intr_p() && !cpu_softintr_p()) ||
2209 	    (pc->pc_pool.pr_ipl != IPL_NONE || cold || panicstr != NULL),
2210 	    "pool '%s' is IPL_NONE, but called from interrupt context\n",
2211 	    pc->pc_pool.pr_wchan);
2212 
2213 	if (flags & PR_WAITOK) {
2214 		ASSERT_SLEEPABLE();
2215 	}
2216 
2217 	/* Lock out interrupts and disable preemption. */
2218 	s = splvm();
2219 	while (/* CONSTCOND */ true) {
2220 		/* Try and allocate an object from the current group. */
2221 		cc = pc->pc_cpus[curcpu()->ci_index];
2222 		KASSERT(cc->cc_cache == pc);
2223 	 	pcg = cc->cc_current;
2224 		if (__predict_true(pcg->pcg_avail > 0)) {
2225 			object = pcg->pcg_objects[--pcg->pcg_avail].pcgo_va;
2226 			if (__predict_false(pap != NULL))
2227 				*pap = pcg->pcg_objects[pcg->pcg_avail].pcgo_pa;
2228 #if defined(DIAGNOSTIC)
2229 			pcg->pcg_objects[pcg->pcg_avail].pcgo_va = NULL;
2230 			KASSERT(pcg->pcg_avail < pcg->pcg_size);
2231 			KASSERT(object != NULL);
2232 #endif
2233 			cc->cc_hits++;
2234 			splx(s);
2235 			FREECHECK_OUT(&pc->pc_freecheck, object);
2236 			return object;
2237 		}
2238 
2239 		/*
2240 		 * That failed.  If the previous group isn't empty, swap
2241 		 * it with the current group and allocate from there.
2242 		 */
2243 		pcg = cc->cc_previous;
2244 		if (__predict_true(pcg->pcg_avail > 0)) {
2245 			cc->cc_previous = cc->cc_current;
2246 			cc->cc_current = pcg;
2247 			continue;
2248 		}
2249 
2250 		/*
2251 		 * Can't allocate from either group: try the slow path.
2252 		 * If get_slow() allocated an object for us, or if
2253 		 * no more objects are available, it will return false.
2254 		 * Otherwise, we need to retry.
2255 		 */
2256 		if (!pool_cache_get_slow(cc, s, &object, pap, flags))
2257 			break;
2258 	}
2259 
2260 	return object;
2261 }
2262 
2263 static bool __noinline
2264 pool_cache_put_slow(pool_cache_cpu_t *cc, int s, void *object)
2265 {
2266 	struct lwp *l = curlwp;
2267 	pcg_t *pcg, *cur;
2268 	uint64_t ncsw;
2269 	pool_cache_t pc;
2270 
2271 	KASSERT(cc->cc_current->pcg_avail == cc->cc_current->pcg_size);
2272 	KASSERT(cc->cc_previous->pcg_avail == cc->cc_previous->pcg_size);
2273 
2274 	pc = cc->cc_cache;
2275 	pcg = NULL;
2276 	cc->cc_misses++;
2277 	ncsw = l->l_ncsw;
2278 
2279 	/*
2280 	 * If there are no empty groups in the cache then allocate one
2281 	 * while still unlocked.
2282 	 */
2283 	if (__predict_false(pc->pc_emptygroups == NULL)) {
2284 		if (__predict_true(!pool_cache_disable)) {
2285 			pcg = pool_get(pc->pc_pcgpool, PR_NOWAIT);
2286 		}
2287 		/*
2288 		 * If pool_get() blocked, then our view of
2289 		 * the per-CPU data is invalid: retry.
2290 		 */
2291 		if (__predict_false(l->l_ncsw != ncsw)) {
2292 			if (pcg != NULL) {
2293 				pool_put(pc->pc_pcgpool, pcg);
2294 			}
2295 			return true;
2296 		}
2297 		if (__predict_true(pcg != NULL)) {
2298 			pcg->pcg_avail = 0;
2299 			pcg->pcg_size = pc->pc_pcgsize;
2300 		}
2301 	}
2302 
2303 	/* Lock the cache. */
2304 	if (__predict_false(!mutex_tryenter(&pc->pc_lock))) {
2305 		mutex_enter(&pc->pc_lock);
2306 		pc->pc_contended++;
2307 
2308 		/*
2309 		 * If we context switched while locking, then our view of
2310 		 * the per-CPU data is invalid: retry.
2311 		 */
2312 		if (__predict_false(l->l_ncsw != ncsw)) {
2313 			mutex_exit(&pc->pc_lock);
2314 			if (pcg != NULL) {
2315 				pool_put(pc->pc_pcgpool, pcg);
2316 			}
2317 			return true;
2318 		}
2319 	}
2320 
2321 	/* If there are no empty groups in the cache then allocate one. */
2322 	if (pcg == NULL && pc->pc_emptygroups != NULL) {
2323 		pcg = pc->pc_emptygroups;
2324 		pc->pc_emptygroups = pcg->pcg_next;
2325 		pc->pc_nempty--;
2326 	}
2327 
2328 	/*
2329 	 * If there's a empty group, release our full group back
2330 	 * to the cache.  Install the empty group to the local CPU
2331 	 * and return.
2332 	 */
2333 	if (pcg != NULL) {
2334 		KASSERT(pcg->pcg_avail == 0);
2335 		if (__predict_false(cc->cc_previous == &pcg_dummy)) {
2336 			cc->cc_previous = pcg;
2337 		} else {
2338 			cur = cc->cc_current;
2339 			if (__predict_true(cur != &pcg_dummy)) {
2340 				KASSERT(cur->pcg_avail == cur->pcg_size);
2341 				cur->pcg_next = pc->pc_fullgroups;
2342 				pc->pc_fullgroups = cur;
2343 				pc->pc_nfull++;
2344 			}
2345 			cc->cc_current = pcg;
2346 		}
2347 		pc->pc_hits++;
2348 		mutex_exit(&pc->pc_lock);
2349 		return true;
2350 	}
2351 
2352 	/*
2353 	 * Nothing available locally or in cache, and we didn't
2354 	 * allocate an empty group.  Take the slow path and destroy
2355 	 * the object here and now.
2356 	 */
2357 	pc->pc_misses++;
2358 	mutex_exit(&pc->pc_lock);
2359 	splx(s);
2360 	pool_cache_destruct_object(pc, object);
2361 
2362 	return false;
2363 }
2364 
2365 /*
2366  * pool_cache_put{,_paddr}:
2367  *
2368  *	Put an object back to the pool cache (optionally caching the
2369  *	physical address of the object).
2370  */
2371 void
2372 pool_cache_put_paddr(pool_cache_t pc, void *object, paddr_t pa)
2373 {
2374 	pool_cache_cpu_t *cc;
2375 	pcg_t *pcg;
2376 	int s;
2377 
2378 	KASSERT(object != NULL);
2379 	FREECHECK_IN(&pc->pc_freecheck, object);
2380 
2381 	/* Lock out interrupts and disable preemption. */
2382 	s = splvm();
2383 	while (/* CONSTCOND */ true) {
2384 		/* If the current group isn't full, release it there. */
2385 		cc = pc->pc_cpus[curcpu()->ci_index];
2386 		KASSERT(cc->cc_cache == pc);
2387 	 	pcg = cc->cc_current;
2388 		if (__predict_true(pcg->pcg_avail < pcg->pcg_size)) {
2389 			pcg->pcg_objects[pcg->pcg_avail].pcgo_va = object;
2390 			pcg->pcg_objects[pcg->pcg_avail].pcgo_pa = pa;
2391 			pcg->pcg_avail++;
2392 			cc->cc_hits++;
2393 			splx(s);
2394 			return;
2395 		}
2396 
2397 		/*
2398 		 * That failed.  If the previous group isn't full, swap
2399 		 * it with the current group and try again.
2400 		 */
2401 		pcg = cc->cc_previous;
2402 		if (__predict_true(pcg->pcg_avail < pcg->pcg_size)) {
2403 			cc->cc_previous = cc->cc_current;
2404 			cc->cc_current = pcg;
2405 			continue;
2406 		}
2407 
2408 		/*
2409 		 * Can't free to either group: try the slow path.
2410 		 * If put_slow() releases the object for us, it
2411 		 * will return false.  Otherwise we need to retry.
2412 		 */
2413 		if (!pool_cache_put_slow(cc, s, object))
2414 			break;
2415 	}
2416 }
2417 
2418 /*
2419  * pool_cache_transfer:
2420  *
2421  *	Transfer objects from the per-CPU cache to the global cache.
2422  *	Run within a cross-call thread.
2423  */
2424 static void
2425 pool_cache_transfer(pool_cache_t pc)
2426 {
2427 	pool_cache_cpu_t *cc;
2428 	pcg_t *prev, *cur, **list;
2429 	int s;
2430 
2431 	s = splvm();
2432 	mutex_enter(&pc->pc_lock);
2433 	cc = pc->pc_cpus[curcpu()->ci_index];
2434 	cur = cc->cc_current;
2435 	cc->cc_current = __UNCONST(&pcg_dummy);
2436 	prev = cc->cc_previous;
2437 	cc->cc_previous = __UNCONST(&pcg_dummy);
2438 	if (cur != &pcg_dummy) {
2439 		if (cur->pcg_avail == cur->pcg_size) {
2440 			list = &pc->pc_fullgroups;
2441 			pc->pc_nfull++;
2442 		} else if (cur->pcg_avail == 0) {
2443 			list = &pc->pc_emptygroups;
2444 			pc->pc_nempty++;
2445 		} else {
2446 			list = &pc->pc_partgroups;
2447 			pc->pc_npart++;
2448 		}
2449 		cur->pcg_next = *list;
2450 		*list = cur;
2451 	}
2452 	if (prev != &pcg_dummy) {
2453 		if (prev->pcg_avail == prev->pcg_size) {
2454 			list = &pc->pc_fullgroups;
2455 			pc->pc_nfull++;
2456 		} else if (prev->pcg_avail == 0) {
2457 			list = &pc->pc_emptygroups;
2458 			pc->pc_nempty++;
2459 		} else {
2460 			list = &pc->pc_partgroups;
2461 			pc->pc_npart++;
2462 		}
2463 		prev->pcg_next = *list;
2464 		*list = prev;
2465 	}
2466 	mutex_exit(&pc->pc_lock);
2467 	splx(s);
2468 }
2469 
2470 /*
2471  * Pool backend allocators.
2472  *
2473  * Each pool has a backend allocator that handles allocation, deallocation,
2474  * and any additional draining that might be needed.
2475  *
2476  * We provide two standard allocators:
2477  *
2478  *	pool_allocator_kmem - the default when no allocator is specified
2479  *
2480  *	pool_allocator_nointr - used for pools that will not be accessed
2481  *	in interrupt context.
2482  */
2483 void	*pool_page_alloc(struct pool *, int);
2484 void	pool_page_free(struct pool *, void *);
2485 
2486 #ifdef POOL_SUBPAGE
2487 struct pool_allocator pool_allocator_kmem_fullpage = {
2488 	.pa_alloc = pool_page_alloc,
2489 	.pa_free = pool_page_free,
2490 	.pa_pagesz = 0
2491 };
2492 #else
2493 struct pool_allocator pool_allocator_kmem = {
2494 	.pa_alloc = pool_page_alloc,
2495 	.pa_free = pool_page_free,
2496 	.pa_pagesz = 0
2497 };
2498 #endif
2499 
2500 #ifdef POOL_SUBPAGE
2501 struct pool_allocator pool_allocator_nointr_fullpage = {
2502 	.pa_alloc = pool_page_alloc,
2503 	.pa_free = pool_page_free,
2504 	.pa_pagesz = 0
2505 };
2506 #else
2507 struct pool_allocator pool_allocator_nointr = {
2508 	.pa_alloc = pool_page_alloc,
2509 	.pa_free = pool_page_free,
2510 	.pa_pagesz = 0
2511 };
2512 #endif
2513 
2514 #ifdef POOL_SUBPAGE
2515 void	*pool_subpage_alloc(struct pool *, int);
2516 void	pool_subpage_free(struct pool *, void *);
2517 
2518 struct pool_allocator pool_allocator_kmem = {
2519 	.pa_alloc = pool_subpage_alloc,
2520 	.pa_free = pool_subpage_free,
2521 	.pa_pagesz = POOL_SUBPAGE
2522 };
2523 
2524 struct pool_allocator pool_allocator_nointr = {
2525 	.pa_alloc = pool_subpage_alloc,
2526 	.pa_free = pool_subpage_free,
2527 	.pa_pagesz = POOL_SUBPAGE
2528 };
2529 #endif /* POOL_SUBPAGE */
2530 
2531 static void *
2532 pool_allocator_alloc(struct pool *pp, int flags)
2533 {
2534 	struct pool_allocator *pa = pp->pr_alloc;
2535 	void *res;
2536 
2537 	res = (*pa->pa_alloc)(pp, flags);
2538 	if (res == NULL && (flags & PR_WAITOK) == 0) {
2539 		/*
2540 		 * We only run the drain hook here if PR_NOWAIT.
2541 		 * In other cases, the hook will be run in
2542 		 * pool_reclaim().
2543 		 */
2544 		if (pp->pr_drain_hook != NULL) {
2545 			(*pp->pr_drain_hook)(pp->pr_drain_hook_arg, flags);
2546 			res = (*pa->pa_alloc)(pp, flags);
2547 		}
2548 	}
2549 	return res;
2550 }
2551 
2552 static void
2553 pool_allocator_free(struct pool *pp, void *v)
2554 {
2555 	struct pool_allocator *pa = pp->pr_alloc;
2556 
2557 	(*pa->pa_free)(pp, v);
2558 }
2559 
2560 void *
2561 pool_page_alloc(struct pool *pp, int flags)
2562 {
2563 	const vm_flag_t vflags = (flags & PR_WAITOK) ? VM_SLEEP: VM_NOSLEEP;
2564 	vmem_addr_t va;
2565 	int ret;
2566 
2567 	ret = uvm_km_kmem_alloc(kmem_va_arena, pp->pr_alloc->pa_pagesz,
2568 	    vflags | VM_INSTANTFIT, &va);
2569 
2570 	return ret ? NULL : (void *)va;
2571 }
2572 
2573 void
2574 pool_page_free(struct pool *pp, void *v)
2575 {
2576 
2577 	uvm_km_kmem_free(kmem_va_arena, (vaddr_t)v, pp->pr_alloc->pa_pagesz);
2578 }
2579 
2580 static void *
2581 pool_page_alloc_meta(struct pool *pp, int flags)
2582 {
2583 	const vm_flag_t vflags = (flags & PR_WAITOK) ? VM_SLEEP: VM_NOSLEEP;
2584 	vmem_addr_t va;
2585 	int ret;
2586 
2587 	ret = vmem_alloc(kmem_meta_arena, pp->pr_alloc->pa_pagesz,
2588 	    vflags | VM_INSTANTFIT, &va);
2589 
2590 	return ret ? NULL : (void *)va;
2591 }
2592 
2593 static void
2594 pool_page_free_meta(struct pool *pp, void *v)
2595 {
2596 
2597 	vmem_free(kmem_meta_arena, (vmem_addr_t)v, pp->pr_alloc->pa_pagesz);
2598 }
2599 
2600 #ifdef POOL_SUBPAGE
2601 /* Sub-page allocator, for machines with large hardware pages. */
2602 void *
2603 pool_subpage_alloc(struct pool *pp, int flags)
2604 {
2605 	return pool_get(&psppool, flags);
2606 }
2607 
2608 void
2609 pool_subpage_free(struct pool *pp, void *v)
2610 {
2611 	pool_put(&psppool, v);
2612 }
2613 
2614 #endif /* POOL_SUBPAGE */
2615 
2616 #if defined(DDB)
2617 static bool
2618 pool_in_page(struct pool *pp, struct pool_item_header *ph, uintptr_t addr)
2619 {
2620 
2621 	return (uintptr_t)ph->ph_page <= addr &&
2622 	    addr < (uintptr_t)ph->ph_page + pp->pr_alloc->pa_pagesz;
2623 }
2624 
2625 static bool
2626 pool_in_item(struct pool *pp, void *item, uintptr_t addr)
2627 {
2628 
2629 	return (uintptr_t)item <= addr && addr < (uintptr_t)item + pp->pr_size;
2630 }
2631 
2632 static bool
2633 pool_in_cg(struct pool *pp, struct pool_cache_group *pcg, uintptr_t addr)
2634 {
2635 	int i;
2636 
2637 	if (pcg == NULL) {
2638 		return false;
2639 	}
2640 	for (i = 0; i < pcg->pcg_avail; i++) {
2641 		if (pool_in_item(pp, pcg->pcg_objects[i].pcgo_va, addr)) {
2642 			return true;
2643 		}
2644 	}
2645 	return false;
2646 }
2647 
2648 static bool
2649 pool_allocated(struct pool *pp, struct pool_item_header *ph, uintptr_t addr)
2650 {
2651 
2652 	if ((pp->pr_roflags & PR_NOTOUCH) != 0) {
2653 		unsigned int idx = pr_item_notouch_index(pp, ph, (void *)addr);
2654 		pool_item_bitmap_t *bitmap =
2655 		    ph->ph_bitmap + (idx / BITMAP_SIZE);
2656 		pool_item_bitmap_t mask = 1 << (idx & BITMAP_MASK);
2657 
2658 		return (*bitmap & mask) == 0;
2659 	} else {
2660 		struct pool_item *pi;
2661 
2662 		LIST_FOREACH(pi, &ph->ph_itemlist, pi_list) {
2663 			if (pool_in_item(pp, pi, addr)) {
2664 				return false;
2665 			}
2666 		}
2667 		return true;
2668 	}
2669 }
2670 
2671 void
2672 pool_whatis(uintptr_t addr, void (*pr)(const char *, ...))
2673 {
2674 	struct pool *pp;
2675 
2676 	TAILQ_FOREACH(pp, &pool_head, pr_poollist) {
2677 		struct pool_item_header *ph;
2678 		uintptr_t item;
2679 		bool allocated = true;
2680 		bool incache = false;
2681 		bool incpucache = false;
2682 		char cpucachestr[32];
2683 
2684 		if ((pp->pr_roflags & PR_PHINPAGE) != 0) {
2685 			LIST_FOREACH(ph, &pp->pr_fullpages, ph_pagelist) {
2686 				if (pool_in_page(pp, ph, addr)) {
2687 					goto found;
2688 				}
2689 			}
2690 			LIST_FOREACH(ph, &pp->pr_partpages, ph_pagelist) {
2691 				if (pool_in_page(pp, ph, addr)) {
2692 					allocated =
2693 					    pool_allocated(pp, ph, addr);
2694 					goto found;
2695 				}
2696 			}
2697 			LIST_FOREACH(ph, &pp->pr_emptypages, ph_pagelist) {
2698 				if (pool_in_page(pp, ph, addr)) {
2699 					allocated = false;
2700 					goto found;
2701 				}
2702 			}
2703 			continue;
2704 		} else {
2705 			ph = pr_find_pagehead_noalign(pp, (void *)addr);
2706 			if (ph == NULL || !pool_in_page(pp, ph, addr)) {
2707 				continue;
2708 			}
2709 			allocated = pool_allocated(pp, ph, addr);
2710 		}
2711 found:
2712 		if (allocated && pp->pr_cache) {
2713 			pool_cache_t pc = pp->pr_cache;
2714 			struct pool_cache_group *pcg;
2715 			int i;
2716 
2717 			for (pcg = pc->pc_fullgroups; pcg != NULL;
2718 			    pcg = pcg->pcg_next) {
2719 				if (pool_in_cg(pp, pcg, addr)) {
2720 					incache = true;
2721 					goto print;
2722 				}
2723 			}
2724 			for (i = 0; i < __arraycount(pc->pc_cpus); i++) {
2725 				pool_cache_cpu_t *cc;
2726 
2727 				if ((cc = pc->pc_cpus[i]) == NULL) {
2728 					continue;
2729 				}
2730 				if (pool_in_cg(pp, cc->cc_current, addr) ||
2731 				    pool_in_cg(pp, cc->cc_previous, addr)) {
2732 					struct cpu_info *ci =
2733 					    cpu_lookup(i);
2734 
2735 					incpucache = true;
2736 					snprintf(cpucachestr,
2737 					    sizeof(cpucachestr),
2738 					    "cached by CPU %u",
2739 					    ci->ci_index);
2740 					goto print;
2741 				}
2742 			}
2743 		}
2744 print:
2745 		item = (uintptr_t)ph->ph_page + ph->ph_off;
2746 		item = item + rounddown(addr - item, pp->pr_size);
2747 		(*pr)("%p is %p+%zu in POOL '%s' (%s)\n",
2748 		    (void *)addr, item, (size_t)(addr - item),
2749 		    pp->pr_wchan,
2750 		    incpucache ? cpucachestr :
2751 		    incache ? "cached" : allocated ? "allocated" : "free");
2752 	}
2753 }
2754 #endif /* defined(DDB) */
2755 
2756 static int
2757 pool_sysctl(SYSCTLFN_ARGS)
2758 {
2759 	struct pool_sysctl data;
2760 	struct pool *pp;
2761 	struct pool_cache *pc;
2762 	pool_cache_cpu_t *cc;
2763 	int error;
2764 	size_t i, written;
2765 
2766 	if (oldp == NULL) {
2767 		*oldlenp = 0;
2768 		TAILQ_FOREACH(pp, &pool_head, pr_poollist)
2769 			*oldlenp += sizeof(data);
2770 		return 0;
2771 	}
2772 
2773 	memset(&data, 0, sizeof(data));
2774 	error = 0;
2775 	written = 0;
2776 	TAILQ_FOREACH(pp, &pool_head, pr_poollist) {
2777 		if (written + sizeof(data) > *oldlenp)
2778 			break;
2779 		strlcpy(data.pr_wchan, pp->pr_wchan, sizeof(data.pr_wchan));
2780 		data.pr_pagesize = pp->pr_alloc->pa_pagesz;
2781 		data.pr_flags = pp->pr_roflags | pp->pr_flags;
2782 #define COPY(field) data.field = pp->field
2783 		COPY(pr_size);
2784 
2785 		COPY(pr_itemsperpage);
2786 		COPY(pr_nitems);
2787 		COPY(pr_nout);
2788 		COPY(pr_hardlimit);
2789 		COPY(pr_npages);
2790 		COPY(pr_minpages);
2791 		COPY(pr_maxpages);
2792 
2793 		COPY(pr_nget);
2794 		COPY(pr_nfail);
2795 		COPY(pr_nput);
2796 		COPY(pr_npagealloc);
2797 		COPY(pr_npagefree);
2798 		COPY(pr_hiwat);
2799 		COPY(pr_nidle);
2800 #undef COPY
2801 
2802 		data.pr_cache_nmiss_pcpu = 0;
2803 		data.pr_cache_nhit_pcpu = 0;
2804 		if (pp->pr_cache) {
2805 			pc = pp->pr_cache;
2806 			data.pr_cache_meta_size = pc->pc_pcgsize;
2807 			data.pr_cache_nfull = pc->pc_nfull;
2808 			data.pr_cache_npartial = pc->pc_npart;
2809 			data.pr_cache_nempty = pc->pc_nempty;
2810 			data.pr_cache_ncontended = pc->pc_contended;
2811 			data.pr_cache_nmiss_global = pc->pc_misses;
2812 			data.pr_cache_nhit_global = pc->pc_hits;
2813 			for (i = 0; i < pc->pc_ncpu; ++i) {
2814 				cc = pc->pc_cpus[i];
2815 				if (cc == NULL)
2816 					continue;
2817 				data.pr_cache_nmiss_pcpu = cc->cc_misses;
2818 				data.pr_cache_nhit_pcpu = cc->cc_hits;
2819 			}
2820 		} else {
2821 			data.pr_cache_meta_size = 0;
2822 			data.pr_cache_nfull = 0;
2823 			data.pr_cache_npartial = 0;
2824 			data.pr_cache_nempty = 0;
2825 			data.pr_cache_ncontended = 0;
2826 			data.pr_cache_nmiss_global = 0;
2827 			data.pr_cache_nhit_global = 0;
2828 		}
2829 
2830 		error = sysctl_copyout(l, &data, oldp, sizeof(data));
2831 		if (error)
2832 			break;
2833 		written += sizeof(data);
2834 		oldp = (char *)oldp + sizeof(data);
2835 	}
2836 
2837 	*oldlenp = written;
2838 	return error;
2839 }
2840 
2841 SYSCTL_SETUP(sysctl_pool_setup, "sysctl kern.pool setup")
2842 {
2843 	const struct sysctlnode *rnode = NULL;
2844 
2845 	sysctl_createv(clog, 0, NULL, &rnode,
2846 		       CTLFLAG_PERMANENT,
2847 		       CTLTYPE_STRUCT, "pool",
2848 		       SYSCTL_DESCR("Get pool statistics"),
2849 		       pool_sysctl, 0, NULL, 0,
2850 		       CTL_KERN, CTL_CREATE, CTL_EOL);
2851 }
2852