xref: /netbsd-src/sys/kern/subr_pool.c (revision ed75d7a867996c84cfa88e3b8906816277e957f7)
1 /*	$NetBSD: subr_pool.c,v 1.266 2020/02/08 07:07:07 maxv Exp $	*/
2 
3 /*
4  * Copyright (c) 1997, 1999, 2000, 2002, 2007, 2008, 2010, 2014, 2015, 2018
5  *     The NetBSD Foundation, Inc.
6  * All rights reserved.
7  *
8  * This code is derived from software contributed to The NetBSD Foundation
9  * by Paul Kranenburg; by Jason R. Thorpe of the Numerical Aerospace
10  * Simulation Facility, NASA Ames Research Center; by Andrew Doran, and by
11  * Maxime Villard.
12  *
13  * Redistribution and use in source and binary forms, with or without
14  * modification, are permitted provided that the following conditions
15  * are met:
16  * 1. Redistributions of source code must retain the above copyright
17  *    notice, this list of conditions and the following disclaimer.
18  * 2. Redistributions in binary form must reproduce the above copyright
19  *    notice, this list of conditions and the following disclaimer in the
20  *    documentation and/or other materials provided with the distribution.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
23  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
24  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
25  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
26  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32  * POSSIBILITY OF SUCH DAMAGE.
33  */
34 
35 #include <sys/cdefs.h>
36 __KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.266 2020/02/08 07:07:07 maxv Exp $");
37 
38 #ifdef _KERNEL_OPT
39 #include "opt_ddb.h"
40 #include "opt_lockdebug.h"
41 #include "opt_pool.h"
42 #endif
43 
44 #include <sys/param.h>
45 #include <sys/systm.h>
46 #include <sys/sysctl.h>
47 #include <sys/bitops.h>
48 #include <sys/proc.h>
49 #include <sys/errno.h>
50 #include <sys/kernel.h>
51 #include <sys/vmem.h>
52 #include <sys/pool.h>
53 #include <sys/syslog.h>
54 #include <sys/debug.h>
55 #include <sys/lockdebug.h>
56 #include <sys/xcall.h>
57 #include <sys/cpu.h>
58 #include <sys/atomic.h>
59 #include <sys/asan.h>
60 #include <sys/msan.h>
61 
62 #include <uvm/uvm_extern.h>
63 
64 /*
65  * Pool resource management utility.
66  *
67  * Memory is allocated in pages which are split into pieces according to
68  * the pool item size. Each page is kept on one of three lists in the
69  * pool structure: `pr_emptypages', `pr_fullpages' and `pr_partpages',
70  * for empty, full and partially-full pages respectively. The individual
71  * pool items are on a linked list headed by `ph_itemlist' in each page
72  * header. The memory for building the page list is either taken from
73  * the allocated pages themselves (for small pool items) or taken from
74  * an internal pool of page headers (`phpool').
75  */
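/*
 * Illustrative sketch only (not part of this file): a typical consumer
 * initializes a pool once and then gets/puts fixed-size items from it.
 * The names "struct frobnitz", "frob_pool" and "frob_init" below are
 * hypothetical and merely show the intended calling pattern:
 *
 *	static struct pool frob_pool;
 *
 *	void
 *	frob_init(void)
 *	{
 *		pool_init(&frob_pool, sizeof(struct frobnitz), 0, 0, 0,
 *		    "frobpl", NULL, IPL_NONE);
 *	}
 *
 *	struct frobnitz *f = pool_get(&frob_pool, PR_WAITOK);
 *	...
 *	pool_put(&frob_pool, f);
 *
 * Passing a NULL allocator selects pool_allocator_kmem (see below).
 */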
76 
77 /* List of all pools. Non-static, as needed by 'vmstat -m' */
78 TAILQ_HEAD(, pool) pool_head = TAILQ_HEAD_INITIALIZER(pool_head);
79 
80 /* Private pool for page header structures */
81 #define	PHPOOL_MAX	8
82 static struct pool phpool[PHPOOL_MAX];
83 #define	PHPOOL_FREELIST_NELEM(idx) \
84 	(((idx) == 0) ? BITMAP_MIN_SIZE : BITMAP_SIZE * (1 << (idx)))
85 
86 #if !defined(KMSAN) && (defined(DIAGNOSTIC) || defined(KASAN))
87 #define POOL_REDZONE
88 #endif
89 
90 #ifdef POOL_REDZONE
91 # ifdef KASAN
92 #  define POOL_REDZONE_SIZE 8
93 # else
94 #  define POOL_REDZONE_SIZE 2
95 # endif
96 static void pool_redzone_init(struct pool *, size_t);
97 static void pool_redzone_fill(struct pool *, void *);
98 static void pool_redzone_check(struct pool *, void *);
99 static void pool_cache_redzone_check(pool_cache_t, void *);
100 #else
101 # define pool_redzone_init(pp, sz)		__nothing
102 # define pool_redzone_fill(pp, ptr)		__nothing
103 # define pool_redzone_check(pp, ptr)		__nothing
104 # define pool_cache_redzone_check(pc, ptr)	__nothing
105 #endif
106 
107 #ifdef KMSAN
108 static inline void pool_get_kmsan(struct pool *, void *);
109 static inline void pool_put_kmsan(struct pool *, void *);
110 static inline void pool_cache_get_kmsan(pool_cache_t, void *);
111 static inline void pool_cache_put_kmsan(pool_cache_t, void *);
112 #else
113 #define pool_get_kmsan(pp, ptr)		__nothing
114 #define pool_put_kmsan(pp, ptr)		__nothing
115 #define pool_cache_get_kmsan(pc, ptr)	__nothing
116 #define pool_cache_put_kmsan(pc, ptr)	__nothing
117 #endif
118 
119 #ifdef POOL_QUARANTINE
120 static void pool_quarantine_init(struct pool *);
121 static void pool_quarantine_flush(struct pool *);
122 static bool pool_put_quarantine(struct pool *, void *,
123     struct pool_pagelist *);
124 static bool pool_cache_put_quarantine(pool_cache_t, void *, paddr_t);
125 #else
126 #define pool_quarantine_init(a)			__nothing
127 #define pool_quarantine_flush(a)		__nothing
128 #define pool_put_quarantine(a, b, c)		false
129 #define pool_cache_put_quarantine(a, b, c)	false
130 #endif
131 
132 #define NO_CTOR	__FPTRCAST(int (*)(void *, void *, int), nullop)
133 #define NO_DTOR	__FPTRCAST(void (*)(void *, void *), nullop)
134 
135 #define pc_has_ctor(pc) ((pc)->pc_ctor != NO_CTOR)
136 #define pc_has_dtor(pc) ((pc)->pc_dtor != NO_DTOR)
137 
138 /*
139  * Pool backend allocators.
140  *
141  * Each pool has a backend allocator that handles allocation, deallocation,
142  * and any additional draining that might be needed.
143  *
144  * We provide two standard allocators:
145  *
146  *	pool_allocator_kmem - the default when no allocator is specified
147  *
148  *	pool_allocator_nointr - used for pools that will not be accessed
149  *	in interrupt context.
150  */
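/*
 * Hypothetical example, shown only to illustrate allocator selection
 * ("foo_pool" and "struct foo" are not real symbols here): a pool that
 * is never used from interrupt context may name the non-interrupt
 * allocator explicitly instead of passing NULL:
 *
 *	pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0, "foopl",
 *	    &pool_allocator_nointr, IPL_NONE);
 */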
151 void *pool_page_alloc(struct pool *, int);
152 void pool_page_free(struct pool *, void *);
153 
154 static void *pool_page_alloc_meta(struct pool *, int);
155 static void pool_page_free_meta(struct pool *, void *);
156 
157 struct pool_allocator pool_allocator_kmem = {
158 	.pa_alloc = pool_page_alloc,
159 	.pa_free = pool_page_free,
160 	.pa_pagesz = 0
161 };
162 
163 struct pool_allocator pool_allocator_nointr = {
164 	.pa_alloc = pool_page_alloc,
165 	.pa_free = pool_page_free,
166 	.pa_pagesz = 0
167 };
168 
169 struct pool_allocator pool_allocator_meta = {
170 	.pa_alloc = pool_page_alloc_meta,
171 	.pa_free = pool_page_free_meta,
172 	.pa_pagesz = 0
173 };
174 
175 #define POOL_ALLOCATOR_BIG_BASE 13
176 static struct pool_allocator pool_allocator_big[] = {
177 	{
178 		.pa_alloc = pool_page_alloc,
179 		.pa_free = pool_page_free,
180 		.pa_pagesz = 1 << (POOL_ALLOCATOR_BIG_BASE + 0),
181 	},
182 	{
183 		.pa_alloc = pool_page_alloc,
184 		.pa_free = pool_page_free,
185 		.pa_pagesz = 1 << (POOL_ALLOCATOR_BIG_BASE + 1),
186 	},
187 	{
188 		.pa_alloc = pool_page_alloc,
189 		.pa_free = pool_page_free,
190 		.pa_pagesz = 1 << (POOL_ALLOCATOR_BIG_BASE + 2),
191 	},
192 	{
193 		.pa_alloc = pool_page_alloc,
194 		.pa_free = pool_page_free,
195 		.pa_pagesz = 1 << (POOL_ALLOCATOR_BIG_BASE + 3),
196 	},
197 	{
198 		.pa_alloc = pool_page_alloc,
199 		.pa_free = pool_page_free,
200 		.pa_pagesz = 1 << (POOL_ALLOCATOR_BIG_BASE + 4),
201 	},
202 	{
203 		.pa_alloc = pool_page_alloc,
204 		.pa_free = pool_page_free,
205 		.pa_pagesz = 1 << (POOL_ALLOCATOR_BIG_BASE + 5),
206 	},
207 	{
208 		.pa_alloc = pool_page_alloc,
209 		.pa_free = pool_page_free,
210 		.pa_pagesz = 1 << (POOL_ALLOCATOR_BIG_BASE + 6),
211 	},
212 	{
213 		.pa_alloc = pool_page_alloc,
214 		.pa_free = pool_page_free,
215 		.pa_pagesz = 1 << (POOL_ALLOCATOR_BIG_BASE + 7),
216 	}
217 };
218 
219 static int pool_bigidx(size_t);
220 
221 /* # of seconds to retain page after last use */
222 int pool_inactive_time = 10;
223 
224 /* Next candidate for drainage (see pool_drain()) */
225 static struct pool *drainpp;
226 
227 /* This lock protects both pool_head and drainpp. */
228 static kmutex_t pool_head_lock;
229 static kcondvar_t pool_busy;
230 
231 /* This lock protects initialization of a potentially shared pool allocator */
232 static kmutex_t pool_allocator_lock;
233 
234 static unsigned int poolid_counter = 0;
235 
236 typedef uint32_t pool_item_bitmap_t;
237 #define	BITMAP_SIZE	(CHAR_BIT * sizeof(pool_item_bitmap_t))
238 #define	BITMAP_MASK	(BITMAP_SIZE - 1)
239 #define	BITMAP_MIN_SIZE	(CHAR_BIT * sizeof(((struct pool_item_header *)NULL)->ph_u2))
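/*
 * Worked example of the bitmap arithmetic (pool_item_bitmap_t is 32 bits
 * wide, so BITMAP_SIZE is 32 and BITMAP_MASK is 31): item index 37 lives
 * in word 37 / 32 = 1 at bit 37 & 31 = 5, i.e. mask 1U << 5 within
 * ph_bitmap[1]. A set bit means the corresponding item is free.
 */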
240 
241 struct pool_item_header {
242 	/* Page headers */
243 	LIST_ENTRY(pool_item_header)
244 				ph_pagelist;	/* pool page list */
245 	union {
246 		/* !PR_PHINPAGE */
247 		struct {
248 			SPLAY_ENTRY(pool_item_header)
249 				phu_node;	/* off-page page headers */
250 		} phu_offpage;
251 		/* PR_PHINPAGE */
252 		struct {
253 			unsigned int phu_poolid;
254 		} phu_onpage;
255 	} ph_u1;
256 	void *			ph_page;	/* this page's address */
257 	uint32_t		ph_time;	/* last referenced */
258 	uint16_t		ph_nmissing;	/* # of chunks in use */
259 	uint16_t		ph_off;		/* start offset in page */
260 	union {
261 		/* !PR_USEBMAP */
262 		struct {
263 			LIST_HEAD(, pool_item)
264 				phu_itemlist;	/* chunk list for this page */
265 		} phu_normal;
266 		/* PR_USEBMAP */
267 		struct {
268 			pool_item_bitmap_t phu_bitmap[1];
269 		} phu_notouch;
270 	} ph_u2;
271 };
272 #define ph_node		ph_u1.phu_offpage.phu_node
273 #define ph_poolid	ph_u1.phu_onpage.phu_poolid
274 #define ph_itemlist	ph_u2.phu_normal.phu_itemlist
275 #define ph_bitmap	ph_u2.phu_notouch.phu_bitmap
276 
277 #define PHSIZE	ALIGN(sizeof(struct pool_item_header))
278 
279 CTASSERT(offsetof(struct pool_item_header, ph_u2) +
280     BITMAP_MIN_SIZE / CHAR_BIT == sizeof(struct pool_item_header));
281 
282 #if defined(DIAGNOSTIC) && !defined(KASAN)
283 #define POOL_CHECK_MAGIC
284 #endif
285 
286 struct pool_item {
287 #ifdef POOL_CHECK_MAGIC
288 	u_int pi_magic;
289 #endif
290 #define	PI_MAGIC 0xdeaddeadU
291 	/* Other entries use only this list entry */
292 	LIST_ENTRY(pool_item)	pi_list;
293 };
294 
295 #define	POOL_NEEDS_CATCHUP(pp)						\
296 	((pp)->pr_nitems < (pp)->pr_minitems)
297 #define	POOL_OBJ_TO_PAGE(pp, v)						\
298 	(void *)((uintptr_t)v & pp->pr_alloc->pa_pagemask)
299 
300 /*
301  * Pool cache management.
302  *
303  * Pool caches provide a way for constructed objects to be cached by the
304  * pool subsystem.  This can lead to performance improvements by avoiding
305  * needless object construction/destruction; both are deferred until
306  * absolutely necessary.
307  *
308  * Caches are grouped into cache groups.  Each cache group references up
309  * to PCG_NUMOBJECTS constructed objects.  When a cache allocates an
310  * object from the pool, it calls the object's constructor and places it
311  * into a cache group.  When a cache group frees an object back to the
312  * pool, it first calls the object's destructor.  This allows the object
313  * to persist in constructed form while freed to the cache.
314  *
315  * The pool references each cache, so that when a pool is drained by the
316  * pagedaemon, it can drain each individual cache as well.  Each time a
317  * cache is drained, the most idle cache group is freed to the pool in
318  * its entirety.
319  *
320  * Pool caches are laid on top of pools.  By layering them, we can avoid
321  * the complexity of cache management for pools which would not benefit
322  * from it.
323  */
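/*
 * Illustrative sketch of the cache API described above; the cache,
 * object, constructor and destructor names are hypothetical:
 *
 *	static pool_cache_t frob_cache;
 *
 *	frob_cache = pool_cache_init(sizeof(struct frobnitz), 0, 0, 0,
 *	    "frobcache", NULL, IPL_NONE, frob_ctor, frob_dtor, NULL);
 *
 *	struct frobnitz *f = pool_cache_get(frob_cache, PR_WAITOK);
 *	...
 *	pool_cache_put(frob_cache, f);
 *
 * Objects released with pool_cache_put() remain constructed inside a
 * cache group; frob_dtor only runs when that group is drained back to
 * the underlying pool.
 */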
324 
325 static struct pool pcg_normal_pool;
326 static struct pool pcg_large_pool;
327 static struct pool cache_pool;
328 static struct pool cache_cpu_pool;
329 
330 /* List of all caches. */
331 TAILQ_HEAD(,pool_cache) pool_cache_head =
332     TAILQ_HEAD_INITIALIZER(pool_cache_head);
333 
334 int pool_cache_disable;		/* global disable for caching */
335 static const pcg_t pcg_dummy;	/* zero sized: always empty, yet always full */
336 
337 static bool	pool_cache_put_slow(pool_cache_cpu_t *, int,
338 				    void *);
339 static bool	pool_cache_get_slow(pool_cache_cpu_t *, int,
340 				    void **, paddr_t *, int);
341 static void	pool_cache_cpu_init1(struct cpu_info *, pool_cache_t);
342 static void	pool_cache_invalidate_groups(pool_cache_t, pcg_t *);
343 static void	pool_cache_invalidate_cpu(pool_cache_t, u_int);
344 static void	pool_cache_transfer(pool_cache_t);
345 
346 static int	pool_catchup(struct pool *);
347 static void	pool_prime_page(struct pool *, void *,
348 		    struct pool_item_header *);
349 static void	pool_update_curpage(struct pool *);
350 
351 static int	pool_grow(struct pool *, int);
352 static void	*pool_allocator_alloc(struct pool *, int);
353 static void	pool_allocator_free(struct pool *, void *);
354 
355 static void pool_print_pagelist(struct pool *, struct pool_pagelist *,
356 	void (*)(const char *, ...) __printflike(1, 2));
357 static void pool_print1(struct pool *, const char *,
358 	void (*)(const char *, ...) __printflike(1, 2));
359 
360 static int pool_chk_page(struct pool *, const char *,
361 			 struct pool_item_header *);
362 
363 /* -------------------------------------------------------------------------- */
364 
365 static inline unsigned int
366 pr_item_bitmap_index(const struct pool *pp, const struct pool_item_header *ph,
367     const void *v)
368 {
369 	const char *cp = v;
370 	unsigned int idx;
371 
372 	KASSERT(pp->pr_roflags & PR_USEBMAP);
373 	idx = (cp - (char *)ph->ph_page - ph->ph_off) / pp->pr_size;
374 
375 	if (__predict_false(idx >= pp->pr_itemsperpage)) {
376 		panic("%s: [%s] %u >= %u", __func__, pp->pr_wchan, idx,
377 		    pp->pr_itemsperpage);
378 	}
379 
380 	return idx;
381 }
382 
383 static inline void
384 pr_item_bitmap_put(const struct pool *pp, struct pool_item_header *ph,
385     void *obj)
386 {
387 	unsigned int idx = pr_item_bitmap_index(pp, ph, obj);
388 	pool_item_bitmap_t *bitmap = ph->ph_bitmap + (idx / BITMAP_SIZE);
389 	pool_item_bitmap_t mask = 1U << (idx & BITMAP_MASK);
390 
391 	if (__predict_false((*bitmap & mask) != 0)) {
392 		panic("%s: [%s] %p already freed", __func__, pp->pr_wchan, obj);
393 	}
394 
395 	*bitmap |= mask;
396 }
397 
398 static inline void *
399 pr_item_bitmap_get(const struct pool *pp, struct pool_item_header *ph)
400 {
401 	pool_item_bitmap_t *bitmap = ph->ph_bitmap;
402 	unsigned int idx;
403 	int i;
404 
405 	for (i = 0; ; i++) {
406 		int bit;
407 
408 		KASSERT((i * BITMAP_SIZE) < pp->pr_itemsperpage);
409 		bit = ffs32(bitmap[i]);
410 		if (bit) {
411 			pool_item_bitmap_t mask;
412 
413 			bit--;
414 			idx = (i * BITMAP_SIZE) + bit;
415 			mask = 1U << bit;
416 			KASSERT((bitmap[i] & mask) != 0);
417 			bitmap[i] &= ~mask;
418 			break;
419 		}
420 	}
421 	KASSERT(idx < pp->pr_itemsperpage);
422 	return (char *)ph->ph_page + ph->ph_off + idx * pp->pr_size;
423 }
424 
425 static inline void
426 pr_item_bitmap_init(const struct pool *pp, struct pool_item_header *ph)
427 {
428 	pool_item_bitmap_t *bitmap = ph->ph_bitmap;
429 	const int n = howmany(pp->pr_itemsperpage, BITMAP_SIZE);
430 	int i;
431 
432 	for (i = 0; i < n; i++) {
433 		bitmap[i] = (pool_item_bitmap_t)-1;
434 	}
435 }
436 
437 /* -------------------------------------------------------------------------- */
438 
439 static inline void
440 pr_item_linkedlist_put(const struct pool *pp, struct pool_item_header *ph,
441     void *obj)
442 {
443 	struct pool_item *pi = obj;
444 
445 #ifdef POOL_CHECK_MAGIC
446 	pi->pi_magic = PI_MAGIC;
447 #endif
448 
449 	if (pp->pr_redzone) {
450 		/*
451 		 * Mark the pool_item as valid. The rest is already
452 		 * invalid.
453 		 */
454 		kasan_mark(pi, sizeof(*pi), sizeof(*pi), 0);
455 	}
456 
457 	LIST_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
458 }
459 
460 static inline void *
461 pr_item_linkedlist_get(struct pool *pp, struct pool_item_header *ph)
462 {
463 	struct pool_item *pi;
464 	void *v;
465 
466 	v = pi = LIST_FIRST(&ph->ph_itemlist);
467 	if (__predict_false(v == NULL)) {
468 		mutex_exit(&pp->pr_lock);
469 		panic("%s: [%s] page empty", __func__, pp->pr_wchan);
470 	}
471 	KASSERTMSG((pp->pr_nitems > 0),
472 	    "%s: [%s] nitems %u inconsistent on itemlist",
473 	    __func__, pp->pr_wchan, pp->pr_nitems);
474 #ifdef POOL_CHECK_MAGIC
475 	KASSERTMSG((pi->pi_magic == PI_MAGIC),
476 	    "%s: [%s] free list modified: "
477 	    "magic=%x; page %p; item addr %p", __func__,
478 	    pp->pr_wchan, pi->pi_magic, ph->ph_page, pi);
479 #endif
480 
481 	/*
482 	 * Remove from item list.
483 	 */
484 	LIST_REMOVE(pi, pi_list);
485 
486 	return v;
487 }
488 
489 /* -------------------------------------------------------------------------- */
490 
491 static inline void
492 pr_phinpage_check(struct pool *pp, struct pool_item_header *ph, void *page,
493     void *object)
494 {
495 	if (__predict_false((void *)ph->ph_page != page)) {
496 		panic("%s: [%s] item %p not part of pool", __func__,
497 		    pp->pr_wchan, object);
498 	}
499 	if (__predict_false((char *)object < (char *)page + ph->ph_off)) {
500 		panic("%s: [%s] item %p below item space", __func__,
501 		    pp->pr_wchan, object);
502 	}
503 	if (__predict_false(ph->ph_poolid != pp->pr_poolid)) {
504 		panic("%s: [%s] item %p poolid %u != %u", __func__,
505 		    pp->pr_wchan, object, ph->ph_poolid, pp->pr_poolid);
506 	}
507 }
508 
509 static inline void
510 pc_phinpage_check(pool_cache_t pc, void *object)
511 {
512 	struct pool_item_header *ph;
513 	struct pool *pp;
514 	void *page;
515 
516 	pp = &pc->pc_pool;
517 	page = POOL_OBJ_TO_PAGE(pp, object);
518 	ph = (struct pool_item_header *)page;
519 
520 	pr_phinpage_check(pp, ph, page, object);
521 }
522 
523 /* -------------------------------------------------------------------------- */
524 
525 static inline int
526 phtree_compare(struct pool_item_header *a, struct pool_item_header *b)
527 {
528 
529 	/*
530 	 * We consider pool_item_header with smaller ph_page bigger. This
531 	 * unnatural ordering is for the benefit of pr_find_pagehead.
532 	 */
533 	if (a->ph_page < b->ph_page)
534 		return 1;
535 	else if (a->ph_page > b->ph_page)
536 		return -1;
537 	else
538 		return 0;
539 }
540 
541 SPLAY_PROTOTYPE(phtree, pool_item_header, ph_node, phtree_compare);
542 SPLAY_GENERATE(phtree, pool_item_header, ph_node, phtree_compare);
543 
544 static inline struct pool_item_header *
545 pr_find_pagehead_noalign(struct pool *pp, void *v)
546 {
547 	struct pool_item_header *ph, tmp;
548 
549 	tmp.ph_page = (void *)(uintptr_t)v;
550 	ph = SPLAY_FIND(phtree, &pp->pr_phtree, &tmp);
551 	if (ph == NULL) {
552 		ph = SPLAY_ROOT(&pp->pr_phtree);
553 		if (ph != NULL && phtree_compare(&tmp, ph) >= 0) {
554 			ph = SPLAY_NEXT(phtree, &pp->pr_phtree, ph);
555 		}
556 		KASSERT(ph == NULL || phtree_compare(&tmp, ph) < 0);
557 	}
558 
559 	return ph;
560 }
561 
562 /*
563  * Return the pool page header based on item address.
564  */
565 static inline struct pool_item_header *
566 pr_find_pagehead(struct pool *pp, void *v)
567 {
568 	struct pool_item_header *ph, tmp;
569 
570 	if ((pp->pr_roflags & PR_NOALIGN) != 0) {
571 		ph = pr_find_pagehead_noalign(pp, v);
572 	} else {
573 		void *page = POOL_OBJ_TO_PAGE(pp, v);
574 		if ((pp->pr_roflags & PR_PHINPAGE) != 0) {
575 			ph = (struct pool_item_header *)page;
576 			pr_phinpage_check(pp, ph, page, v);
577 		} else {
578 			tmp.ph_page = page;
579 			ph = SPLAY_FIND(phtree, &pp->pr_phtree, &tmp);
580 		}
581 	}
582 
583 	KASSERT(ph == NULL || ((pp->pr_roflags & PR_PHINPAGE) != 0) ||
584 	    ((char *)ph->ph_page <= (char *)v &&
585 	    (char *)v < (char *)ph->ph_page + pp->pr_alloc->pa_pagesz));
586 	return ph;
587 }
588 
589 static void
590 pr_pagelist_free(struct pool *pp, struct pool_pagelist *pq)
591 {
592 	struct pool_item_header *ph;
593 
594 	while ((ph = LIST_FIRST(pq)) != NULL) {
595 		LIST_REMOVE(ph, ph_pagelist);
596 		pool_allocator_free(pp, ph->ph_page);
597 		if ((pp->pr_roflags & PR_PHINPAGE) == 0)
598 			pool_put(pp->pr_phpool, ph);
599 	}
600 }
601 
602 /*
603  * Remove a page from the pool.
604  */
605 static inline void
606 pr_rmpage(struct pool *pp, struct pool_item_header *ph,
607      struct pool_pagelist *pq)
608 {
609 
610 	KASSERT(mutex_owned(&pp->pr_lock));
611 
612 	/*
613 	 * If the page was idle, decrement the idle page count.
614 	 */
615 	if (ph->ph_nmissing == 0) {
616 		KASSERT(pp->pr_nidle != 0);
617 		KASSERTMSG((pp->pr_nitems >= pp->pr_itemsperpage),
618 		    "%s: [%s] nitems=%u < itemsperpage=%u", __func__,
619 		    pp->pr_wchan, pp->pr_nitems, pp->pr_itemsperpage);
620 		pp->pr_nidle--;
621 	}
622 
623 	pp->pr_nitems -= pp->pr_itemsperpage;
624 
625 	/*
626 	 * Unlink the page from the pool and queue it for release.
627 	 */
628 	LIST_REMOVE(ph, ph_pagelist);
629 	if (pp->pr_roflags & PR_PHINPAGE) {
630 		if (__predict_false(ph->ph_poolid != pp->pr_poolid)) {
631 			panic("%s: [%s] ph %p poolid %u != %u",
632 			    __func__, pp->pr_wchan, ph, ph->ph_poolid,
633 			    pp->pr_poolid);
634 		}
635 	} else {
636 		SPLAY_REMOVE(phtree, &pp->pr_phtree, ph);
637 	}
638 	LIST_INSERT_HEAD(pq, ph, ph_pagelist);
639 
640 	pp->pr_npages--;
641 	pp->pr_npagefree++;
642 
643 	pool_update_curpage(pp);
644 }
645 
646 /*
647  * Initialize all the pools listed in the "pools" link set.
648  */
649 void
650 pool_subsystem_init(void)
651 {
652 	size_t size;
653 	int idx;
654 
655 	mutex_init(&pool_head_lock, MUTEX_DEFAULT, IPL_NONE);
656 	mutex_init(&pool_allocator_lock, MUTEX_DEFAULT, IPL_NONE);
657 	cv_init(&pool_busy, "poolbusy");
658 
659 	/*
660 	 * Initialize private page header pool and cache magazine pool if we
661 	 * haven't done so yet.
662 	 */
663 	for (idx = 0; idx < PHPOOL_MAX; idx++) {
664 		static char phpool_names[PHPOOL_MAX][6+1+6+1];
665 		int nelem;
666 		size_t sz;
667 
668 		nelem = PHPOOL_FREELIST_NELEM(idx);
669 		KASSERT(nelem != 0);
670 		snprintf(phpool_names[idx], sizeof(phpool_names[idx]),
671 		    "phpool-%d", nelem);
672 		sz = offsetof(struct pool_item_header,
673 		    ph_bitmap[howmany(nelem, BITMAP_SIZE)]);
674 		pool_init(&phpool[idx], sz, 0, 0, 0,
675 		    phpool_names[idx], &pool_allocator_meta, IPL_VM);
676 	}
677 
678 	size = sizeof(pcg_t) +
679 	    (PCG_NOBJECTS_NORMAL - 1) * sizeof(pcgpair_t);
680 	pool_init(&pcg_normal_pool, size, coherency_unit, 0, 0,
681 	    "pcgnormal", &pool_allocator_meta, IPL_VM);
682 
683 	size = sizeof(pcg_t) +
684 	    (PCG_NOBJECTS_LARGE - 1) * sizeof(pcgpair_t);
685 	pool_init(&pcg_large_pool, size, coherency_unit, 0, 0,
686 	    "pcglarge", &pool_allocator_meta, IPL_VM);
687 
688 	pool_init(&cache_pool, sizeof(struct pool_cache), coherency_unit,
689 	    0, 0, "pcache", &pool_allocator_meta, IPL_NONE);
690 
691 	pool_init(&cache_cpu_pool, sizeof(pool_cache_cpu_t), coherency_unit,
692 	    0, 0, "pcachecpu", &pool_allocator_meta, IPL_NONE);
693 }
694 
695 static inline bool
696 pool_init_is_phinpage(const struct pool *pp)
697 {
698 	size_t pagesize;
699 
700 	if (pp->pr_roflags & PR_PHINPAGE) {
701 		return true;
702 	}
703 	if (pp->pr_roflags & (PR_NOTOUCH | PR_NOALIGN)) {
704 		return false;
705 	}
706 
707 	pagesize = pp->pr_alloc->pa_pagesz;
708 
709 	/*
710 	 * Threshold: the item size is below 1/16 of a page size, and below
711 	 * 8 times the page header size. The latter ensures we go off-page
712 	 * if the page header would make us waste a rather big item.
713 	 */
714 	if (pp->pr_size < MIN(pagesize / 16, PHSIZE * 8)) {
715 		return true;
716 	}
717 
718 	/* Put the header into the page if it doesn't waste any items. */
719 	if (pagesize / pp->pr_size == (pagesize - PHSIZE) / pp->pr_size) {
720 		return true;
721 	}
722 
723 	return false;
724 }
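/*
 * Worked example of the rules above, assuming a 4096-byte page and (for
 * illustration only) a 64-byte page header: MIN(4096 / 16, 64 * 8) is
 * MIN(256, 512) = 256, so items smaller than 256 bytes keep their header
 * on-page. A larger item also stays on-page when the header costs no
 * item, i.e. when 4096 / pr_size == (4096 - 64) / pr_size.
 */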
725 
726 static inline bool
727 pool_init_is_usebmap(const struct pool *pp)
728 {
729 	size_t bmapsize;
730 
731 	if (pp->pr_roflags & PR_NOTOUCH) {
732 		return true;
733 	}
734 
735 	/*
736 	 * If we're off-page, go with a bitmap.
737 	 */
738 	if (!(pp->pr_roflags & PR_PHINPAGE)) {
739 		return true;
740 	}
741 
742 	/*
743 	 * If we're on-page, and the page header can already contain a bitmap
744 	 * big enough to cover all the items of the page, go with a bitmap.
745 	 */
746 	bmapsize = roundup(PHSIZE, pp->pr_align) -
747 	    offsetof(struct pool_item_header, ph_bitmap[0]);
748 	KASSERT(bmapsize % sizeof(pool_item_bitmap_t) == 0);
749 	if (pp->pr_itemsperpage <= bmapsize * CHAR_BIT) {
750 		return true;
751 	}
752 
753 	return false;
754 }
755 
756 /*
757  * Initialize the given pool resource structure.
758  *
759  * We export this routine to allow other kernel parts to declare
760  * static pools that must be initialized before kmem(9) is available.
761  */
762 void
763 pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags,
764     const char *wchan, struct pool_allocator *palloc, int ipl)
765 {
766 	struct pool *pp1;
767 	size_t prsize;
768 	int itemspace, slack;
769 
770 	/* XXX ioff will be removed. */
771 	KASSERT(ioff == 0);
772 
773 #ifdef DEBUG
774 	if (__predict_true(!cold))
775 		mutex_enter(&pool_head_lock);
776 	/*
777 	 * Check that the pool hasn't already been initialised and
778 	 * added to the list of all pools.
779 	 */
780 	TAILQ_FOREACH(pp1, &pool_head, pr_poollist) {
781 		if (pp == pp1)
782 			panic("%s: [%s] already initialised", __func__,
783 			    wchan);
784 	}
785 	if (__predict_true(!cold))
786 		mutex_exit(&pool_head_lock);
787 #endif
788 
789 	if (palloc == NULL)
790 		palloc = &pool_allocator_kmem;
791 
792 	if (!cold)
793 		mutex_enter(&pool_allocator_lock);
794 	if (palloc->pa_refcnt++ == 0) {
795 		if (palloc->pa_pagesz == 0)
796 			palloc->pa_pagesz = PAGE_SIZE;
797 
798 		TAILQ_INIT(&palloc->pa_list);
799 
800 		mutex_init(&palloc->pa_lock, MUTEX_DEFAULT, IPL_VM);
801 		palloc->pa_pagemask = ~(palloc->pa_pagesz - 1);
802 		palloc->pa_pageshift = ffs(palloc->pa_pagesz) - 1;
803 	}
804 	if (!cold)
805 		mutex_exit(&pool_allocator_lock);
806 
807 	if (align == 0)
808 		align = ALIGN(1);
809 
810 	prsize = size;
811 	if ((flags & PR_NOTOUCH) == 0 && prsize < sizeof(struct pool_item))
812 		prsize = sizeof(struct pool_item);
813 
814 	prsize = roundup(prsize, align);
815 	KASSERTMSG((prsize <= palloc->pa_pagesz),
816 	    "%s: [%s] pool item size (%zu) larger than page size (%u)",
817 	    __func__, wchan, prsize, palloc->pa_pagesz);
818 
819 	/*
820 	 * Initialize the pool structure.
821 	 */
822 	LIST_INIT(&pp->pr_emptypages);
823 	LIST_INIT(&pp->pr_fullpages);
824 	LIST_INIT(&pp->pr_partpages);
825 	pp->pr_cache = NULL;
826 	pp->pr_curpage = NULL;
827 	pp->pr_npages = 0;
828 	pp->pr_minitems = 0;
829 	pp->pr_minpages = 0;
830 	pp->pr_maxpages = UINT_MAX;
831 	pp->pr_roflags = flags;
832 	pp->pr_flags = 0;
833 	pp->pr_size = prsize;
834 	pp->pr_reqsize = size;
835 	pp->pr_align = align;
836 	pp->pr_wchan = wchan;
837 	pp->pr_alloc = palloc;
838 	pp->pr_poolid = atomic_inc_uint_nv(&poolid_counter);
839 	pp->pr_nitems = 0;
840 	pp->pr_nout = 0;
841 	pp->pr_hardlimit = UINT_MAX;
842 	pp->pr_hardlimit_warning = NULL;
843 	pp->pr_hardlimit_ratecap.tv_sec = 0;
844 	pp->pr_hardlimit_ratecap.tv_usec = 0;
845 	pp->pr_hardlimit_warning_last.tv_sec = 0;
846 	pp->pr_hardlimit_warning_last.tv_usec = 0;
847 	pp->pr_drain_hook = NULL;
848 	pp->pr_drain_hook_arg = NULL;
849 	pp->pr_freecheck = NULL;
850 	pp->pr_redzone = false;
851 	pool_redzone_init(pp, size);
852 	pool_quarantine_init(pp);
853 
854 	/*
855 	 * Decide whether to put the page header off-page to avoid wasting too
856 	 * large a part of the page or too big an item. Off-page page headers
857 	 * go into a splay tree, so we can match a returned item with its header
858 	 * based on the page address.
859 	 */
860 	if (pool_init_is_phinpage(pp)) {
861 		/* Use the beginning of the page for the page header */
862 		itemspace = palloc->pa_pagesz - roundup(PHSIZE, align);
863 		pp->pr_itemoffset = roundup(PHSIZE, align);
864 		pp->pr_roflags |= PR_PHINPAGE;
865 	} else {
866 		/* The page header will be taken from our page header pool */
867 		itemspace = palloc->pa_pagesz;
868 		pp->pr_itemoffset = 0;
869 		SPLAY_INIT(&pp->pr_phtree);
870 	}
871 
872 	pp->pr_itemsperpage = itemspace / pp->pr_size;
873 	KASSERT(pp->pr_itemsperpage != 0);
874 
875 	/*
876 	 * Decide whether to use a bitmap or a linked list to manage freed
877 	 * items.
878 	 */
879 	if (pool_init_is_usebmap(pp)) {
880 		pp->pr_roflags |= PR_USEBMAP;
881 	}
882 
883 	/*
884 	 * If we're off-page, then we're using a bitmap; choose the appropriate
885 	 * pool to allocate page headers, whose size varies depending on the
886 	 * bitmap. If we're on-page, nothing to do.
887 	 */
888 	if (!(pp->pr_roflags & PR_PHINPAGE)) {
889 		int idx;
890 
891 		KASSERT(pp->pr_roflags & PR_USEBMAP);
892 
893 		for (idx = 0; pp->pr_itemsperpage > PHPOOL_FREELIST_NELEM(idx);
894 		    idx++) {
895 			/* nothing */
896 		}
897 		if (idx >= PHPOOL_MAX) {
898 			/*
899 			 * If you see this panic, consider tweaking
900 			 * PHPOOL_MAX and PHPOOL_FREELIST_NELEM.
901 			 */
902 			panic("%s: [%s] too large itemsperpage(%d) for "
903 			    "PR_USEBMAP", __func__,
904 			    pp->pr_wchan, pp->pr_itemsperpage);
905 		}
906 		pp->pr_phpool = &phpool[idx];
907 	} else {
908 		pp->pr_phpool = NULL;
909 	}
910 
911 	/*
912 	 * Use the slack between the chunks and the page header
913 	 * for "cache coloring".
914 	 */
915 	slack = itemspace - pp->pr_itemsperpage * pp->pr_size;
916 	pp->pr_maxcolor = rounddown(slack, align);
917 	pp->pr_curcolor = 0;
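	/*
	 * Worked example with illustrative numbers only: 4064 bytes of
	 * item space, 96-byte items and 8-byte alignment give 42 items
	 * per page and a slack of 4064 - 42 * 96 = 32 bytes, so
	 * pr_maxcolor is 32 and successive pages offset their first item
	 * by 0, 8, 16, 24, 32, then wrap back to 0.
	 */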
918 
919 	pp->pr_nget = 0;
920 	pp->pr_nfail = 0;
921 	pp->pr_nput = 0;
922 	pp->pr_npagealloc = 0;
923 	pp->pr_npagefree = 0;
924 	pp->pr_hiwat = 0;
925 	pp->pr_nidle = 0;
926 	pp->pr_refcnt = 0;
927 
928 	mutex_init(&pp->pr_lock, MUTEX_DEFAULT, ipl);
929 	cv_init(&pp->pr_cv, wchan);
930 	pp->pr_ipl = ipl;
931 
932 	/* Insert into the list of all pools. */
933 	if (!cold)
934 		mutex_enter(&pool_head_lock);
935 	TAILQ_FOREACH(pp1, &pool_head, pr_poollist) {
936 		if (strcmp(pp1->pr_wchan, pp->pr_wchan) > 0)
937 			break;
938 	}
939 	if (pp1 == NULL)
940 		TAILQ_INSERT_TAIL(&pool_head, pp, pr_poollist);
941 	else
942 		TAILQ_INSERT_BEFORE(pp1, pp, pr_poollist);
943 	if (!cold)
944 		mutex_exit(&pool_head_lock);
945 
946 	/* Insert this into the list of pools using this allocator. */
947 	if (!cold)
948 		mutex_enter(&palloc->pa_lock);
949 	TAILQ_INSERT_TAIL(&palloc->pa_list, pp, pr_alloc_list);
950 	if (!cold)
951 		mutex_exit(&palloc->pa_lock);
952 }
953 
954 /*
955  * De-commission a pool resource.
956  */
957 void
958 pool_destroy(struct pool *pp)
959 {
960 	struct pool_pagelist pq;
961 	struct pool_item_header *ph;
962 
963 	pool_quarantine_flush(pp);
964 
965 	/* Remove from global pool list */
966 	mutex_enter(&pool_head_lock);
967 	while (pp->pr_refcnt != 0)
968 		cv_wait(&pool_busy, &pool_head_lock);
969 	TAILQ_REMOVE(&pool_head, pp, pr_poollist);
970 	if (drainpp == pp)
971 		drainpp = NULL;
972 	mutex_exit(&pool_head_lock);
973 
974 	/* Remove this pool from its allocator's list of pools. */
975 	mutex_enter(&pp->pr_alloc->pa_lock);
976 	TAILQ_REMOVE(&pp->pr_alloc->pa_list, pp, pr_alloc_list);
977 	mutex_exit(&pp->pr_alloc->pa_lock);
978 
979 	mutex_enter(&pool_allocator_lock);
980 	if (--pp->pr_alloc->pa_refcnt == 0)
981 		mutex_destroy(&pp->pr_alloc->pa_lock);
982 	mutex_exit(&pool_allocator_lock);
983 
984 	mutex_enter(&pp->pr_lock);
985 
986 	KASSERT(pp->pr_cache == NULL);
987 	KASSERTMSG((pp->pr_nout == 0),
988 	    "%s: [%s] pool busy: still out: %u", __func__, pp->pr_wchan,
989 	    pp->pr_nout);
990 	KASSERT(LIST_EMPTY(&pp->pr_fullpages));
991 	KASSERT(LIST_EMPTY(&pp->pr_partpages));
992 
993 	/* Remove all pages */
994 	LIST_INIT(&pq);
995 	while ((ph = LIST_FIRST(&pp->pr_emptypages)) != NULL)
996 		pr_rmpage(pp, ph, &pq);
997 
998 	mutex_exit(&pp->pr_lock);
999 
1000 	pr_pagelist_free(pp, &pq);
1001 	cv_destroy(&pp->pr_cv);
1002 	mutex_destroy(&pp->pr_lock);
1003 }
1004 
1005 void
1006 pool_set_drain_hook(struct pool *pp, void (*fn)(void *, int), void *arg)
1007 {
1008 
1009 	/* XXX no locking -- must be used just after pool_init() */
1010 	KASSERTMSG((pp->pr_drain_hook == NULL),
1011 	    "%s: [%s] already set", __func__, pp->pr_wchan);
1012 	pp->pr_drain_hook = fn;
1013 	pp->pr_drain_hook_arg = arg;
1014 }
1015 
1016 static struct pool_item_header *
1017 pool_alloc_item_header(struct pool *pp, void *storage, int flags)
1018 {
1019 	struct pool_item_header *ph;
1020 
1021 	if ((pp->pr_roflags & PR_PHINPAGE) != 0)
1022 		ph = storage;
1023 	else
1024 		ph = pool_get(pp->pr_phpool, flags);
1025 
1026 	return ph;
1027 }
1028 
1029 /*
1030  * Grab an item from the pool.
1031  */
1032 void *
1033 pool_get(struct pool *pp, int flags)
1034 {
1035 	struct pool_item_header *ph;
1036 	void *v;
1037 
1038 	KASSERT(!(flags & PR_NOWAIT) != !(flags & PR_WAITOK));
1039 	KASSERTMSG((pp->pr_itemsperpage != 0),
1040 	    "%s: [%s] pr_itemsperpage is zero, "
1041 	    "pool not initialized?", __func__, pp->pr_wchan);
1042 	KASSERTMSG((!(cpu_intr_p() || cpu_softintr_p())
1043 		|| pp->pr_ipl != IPL_NONE || cold || panicstr != NULL),
1044 	    "%s: [%s] is IPL_NONE, but called from interrupt context",
1045 	    __func__, pp->pr_wchan);
1046 	if (flags & PR_WAITOK) {
1047 		ASSERT_SLEEPABLE();
1048 	}
1049 
1050 	mutex_enter(&pp->pr_lock);
1051  startover:
1052 	/*
1053 	 * Check to see if we've reached the hard limit.  If we have,
1054 	 * and we can wait, then wait until an item has been returned to
1055 	 * the pool.
1056 	 */
1057 	KASSERTMSG((pp->pr_nout <= pp->pr_hardlimit),
1058 	    "%s: %s: crossed hard limit", __func__, pp->pr_wchan);
1059 	if (__predict_false(pp->pr_nout == pp->pr_hardlimit)) {
1060 		if (pp->pr_drain_hook != NULL) {
1061 			/*
1062 			 * Since the drain hook is going to free things
1063 			 * back to the pool, unlock, call the hook, re-lock,
1064 			 * and check the hardlimit condition again.
1065 			 */
1066 			mutex_exit(&pp->pr_lock);
1067 			(*pp->pr_drain_hook)(pp->pr_drain_hook_arg, flags);
1068 			mutex_enter(&pp->pr_lock);
1069 			if (pp->pr_nout < pp->pr_hardlimit)
1070 				goto startover;
1071 		}
1072 
1073 		if ((flags & PR_WAITOK) && !(flags & PR_LIMITFAIL)) {
1074 			/*
1075 			 * XXX: A warning isn't logged in this case.  Should
1076 			 * it be?
1077 			 */
1078 			pp->pr_flags |= PR_WANTED;
1079 			do {
1080 				cv_wait(&pp->pr_cv, &pp->pr_lock);
1081 			} while (pp->pr_flags & PR_WANTED);
1082 			goto startover;
1083 		}
1084 
1085 		/*
1086 		 * Log a message that the hard limit has been hit.
1087 		 */
1088 		if (pp->pr_hardlimit_warning != NULL &&
1089 		    ratecheck(&pp->pr_hardlimit_warning_last,
1090 			      &pp->pr_hardlimit_ratecap))
1091 			log(LOG_ERR, "%s\n", pp->pr_hardlimit_warning);
1092 
1093 		pp->pr_nfail++;
1094 
1095 		mutex_exit(&pp->pr_lock);
1096 		KASSERT((flags & (PR_NOWAIT|PR_LIMITFAIL)) != 0);
1097 		return NULL;
1098 	}
1099 
1100 	/*
1101 	 * The convention we use is that if `curpage' is not NULL, then
1102 	 * it points at a non-empty bucket. In particular, `curpage'
1103 	 * never points at a page header which has no items left in
1104 	 * its bucket.
1105 	 */
1106 	if ((ph = pp->pr_curpage) == NULL) {
1107 		int error;
1108 
1109 		KASSERTMSG((pp->pr_nitems == 0),
1110 		    "%s: [%s] curpage NULL, inconsistent nitems %u",
1111 		    __func__, pp->pr_wchan, pp->pr_nitems);
1112 
1113 		/*
1114 		 * Call the back-end page allocator for more memory.
1115 		 * Release the pool lock, as the back-end page allocator
1116 		 * may block.
1117 		 */
1118 		error = pool_grow(pp, flags);
1119 		if (error != 0) {
1120 			/*
1121 			 * pool_grow aborts when another thread
1122 			 * is allocating a new page. Retry if it
1123 			 * waited for it.
1124 			 */
1125 			if (error == ERESTART)
1126 				goto startover;
1127 
1128 			/*
1129 			 * We were unable to allocate a page or item
1130 			 * header, but we released the lock during
1131 			 * allocation, so perhaps items were freed
1132 			 * back to the pool.  Check for this case.
1133 			 */
1134 			if (pp->pr_curpage != NULL)
1135 				goto startover;
1136 
1137 			pp->pr_nfail++;
1138 			mutex_exit(&pp->pr_lock);
1139 			KASSERT((flags & (PR_NOWAIT|PR_LIMITFAIL)) != 0);
1140 			return NULL;
1141 		}
1142 
1143 		/* Start the allocation process over. */
1144 		goto startover;
1145 	}
1146 	if (pp->pr_roflags & PR_USEBMAP) {
1147 		KASSERTMSG((ph->ph_nmissing < pp->pr_itemsperpage),
1148 		    "%s: [%s] pool page empty", __func__, pp->pr_wchan);
1149 		v = pr_item_bitmap_get(pp, ph);
1150 	} else {
1151 		v = pr_item_linkedlist_get(pp, ph);
1152 	}
1153 	pp->pr_nitems--;
1154 	pp->pr_nout++;
1155 	if (ph->ph_nmissing == 0) {
1156 		KASSERT(pp->pr_nidle > 0);
1157 		pp->pr_nidle--;
1158 
1159 		/*
1160 		 * This page was previously empty.  Move it to the list of
1161 		 * partially-full pages.  This page is already curpage.
1162 		 */
1163 		LIST_REMOVE(ph, ph_pagelist);
1164 		LIST_INSERT_HEAD(&pp->pr_partpages, ph, ph_pagelist);
1165 	}
1166 	ph->ph_nmissing++;
1167 	if (ph->ph_nmissing == pp->pr_itemsperpage) {
1168 		KASSERTMSG(((pp->pr_roflags & PR_USEBMAP) ||
1169 			LIST_EMPTY(&ph->ph_itemlist)),
1170 		    "%s: [%s] nmissing (%u) inconsistent", __func__,
1171 			pp->pr_wchan, ph->ph_nmissing);
1172 		/*
1173 		 * This page is now full.  Move it to the full list
1174 		 * and select a new current page.
1175 		 */
1176 		LIST_REMOVE(ph, ph_pagelist);
1177 		LIST_INSERT_HEAD(&pp->pr_fullpages, ph, ph_pagelist);
1178 		pool_update_curpage(pp);
1179 	}
1180 
1181 	pp->pr_nget++;
1182 
1183 	/*
1184 	 * If we have a low water mark and we are now below that low
1185 	 * water mark, add more items to the pool.
1186 	 */
1187 	if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
1188 		/*
1189 		 * XXX: Should we log a warning?  Should we set up a timeout
1190 		 * to try again in a second or so?  The latter could break
1191 		 * a caller's assumptions about interrupt protection, etc.
1192 		 */
1193 	}
1194 
1195 	mutex_exit(&pp->pr_lock);
1196 	KASSERT((((vaddr_t)v) & (pp->pr_align - 1)) == 0);
1197 	FREECHECK_OUT(&pp->pr_freecheck, v);
1198 	pool_redzone_fill(pp, v);
1199 	pool_get_kmsan(pp, v);
1200 	if (flags & PR_ZERO)
1201 		memset(v, 0, pp->pr_reqsize);
1202 	return v;
1203 }
1204 
1205 /*
1206  * Internal version of pool_put().  Pool is already locked/entered.
1207  */
1208 static void
1209 pool_do_put(struct pool *pp, void *v, struct pool_pagelist *pq)
1210 {
1211 	struct pool_item_header *ph;
1212 
1213 	KASSERT(mutex_owned(&pp->pr_lock));
1214 	pool_redzone_check(pp, v);
1215 	pool_put_kmsan(pp, v);
1216 	FREECHECK_IN(&pp->pr_freecheck, v);
1217 	LOCKDEBUG_MEM_CHECK(v, pp->pr_size);
1218 
1219 	KASSERTMSG((pp->pr_nout > 0),
1220 	    "%s: [%s] putting with none out", __func__, pp->pr_wchan);
1221 
1222 	if (__predict_false((ph = pr_find_pagehead(pp, v)) == NULL)) {
1223 		panic("%s: [%s] page header missing", __func__,  pp->pr_wchan);
1224 	}
1225 
1226 	/*
1227 	 * Return to item list.
1228 	 */
1229 	if (pp->pr_roflags & PR_USEBMAP) {
1230 		pr_item_bitmap_put(pp, ph, v);
1231 	} else {
1232 		pr_item_linkedlist_put(pp, ph, v);
1233 	}
1234 	KDASSERT(ph->ph_nmissing != 0);
1235 	ph->ph_nmissing--;
1236 	pp->pr_nput++;
1237 	pp->pr_nitems++;
1238 	pp->pr_nout--;
1239 
1240 	/* Cancel "pool empty" condition if it exists */
1241 	if (pp->pr_curpage == NULL)
1242 		pp->pr_curpage = ph;
1243 
1244 	if (pp->pr_flags & PR_WANTED) {
1245 		pp->pr_flags &= ~PR_WANTED;
1246 		cv_broadcast(&pp->pr_cv);
1247 	}
1248 
1249 	/*
1250 	 * If this page is now empty, do one of two things:
1251 	 *
1252 	 *	(1) If we have more pages than the page high water mark,
1253 	 *	    free the page back to the system.  ONLY CONSIDER
1254 	 *	    FREEING BACK A PAGE IF WE HAVE MORE THAN OUR MINIMUM PAGE
1255 	 *	    CLAIM.
1256 	 *
1257 	 *	(2) Otherwise, move the page to the empty page list.
1258 	 *
1259 	 * Either way, select a new current page (so we use a partially-full
1260 	 * page if one is available).
1261 	 */
1262 	if (ph->ph_nmissing == 0) {
1263 		pp->pr_nidle++;
1264 		if (pp->pr_npages > pp->pr_minpages &&
1265 		    pp->pr_npages > pp->pr_maxpages) {
1266 			pr_rmpage(pp, ph, pq);
1267 		} else {
1268 			LIST_REMOVE(ph, ph_pagelist);
1269 			LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist);
1270 
1271 			/*
1272 			 * Update the timestamp on the page.  A page must
1273 			 * be idle for some period of time before it can
1274 			 * be reclaimed by the pagedaemon.  This minimizes
1275 			 * ping-pong'ing for memory.
1276 			 *
1277 			 * note for 64-bit time_t: truncating to 32-bit is not
1278 			 * a problem for our usage.
1279 			 */
1280 			ph->ph_time = time_uptime;
1281 		}
1282 		pool_update_curpage(pp);
1283 	}
1284 
1285 	/*
1286 	 * If the page was previously completely full, move it to the
1287 	 * partially-full list and make it the current page.  The next
1288 	 * allocation will get the item from this page, instead of
1289 	 * further fragmenting the pool.
1290 	 */
1291 	else if (ph->ph_nmissing == (pp->pr_itemsperpage - 1)) {
1292 		LIST_REMOVE(ph, ph_pagelist);
1293 		LIST_INSERT_HEAD(&pp->pr_partpages, ph, ph_pagelist);
1294 		pp->pr_curpage = ph;
1295 	}
1296 }
1297 
1298 void
1299 pool_put(struct pool *pp, void *v)
1300 {
1301 	struct pool_pagelist pq;
1302 
1303 	LIST_INIT(&pq);
1304 
1305 	mutex_enter(&pp->pr_lock);
1306 	if (!pool_put_quarantine(pp, v, &pq)) {
1307 		pool_do_put(pp, v, &pq);
1308 	}
1309 	mutex_exit(&pp->pr_lock);
1310 
1311 	pr_pagelist_free(pp, &pq);
1312 }
1313 
1314 /*
1315  * pool_grow: grow a pool by a page.
1316  *
1317  * => called with pool locked.
1318  * => unlock and relock the pool.
1319  * => return with pool locked.
1320  */
1321 
1322 static int
1323 pool_grow(struct pool *pp, int flags)
1324 {
1325 	struct pool_item_header *ph;
1326 	char *storage;
1327 
1328 	/*
1329 	 * If there's a pool_grow in progress, wait for it to complete
1330 	 * and try again from the top.
1331 	 */
1332 	if (pp->pr_flags & PR_GROWING) {
1333 		if (flags & PR_WAITOK) {
1334 			do {
1335 				cv_wait(&pp->pr_cv, &pp->pr_lock);
1336 			} while (pp->pr_flags & PR_GROWING);
1337 			return ERESTART;
1338 		} else {
1339 			if (pp->pr_flags & PR_GROWINGNOWAIT) {
1340 				/*
1341 				 * This needs an unlock/relock dance so
1342 				 * that the other caller has a chance to
1343 				 * run and actually do the thing.  Note
1344 				 * that this is effectively a busy-wait.
1345 				 */
1346 				mutex_exit(&pp->pr_lock);
1347 				mutex_enter(&pp->pr_lock);
1348 				return ERESTART;
1349 			}
1350 			return EWOULDBLOCK;
1351 		}
1352 	}
1353 	pp->pr_flags |= PR_GROWING;
1354 	if (flags & PR_WAITOK)
1355 		mutex_exit(&pp->pr_lock);
1356 	else
1357 		pp->pr_flags |= PR_GROWINGNOWAIT;
1358 
1359 	storage = pool_allocator_alloc(pp, flags);
1360 	if (__predict_false(storage == NULL))
1361 		goto out;
1362 
1363 	ph = pool_alloc_item_header(pp, storage, flags);
1364 	if (__predict_false(ph == NULL)) {
1365 		pool_allocator_free(pp, storage);
1366 		goto out;
1367 	}
1368 
1369 	if (flags & PR_WAITOK)
1370 		mutex_enter(&pp->pr_lock);
1371 	pool_prime_page(pp, storage, ph);
1372 	pp->pr_npagealloc++;
1373 	KASSERT(pp->pr_flags & PR_GROWING);
1374 	pp->pr_flags &= ~(PR_GROWING|PR_GROWINGNOWAIT);
1375 	/*
1376 	 * If anyone was waiting for pool_grow, notify them that we
1377 	 * may have just done it.
1378 	 */
1379 	cv_broadcast(&pp->pr_cv);
1380 	return 0;
1381 out:
1382 	if (flags & PR_WAITOK)
1383 		mutex_enter(&pp->pr_lock);
1384 	KASSERT(pp->pr_flags & PR_GROWING);
1385 	pp->pr_flags &= ~(PR_GROWING|PR_GROWINGNOWAIT);
1386 	return ENOMEM;
1387 }
1388 
1389 /*
1390  * Add N items to the pool.
1391  */
1392 int
1393 pool_prime(struct pool *pp, int n)
1394 {
1395 	int newpages;
1396 	int error = 0;
1397 
1398 	mutex_enter(&pp->pr_lock);
1399 
1400 	newpages = roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1401 
1402 	while (newpages > 0) {
1403 		error = pool_grow(pp, PR_NOWAIT);
1404 		if (error) {
1405 			if (error == ERESTART)
1406 				continue;
1407 			break;
1408 		}
1409 		pp->pr_minpages++;
1410 		newpages--;
1411 	}
1412 
1413 	if (pp->pr_minpages >= pp->pr_maxpages)
1414 		pp->pr_maxpages = pp->pr_minpages + 1;	/* XXX */
1415 
1416 	mutex_exit(&pp->pr_lock);
1417 	return error;
1418 }
1419 
1420 /*
1421  * Add a page worth of items to the pool.
1422  *
1423  * Note, we must be called with the pool descriptor LOCKED.
1424  */
1425 static void
1426 pool_prime_page(struct pool *pp, void *storage, struct pool_item_header *ph)
1427 {
1428 	const unsigned int align = pp->pr_align;
1429 	struct pool_item *pi;
1430 	void *cp = storage;
1431 	int n;
1432 
1433 	KASSERT(mutex_owned(&pp->pr_lock));
1434 	KASSERTMSG(((pp->pr_roflags & PR_NOALIGN) ||
1435 		(((uintptr_t)cp & (pp->pr_alloc->pa_pagesz - 1)) == 0)),
1436 	    "%s: [%s] unaligned page: %p", __func__, pp->pr_wchan, cp);
1437 
1438 	/*
1439 	 * Insert page header.
1440 	 */
1441 	LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist);
1442 	LIST_INIT(&ph->ph_itemlist);
1443 	ph->ph_page = storage;
1444 	ph->ph_nmissing = 0;
1445 	ph->ph_time = time_uptime;
1446 	if (pp->pr_roflags & PR_PHINPAGE)
1447 		ph->ph_poolid = pp->pr_poolid;
1448 	else
1449 		SPLAY_INSERT(phtree, &pp->pr_phtree, ph);
1450 
1451 	pp->pr_nidle++;
1452 
1453 	/*
1454 	 * The item space starts after the on-page header, if any.
1455 	 */
1456 	ph->ph_off = pp->pr_itemoffset;
1457 
1458 	/*
1459 	 * Color this page.
1460 	 */
1461 	ph->ph_off += pp->pr_curcolor;
1462 	cp = (char *)cp + ph->ph_off;
1463 	if ((pp->pr_curcolor += align) > pp->pr_maxcolor)
1464 		pp->pr_curcolor = 0;
1465 
1466 	KASSERT((((vaddr_t)cp) & (align - 1)) == 0);
1467 
1468 	/*
1469 	 * Insert remaining chunks on the bucket list.
1470 	 */
1471 	n = pp->pr_itemsperpage;
1472 	pp->pr_nitems += n;
1473 
1474 	if (pp->pr_roflags & PR_USEBMAP) {
1475 		pr_item_bitmap_init(pp, ph);
1476 	} else {
1477 		while (n--) {
1478 			pi = (struct pool_item *)cp;
1479 
1480 			KASSERT((((vaddr_t)pi) & (align - 1)) == 0);
1481 
1482 			/* Insert on page list */
1483 			LIST_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
1484 #ifdef POOL_CHECK_MAGIC
1485 			pi->pi_magic = PI_MAGIC;
1486 #endif
1487 			cp = (char *)cp + pp->pr_size;
1488 
1489 			KASSERT((((vaddr_t)cp) & (align - 1)) == 0);
1490 		}
1491 	}
1492 
1493 	/*
1494 	 * If the pool was depleted, point at the new page.
1495 	 */
1496 	if (pp->pr_curpage == NULL)
1497 		pp->pr_curpage = ph;
1498 
1499 	if (++pp->pr_npages > pp->pr_hiwat)
1500 		pp->pr_hiwat = pp->pr_npages;
1501 }
1502 
1503 /*
1504  * Used by pool_get() when nitems drops below the low water mark, in
1505  * order to bring pr_nitems back up to the low water mark.
1506  *
1507  * Note 1, we never wait for memory here, we let the caller decide what to do.
1508  *
1509  * Note 2, we must be called with the pool already locked, and we return
1510  * with it locked.
1511  */
1512 static int
1513 pool_catchup(struct pool *pp)
1514 {
1515 	int error = 0;
1516 
1517 	while (POOL_NEEDS_CATCHUP(pp)) {
1518 		error = pool_grow(pp, PR_NOWAIT);
1519 		if (error) {
1520 			if (error == ERESTART)
1521 				continue;
1522 			break;
1523 		}
1524 	}
1525 	return error;
1526 }
1527 
1528 static void
1529 pool_update_curpage(struct pool *pp)
1530 {
1531 
1532 	pp->pr_curpage = LIST_FIRST(&pp->pr_partpages);
1533 	if (pp->pr_curpage == NULL) {
1534 		pp->pr_curpage = LIST_FIRST(&pp->pr_emptypages);
1535 	}
1536 	KASSERT((pp->pr_curpage == NULL && pp->pr_nitems == 0) ||
1537 	    (pp->pr_curpage != NULL && pp->pr_nitems > 0));
1538 }
1539 
1540 void
1541 pool_setlowat(struct pool *pp, int n)
1542 {
1543 
1544 	mutex_enter(&pp->pr_lock);
1545 
1546 	pp->pr_minitems = n;
1547 	pp->pr_minpages = (n == 0)
1548 		? 0
1549 		: roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1550 
1551 	/* Make sure we're caught up with the newly-set low water mark. */
1552 	if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
1553 		/*
1554 		 * XXX: Should we log a warning?  Should we set up a timeout
1555 		 * to try again in a second or so?  The latter could break
1556 		 * a caller's assumptions about interrupt protection, etc.
1557 		 */
1558 	}
1559 
1560 	mutex_exit(&pp->pr_lock);
1561 }
1562 
1563 void
1564 pool_sethiwat(struct pool *pp, int n)
1565 {
1566 
1567 	mutex_enter(&pp->pr_lock);
1568 
1569 	pp->pr_maxpages = (n == 0)
1570 		? 0
1571 		: roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1572 
1573 	mutex_exit(&pp->pr_lock);
1574 }
1575 
1576 void
1577 pool_sethardlimit(struct pool *pp, int n, const char *warnmess, int ratecap)
1578 {
1579 
1580 	mutex_enter(&pp->pr_lock);
1581 
1582 	pp->pr_hardlimit = n;
1583 	pp->pr_hardlimit_warning = warnmess;
1584 	pp->pr_hardlimit_ratecap.tv_sec = ratecap;
1585 	pp->pr_hardlimit_warning_last.tv_sec = 0;
1586 	pp->pr_hardlimit_warning_last.tv_usec = 0;
1587 
1588 	/*
1589 	 * In-line version of pool_sethiwat(), because we don't want to
1590 	 * release the lock.
1591 	 */
1592 	pp->pr_maxpages = (n == 0)
1593 		? 0
1594 		: roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1595 
1596 	mutex_exit(&pp->pr_lock);
1597 }
1598 
1599 /*
1600  * Release all complete pages that have not been used recently.
1601  *
1602  * Must not be called from interrupt context.
1603  */
1604 int
1605 pool_reclaim(struct pool *pp)
1606 {
1607 	struct pool_item_header *ph, *phnext;
1608 	struct pool_pagelist pq;
1609 	uint32_t curtime;
1610 	bool klock;
1611 	int rv;
1612 
1613 	KASSERT(!cpu_intr_p() && !cpu_softintr_p());
1614 
1615 	if (pp->pr_drain_hook != NULL) {
1616 		/*
1617 		 * The drain hook must be called with the pool unlocked.
1618 		 */
1619 		(*pp->pr_drain_hook)(pp->pr_drain_hook_arg, PR_NOWAIT);
1620 	}
1621 
1622 	/*
1623 	 * XXXSMP Because we do not want to cause non-MPSAFE code
1624 	 * to block.
1625 	 */
1626 	if (pp->pr_ipl == IPL_SOFTNET || pp->pr_ipl == IPL_SOFTCLOCK ||
1627 	    pp->pr_ipl == IPL_SOFTSERIAL) {
1628 		KERNEL_LOCK(1, NULL);
1629 		klock = true;
1630 	} else
1631 		klock = false;
1632 
1633 	/* Reclaim items from the pool's cache (if any). */
1634 	if (pp->pr_cache != NULL)
1635 		pool_cache_invalidate(pp->pr_cache);
1636 
1637 	if (mutex_tryenter(&pp->pr_lock) == 0) {
1638 		if (klock) {
1639 			KERNEL_UNLOCK_ONE(NULL);
1640 		}
1641 		return 0;
1642 	}
1643 
1644 	LIST_INIT(&pq);
1645 
1646 	curtime = time_uptime;
1647 
1648 	for (ph = LIST_FIRST(&pp->pr_emptypages); ph != NULL; ph = phnext) {
1649 		phnext = LIST_NEXT(ph, ph_pagelist);
1650 
1651 		/* Check our minimum page claim */
1652 		if (pp->pr_npages <= pp->pr_minpages)
1653 			break;
1654 
1655 		KASSERT(ph->ph_nmissing == 0);
1656 		if (curtime - ph->ph_time < pool_inactive_time)
1657 			continue;
1658 
1659 		/*
1660 		 * If freeing this page would put us below
1661 		 * the low water mark, stop now.
1662 		 */
1663 		if ((pp->pr_nitems - pp->pr_itemsperpage) <
1664 		    pp->pr_minitems)
1665 			break;
1666 
1667 		pr_rmpage(pp, ph, &pq);
1668 	}
1669 
1670 	mutex_exit(&pp->pr_lock);
1671 
1672 	if (LIST_EMPTY(&pq))
1673 		rv = 0;
1674 	else {
1675 		pr_pagelist_free(pp, &pq);
1676 		rv = 1;
1677 	}
1678 
1679 	if (klock) {
1680 		KERNEL_UNLOCK_ONE(NULL);
1681 	}
1682 
1683 	return rv;
1684 }
1685 
1686 /*
1687  * Drain pools, one at a time. The drained pool is returned within ppp.
1688  *
1689  * Note, must never be called from interrupt context.
1690  */
1691 bool
1692 pool_drain(struct pool **ppp)
1693 {
1694 	bool reclaimed;
1695 	struct pool *pp;
1696 
1697 	KASSERT(!TAILQ_EMPTY(&pool_head));
1698 
1699 	pp = NULL;
1700 
1701 	/* Find next pool to drain, and add a reference. */
1702 	mutex_enter(&pool_head_lock);
1703 	do {
1704 		if (drainpp == NULL) {
1705 			drainpp = TAILQ_FIRST(&pool_head);
1706 		}
1707 		if (drainpp != NULL) {
1708 			pp = drainpp;
1709 			drainpp = TAILQ_NEXT(pp, pr_poollist);
1710 		}
1711 		/*
1712 		 * Skip completely idle pools.  We depend on at least
1713 		 * one pool in the system being active.
1714 		 */
1715 	} while (pp == NULL || pp->pr_npages == 0);
1716 	pp->pr_refcnt++;
1717 	mutex_exit(&pool_head_lock);
1718 
1719 	/* Drain the cache (if any) and pool. */
1720 	reclaimed = pool_reclaim(pp);
1721 
1722 	/* Finally, unlock the pool. */
1723 	mutex_enter(&pool_head_lock);
1724 	pp->pr_refcnt--;
1725 	cv_broadcast(&pool_busy);
1726 	mutex_exit(&pool_head_lock);
1727 
1728 	if (ppp != NULL)
1729 		*ppp = pp;
1730 
1731 	return reclaimed;
1732 }
1733 
1734 /*
1735  * Calculate the total number of pages consumed by pools.
1736  */
1737 int
1738 pool_totalpages(void)
1739 {
1740 
1741 	mutex_enter(&pool_head_lock);
1742 	int pages = pool_totalpages_locked();
1743 	mutex_exit(&pool_head_lock);
1744 
1745 	return pages;
1746 }
1747 
1748 int
1749 pool_totalpages_locked(void)
1750 {
1751 	struct pool *pp;
1752 	uint64_t total = 0;
1753 
1754 	TAILQ_FOREACH(pp, &pool_head, pr_poollist) {
1755 		uint64_t bytes = pp->pr_npages * pp->pr_alloc->pa_pagesz;
1756 
1757 		if ((pp->pr_roflags & PR_RECURSIVE) != 0)
1758 			bytes -= (pp->pr_nout * pp->pr_size);
1759 		total += bytes;
1760 	}
1761 
1762 	return atop(total);
1763 }
1764 
1765 /*
1766  * Diagnostic helpers.
1767  */
1768 
1769 void
1770 pool_printall(const char *modif, void (*pr)(const char *, ...))
1771 {
1772 	struct pool *pp;
1773 
1774 	TAILQ_FOREACH(pp, &pool_head, pr_poollist) {
1775 		pool_printit(pp, modif, pr);
1776 	}
1777 }
1778 
1779 void
1780 pool_printit(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
1781 {
1782 
1783 	if (pp == NULL) {
1784 		(*pr)("Must specify a pool to print.\n");
1785 		return;
1786 	}
1787 
1788 	pool_print1(pp, modif, pr);
1789 }
1790 
1791 static void
1792 pool_print_pagelist(struct pool *pp, struct pool_pagelist *pl,
1793     void (*pr)(const char *, ...))
1794 {
1795 	struct pool_item_header *ph;
1796 
1797 	LIST_FOREACH(ph, pl, ph_pagelist) {
1798 		(*pr)("\t\tpage %p, nmissing %d, time %" PRIu32 "\n",
1799 		    ph->ph_page, ph->ph_nmissing, ph->ph_time);
1800 #ifdef POOL_CHECK_MAGIC
1801 		struct pool_item *pi;
1802 		if (!(pp->pr_roflags & PR_USEBMAP)) {
1803 			LIST_FOREACH(pi, &ph->ph_itemlist, pi_list) {
1804 				if (pi->pi_magic != PI_MAGIC) {
1805 					(*pr)("\t\t\titem %p, magic 0x%x\n",
1806 					    pi, pi->pi_magic);
1807 				}
1808 			}
1809 		}
1810 #endif
1811 	}
1812 }
1813 
1814 static void
1815 pool_print1(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
1816 {
1817 	struct pool_item_header *ph;
1818 	pool_cache_t pc;
1819 	pcg_t *pcg;
1820 	pool_cache_cpu_t *cc;
1821 	uint64_t cpuhit, cpumiss;
1822 	int i, print_log = 0, print_pagelist = 0, print_cache = 0;
1823 	char c;
1824 
1825 	while ((c = *modif++) != '\0') {
1826 		if (c == 'l')
1827 			print_log = 1;
1828 		if (c == 'p')
1829 			print_pagelist = 1;
1830 		if (c == 'c')
1831 			print_cache = 1;
1832 	}
1833 
1834 	if ((pc = pp->pr_cache) != NULL) {
1835 		(*pr)("POOL CACHE");
1836 	} else {
1837 		(*pr)("POOL");
1838 	}
1839 
1840 	(*pr)(" %s: size %u, align %u, ioff %u, roflags 0x%08x\n",
1841 	    pp->pr_wchan, pp->pr_size, pp->pr_align, pp->pr_itemoffset,
1842 	    pp->pr_roflags);
1843 	(*pr)("\talloc %p\n", pp->pr_alloc);
1844 	(*pr)("\tminitems %u, minpages %u, maxpages %u, npages %u\n",
1845 	    pp->pr_minitems, pp->pr_minpages, pp->pr_maxpages, pp->pr_npages);
1846 	(*pr)("\titemsperpage %u, nitems %u, nout %u, hardlimit %u\n",
1847 	    pp->pr_itemsperpage, pp->pr_nitems, pp->pr_nout, pp->pr_hardlimit);
1848 
1849 	(*pr)("\tnget %lu, nfail %lu, nput %lu\n",
1850 	    pp->pr_nget, pp->pr_nfail, pp->pr_nput);
1851 	(*pr)("\tnpagealloc %lu, npagefree %lu, hiwat %u, nidle %lu\n",
1852 	    pp->pr_npagealloc, pp->pr_npagefree, pp->pr_hiwat, pp->pr_nidle);
1853 
1854 	if (print_pagelist == 0)
1855 		goto skip_pagelist;
1856 
1857 	if ((ph = LIST_FIRST(&pp->pr_emptypages)) != NULL)
1858 		(*pr)("\n\tempty page list:\n");
1859 	pool_print_pagelist(pp, &pp->pr_emptypages, pr);
1860 	if ((ph = LIST_FIRST(&pp->pr_fullpages)) != NULL)
1861 		(*pr)("\n\tfull page list:\n");
1862 	pool_print_pagelist(pp, &pp->pr_fullpages, pr);
1863 	if ((ph = LIST_FIRST(&pp->pr_partpages)) != NULL)
1864 		(*pr)("\n\tpartial-page list:\n");
1865 	pool_print_pagelist(pp, &pp->pr_partpages, pr);
1866 
1867 	if (pp->pr_curpage == NULL)
1868 		(*pr)("\tno current page\n");
1869 	else
1870 		(*pr)("\tcurpage %p\n", pp->pr_curpage->ph_page);
1871 
1872  skip_pagelist:
1873 	if (print_log == 0)
1874 		goto skip_log;
1875 
1876 	(*pr)("\n");
1877 
1878  skip_log:
1879 
1880 #define PR_GROUPLIST(pcg)						\
1881 	(*pr)("\t\tgroup %p: avail %d\n", pcg, pcg->pcg_avail);		\
1882 	for (i = 0; i < pcg->pcg_size; i++) {				\
1883 		if (pcg->pcg_objects[i].pcgo_pa !=			\
1884 		    POOL_PADDR_INVALID) {				\
1885 			(*pr)("\t\t\t%p, 0x%llx\n",			\
1886 			    pcg->pcg_objects[i].pcgo_va,		\
1887 			    (unsigned long long)			\
1888 			    pcg->pcg_objects[i].pcgo_pa);		\
1889 		} else {						\
1890 			(*pr)("\t\t\t%p\n",				\
1891 			    pcg->pcg_objects[i].pcgo_va);		\
1892 		}							\
1893 	}
1894 
1895 	if (pc != NULL) {
1896 		cpuhit = 0;
1897 		cpumiss = 0;
1898 		for (i = 0; i < __arraycount(pc->pc_cpus); i++) {
1899 			if ((cc = pc->pc_cpus[i]) == NULL)
1900 				continue;
1901 			cpuhit += cc->cc_hits;
1902 			cpumiss += cc->cc_misses;
1903 		}
1904 		(*pr)("\tcpu layer hits %llu misses %llu\n", cpuhit, cpumiss);
1905 		(*pr)("\tcache layer hits %llu misses %llu\n",
1906 		    pc->pc_hits, pc->pc_misses);
1907 		(*pr)("\tcache layer entry uncontended %llu contended %llu\n",
1908 		    pc->pc_hits + pc->pc_misses - pc->pc_contended,
1909 		    pc->pc_contended);
1910 		(*pr)("\tcache layer empty groups %u full groups %u\n",
1911 		    pc->pc_nempty, pc->pc_nfull);
1912 		if (print_cache) {
1913 			(*pr)("\tfull cache groups:\n");
1914 			for (pcg = pc->pc_fullgroups; pcg != NULL;
1915 			    pcg = pcg->pcg_next) {
1916 				PR_GROUPLIST(pcg);
1917 			}
1918 			(*pr)("\tempty cache groups:\n");
1919 			for (pcg = pc->pc_emptygroups; pcg != NULL;
1920 			    pcg = pcg->pcg_next) {
1921 				PR_GROUPLIST(pcg);
1922 			}
1923 		}
1924 	}
1925 #undef PR_GROUPLIST
1926 }
1927 
1928 static int
1929 pool_chk_page(struct pool *pp, const char *label, struct pool_item_header *ph)
1930 {
1931 	struct pool_item *pi;
1932 	void *page;
1933 	int n;
1934 
1935 	if ((pp->pr_roflags & PR_NOALIGN) == 0) {
1936 		page = POOL_OBJ_TO_PAGE(pp, ph);
1937 		if (page != ph->ph_page &&
1938 		    (pp->pr_roflags & PR_PHINPAGE) != 0) {
1939 			if (label != NULL)
1940 				printf("%s: ", label);
1941 			printf("pool(%p:%s): page inconsistency: page %p;"
1942 			       " at page head addr %p (p %p)\n", pp,
1943 				pp->pr_wchan, ph->ph_page,
1944 				ph, page);
1945 			return 1;
1946 		}
1947 	}
1948 
1949 	if ((pp->pr_roflags & PR_USEBMAP) != 0)
1950 		return 0;
1951 
1952 	for (pi = LIST_FIRST(&ph->ph_itemlist), n = 0;
1953 	     pi != NULL;
1954 	     pi = LIST_NEXT(pi,pi_list), n++) {
1955 
1956 #ifdef POOL_CHECK_MAGIC
1957 		if (pi->pi_magic != PI_MAGIC) {
1958 			if (label != NULL)
1959 				printf("%s: ", label);
1960 			printf("pool(%s): free list modified: magic=%x;"
1961 			       " page %p; item ordinal %d; addr %p\n",
1962 				pp->pr_wchan, pi->pi_magic, ph->ph_page,
1963 				n, pi);
1964 			panic("pool");
1965 		}
1966 #endif
1967 		if ((pp->pr_roflags & PR_NOALIGN) != 0) {
1968 			continue;
1969 		}
1970 		page = POOL_OBJ_TO_PAGE(pp, pi);
1971 		if (page == ph->ph_page)
1972 			continue;
1973 
1974 		if (label != NULL)
1975 			printf("%s: ", label);
1976 		printf("pool(%p:%s): page inconsistency: page %p;"
1977 		       " item ordinal %d; addr %p (p %p)\n", pp,
1978 			pp->pr_wchan, ph->ph_page,
1979 			n, pi, page);
1980 		return 1;
1981 	}
1982 	return 0;
1983 }
1984 
1985 
1986 int
1987 pool_chk(struct pool *pp, const char *label)
1988 {
1989 	struct pool_item_header *ph;
1990 	int r = 0;
1991 
1992 	mutex_enter(&pp->pr_lock);
1993 	LIST_FOREACH(ph, &pp->pr_emptypages, ph_pagelist) {
1994 		r = pool_chk_page(pp, label, ph);
1995 		if (r) {
1996 			goto out;
1997 		}
1998 	}
1999 	LIST_FOREACH(ph, &pp->pr_fullpages, ph_pagelist) {
2000 		r = pool_chk_page(pp, label, ph);
2001 		if (r) {
2002 			goto out;
2003 		}
2004 	}
2005 	LIST_FOREACH(ph, &pp->pr_partpages, ph_pagelist) {
2006 		r = pool_chk_page(pp, label, ph);
2007 		if (r) {
2008 			goto out;
2009 		}
2010 	}
2011 
2012 out:
2013 	mutex_exit(&pp->pr_lock);
2014 	return r;
2015 }
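
/*
 * A minimal usage sketch for the checker above (the label is
 * hypothetical): callers pass a tag so the report identifies the caller,
 * and treat a non-zero return as corruption, e.g.
 *
 *	if (pool_chk(pp, "frob_detach"))
 *		panic("frob: pool corrupted");
 */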
2016 
2017 /*
2018  * pool_cache_init:
2019  *
2020  *	Initialize a pool cache.
2021  */
2022 pool_cache_t
2023 pool_cache_init(size_t size, u_int align, u_int align_offset, u_int flags,
2024     const char *wchan, struct pool_allocator *palloc, int ipl,
2025     int (*ctor)(void *, void *, int), void (*dtor)(void *, void *), void *arg)
2026 {
2027 	pool_cache_t pc;
2028 
2029 	pc = pool_get(&cache_pool, PR_WAITOK);
2030 	if (pc == NULL)
2031 		return NULL;
2032 
2033 	pool_cache_bootstrap(pc, size, align, align_offset, flags, wchan,
2034 	   palloc, ipl, ctor, dtor, arg);
2035 
2036 	return pc;
2037 }
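
/*
 * Illustrative sketch of the usual pool_cache lifecycle.  The "frobnitz"
 * names are hypothetical and the block is disabled; it only shows how a
 * consumer would call pool_cache_init() above.  Passing a NULL allocator
 * with IPL_NONE lets pool_cache_bootstrap() below pick the default
 * (or a "big" allocator for items larger than PAGE_SIZE).
 */
#if 0
struct frobnitz {
	LIST_ENTRY(frobnitz)	f_entry;
	int			f_state;
};

static pool_cache_t frob_cache;

static int
frob_init(void)
{

	frob_cache = pool_cache_init(sizeof(struct frobnitz), coherency_unit,
	    0, 0, "frobpl", NULL, IPL_NONE, NULL, NULL, NULL);
	return frob_cache != NULL ? 0 : ENOMEM;
}
#endif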
2038 
2039 /*
2040  * pool_cache_bootstrap:
2041  *
2042  *	Kernel-private version of pool_cache_init().  The caller
2043  *	provides initial storage.
2044  */
2045 void
2046 pool_cache_bootstrap(pool_cache_t pc, size_t size, u_int align,
2047     u_int align_offset, u_int flags, const char *wchan,
2048     struct pool_allocator *palloc, int ipl,
2049     int (*ctor)(void *, void *, int), void (*dtor)(void *, void *),
2050     void *arg)
2051 {
2052 	CPU_INFO_ITERATOR cii;
2053 	pool_cache_t pc1;
2054 	struct cpu_info *ci;
2055 	struct pool *pp;
2056 
2057 	pp = &pc->pc_pool;
2058 	if (palloc == NULL && ipl == IPL_NONE) {
2059 		if (size > PAGE_SIZE) {
2060 			int bigidx = pool_bigidx(size);
2061 
2062 			palloc = &pool_allocator_big[bigidx];
2063 			flags |= PR_NOALIGN;
2064 		} else
2065 			palloc = &pool_allocator_nointr;
2066 	}
2067 	pool_init(pp, size, align, align_offset, flags, wchan, palloc, ipl);
2068 	mutex_init(&pc->pc_lock, MUTEX_DEFAULT, ipl);
2069 
2070 	if (ctor == NULL) {
2071 		ctor = NO_CTOR;
2072 	}
2073 	if (dtor == NULL) {
2074 		dtor = NO_DTOR;
2075 	}
2076 
2077 	pc->pc_emptygroups = NULL;
2078 	pc->pc_fullgroups = NULL;
2079 	pc->pc_partgroups = NULL;
2080 	pc->pc_ctor = ctor;
2081 	pc->pc_dtor = dtor;
2082 	pc->pc_arg  = arg;
2083 	pc->pc_hits  = 0;
2084 	pc->pc_misses = 0;
2085 	pc->pc_nempty = 0;
2086 	pc->pc_npart = 0;
2087 	pc->pc_nfull = 0;
2088 	pc->pc_contended = 0;
2089 	pc->pc_refcnt = 0;
2090 	pc->pc_freecheck = NULL;
2091 
2092 	if ((flags & PR_LARGECACHE) != 0) {
2093 		pc->pc_pcgsize = PCG_NOBJECTS_LARGE;
2094 		pc->pc_pcgpool = &pcg_large_pool;
2095 	} else {
2096 		pc->pc_pcgsize = PCG_NOBJECTS_NORMAL;
2097 		pc->pc_pcgpool = &pcg_normal_pool;
2098 	}
2099 
2100 	/* Allocate per-CPU caches. */
2101 	memset(pc->pc_cpus, 0, sizeof(pc->pc_cpus));
2102 	pc->pc_ncpu = 0;
2103 	if (ncpu < 2) {
2104 		/* XXX For sparc: boot CPU is not attached yet. */
2105 		pool_cache_cpu_init1(curcpu(), pc);
2106 	} else {
2107 		for (CPU_INFO_FOREACH(cii, ci)) {
2108 			pool_cache_cpu_init1(ci, pc);
2109 		}
2110 	}
2111 
2112 	/* Add to list of all pools. */
2113 	if (__predict_true(!cold))
2114 		mutex_enter(&pool_head_lock);
2115 	TAILQ_FOREACH(pc1, &pool_cache_head, pc_cachelist) {
2116 		if (strcmp(pc1->pc_pool.pr_wchan, pc->pc_pool.pr_wchan) > 0)
2117 			break;
2118 	}
2119 	if (pc1 == NULL)
2120 		TAILQ_INSERT_TAIL(&pool_cache_head, pc, pc_cachelist);
2121 	else
2122 		TAILQ_INSERT_BEFORE(pc1, pc, pc_cachelist);
2123 	if (__predict_true(!cold))
2124 		mutex_exit(&pool_head_lock);
2125 
2126 	membar_sync();
2127 	pp->pr_cache = pc;
2128 }
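
/*
 * Note on the tail of pool_cache_bootstrap(): the global cache list is
 * kept sorted by pr_wchan (name) order, and pp->pr_cache is only set
 * after the membar_sync(), presumably so that code inspecting pr_cache
 * without the pool lock never sees a partially initialized cache.
 */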
2129 
2130 /*
2131  * pool_cache_destroy:
2132  *
2133  *	Destroy a pool cache.
2134  */
2135 void
2136 pool_cache_destroy(pool_cache_t pc)
2137 {
2138 
2139 	pool_cache_bootstrap_destroy(pc);
2140 	pool_put(&cache_pool, pc);
2141 }
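
/*
 * Counterpart to the hypothetical frob_init() sketch above:
 *
 *	pool_cache_destroy(frob_cache);
 *
 * This is only for caches obtained from pool_cache_init(); caller-provided
 * storage set up with pool_cache_bootstrap() is torn down with
 * pool_cache_bootstrap_destroy() instead.
 */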
2142 
2143 /*
2144  * pool_cache_bootstrap_destroy:
2145  *
 2146  *	Destroy a pool cache initialized via pool_cache_bootstrap().
2147  */
2148 void
2149 pool_cache_bootstrap_destroy(pool_cache_t pc)
2150 {
2151 	struct pool *pp = &pc->pc_pool;
2152 	u_int i;
2153 
2154 	/* Remove it from the global list. */
2155 	mutex_enter(&pool_head_lock);
2156 	while (pc->pc_refcnt != 0)
2157 		cv_wait(&pool_busy, &pool_head_lock);
2158 	TAILQ_REMOVE(&pool_cache_head, pc, pc_cachelist);
2159 	mutex_exit(&pool_head_lock);
2160 
2161 	/* First, invalidate the entire cache. */
2162 	pool_cache_invalidate(pc);
2163 
2164 	/* Disassociate it from the pool. */
2165 	mutex_enter(&pp->pr_lock);
2166 	pp->pr_cache = NULL;
2167 	mutex_exit(&pp->pr_lock);
2168 
2169 	/* Destroy per-CPU data */
2170 	for (i = 0; i < __arraycount(pc->pc_cpus); i++)
2171 		pool_cache_invalidate_cpu(pc, i);
2172 
2173 	/* Finally, destroy it. */
2174 	mutex_destroy(&pc->pc_lock);
2175 	pool_destroy(pp);
2176 }
2177 
2178 /*
2179  * pool_cache_cpu_init1:
2180  *
2181  *	Called for each pool_cache whenever a new CPU is attached.
2182  */
2183 static void
2184 pool_cache_cpu_init1(struct cpu_info *ci, pool_cache_t pc)
2185 {
2186 	pool_cache_cpu_t *cc;
2187 	int index;
2188 
2189 	index = ci->ci_index;
2190 
2191 	KASSERT(index < __arraycount(pc->pc_cpus));
2192 
2193 	if ((cc = pc->pc_cpus[index]) != NULL) {
2194 		KASSERT(cc->cc_cpuindex == index);
2195 		return;
2196 	}
2197 
2198 	/*
2199 	 * The first CPU is 'free'.  This needs to be the case for
2200 	 * bootstrap - we may not be able to allocate yet.
2201 	 */
2202 	if (pc->pc_ncpu == 0) {
2203 		cc = &pc->pc_cpu0;
2204 		pc->pc_ncpu = 1;
2205 	} else {
2206 		mutex_enter(&pc->pc_lock);
2207 		pc->pc_ncpu++;
2208 		mutex_exit(&pc->pc_lock);
2209 		cc = pool_get(&cache_cpu_pool, PR_WAITOK);
2210 	}
2211 
2212 	cc->cc_ipl = pc->pc_pool.pr_ipl;
2213 	cc->cc_iplcookie = makeiplcookie(cc->cc_ipl);
2214 	cc->cc_cache = pc;
2215 	cc->cc_cpuindex = index;
2216 	cc->cc_hits = 0;
2217 	cc->cc_misses = 0;
2218 	cc->cc_current = __UNCONST(&pcg_dummy);
2219 	cc->cc_previous = __UNCONST(&pcg_dummy);
2220 
2221 	pc->pc_cpus[index] = cc;
2222 }
2223 
2224 /*
2225  * pool_cache_cpu_init:
2226  *
2227  *	Called whenever a new CPU is attached.
2228  */
2229 void
2230 pool_cache_cpu_init(struct cpu_info *ci)
2231 {
2232 	pool_cache_t pc;
2233 
2234 	mutex_enter(&pool_head_lock);
2235 	TAILQ_FOREACH(pc, &pool_cache_head, pc_cachelist) {
2236 		pc->pc_refcnt++;
2237 		mutex_exit(&pool_head_lock);
2238 
2239 		pool_cache_cpu_init1(ci, pc);
2240 
2241 		mutex_enter(&pool_head_lock);
2242 		pc->pc_refcnt--;
2243 		cv_broadcast(&pool_busy);
2244 	}
2245 	mutex_exit(&pool_head_lock);
2246 }
2247 
2248 /*
2249  * pool_cache_reclaim:
2250  *
2251  *	Reclaim memory from a pool cache.
2252  */
2253 bool
2254 pool_cache_reclaim(pool_cache_t pc)
2255 {
2256 
2257 	return pool_reclaim(&pc->pc_pool);
2258 }
2259 
2260 static void
2261 pool_cache_destruct_object1(pool_cache_t pc, void *object)
2262 {
2263 	(*pc->pc_dtor)(pc->pc_arg, object);
2264 	pool_put(&pc->pc_pool, object);
2265 }
2266 
2267 /*
2268  * pool_cache_destruct_object:
2269  *
2270  *	Force destruction of an object and its release back into
2271  *	the pool.
2272  */
2273 void
2274 pool_cache_destruct_object(pool_cache_t pc, void *object)
2275 {
2276 
2277 	FREECHECK_IN(&pc->pc_freecheck, object);
2278 
2279 	pool_cache_destruct_object1(pc, object);
2280 }
2281 
2282 /*
2283  * pool_cache_invalidate_groups:
2284  *
2285  *	Invalidate a chain of groups and destruct all objects.
2286  */
2287 static void
2288 pool_cache_invalidate_groups(pool_cache_t pc, pcg_t *pcg)
2289 {
2290 	void *object;
2291 	pcg_t *next;
2292 	int i;
2293 
2294 	for (; pcg != NULL; pcg = next) {
2295 		next = pcg->pcg_next;
2296 
2297 		for (i = 0; i < pcg->pcg_avail; i++) {
2298 			object = pcg->pcg_objects[i].pcgo_va;
2299 			pool_cache_destruct_object1(pc, object);
2300 		}
2301 
2302 		if (pcg->pcg_size == PCG_NOBJECTS_LARGE) {
2303 			pool_put(&pcg_large_pool, pcg);
2304 		} else {
2305 			KASSERT(pcg->pcg_size == PCG_NOBJECTS_NORMAL);
2306 			pool_put(&pcg_normal_pool, pcg);
2307 		}
2308 	}
2309 }
2310 
2311 /*
2312  * pool_cache_invalidate:
2313  *
2314  *	Invalidate a pool cache (destruct and release all of the
2315  *	cached objects).  Does not reclaim objects from the pool.
2316  *
2317  *	Note: For pool caches that provide constructed objects, there
2318  *	is an assumption that another level of synchronization is occurring
2319  *	between the input to the constructor and the cache invalidation.
2320  *
2321  *	Invalidation is a costly process and should not be called from
2322  *	interrupt context.
2323  */
2324 void
2325 pool_cache_invalidate(pool_cache_t pc)
2326 {
2327 	uint64_t where;
2328 	pcg_t *full, *empty, *part;
2329 
2330 	KASSERT(!cpu_intr_p() && !cpu_softintr_p());
2331 
2332 	if (ncpu < 2 || !mp_online) {
2333 		/*
2334 		 * We might be called early enough in the boot process
2335 		 * for the CPU data structures to not be fully initialized.
2336 		 * In this case, transfer the content of the local CPU's
2337 		 * cache back into global cache as only this CPU is currently
2338 		 * running.
2339 		 */
2340 		pool_cache_transfer(pc);
2341 	} else {
2342 		/*
2343 		 * Signal all CPUs that they must transfer their local
2344 		 * cache back to the global pool then wait for the xcall to
2345 		 * complete.
2346 		 */
2347 		where = xc_broadcast(0,
2348 		    __FPTRCAST(xcfunc_t, pool_cache_transfer), pc, NULL);
2349 		xc_wait(where);
2350 	}
2351 
2352 	/* Empty pool caches, then invalidate objects */
2353 	mutex_enter(&pc->pc_lock);
2354 	full = pc->pc_fullgroups;
2355 	empty = pc->pc_emptygroups;
2356 	part = pc->pc_partgroups;
2357 	pc->pc_fullgroups = NULL;
2358 	pc->pc_emptygroups = NULL;
2359 	pc->pc_partgroups = NULL;
2360 	pc->pc_nfull = 0;
2361 	pc->pc_nempty = 0;
2362 	pc->pc_npart = 0;
2363 	mutex_exit(&pc->pc_lock);
2364 
2365 	pool_cache_invalidate_groups(pc, full);
2366 	pool_cache_invalidate_groups(pc, empty);
2367 	pool_cache_invalidate_groups(pc, part);
2368 }
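
/*
 * Hedged usage note: for caches whose constructor captures external
 * state, the caller is expected to synchronize the state change with an
 * invalidation (see the comment above), roughly:
 *
 *	frob_update_template(sc);	(hypothetical helper)
 *	pool_cache_invalidate(frob_cache);
 *
 * so that stale constructed objects are destructed and later
 * pool_cache_get() calls construct fresh ones against the new state.
 */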
2369 
2370 /*
2371  * pool_cache_invalidate_cpu:
2372  *
 2373  *	Invalidate all CPU-bound cached objects in a pool cache, the CPU
 2374  *	being identified by its associated index.
 2375  *	It is the caller's responsibility to ensure that no operation is
 2376  *	taking place on this pool cache while doing this invalidation.
 2377  *	WARNING: as no inter-CPU locking is enforced, trying to invalidate
 2378  *	pool cached objects from a CPU different from the one currently running
 2379  *	may result in undefined behaviour.
2380  */
2381 static void
2382 pool_cache_invalidate_cpu(pool_cache_t pc, u_int index)
2383 {
2384 	pool_cache_cpu_t *cc;
2385 	pcg_t *pcg;
2386 
2387 	if ((cc = pc->pc_cpus[index]) == NULL)
2388 		return;
2389 
2390 	if ((pcg = cc->cc_current) != &pcg_dummy) {
2391 		pcg->pcg_next = NULL;
2392 		pool_cache_invalidate_groups(pc, pcg);
2393 	}
2394 	if ((pcg = cc->cc_previous) != &pcg_dummy) {
2395 		pcg->pcg_next = NULL;
2396 		pool_cache_invalidate_groups(pc, pcg);
2397 	}
2398 	if (cc != &pc->pc_cpu0)
2399 		pool_put(&cache_cpu_pool, cc);
2400 
2401 }
2402 
2403 void
2404 pool_cache_set_drain_hook(pool_cache_t pc, void (*fn)(void *, int), void *arg)
2405 {
2406 
2407 	pool_set_drain_hook(&pc->pc_pool, fn, arg);
2408 }
2409 
2410 void
2411 pool_cache_setlowat(pool_cache_t pc, int n)
2412 {
2413 
2414 	pool_setlowat(&pc->pc_pool, n);
2415 }
2416 
2417 void
2418 pool_cache_sethiwat(pool_cache_t pc, int n)
2419 {
2420 
2421 	pool_sethiwat(&pc->pc_pool, n);
2422 }
2423 
2424 void
2425 pool_cache_sethardlimit(pool_cache_t pc, int n, const char *warnmess, int ratecap)
2426 {
2427 
2428 	pool_sethardlimit(&pc->pc_pool, n, warnmess, ratecap);
2429 }
2430 
2431 static bool __noinline
2432 pool_cache_get_slow(pool_cache_cpu_t *cc, int s, void **objectp,
2433 		    paddr_t *pap, int flags)
2434 {
2435 	pcg_t *pcg, *cur;
2436 	uint64_t ncsw;
2437 	pool_cache_t pc;
2438 	void *object;
2439 
2440 	KASSERT(cc->cc_current->pcg_avail == 0);
2441 	KASSERT(cc->cc_previous->pcg_avail == 0);
2442 
2443 	pc = cc->cc_cache;
2444 	cc->cc_misses++;
2445 
2446 	/*
2447 	 * Nothing was available locally.  Try and grab a group
2448 	 * from the cache.
2449 	 */
2450 	if (__predict_false(!mutex_tryenter(&pc->pc_lock))) {
2451 		ncsw = curlwp->l_ncsw;
2452 		__insn_barrier();
2453 		mutex_enter(&pc->pc_lock);
2454 		pc->pc_contended++;
2455 
2456 		/*
2457 		 * If we context switched while locking, then
2458 		 * our view of the per-CPU data is invalid:
2459 		 * retry.
2460 		 */
2461 		__insn_barrier();
2462 		if (curlwp->l_ncsw != ncsw) {
2463 			mutex_exit(&pc->pc_lock);
2464 			return true;
2465 		}
2466 	}
2467 
2468 	if (__predict_true((pcg = pc->pc_fullgroups) != NULL)) {
2469 		/*
2470 		 * If there's a full group, release our empty
2471 		 * group back to the cache.  Install the full
2472 		 * group as cc_current and return.
2473 		 */
2474 		if (__predict_true((cur = cc->cc_current) != &pcg_dummy)) {
2475 			KASSERT(cur->pcg_avail == 0);
2476 			cur->pcg_next = pc->pc_emptygroups;
2477 			pc->pc_emptygroups = cur;
2478 			pc->pc_nempty++;
2479 		}
2480 		KASSERT(pcg->pcg_avail == pcg->pcg_size);
2481 		cc->cc_current = pcg;
2482 		pc->pc_fullgroups = pcg->pcg_next;
2483 		pc->pc_hits++;
2484 		pc->pc_nfull--;
2485 		mutex_exit(&pc->pc_lock);
2486 		return true;
2487 	}
2488 
2489 	/*
2490 	 * Nothing available locally or in cache.  Take the slow
2491 	 * path: fetch a new object from the pool and construct
2492 	 * it.
2493 	 */
2494 	pc->pc_misses++;
2495 	mutex_exit(&pc->pc_lock);
2496 	splx(s);
2497 
2498 	object = pool_get(&pc->pc_pool, flags);
2499 	*objectp = object;
2500 	if (__predict_false(object == NULL)) {
2501 		KASSERT((flags & (PR_NOWAIT|PR_LIMITFAIL)) != 0);
2502 		return false;
2503 	}
2504 
2505 	if (__predict_false((*pc->pc_ctor)(pc->pc_arg, object, flags) != 0)) {
2506 		pool_put(&pc->pc_pool, object);
2507 		*objectp = NULL;
2508 		return false;
2509 	}
2510 
2511 	KASSERT((((vaddr_t)object) & (pc->pc_pool.pr_align - 1)) == 0);
2512 
2513 	if (pap != NULL) {
2514 #ifdef POOL_VTOPHYS
2515 		*pap = POOL_VTOPHYS(object);
2516 #else
2517 		*pap = POOL_PADDR_INVALID;
2518 #endif
2519 	}
2520 
2521 	FREECHECK_OUT(&pc->pc_freecheck, object);
2522 	return false;
2523 }
2524 
2525 /*
2526  * pool_cache_get{,_paddr}:
2527  *
2528  *	Get an object from a pool cache (optionally returning
2529  *	the physical address of the object).
2530  */
2531 void *
2532 pool_cache_get_paddr(pool_cache_t pc, int flags, paddr_t *pap)
2533 {
2534 	pool_cache_cpu_t *cc;
2535 	pcg_t *pcg;
2536 	void *object;
2537 	int s;
2538 
2539 	KASSERT(!(flags & PR_NOWAIT) != !(flags & PR_WAITOK));
2540 	KASSERTMSG((!cpu_intr_p() && !cpu_softintr_p()) ||
2541 	    (pc->pc_pool.pr_ipl != IPL_NONE || cold || panicstr != NULL),
2542 	    "%s: [%s] is IPL_NONE, but called from interrupt context",
2543 	    __func__, pc->pc_pool.pr_wchan);
2544 
2545 	if (flags & PR_WAITOK) {
2546 		ASSERT_SLEEPABLE();
2547 	}
2548 
2549 	/* Lock out interrupts and disable preemption. */
2550 	s = splvm();
2551 	while (/* CONSTCOND */ true) {
2552 		/* Try and allocate an object from the current group. */
2553 		cc = pc->pc_cpus[curcpu()->ci_index];
2554 		KASSERT(cc->cc_cache == pc);
2555 	 	pcg = cc->cc_current;
2556 		if (__predict_true(pcg->pcg_avail > 0)) {
2557 			object = pcg->pcg_objects[--pcg->pcg_avail].pcgo_va;
2558 			if (__predict_false(pap != NULL))
2559 				*pap = pcg->pcg_objects[pcg->pcg_avail].pcgo_pa;
2560 #if defined(DIAGNOSTIC)
2561 			pcg->pcg_objects[pcg->pcg_avail].pcgo_va = NULL;
2562 			KASSERT(pcg->pcg_avail < pcg->pcg_size);
2563 			KASSERT(object != NULL);
2564 #endif
2565 			cc->cc_hits++;
2566 			splx(s);
2567 			FREECHECK_OUT(&pc->pc_freecheck, object);
2568 			pool_redzone_fill(&pc->pc_pool, object);
2569 			pool_cache_get_kmsan(pc, object);
2570 			return object;
2571 		}
2572 
2573 		/*
2574 		 * That failed.  If the previous group isn't empty, swap
2575 		 * it with the current group and allocate from there.
2576 		 */
2577 		pcg = cc->cc_previous;
2578 		if (__predict_true(pcg->pcg_avail > 0)) {
2579 			cc->cc_previous = cc->cc_current;
2580 			cc->cc_current = pcg;
2581 			continue;
2582 		}
2583 
2584 		/*
2585 		 * Can't allocate from either group: try the slow path.
2586 		 * If get_slow() allocated an object for us, or if
2587 		 * no more objects are available, it will return false.
2588 		 * Otherwise, we need to retry.
2589 		 */
2590 		if (!pool_cache_get_slow(cc, s, &object, pap, flags))
2591 			break;
2592 	}
2593 
2594 	/*
2595 	 * We would like to KASSERT(object || (flags & PR_NOWAIT)), but
2596 	 * pool_cache_get can fail even in the PR_WAITOK case, if the
2597 	 * constructor fails.
2598 	 */
2599 	return object;
2600 }
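
/*
 * Illustrative allocation sketch (hypothetical names, disabled).
 * pool_cache_get() is the usual wrapper that does not ask for the
 * physical address.  Even with PR_WAITOK the result must be checked,
 * since a constructor failure yields NULL (see the comment above).
 */
#if 0
static struct frobnitz *
frob_alloc(void)
{
	struct frobnitz *f;

	f = pool_cache_get(frob_cache, PR_WAITOK);
	if (f == NULL)
		return NULL;
	f->f_state = 0;
	return f;
}
#endif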
2601 
2602 static bool __noinline
2603 pool_cache_put_slow(pool_cache_cpu_t *cc, int s, void *object)
2604 {
2605 	struct lwp *l = curlwp;
2606 	pcg_t *pcg, *cur;
2607 	uint64_t ncsw;
2608 	pool_cache_t pc;
2609 
2610 	KASSERT(cc->cc_current->pcg_avail == cc->cc_current->pcg_size);
2611 	KASSERT(cc->cc_previous->pcg_avail == cc->cc_previous->pcg_size);
2612 
2613 	pc = cc->cc_cache;
2614 	pcg = NULL;
2615 	cc->cc_misses++;
2616 	ncsw = l->l_ncsw;
2617 	__insn_barrier();
2618 
2619 	/*
2620 	 * If there are no empty groups in the cache then allocate one
2621 	 * while still unlocked.
2622 	 */
2623 	if (__predict_false(pc->pc_emptygroups == NULL)) {
2624 		if (__predict_true(!pool_cache_disable)) {
2625 			pcg = pool_get(pc->pc_pcgpool, PR_NOWAIT);
2626 		}
2627 		/*
2628 		 * If pool_get() blocked, then our view of
2629 		 * the per-CPU data is invalid: retry.
2630 		 */
2631 		__insn_barrier();
2632 		if (__predict_false(l->l_ncsw != ncsw)) {
2633 			if (pcg != NULL) {
2634 				pool_put(pc->pc_pcgpool, pcg);
2635 			}
2636 			return true;
2637 		}
2638 		if (__predict_true(pcg != NULL)) {
2639 			pcg->pcg_avail = 0;
2640 			pcg->pcg_size = pc->pc_pcgsize;
2641 		}
2642 	}
2643 
2644 	/* Lock the cache. */
2645 	if (__predict_false(!mutex_tryenter(&pc->pc_lock))) {
2646 		mutex_enter(&pc->pc_lock);
2647 		pc->pc_contended++;
2648 
2649 		/*
2650 		 * If we context switched while locking, then our view of
2651 		 * the per-CPU data is invalid: retry.
2652 		 */
2653 		__insn_barrier();
2654 		if (__predict_false(l->l_ncsw != ncsw)) {
2655 			mutex_exit(&pc->pc_lock);
2656 			if (pcg != NULL) {
2657 				pool_put(pc->pc_pcgpool, pcg);
2658 			}
2659 			return true;
2660 		}
2661 	}
2662 
2663 	/* If there are no empty groups in the cache then allocate one. */
2664 	if (pcg == NULL && pc->pc_emptygroups != NULL) {
2665 		pcg = pc->pc_emptygroups;
2666 		pc->pc_emptygroups = pcg->pcg_next;
2667 		pc->pc_nempty--;
2668 	}
2669 
2670 	/*
 2671 	 * If there's an empty group, release our full group back
 2672 	 * to the cache.  Install the empty group on the local CPU
2673 	 * and return.
2674 	 */
2675 	if (pcg != NULL) {
2676 		KASSERT(pcg->pcg_avail == 0);
2677 		if (__predict_false(cc->cc_previous == &pcg_dummy)) {
2678 			cc->cc_previous = pcg;
2679 		} else {
2680 			cur = cc->cc_current;
2681 			if (__predict_true(cur != &pcg_dummy)) {
2682 				KASSERT(cur->pcg_avail == cur->pcg_size);
2683 				cur->pcg_next = pc->pc_fullgroups;
2684 				pc->pc_fullgroups = cur;
2685 				pc->pc_nfull++;
2686 			}
2687 			cc->cc_current = pcg;
2688 		}
2689 		pc->pc_hits++;
2690 		mutex_exit(&pc->pc_lock);
2691 		return true;
2692 	}
2693 
2694 	/*
2695 	 * Nothing available locally or in cache, and we didn't
2696 	 * allocate an empty group.  Take the slow path and destroy
2697 	 * the object here and now.
2698 	 */
2699 	pc->pc_misses++;
2700 	mutex_exit(&pc->pc_lock);
2701 	splx(s);
2702 	pool_cache_destruct_object(pc, object);
2703 
2704 	return false;
2705 }
2706 
2707 /*
2708  * pool_cache_put{,_paddr}:
2709  *
2710  *	Put an object back to the pool cache (optionally caching the
2711  *	physical address of the object).
2712  */
2713 void
2714 pool_cache_put_paddr(pool_cache_t pc, void *object, paddr_t pa)
2715 {
2716 	pool_cache_cpu_t *cc;
2717 	pcg_t *pcg;
2718 	int s;
2719 
2720 	KASSERT(object != NULL);
2721 	pool_cache_put_kmsan(pc, object);
2722 	pool_cache_redzone_check(pc, object);
2723 	FREECHECK_IN(&pc->pc_freecheck, object);
2724 
2725 	if (pc->pc_pool.pr_roflags & PR_PHINPAGE) {
2726 		pc_phinpage_check(pc, object);
2727 	}
2728 
2729 	if (pool_cache_put_quarantine(pc, object, pa)) {
2730 		return;
2731 	}
2732 
2733 	/* Lock out interrupts and disable preemption. */
2734 	s = splvm();
2735 	while (/* CONSTCOND */ true) {
2736 		/* If the current group isn't full, release it there. */
2737 		cc = pc->pc_cpus[curcpu()->ci_index];
2738 		KASSERT(cc->cc_cache == pc);
2739 	 	pcg = cc->cc_current;
2740 		if (__predict_true(pcg->pcg_avail < pcg->pcg_size)) {
2741 			pcg->pcg_objects[pcg->pcg_avail].pcgo_va = object;
2742 			pcg->pcg_objects[pcg->pcg_avail].pcgo_pa = pa;
2743 			pcg->pcg_avail++;
2744 			cc->cc_hits++;
2745 			splx(s);
2746 			return;
2747 		}
2748 
2749 		/*
2750 		 * That failed.  If the previous group isn't full, swap
2751 		 * it with the current group and try again.
2752 		 */
2753 		pcg = cc->cc_previous;
2754 		if (__predict_true(pcg->pcg_avail < pcg->pcg_size)) {
2755 			cc->cc_previous = cc->cc_current;
2756 			cc->cc_current = pcg;
2757 			continue;
2758 		}
2759 
2760 		/*
2761 		 * Can't free to either group: try the slow path.
2762 		 * If put_slow() releases the object for us, it
2763 		 * will return false.  Otherwise we need to retry.
2764 		 */
2765 		if (!pool_cache_put_slow(cc, s, object))
2766 			break;
2767 	}
2768 }
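
/*
 * Matching release sketch (hypothetical, disabled).  pool_cache_put()
 * is the wrapper that supplies no physical address; objects should be
 * handed back in constructed state, since they may be cached and given
 * out again without the ctor being re-run.
 */
#if 0
static void
frob_free(struct frobnitz *f)
{

	pool_cache_put(frob_cache, f);
}
#endif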
2769 
2770 /*
2771  * pool_cache_transfer:
2772  *
2773  *	Transfer objects from the per-CPU cache to the global cache.
2774  *	Run within a cross-call thread.
2775  */
2776 static void
2777 pool_cache_transfer(pool_cache_t pc)
2778 {
2779 	pool_cache_cpu_t *cc;
2780 	pcg_t *prev, *cur, **list;
2781 	int s;
2782 
2783 	s = splvm();
2784 	mutex_enter(&pc->pc_lock);
2785 	cc = pc->pc_cpus[curcpu()->ci_index];
2786 	cur = cc->cc_current;
2787 	cc->cc_current = __UNCONST(&pcg_dummy);
2788 	prev = cc->cc_previous;
2789 	cc->cc_previous = __UNCONST(&pcg_dummy);
2790 	if (cur != &pcg_dummy) {
2791 		if (cur->pcg_avail == cur->pcg_size) {
2792 			list = &pc->pc_fullgroups;
2793 			pc->pc_nfull++;
2794 		} else if (cur->pcg_avail == 0) {
2795 			list = &pc->pc_emptygroups;
2796 			pc->pc_nempty++;
2797 		} else {
2798 			list = &pc->pc_partgroups;
2799 			pc->pc_npart++;
2800 		}
2801 		cur->pcg_next = *list;
2802 		*list = cur;
2803 	}
2804 	if (prev != &pcg_dummy) {
2805 		if (prev->pcg_avail == prev->pcg_size) {
2806 			list = &pc->pc_fullgroups;
2807 			pc->pc_nfull++;
2808 		} else if (prev->pcg_avail == 0) {
2809 			list = &pc->pc_emptygroups;
2810 			pc->pc_nempty++;
2811 		} else {
2812 			list = &pc->pc_partgroups;
2813 			pc->pc_npart++;
2814 		}
2815 		prev->pcg_next = *list;
2816 		*list = prev;
2817 	}
2818 	mutex_exit(&pc->pc_lock);
2819 	splx(s);
2820 }
2821 
2822 static int
2823 pool_bigidx(size_t size)
2824 {
2825 	int i;
2826 
2827 	for (i = 0; i < __arraycount(pool_allocator_big); i++) {
2828 		if (1 << (i + POOL_ALLOCATOR_BIG_BASE) >= size)
2829 			return i;
2830 	}
2831 	panic("pool item size %zu too large, use a custom allocator", size);
2832 }
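
/*
 * Each pool_allocator_big[] slot is assumed here to hand out chunks of
 * 1 << (i + POOL_ALLOCATOR_BIG_BASE) bytes, so pool_bigidx() returns the
 * smallest index whose chunk covers the requested size and panics if
 * even the largest allocator is too small.
 */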
2833 
2834 static void *
2835 pool_allocator_alloc(struct pool *pp, int flags)
2836 {
2837 	struct pool_allocator *pa = pp->pr_alloc;
2838 	void *res;
2839 
2840 	res = (*pa->pa_alloc)(pp, flags);
2841 	if (res == NULL && (flags & PR_WAITOK) == 0) {
2842 		/*
2843 		 * We only run the drain hook here if PR_NOWAIT.
2844 		 * In other cases, the hook will be run in
2845 		 * pool_reclaim().
2846 		 */
2847 		if (pp->pr_drain_hook != NULL) {
2848 			(*pp->pr_drain_hook)(pp->pr_drain_hook_arg, flags);
2849 			res = (*pa->pa_alloc)(pp, flags);
2850 		}
2851 	}
2852 	return res;
2853 }
2854 
2855 static void
2856 pool_allocator_free(struct pool *pp, void *v)
2857 {
2858 	struct pool_allocator *pa = pp->pr_alloc;
2859 
2860 	if (pp->pr_redzone) {
2861 		kasan_mark(v, pa->pa_pagesz, pa->pa_pagesz, 0);
2862 	}
2863 	(*pa->pa_free)(pp, v);
2864 }
2865 
2866 void *
2867 pool_page_alloc(struct pool *pp, int flags)
2868 {
2869 	const vm_flag_t vflags = (flags & PR_WAITOK) ? VM_SLEEP: VM_NOSLEEP;
2870 	vmem_addr_t va;
2871 	int ret;
2872 
2873 	ret = uvm_km_kmem_alloc(kmem_va_arena, pp->pr_alloc->pa_pagesz,
2874 	    vflags | VM_INSTANTFIT, &va);
2875 
2876 	return ret ? NULL : (void *)va;
2877 }
2878 
2879 void
2880 pool_page_free(struct pool *pp, void *v)
2881 {
2882 
2883 	uvm_km_kmem_free(kmem_va_arena, (vaddr_t)v, pp->pr_alloc->pa_pagesz);
2884 }
2885 
2886 static void *
2887 pool_page_alloc_meta(struct pool *pp, int flags)
2888 {
2889 	const vm_flag_t vflags = (flags & PR_WAITOK) ? VM_SLEEP: VM_NOSLEEP;
2890 	vmem_addr_t va;
2891 	int ret;
2892 
2893 	ret = vmem_alloc(kmem_meta_arena, pp->pr_alloc->pa_pagesz,
2894 	    vflags | VM_INSTANTFIT, &va);
2895 
2896 	return ret ? NULL : (void *)va;
2897 }
2898 
2899 static void
2900 pool_page_free_meta(struct pool *pp, void *v)
2901 {
2902 
2903 	vmem_free(kmem_meta_arena, (vmem_addr_t)v, pp->pr_alloc->pa_pagesz);
2904 }
2905 
2906 #ifdef KMSAN
2907 static inline void
2908 pool_get_kmsan(struct pool *pp, void *p)
2909 {
2910 	kmsan_orig(p, pp->pr_size, KMSAN_TYPE_POOL, __RET_ADDR);
2911 	kmsan_mark(p, pp->pr_size, KMSAN_STATE_UNINIT);
2912 }
2913 
2914 static inline void
2915 pool_put_kmsan(struct pool *pp, void *p)
2916 {
2917 	kmsan_mark(p, pp->pr_size, KMSAN_STATE_INITED);
2918 }
2919 
2920 static inline void
2921 pool_cache_get_kmsan(pool_cache_t pc, void *p)
2922 {
2923 	if (__predict_false(pc_has_ctor(pc))) {
2924 		return;
2925 	}
2926 	pool_get_kmsan(&pc->pc_pool, p);
2927 }
2928 
2929 static inline void
2930 pool_cache_put_kmsan(pool_cache_t pc, void *p)
2931 {
2932 	pool_put_kmsan(&pc->pc_pool, p);
2933 }
2934 #endif
2935 
2936 #ifdef POOL_QUARANTINE
2937 static void
2938 pool_quarantine_init(struct pool *pp)
2939 {
 2940 	memset(&pp->pr_quar, 0, sizeof(pp->pr_quar));
2942 }
2943 
2944 static void
2945 pool_quarantine_flush(struct pool *pp)
2946 {
2947 	pool_quar_t *quar = &pp->pr_quar;
2948 	struct pool_pagelist pq;
2949 	size_t i;
2950 
2951 	LIST_INIT(&pq);
2952 
2953 	mutex_enter(&pp->pr_lock);
2954 	for (i = 0; i < POOL_QUARANTINE_DEPTH; i++) {
2955 		if (quar->list[i] == 0)
2956 			continue;
2957 		pool_do_put(pp, (void *)quar->list[i], &pq);
2958 	}
2959 	mutex_exit(&pp->pr_lock);
2960 
2961 	pr_pagelist_free(pp, &pq);
2962 }
2963 
2964 static bool
2965 pool_put_quarantine(struct pool *pp, void *v, struct pool_pagelist *pq)
2966 {
2967 	pool_quar_t *quar = &pp->pr_quar;
2968 	uintptr_t old;
2969 
2970 	if (pp->pr_roflags & PR_NOTOUCH) {
2971 		return false;
2972 	}
2973 
2974 	pool_redzone_check(pp, v);
2975 
2976 	old = quar->list[quar->rotor];
2977 	quar->list[quar->rotor] = (uintptr_t)v;
2978 	quar->rotor = (quar->rotor + 1) % POOL_QUARANTINE_DEPTH;
2979 	if (old != 0) {
2980 		pool_do_put(pp, (void *)old, pq);
2981 	}
2982 
2983 	return true;
2984 }
2985 
2986 static bool
2987 pool_cache_put_quarantine(pool_cache_t pc, void *p, paddr_t pa)
2988 {
2989 	pool_cache_destruct_object(pc, p);
2990 	return true;
2991 }
2992 #endif
2993 
2994 #ifdef POOL_REDZONE
2995 #if defined(_LP64)
2996 # define PRIME 0x9e37fffffffc0000UL
2997 #else /* defined(_LP64) */
2998 # define PRIME 0x9e3779b1
2999 #endif /* defined(_LP64) */
3000 #define STATIC_BYTE	0xFE
3001 CTASSERT(POOL_REDZONE_SIZE > 1);
3002 
3003 #ifndef KASAN
3004 static inline uint8_t
3005 pool_pattern_generate(const void *p)
3006 {
 3007 	return (uint8_t)(((uintptr_t)p) * PRIME >>
 3008 	    ((sizeof(uintptr_t) - sizeof(uint8_t)) * CHAR_BIT));
3009 }
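
/*
 * The multiplication by PRIME scatters the address bits and the shift
 * keeps only the top byte of the product (the shift count is
 * (sizeof(uintptr_t) - 1) * CHAR_BIT, i.e. 56 on LP64), giving each
 * item address a cheap, deterministic fill byte for the red zone.
 */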
3010 #endif
3011 
3012 static void
3013 pool_redzone_init(struct pool *pp, size_t requested_size)
3014 {
3015 	size_t redzsz;
3016 	size_t nsz;
3017 
3018 #ifdef KASAN
3019 	redzsz = requested_size;
3020 	kasan_add_redzone(&redzsz);
3021 	redzsz -= requested_size;
3022 #else
3023 	redzsz = POOL_REDZONE_SIZE;
3024 #endif
3025 
3026 	if (pp->pr_roflags & PR_NOTOUCH) {
3027 		pp->pr_redzone = false;
3028 		return;
3029 	}
3030 
3031 	/*
3032 	 * We may have extended the requested size earlier; check if
3033 	 * there's naturally space in the padding for a red zone.
3034 	 */
3035 	if (pp->pr_size - requested_size >= redzsz) {
3036 		pp->pr_reqsize_with_redzone = requested_size + redzsz;
3037 		pp->pr_redzone = true;
3038 		return;
3039 	}
3040 
3041 	/*
3042 	 * No space in the natural padding; check if we can extend a
3043 	 * bit the size of the pool.
3044 	 */
3045 	nsz = roundup(pp->pr_size + redzsz, pp->pr_align);
3046 	if (nsz <= pp->pr_alloc->pa_pagesz) {
3047 		/* Ok, we can */
3048 		pp->pr_size = nsz;
3049 		pp->pr_reqsize_with_redzone = requested_size + redzsz;
3050 		pp->pr_redzone = true;
3051 	} else {
3052 		/* No space for a red zone... snif :'( */
3053 		pp->pr_redzone = false;
3054 		printf("pool redzone disabled for '%s'\n", pp->pr_wchan);
3055 	}
3056 }
3057 
3058 static void
3059 pool_redzone_fill(struct pool *pp, void *p)
3060 {
3061 	if (!pp->pr_redzone)
3062 		return;
3063 #ifdef KASAN
3064 	kasan_mark(p, pp->pr_reqsize, pp->pr_reqsize_with_redzone,
3065 	    KASAN_POOL_REDZONE);
3066 #else
3067 	uint8_t *cp, pat;
3068 	const uint8_t *ep;
3069 
3070 	cp = (uint8_t *)p + pp->pr_reqsize;
3071 	ep = cp + POOL_REDZONE_SIZE;
3072 
3073 	/*
3074 	 * We really don't want the first byte of the red zone to be '\0';
3075 	 * an off-by-one in a string may not be properly detected.
3076 	 */
3077 	pat = pool_pattern_generate(cp);
3078 	*cp = (pat == '\0') ? STATIC_BYTE: pat;
3079 	cp++;
3080 
3081 	while (cp < ep) {
3082 		*cp = pool_pattern_generate(cp);
3083 		cp++;
3084 	}
3085 #endif
3086 }
3087 
3088 static void
3089 pool_redzone_check(struct pool *pp, void *p)
3090 {
3091 	if (!pp->pr_redzone)
3092 		return;
3093 #ifdef KASAN
3094 	kasan_mark(p, 0, pp->pr_reqsize_with_redzone, KASAN_POOL_FREED);
3095 #else
3096 	uint8_t *cp, pat, expected;
3097 	const uint8_t *ep;
3098 
3099 	cp = (uint8_t *)p + pp->pr_reqsize;
3100 	ep = cp + POOL_REDZONE_SIZE;
3101 
3102 	pat = pool_pattern_generate(cp);
3103 	expected = (pat == '\0') ? STATIC_BYTE: pat;
3104 	if (__predict_false(*cp != expected)) {
3105 		panic("%s: [%s] 0x%02x != 0x%02x", __func__,
3106 		    pp->pr_wchan, *cp, expected);
3107 	}
3108 	cp++;
3109 
3110 	while (cp < ep) {
3111 		expected = pool_pattern_generate(cp);
3112 		if (__predict_false(*cp != expected)) {
3113 			panic("%s: [%s] 0x%02x != 0x%02x", __func__,
3114 			    pp->pr_wchan, *cp, expected);
3115 		}
3116 		cp++;
3117 	}
3118 #endif
3119 }
3120 
3121 static void
3122 pool_cache_redzone_check(pool_cache_t pc, void *p)
3123 {
3124 #ifdef KASAN
3125 	/* If there is a ctor/dtor, leave the data as valid. */
3126 	if (__predict_false(pc_has_ctor(pc) || pc_has_dtor(pc))) {
3127 		return;
3128 	}
3129 #endif
3130 	pool_redzone_check(&pc->pc_pool, p);
3131 }
3132 
3133 #endif /* POOL_REDZONE */
3134 
3135 #if defined(DDB)
3136 static bool
3137 pool_in_page(struct pool *pp, struct pool_item_header *ph, uintptr_t addr)
3138 {
3139 
3140 	return (uintptr_t)ph->ph_page <= addr &&
3141 	    addr < (uintptr_t)ph->ph_page + pp->pr_alloc->pa_pagesz;
3142 }
3143 
3144 static bool
3145 pool_in_item(struct pool *pp, void *item, uintptr_t addr)
3146 {
3147 
3148 	return (uintptr_t)item <= addr && addr < (uintptr_t)item + pp->pr_size;
3149 }
3150 
3151 static bool
3152 pool_in_cg(struct pool *pp, struct pool_cache_group *pcg, uintptr_t addr)
3153 {
3154 	int i;
3155 
3156 	if (pcg == NULL) {
3157 		return false;
3158 	}
3159 	for (i = 0; i < pcg->pcg_avail; i++) {
3160 		if (pool_in_item(pp, pcg->pcg_objects[i].pcgo_va, addr)) {
3161 			return true;
3162 		}
3163 	}
3164 	return false;
3165 }
3166 
3167 static bool
3168 pool_allocated(struct pool *pp, struct pool_item_header *ph, uintptr_t addr)
3169 {
3170 
3171 	if ((pp->pr_roflags & PR_USEBMAP) != 0) {
3172 		unsigned int idx = pr_item_bitmap_index(pp, ph, (void *)addr);
3173 		pool_item_bitmap_t *bitmap =
3174 		    ph->ph_bitmap + (idx / BITMAP_SIZE);
3175 		pool_item_bitmap_t mask = 1 << (idx & BITMAP_MASK);
3176 
3177 		return (*bitmap & mask) == 0;
3178 	} else {
3179 		struct pool_item *pi;
3180 
3181 		LIST_FOREACH(pi, &ph->ph_itemlist, pi_list) {
3182 			if (pool_in_item(pp, pi, addr)) {
3183 				return false;
3184 			}
3185 		}
3186 		return true;
3187 	}
3188 }
3189 
3190 void
3191 pool_whatis(uintptr_t addr, void (*pr)(const char *, ...))
3192 {
3193 	struct pool *pp;
3194 
3195 	TAILQ_FOREACH(pp, &pool_head, pr_poollist) {
3196 		struct pool_item_header *ph;
3197 		uintptr_t item;
3198 		bool allocated = true;
3199 		bool incache = false;
3200 		bool incpucache = false;
3201 		char cpucachestr[32];
3202 
3203 		if ((pp->pr_roflags & PR_PHINPAGE) != 0) {
3204 			LIST_FOREACH(ph, &pp->pr_fullpages, ph_pagelist) {
3205 				if (pool_in_page(pp, ph, addr)) {
3206 					goto found;
3207 				}
3208 			}
3209 			LIST_FOREACH(ph, &pp->pr_partpages, ph_pagelist) {
3210 				if (pool_in_page(pp, ph, addr)) {
3211 					allocated =
3212 					    pool_allocated(pp, ph, addr);
3213 					goto found;
3214 				}
3215 			}
3216 			LIST_FOREACH(ph, &pp->pr_emptypages, ph_pagelist) {
3217 				if (pool_in_page(pp, ph, addr)) {
3218 					allocated = false;
3219 					goto found;
3220 				}
3221 			}
3222 			continue;
3223 		} else {
3224 			ph = pr_find_pagehead_noalign(pp, (void *)addr);
3225 			if (ph == NULL || !pool_in_page(pp, ph, addr)) {
3226 				continue;
3227 			}
3228 			allocated = pool_allocated(pp, ph, addr);
3229 		}
3230 found:
3231 		if (allocated && pp->pr_cache) {
3232 			pool_cache_t pc = pp->pr_cache;
3233 			struct pool_cache_group *pcg;
3234 			int i;
3235 
3236 			for (pcg = pc->pc_fullgroups; pcg != NULL;
3237 			    pcg = pcg->pcg_next) {
3238 				if (pool_in_cg(pp, pcg, addr)) {
3239 					incache = true;
3240 					goto print;
3241 				}
3242 			}
3243 			for (i = 0; i < __arraycount(pc->pc_cpus); i++) {
3244 				pool_cache_cpu_t *cc;
3245 
3246 				if ((cc = pc->pc_cpus[i]) == NULL) {
3247 					continue;
3248 				}
3249 				if (pool_in_cg(pp, cc->cc_current, addr) ||
3250 				    pool_in_cg(pp, cc->cc_previous, addr)) {
3251 					struct cpu_info *ci =
3252 					    cpu_lookup(i);
3253 
3254 					incpucache = true;
3255 					snprintf(cpucachestr,
3256 					    sizeof(cpucachestr),
3257 					    "cached by CPU %u",
3258 					    ci->ci_index);
3259 					goto print;
3260 				}
3261 			}
3262 		}
3263 print:
3264 		item = (uintptr_t)ph->ph_page + ph->ph_off;
3265 		item = item + rounddown(addr - item, pp->pr_size);
3266 		(*pr)("%p is %p+%zu in POOL '%s' (%s)\n",
 3267 		    (void *)addr, (void *)item, (size_t)(addr - item),
3268 		    pp->pr_wchan,
3269 		    incpucache ? cpucachestr :
3270 		    incache ? "cached" : allocated ? "allocated" : "free");
3271 	}
3272 }
3273 #endif /* defined(DDB) */
3274 
3275 static int
3276 pool_sysctl(SYSCTLFN_ARGS)
3277 {
3278 	struct pool_sysctl data;
3279 	struct pool *pp;
3280 	struct pool_cache *pc;
3281 	pool_cache_cpu_t *cc;
3282 	int error;
3283 	size_t i, written;
3284 
3285 	if (oldp == NULL) {
3286 		*oldlenp = 0;
3287 		TAILQ_FOREACH(pp, &pool_head, pr_poollist)
3288 			*oldlenp += sizeof(data);
3289 		return 0;
3290 	}
3291 
3292 	memset(&data, 0, sizeof(data));
3293 	error = 0;
3294 	written = 0;
3295 	TAILQ_FOREACH(pp, &pool_head, pr_poollist) {
3296 		if (written + sizeof(data) > *oldlenp)
3297 			break;
3298 		strlcpy(data.pr_wchan, pp->pr_wchan, sizeof(data.pr_wchan));
3299 		data.pr_pagesize = pp->pr_alloc->pa_pagesz;
3300 		data.pr_flags = pp->pr_roflags | pp->pr_flags;
3301 #define COPY(field) data.field = pp->field
3302 		COPY(pr_size);
3303 
3304 		COPY(pr_itemsperpage);
3305 		COPY(pr_nitems);
3306 		COPY(pr_nout);
3307 		COPY(pr_hardlimit);
3308 		COPY(pr_npages);
3309 		COPY(pr_minpages);
3310 		COPY(pr_maxpages);
3311 
3312 		COPY(pr_nget);
3313 		COPY(pr_nfail);
3314 		COPY(pr_nput);
3315 		COPY(pr_npagealloc);
3316 		COPY(pr_npagefree);
3317 		COPY(pr_hiwat);
3318 		COPY(pr_nidle);
3319 #undef COPY
3320 
3321 		data.pr_cache_nmiss_pcpu = 0;
3322 		data.pr_cache_nhit_pcpu = 0;
3323 		if (pp->pr_cache) {
3324 			pc = pp->pr_cache;
3325 			data.pr_cache_meta_size = pc->pc_pcgsize;
3326 			data.pr_cache_nfull = pc->pc_nfull;
3327 			data.pr_cache_npartial = pc->pc_npart;
3328 			data.pr_cache_nempty = pc->pc_nempty;
3329 			data.pr_cache_ncontended = pc->pc_contended;
3330 			data.pr_cache_nmiss_global = pc->pc_misses;
3331 			data.pr_cache_nhit_global = pc->pc_hits;
3332 			for (i = 0; i < pc->pc_ncpu; ++i) {
3333 				cc = pc->pc_cpus[i];
3334 				if (cc == NULL)
3335 					continue;
3336 				data.pr_cache_nmiss_pcpu += cc->cc_misses;
3337 				data.pr_cache_nhit_pcpu += cc->cc_hits;
3338 			}
3339 		} else {
3340 			data.pr_cache_meta_size = 0;
3341 			data.pr_cache_nfull = 0;
3342 			data.pr_cache_npartial = 0;
3343 			data.pr_cache_nempty = 0;
3344 			data.pr_cache_ncontended = 0;
3345 			data.pr_cache_nmiss_global = 0;
3346 			data.pr_cache_nhit_global = 0;
3347 		}
3348 
3349 		error = sysctl_copyout(l, &data, oldp, sizeof(data));
3350 		if (error)
3351 			break;
3352 		written += sizeof(data);
3353 		oldp = (char *)oldp + sizeof(data);
3354 	}
3355 
3356 	*oldlenp = written;
3357 	return error;
3358 }
3359 
3360 SYSCTL_SETUP(sysctl_pool_setup, "sysctl kern.pool setup")
3361 {
3362 	const struct sysctlnode *rnode = NULL;
3363 
3364 	sysctl_createv(clog, 0, NULL, &rnode,
3365 		       CTLFLAG_PERMANENT,
3366 		       CTLTYPE_STRUCT, "pool",
3367 		       SYSCTL_DESCR("Get pool statistics"),
3368 		       pool_sysctl, 0, NULL, 0,
3369 		       CTL_KERN, CTL_CREATE, CTL_EOL);
3370 }
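
/*
 * The "kern.pool" node created above exports one struct pool_sysctl per
 * pool; userland tools such as vmstat(1) can read it to display pool
 * statistics.  A hedged userland sketch (error handling omitted, field
 * names as copied out in pool_sysctl() above):
 *
 *	size_t len;
 *	sysctlbyname("kern.pool", NULL, &len, NULL, 0);
 *	struct pool_sysctl *buf = malloc(len);
 *	sysctlbyname("kern.pool", buf, &len, NULL, 0);
 *	for (size_t i = 0; i < len / sizeof(*buf); i++)
 *		printf("%s %ju\n", buf[i].pr_wchan,
 *		    (uintmax_t)buf[i].pr_nget);
 */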
3371