xref: /openbsd-src/sys/kern/subr_pool.c (revision c020cf82e0cc147236f01a8dca7052034cf9d30d)
1 /*	$OpenBSD: subr_pool.c,v 1.230 2020/01/24 06:31:17 cheloha Exp $	*/
2 /*	$NetBSD: subr_pool.c,v 1.61 2001/09/26 07:14:56 chs Exp $	*/
3 
4 /*-
5  * Copyright (c) 1997, 1999, 2000 The NetBSD Foundation, Inc.
6  * All rights reserved.
7  *
8  * This code is derived from software contributed to The NetBSD Foundation
9  * by Paul Kranenburg; by Jason R. Thorpe of the Numerical Aerospace
10  * Simulation Facility, NASA Ames Research Center.
11  *
12  * Redistribution and use in source and binary forms, with or without
13  * modification, are permitted provided that the following conditions
14  * are met:
15  * 1. Redistributions of source code must retain the above copyright
16  *    notice, this list of conditions and the following disclaimer.
17  * 2. Redistributions in binary form must reproduce the above copyright
18  *    notice, this list of conditions and the following disclaimer in the
19  *    documentation and/or other materials provided with the distribution.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
22  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
23  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
24  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
25  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31  * POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include <sys/param.h>
35 #include <sys/systm.h>
36 #include <sys/errno.h>
37 #include <sys/kernel.h>
38 #include <sys/malloc.h>
39 #include <sys/pool.h>
40 #include <sys/proc.h>
41 #include <sys/syslog.h>
42 #include <sys/sysctl.h>
43 #include <sys/task.h>
44 #include <sys/timeout.h>
45 #include <sys/percpu.h>
46 
47 #include <uvm/uvm_extern.h>
48 
49 /*
50  * Pool resource management utility.
51  *
52  * Memory is allocated in pages which are split into pieces according to
53  * the pool item size. Each page is kept on one of three lists in the
54  * pool structure: `pr_emptypages', `pr_fullpages' and `pr_partpages',
55  * for empty, full and partially-full pages respectively. The individual
56  * pool items are on a linked list headed by `ph_items' in each page
57  * header. The memory for building the page list is either taken from
58  * the allocated pages themselves (for small pool items) or taken from
59  * an internal pool of page headers (`phpool').
60  */
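
/*
 * A minimal usage sketch (struct example and example_pool below are
 * hypothetical, not part of this file):
 *
 *	struct pool example_pool;
 *	struct example *e;
 *
 *	pool_init(&example_pool, sizeof(struct example), 0, IPL_NONE,
 *	    PR_WAITOK, "example", NULL);
 *	e = pool_get(&example_pool, PR_WAITOK | PR_ZERO);
 *	...
 *	pool_put(&example_pool, e);
 */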
61 
62 /* List of all pools */
63 SIMPLEQ_HEAD(,pool) pool_head = SIMPLEQ_HEAD_INITIALIZER(pool_head);
64 
65 /*
66  * Every pool gets a unique serial number assigned to it. If this counter
67  * wraps, we're screwed, but we shouldn't create so many pools anyway.
68  */
69 unsigned int pool_serial;
70 unsigned int pool_count;
71 
72 /* Lock protecting the preceding variables, which make up the global pool state */
73 struct rwlock pool_lock = RWLOCK_INITIALIZER("pools");
74 
75 /* Private pool for page header structures */
76 struct pool phpool;
77 
78 struct pool_lock_ops {
79 	void	(*pl_init)(struct pool *, union pool_lock *,
80 		    const struct lock_type *);
81 	void	(*pl_enter)(union pool_lock *);
82 	int	(*pl_enter_try)(union pool_lock *);
83 	void	(*pl_leave)(union pool_lock *);
84 	void	(*pl_assert_locked)(union pool_lock *);
85 	void	(*pl_assert_unlocked)(union pool_lock *);
86 	int	(*pl_sleep)(void *, union pool_lock *, int, const char *);
87 };
88 
89 static const struct pool_lock_ops pool_lock_ops_mtx;
90 static const struct pool_lock_ops pool_lock_ops_rw;
91 
92 #ifdef WITNESS
93 #define pl_init(pp, pl) do {						\
94 	static const struct lock_type __lock_type = { .lt_name = #pl };	\
95 	(pp)->pr_lock_ops->pl_init(pp, pl, &__lock_type);		\
96 } while (0)
97 #else /* WITNESS */
98 #define pl_init(pp, pl)		(pp)->pr_lock_ops->pl_init(pp, pl, NULL)
99 #endif /* WITNESS */
100 
101 static inline void
102 pl_enter(struct pool *pp, union pool_lock *pl)
103 {
104 	pp->pr_lock_ops->pl_enter(pl);
105 }
106 static inline int
107 pl_enter_try(struct pool *pp, union pool_lock *pl)
108 {
109 	return pp->pr_lock_ops->pl_enter_try(pl);
110 }
111 static inline void
112 pl_leave(struct pool *pp, union pool_lock *pl)
113 {
114 	pp->pr_lock_ops->pl_leave(pl);
115 }
116 static inline void
117 pl_assert_locked(struct pool *pp, union pool_lock *pl)
118 {
119 	pp->pr_lock_ops->pl_assert_locked(pl);
120 }
121 static inline void
122 pl_assert_unlocked(struct pool *pp, union pool_lock *pl)
123 {
124 	pp->pr_lock_ops->pl_assert_unlocked(pl);
125 }
126 static inline int
127 pl_sleep(struct pool *pp, void *ident, union pool_lock *lock, int priority,
128     const char *wmesg)
129 {
130 	return pp->pr_lock_ops->pl_sleep(ident, lock, priority, wmesg);
131 }
132 
133 struct pool_item {
134 	u_long				pi_magic;
135 	XSIMPLEQ_ENTRY(pool_item)	pi_list;
136 };
137 #define POOL_IMAGIC(ph, pi) ((u_long)(pi) ^ (ph)->ph_magic)
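/*
 * While free, an item's pi_magic holds its own address XORed with the
 * page's random ph_magic; pool_do_get() and pool_p_free() panic if the
 * stored value stops matching POOL_IMAGIC(ph, pi), catching writes to
 * freed items.
 */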
138 
139 struct pool_page_header {
140 	/* Page headers */
141 	TAILQ_ENTRY(pool_page_header)
142 				ph_entry;	/* pool page list */
143 	XSIMPLEQ_HEAD(, pool_item)
144 				ph_items;	/* free items on the page */
145 	RBT_ENTRY(pool_page_header)
146 				ph_node;	/* off-page page headers */
147 	unsigned int		ph_nmissing;	/* # of chunks in use */
148 	caddr_t			ph_page;	/* this page's address */
149 	caddr_t			ph_colored;	/* page's colored address */
150 	unsigned long		ph_magic;
151 	int			ph_tick;
152 };
153 #define POOL_MAGICBIT (1 << 3) /* keep away from perturbed low bits */
154 #define POOL_PHPOISON(ph) ISSET((ph)->ph_magic, POOL_MAGICBIT)
155 
156 #ifdef MULTIPROCESSOR
157 struct pool_cache_item {
158 	struct pool_cache_item	*ci_next;	/* next item in list */
159 	unsigned long		 ci_nitems;	/* number of items in list */
160 	TAILQ_ENTRY(pool_cache_item)
161 				 ci_nextl;	/* entry in list of lists */
162 };
163 
164 /* the bit above the nitems count records whether the cached item is poisoned */
165 #define POOL_CACHE_ITEM_NITEMS_MASK	0x7ffffffUL
166 #define POOL_CACHE_ITEM_NITEMS_POISON	0x8000000UL
167 
168 #define POOL_CACHE_ITEM_NITEMS(_ci)					\
169     ((_ci)->ci_nitems & POOL_CACHE_ITEM_NITEMS_MASK)
170 
171 #define POOL_CACHE_ITEM_POISONED(_ci)					\
172     ISSET((_ci)->ci_nitems, POOL_CACHE_ITEM_NITEMS_POISON)
173 
174 struct pool_cache {
175 	struct pool_cache_item	*pc_actv;	/* active list of items */
176 	unsigned long		 pc_nactv;	/* actv head nitems cache */
177 	struct pool_cache_item	*pc_prev;	/* previous list of items */
178 
179 	uint64_t		 pc_gen;	/* generation number */
180 	uint64_t		 pc_nget;	/* # of successful requests */
181 	uint64_t		 pc_nfail;	/* # of unsuccessful reqs */
182 	uint64_t		 pc_nput;	/* # of releases */
183 	uint64_t		 pc_nlget;	/* # of list requests */
184 	uint64_t		 pc_nlfail;	/* # of fails getting a list */
185 	uint64_t		 pc_nlput;	/* # of list releases */
186 
187 	int			 pc_nout;
188 };
189 
190 void	*pool_cache_get(struct pool *);
191 void	 pool_cache_put(struct pool *, void *);
192 void	 pool_cache_destroy(struct pool *);
193 void	 pool_cache_gc(struct pool *);
194 #endif
195 void	 pool_cache_pool_info(struct pool *, struct kinfo_pool *);
196 int	 pool_cache_info(struct pool *, void *, size_t *);
197 int	 pool_cache_cpus_info(struct pool *, void *, size_t *);
198 
199 #ifdef POOL_DEBUG
200 int	pool_debug = 1;
201 #else
202 int	pool_debug = 0;
203 #endif
204 
205 #define POOL_INPGHDR(pp) ((pp)->pr_phoffset != 0)
206 
207 struct pool_page_header *
208 	 pool_p_alloc(struct pool *, int, int *);
209 void	 pool_p_insert(struct pool *, struct pool_page_header *);
210 void	 pool_p_remove(struct pool *, struct pool_page_header *);
211 void	 pool_p_free(struct pool *, struct pool_page_header *);
212 
213 void	 pool_update_curpage(struct pool *);
214 void	*pool_do_get(struct pool *, int, int *);
215 void	 pool_do_put(struct pool *, void *);
216 int	 pool_chk_page(struct pool *, struct pool_page_header *, int);
217 int	 pool_chk(struct pool *);
218 void	 pool_get_done(struct pool *, void *, void *);
219 void	 pool_runqueue(struct pool *, int);
220 
221 void	*pool_allocator_alloc(struct pool *, int, int *);
222 void	 pool_allocator_free(struct pool *, void *);
223 
224 /*
225  * The default pool allocator.
226  */
227 void	*pool_page_alloc(struct pool *, int, int *);
228 void	pool_page_free(struct pool *, void *);
229 
230 /*
231  * safe for interrupts; this is the default allocator
232  */
233 struct pool_allocator pool_allocator_single = {
234 	pool_page_alloc,
235 	pool_page_free,
236 	POOL_ALLOC_SIZE(PAGE_SIZE, POOL_ALLOC_ALIGNED)
237 };
238 
239 void	*pool_multi_alloc(struct pool *, int, int *);
240 void	pool_multi_free(struct pool *, void *);
241 
242 struct pool_allocator pool_allocator_multi = {
243 	pool_multi_alloc,
244 	pool_multi_free,
245 	POOL_ALLOC_SIZES(PAGE_SIZE, (1UL << 31), POOL_ALLOC_ALIGNED)
246 };
247 
248 void	*pool_multi_alloc_ni(struct pool *, int, int *);
249 void	pool_multi_free_ni(struct pool *, void *);
250 
251 struct pool_allocator pool_allocator_multi_ni = {
252 	pool_multi_alloc_ni,
253 	pool_multi_free_ni,
254 	POOL_ALLOC_SIZES(PAGE_SIZE, (1UL << 31), POOL_ALLOC_ALIGNED)
255 };
256 
257 #ifdef DDB
258 void	 pool_print_pagelist(struct pool_pagelist *, int (*)(const char *, ...)
259 	     __attribute__((__format__(__kprintf__,1,2))));
260 void	 pool_print1(struct pool *, const char *, int (*)(const char *, ...)
261 	     __attribute__((__format__(__kprintf__,1,2))));
262 #endif
263 
264 /* stale page garbage collectors */
265 void	pool_gc_sched(void *);
266 struct timeout pool_gc_tick = TIMEOUT_INITIALIZER(pool_gc_sched, NULL);
267 void	pool_gc_pages(void *);
268 struct task pool_gc_task = TASK_INITIALIZER(pool_gc_pages, NULL);
269 int pool_wait_free = 1;
270 int pool_wait_gc = 8;
271 
272 RBT_PROTOTYPE(phtree, pool_page_header, ph_node, phtree_compare);
273 
274 static inline int
275 phtree_compare(const struct pool_page_header *a,
276     const struct pool_page_header *b)
277 {
278 	vaddr_t va = (vaddr_t)a->ph_page;
279 	vaddr_t vb = (vaddr_t)b->ph_page;
280 
281 	/* compare in reverse so RBT_NFIND finds the greatest ph_page <= addr */
282 	if (vb < va)
283 		return (-1);
284 	if (vb > va)
285 		return (1);
286 
287 	return (0);
288 }
289 
290 RBT_GENERATE(phtree, pool_page_header, ph_node, phtree_compare);
291 
292 /*
293  * Return the pool page header based on page address.
294  */
295 static inline struct pool_page_header *
296 pr_find_pagehead(struct pool *pp, void *v)
297 {
298 	struct pool_page_header *ph, key;
299 
300 	if (POOL_INPGHDR(pp)) {
301 		caddr_t page;
302 
303 		page = (caddr_t)((vaddr_t)v & pp->pr_pgmask);
304 
305 		return ((struct pool_page_header *)(page + pp->pr_phoffset));
306 	}
307 
308 	key.ph_page = v;
309 	ph = RBT_NFIND(phtree, &pp->pr_phtree, &key);
310 	if (ph == NULL)
311 		panic("%s: %s: page header missing", __func__, pp->pr_wchan);
312 
313 	KASSERT(ph->ph_page <= (caddr_t)v);
314 	if (ph->ph_page + pp->pr_pgsize <= (caddr_t)v)
315 		panic("%s: %s: incorrect page", __func__, pp->pr_wchan);
316 
317 	return (ph);
318 }
319 
320 /*
321  * Initialize the given pool resource structure.
322  *
323  * We export this routine to allow other kernel parts to declare
324  * static pools that must be initialized before malloc() is available.
325  */
326 void
327 pool_init(struct pool *pp, size_t size, u_int align, int ipl, int flags,
328     const char *wchan, struct pool_allocator *palloc)
329 {
330 	int off = 0, space;
331 	unsigned int pgsize = PAGE_SIZE, items;
332 	size_t pa_pagesz;
333 #ifdef DIAGNOSTIC
334 	struct pool *iter;
335 #endif
336 
337 	if (align == 0)
338 		align = ALIGN(1);
339 
340 	if (size < sizeof(struct pool_item))
341 		size = sizeof(struct pool_item);
342 
343 	size = roundup(size, align);
344 
345 	while (size * 8 > pgsize)
346 		pgsize <<= 1;
347 
348 	if (palloc == NULL) {
349 		if (pgsize > PAGE_SIZE) {
350 			palloc = ISSET(flags, PR_WAITOK) ?
351 			    &pool_allocator_multi_ni : &pool_allocator_multi;
352 		} else
353 			palloc = &pool_allocator_single;
354 
355 		pa_pagesz = palloc->pa_pagesz;
356 	} else {
357 		size_t pgsizes;
358 
359 		pa_pagesz = palloc->pa_pagesz;
360 		if (pa_pagesz == 0)
361 			pa_pagesz = POOL_ALLOC_DEFAULT;
362 
363 		pgsizes = pa_pagesz & ~POOL_ALLOC_ALIGNED;
364 
365 		/* make sure the allocator can fit at least one item */
366 		if (size > pgsizes) {
367 			panic("%s: pool %s item size 0x%zx > "
368 			    "allocator %p sizes 0x%zx", __func__, wchan,
369 			    size, palloc, pgsizes);
370 		}
371 
372 		/* shrink pgsize until it fits into the range */
373 		while (!ISSET(pgsizes, pgsize))
374 			pgsize >>= 1;
375 	}
376 	KASSERT(ISSET(pa_pagesz, pgsize));
377 
378 	items = pgsize / size;
379 
380 	/*
381 	 * Decide whether to put the page header off page to avoid
382 	 * wasting too large a part of the page. Off-page page headers
383 	 * go into an RB tree, so we can match a returned item with
384 	 * its header based on the page address.
385 	 */
386 	if (ISSET(pa_pagesz, POOL_ALLOC_ALIGNED)) {
387 		if (pgsize - (size * items) >
388 		    sizeof(struct pool_page_header)) {
389 			off = pgsize - sizeof(struct pool_page_header);
390 		} else if (sizeof(struct pool_page_header) * 2 >= size) {
391 			off = pgsize - sizeof(struct pool_page_header);
392 			items = off / size;
393 		}
394 	}
395 
396 	KASSERT(items > 0);
397 
398 	/*
399 	 * Initialize the pool structure.
400 	 */
401 	memset(pp, 0, sizeof(*pp));
402 	if (ISSET(flags, PR_RWLOCK)) {
403 		KASSERT(flags & PR_WAITOK);
404 		pp->pr_lock_ops = &pool_lock_ops_rw;
405 	} else
406 		pp->pr_lock_ops = &pool_lock_ops_mtx;
407 	TAILQ_INIT(&pp->pr_emptypages);
408 	TAILQ_INIT(&pp->pr_fullpages);
409 	TAILQ_INIT(&pp->pr_partpages);
410 	pp->pr_curpage = NULL;
411 	pp->pr_npages = 0;
412 	pp->pr_minitems = 0;
413 	pp->pr_minpages = 0;
414 	pp->pr_maxpages = 8;
415 	pp->pr_size = size;
416 	pp->pr_pgsize = pgsize;
417 	pp->pr_pgmask = ~0UL ^ (pgsize - 1);
418 	pp->pr_phoffset = off;
419 	pp->pr_itemsperpage = items;
420 	pp->pr_wchan = wchan;
421 	pp->pr_alloc = palloc;
422 	pp->pr_nitems = 0;
423 	pp->pr_nout = 0;
424 	pp->pr_hardlimit = UINT_MAX;
425 	pp->pr_hardlimit_warning = NULL;
426 	pp->pr_hardlimit_ratecap.tv_sec = 0;
427 	pp->pr_hardlimit_ratecap.tv_usec = 0;
428 	pp->pr_hardlimit_warning_last.tv_sec = 0;
429 	pp->pr_hardlimit_warning_last.tv_usec = 0;
430 	RBT_INIT(phtree, &pp->pr_phtree);
431 
432 	/*
433 	 * Use the space between the chunks and the page header
434 	 * for cache coloring.
435 	 */
436 	space = POOL_INPGHDR(pp) ? pp->pr_phoffset : pp->pr_pgsize;
437 	space -= pp->pr_itemsperpage * pp->pr_size;
438 	pp->pr_align = align;
439 	pp->pr_maxcolors = (space / align) + 1;
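
	/*
	 * For example (illustrative numbers): 64 bytes of slack with
	 * 8-byte alignment gives 9 colors, so successive pages start
	 * their items at offsets 0, 8, ..., 64 (see ph_colored in
	 * pool_p_alloc()).
	 */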
440 
441 	pp->pr_nget = 0;
442 	pp->pr_nfail = 0;
443 	pp->pr_nput = 0;
444 	pp->pr_npagealloc = 0;
445 	pp->pr_npagefree = 0;
446 	pp->pr_hiwat = 0;
447 	pp->pr_nidle = 0;
448 
449 	pp->pr_ipl = ipl;
450 	pp->pr_flags = flags;
451 
452 	pl_init(pp, &pp->pr_lock);
453 	pl_init(pp, &pp->pr_requests_lock);
454 	TAILQ_INIT(&pp->pr_requests);
455 
456 	if (phpool.pr_size == 0) {
457 		pool_init(&phpool, sizeof(struct pool_page_header), 0,
458 		    IPL_HIGH, 0, "phpool", NULL);
459 
460 		/* make sure phpool won't "recurse" */
461 		KASSERT(POOL_INPGHDR(&phpool));
462 	}
463 
464 	/* pglistalloc/constraint parameters */
465 	pp->pr_crange = &kp_dirty;
466 
467 	/* Insert this into the list of all pools. */
468 	rw_enter_write(&pool_lock);
469 #ifdef DIAGNOSTIC
470 	SIMPLEQ_FOREACH(iter, &pool_head, pr_poollist) {
471 		if (iter == pp)
472 			panic("%s: pool %s already on list", __func__, wchan);
473 	}
474 #endif
475 
476 	pp->pr_serial = ++pool_serial;
477 	if (pool_serial == 0)
478 		panic("%s: too much uptime", __func__);
479 
480 	SIMPLEQ_INSERT_HEAD(&pool_head, pp, pr_poollist);
481 	pool_count++;
482 	rw_exit_write(&pool_lock);
483 }
484 
485 /*
486  * Decommission a pool resource.
487  */
488 void
489 pool_destroy(struct pool *pp)
490 {
491 	struct pool_page_header *ph;
492 	struct pool *prev, *iter;
493 
494 #ifdef MULTIPROCESSOR
495 	if (pp->pr_cache != NULL)
496 		pool_cache_destroy(pp);
497 #endif
498 
499 #ifdef DIAGNOSTIC
500 	if (pp->pr_nout != 0)
501 		panic("%s: pool busy: still out: %u", __func__, pp->pr_nout);
502 #endif
503 
504 	/* Remove from global pool list */
505 	rw_enter_write(&pool_lock);
506 	pool_count--;
507 	if (pp == SIMPLEQ_FIRST(&pool_head))
508 		SIMPLEQ_REMOVE_HEAD(&pool_head, pr_poollist);
509 	else {
510 		prev = SIMPLEQ_FIRST(&pool_head);
511 		SIMPLEQ_FOREACH(iter, &pool_head, pr_poollist) {
512 			if (iter == pp) {
513 				SIMPLEQ_REMOVE_AFTER(&pool_head, prev,
514 				    pr_poollist);
515 				break;
516 			}
517 			prev = iter;
518 		}
519 	}
520 	rw_exit_write(&pool_lock);
521 
522 	/* Remove all pages */
523 	while ((ph = TAILQ_FIRST(&pp->pr_emptypages)) != NULL) {
524 		pl_enter(pp, &pp->pr_lock);
525 		pool_p_remove(pp, ph);
526 		pl_leave(pp, &pp->pr_lock);
527 		pool_p_free(pp, ph);
528 	}
529 	KASSERT(TAILQ_EMPTY(&pp->pr_fullpages));
530 	KASSERT(TAILQ_EMPTY(&pp->pr_partpages));
531 }
532 
533 void
534 pool_request_init(struct pool_request *pr,
535     void (*handler)(struct pool *, void *, void *), void *cookie)
536 {
537 	pr->pr_handler = handler;
538 	pr->pr_cookie = cookie;
539 	pr->pr_item = NULL;
540 }
541 
542 void
543 pool_request(struct pool *pp, struct pool_request *pr)
544 {
545 	pl_enter(pp, &pp->pr_requests_lock);
546 	TAILQ_INSERT_TAIL(&pp->pr_requests, pr, pr_entry);
547 	pool_runqueue(pp, PR_NOWAIT);
548 	pl_leave(pp, &pp->pr_requests_lock);
549 }
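
/*
 * A sketch of the asynchronous request interface; the handler and
 * softc below are hypothetical.  The struct pool_request must stay
 * valid until the handler has run:
 *
 *	void
 *	example_done(struct pool *pp, void *cookie, void *item)
 *	{
 *		struct example_softc *sc = cookie;
 *
 *		... item now belongs to sc ...
 *	}
 *
 *	pool_request_init(&sc->sc_pr, example_done, sc);
 *	pool_request(pp, &sc->sc_pr);
 */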
550 
551 struct pool_get_memory {
552 	union pool_lock lock;
553 	void * volatile v;
554 };
555 
556 /*
557  * Grab an item from the pool.
558  */
559 void *
560 pool_get(struct pool *pp, int flags)
561 {
562 	void *v = NULL;
563 	int slowdown = 0;
564 
565 	KASSERT(flags & (PR_WAITOK | PR_NOWAIT));
566 	if (pp->pr_flags & PR_RWLOCK)
567 		KASSERT(flags & PR_WAITOK);
568 
569 #ifdef MULTIPROCESSOR
570 	if (pp->pr_cache != NULL) {
571 		v = pool_cache_get(pp);
572 		if (v != NULL)
573 			goto good;
574 	}
575 #endif
576 
577 	pl_enter(pp, &pp->pr_lock);
578 	if (pp->pr_nout >= pp->pr_hardlimit) {
579 		if (ISSET(flags, PR_NOWAIT|PR_LIMITFAIL))
580 			goto fail;
581 	} else if ((v = pool_do_get(pp, flags, &slowdown)) == NULL) {
582 		if (ISSET(flags, PR_NOWAIT))
583 			goto fail;
584 	}
585 	pl_leave(pp, &pp->pr_lock);
586 
587 	if ((slowdown || pool_debug == 2) && ISSET(flags, PR_WAITOK))
588 		yield();
589 
590 	if (v == NULL) {
591 		struct pool_get_memory mem = { .v = NULL };
592 		struct pool_request pr;
593 
594 #ifdef DIAGNOSTIC
595 		if (ISSET(flags, PR_WAITOK) && curproc == &proc0)
596 			panic("%s: cannot sleep for memory during boot",
597 			    __func__);
598 #endif
599 		pl_init(pp, &mem.lock);
600 		pool_request_init(&pr, pool_get_done, &mem);
601 		pool_request(pp, &pr);
602 
603 		pl_enter(pp, &mem.lock);
604 		while (mem.v == NULL)
605 			pl_sleep(pp, &mem, &mem.lock, PSWP, pp->pr_wchan);
606 		pl_leave(pp, &mem.lock);
607 
608 		v = mem.v;
609 	}
610 
611 #ifdef MULTIPROCESSOR
612 good:
613 #endif
614 	if (ISSET(flags, PR_ZERO))
615 		memset(v, 0, pp->pr_size);
616 
617 	return (v);
618 
619 fail:
620 	pp->pr_nfail++;
621 	pl_leave(pp, &pp->pr_lock);
622 	return (NULL);
623 }
624 
625 void
626 pool_get_done(struct pool *pp, void *xmem, void *v)
627 {
628 	struct pool_get_memory *mem = xmem;
629 
630 	pl_enter(pp, &mem->lock);
631 	mem->v = v;
632 	pl_leave(pp, &mem->lock);
633 
634 	wakeup_one(mem);
635 }
636 
637 void
638 pool_runqueue(struct pool *pp, int flags)
639 {
640 	struct pool_requests prl = TAILQ_HEAD_INITIALIZER(prl);
641 	struct pool_request *pr;
642 
643 	pl_assert_unlocked(pp, &pp->pr_lock);
644 	pl_assert_locked(pp, &pp->pr_requests_lock);
645 
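	/*
	 * pr_requesting doubles as a lock and a wakeup count: if it was
	 * already non-zero, another context is draining the queue, and
	 * the do/while loop below will notice the increment and run
	 * again before returning.
	 */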
646 	if (pp->pr_requesting++)
647 		return;
648 
649 	do {
650 		pp->pr_requesting = 1;
651 
652 		TAILQ_CONCAT(&prl, &pp->pr_requests, pr_entry);
653 		if (TAILQ_EMPTY(&prl))
654 			continue;
655 
656 		pl_leave(pp, &pp->pr_requests_lock);
657 
658 		pl_enter(pp, &pp->pr_lock);
659 		pr = TAILQ_FIRST(&prl);
660 		while (pr != NULL) {
661 			int slowdown = 0;
662 
663 			if (pp->pr_nout >= pp->pr_hardlimit)
664 				break;
665 
666 			pr->pr_item = pool_do_get(pp, flags, &slowdown);
667 			if (pr->pr_item == NULL) /* || slowdown ? */
668 				break;
669 
670 			pr = TAILQ_NEXT(pr, pr_entry);
671 		}
672 		pl_leave(pp, &pp->pr_lock);
673 
674 		while ((pr = TAILQ_FIRST(&prl)) != NULL &&
675 		    pr->pr_item != NULL) {
676 			TAILQ_REMOVE(&prl, pr, pr_entry);
677 			(*pr->pr_handler)(pp, pr->pr_cookie, pr->pr_item);
678 		}
679 
680 		pl_enter(pp, &pp->pr_requests_lock);
681 	} while (--pp->pr_requesting);
682 
683 	TAILQ_CONCAT(&pp->pr_requests, &prl, pr_entry);
684 }
685 
686 void *
687 pool_do_get(struct pool *pp, int flags, int *slowdown)
688 {
689 	struct pool_item *pi;
690 	struct pool_page_header *ph;
691 
692 	pl_assert_locked(pp, &pp->pr_lock);
693 
694 	splassert(pp->pr_ipl);
695 
696 	/*
697 	 * Account for this item now to avoid races if we need to give up
698 	 * pr_lock to allocate a page.
699 	 */
700 	pp->pr_nout++;
701 
702 	if (pp->pr_curpage == NULL) {
703 		pl_leave(pp, &pp->pr_lock);
704 		ph = pool_p_alloc(pp, flags, slowdown);
705 		pl_enter(pp, &pp->pr_lock);
706 
707 		if (ph == NULL) {
708 			pp->pr_nout--;
709 			return (NULL);
710 		}
711 
712 		pool_p_insert(pp, ph);
713 	}
714 
715 	ph = pp->pr_curpage;
716 	pi = XSIMPLEQ_FIRST(&ph->ph_items);
717 	if (__predict_false(pi == NULL))
718 		panic("%s: %s: page empty", __func__, pp->pr_wchan);
719 
720 	if (__predict_false(pi->pi_magic != POOL_IMAGIC(ph, pi))) {
721 		panic("%s: %s free list modified: "
722 		    "page %p; item addr %p; offset 0x%x=0x%lx != 0x%lx",
723 		    __func__, pp->pr_wchan, ph->ph_page, pi,
724 		    0, pi->pi_magic, POOL_IMAGIC(ph, pi));
725 	}
726 
727 	XSIMPLEQ_REMOVE_HEAD(&ph->ph_items, pi_list);
728 
729 #ifdef DIAGNOSTIC
730 	if (pool_debug && POOL_PHPOISON(ph)) {
731 		size_t pidx;
732 		uint32_t pval;
733 		if (poison_check(pi + 1, pp->pr_size - sizeof(*pi),
734 		    &pidx, &pval)) {
735 			int *ip = (int *)(pi + 1);
736 			panic("%s: %s free list modified: "
737 			    "page %p; item addr %p; offset 0x%zx=0x%x",
738 			    __func__, pp->pr_wchan, ph->ph_page, pi,
739 			    (pidx * sizeof(int)) + sizeof(*pi), ip[pidx]);
740 		}
741 	}
742 #endif /* DIAGNOSTIC */
743 
744 	if (ph->ph_nmissing++ == 0) {
745 		/*
746 		 * This page was previously empty.  Move it to the list of
747 		 * partially-full pages.  This page is already curpage.
748 		 */
749 		TAILQ_REMOVE(&pp->pr_emptypages, ph, ph_entry);
750 		TAILQ_INSERT_TAIL(&pp->pr_partpages, ph, ph_entry);
751 
752 		pp->pr_nidle--;
753 	}
754 
755 	if (ph->ph_nmissing == pp->pr_itemsperpage) {
756 		/*
757 		 * This page is now full.  Move it to the full list
758 		 * and select a new current page.
759 		 */
760 		TAILQ_REMOVE(&pp->pr_partpages, ph, ph_entry);
761 		TAILQ_INSERT_TAIL(&pp->pr_fullpages, ph, ph_entry);
762 		pool_update_curpage(pp);
763 	}
764 
765 	pp->pr_nget++;
766 
767 	return (pi);
768 }
769 
770 /*
771  * Return resource to the pool.
772  */
773 void
774 pool_put(struct pool *pp, void *v)
775 {
776 	struct pool_page_header *ph, *freeph = NULL;
777 
778 #ifdef DIAGNOSTIC
779 	if (v == NULL)
780 		panic("%s: NULL item", __func__);
781 #endif
782 
783 #ifdef MULTIPROCESSOR
784 	if (pp->pr_cache != NULL && TAILQ_EMPTY(&pp->pr_requests)) {
785 		pool_cache_put(pp, v);
786 		return;
787 	}
788 #endif
789 
790 	pl_enter(pp, &pp->pr_lock);
791 
792 	pool_do_put(pp, v);
793 
794 	pp->pr_nout--;
795 	pp->pr_nput++;
796 
797 	/* is it time to free a page? */
798 	if (pp->pr_nidle > pp->pr_maxpages &&
799 	    (ph = TAILQ_FIRST(&pp->pr_emptypages)) != NULL &&
800 	    (ticks - ph->ph_tick) > (hz * pool_wait_free)) {
801 		freeph = ph;
802 		pool_p_remove(pp, freeph);
803 	}
804 
805 	pl_leave(pp, &pp->pr_lock);
806 
807 	if (freeph != NULL)
808 		pool_p_free(pp, freeph);
809 
810 	pool_wakeup(pp);
811 }
812 
813 void
814 pool_wakeup(struct pool *pp)
815 {
816 	if (!TAILQ_EMPTY(&pp->pr_requests)) {
817 		pl_enter(pp, &pp->pr_requests_lock);
818 		pool_runqueue(pp, PR_NOWAIT);
819 		pl_leave(pp, &pp->pr_requests_lock);
820 	}
821 }
822 
823 void
824 pool_do_put(struct pool *pp, void *v)
825 {
826 	struct pool_item *pi = v;
827 	struct pool_page_header *ph;
828 
829 	splassert(pp->pr_ipl);
830 
831 	ph = pr_find_pagehead(pp, v);
832 
833 #ifdef DIAGNOSTIC
834 	if (pool_debug) {
835 		struct pool_item *qi;
836 		XSIMPLEQ_FOREACH(qi, &ph->ph_items, pi_list) {
837 			if (pi == qi) {
838 				panic("%s: %s: double pool_put: %p", __func__,
839 				    pp->pr_wchan, pi);
840 			}
841 		}
842 	}
843 #endif /* DIAGNOSTIC */
844 
845 	pi->pi_magic = POOL_IMAGIC(ph, pi);
846 	XSIMPLEQ_INSERT_HEAD(&ph->ph_items, pi, pi_list);
847 #ifdef DIAGNOSTIC
848 	if (POOL_PHPOISON(ph))
849 		poison_mem(pi + 1, pp->pr_size - sizeof(*pi));
850 #endif /* DIAGNOSTIC */
851 
852 	if (ph->ph_nmissing-- == pp->pr_itemsperpage) {
853 		/*
854 		 * The page was previously completely full, move it to the
855 		 * partially-full list.
856 		 */
857 		TAILQ_REMOVE(&pp->pr_fullpages, ph, ph_entry);
858 		TAILQ_INSERT_TAIL(&pp->pr_partpages, ph, ph_entry);
859 	}
860 
861 	if (ph->ph_nmissing == 0) {
862 		/*
863 		 * The page is now empty, so move it to the empty page list.
864 		 */
865 		pp->pr_nidle++;
866 
867 		ph->ph_tick = ticks;
868 		TAILQ_REMOVE(&pp->pr_partpages, ph, ph_entry);
869 		TAILQ_INSERT_TAIL(&pp->pr_emptypages, ph, ph_entry);
870 		pool_update_curpage(pp);
871 	}
872 }
873 
874 /*
875  * Add N items to the pool.
876  */
877 int
878 pool_prime(struct pool *pp, int n)
879 {
880 	struct pool_pagelist pl = TAILQ_HEAD_INITIALIZER(pl);
881 	struct pool_page_header *ph;
882 	int newpages;
883 
884 	newpages = roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
885 
886 	while (newpages-- > 0) {
887 		int slowdown = 0;
888 
889 		ph = pool_p_alloc(pp, PR_NOWAIT, &slowdown);
890 		if (ph == NULL) /* or slowdown? */
891 			break;
892 
893 		TAILQ_INSERT_TAIL(&pl, ph, ph_entry);
894 	}
895 
896 	pl_enter(pp, &pp->pr_lock);
897 	while ((ph = TAILQ_FIRST(&pl)) != NULL) {
898 		TAILQ_REMOVE(&pl, ph, ph_entry);
899 		pool_p_insert(pp, ph);
900 	}
901 	pl_leave(pp, &pp->pr_lock);
902 
903 	return (0);
904 }
905 
906 struct pool_page_header *
907 pool_p_alloc(struct pool *pp, int flags, int *slowdown)
908 {
909 	struct pool_page_header *ph;
910 	struct pool_item *pi;
911 	caddr_t addr;
912 	unsigned int order;
913 	int o;
914 	int n;
915 
916 	pl_assert_unlocked(pp, &pp->pr_lock);
917 	KASSERT(pp->pr_size >= sizeof(*pi));
918 
919 	addr = pool_allocator_alloc(pp, flags, slowdown);
920 	if (addr == NULL)
921 		return (NULL);
922 
923 	if (POOL_INPGHDR(pp))
924 		ph = (struct pool_page_header *)(addr + pp->pr_phoffset);
925 	else {
926 		ph = pool_get(&phpool, flags);
927 		if (ph == NULL) {
928 			pool_allocator_free(pp, addr);
929 			return (NULL);
930 		}
931 	}
932 
933 	XSIMPLEQ_INIT(&ph->ph_items);
934 	ph->ph_page = addr;
935 	addr += pp->pr_align * (pp->pr_npagealloc % pp->pr_maxcolors);
936 	ph->ph_colored = addr;
937 	ph->ph_nmissing = 0;
938 	arc4random_buf(&ph->ph_magic, sizeof(ph->ph_magic));
939 #ifdef DIAGNOSTIC
940 	/* use a bit in ph_magic to record if we poison page items */
941 	if (pool_debug)
942 		SET(ph->ph_magic, POOL_MAGICBIT);
943 	else
944 		CLR(ph->ph_magic, POOL_MAGICBIT);
945 #endif /* DIAGNOSTIC */
946 
947 	n = pp->pr_itemsperpage;
948 	o = 32;
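	/*
	 * Scatter the free list: one arc4random() bit per item decides
	 * whether it is queued at the head or the tail, so items leave
	 * the page in an unpredictable order.
	 */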
949 	while (n--) {
950 		pi = (struct pool_item *)addr;
951 		pi->pi_magic = POOL_IMAGIC(ph, pi);
952 
953 		if (o == 32) {
954 			order = arc4random();
955 			o = 0;
956 		}
957 		if (ISSET(order, 1 << o++))
958 			XSIMPLEQ_INSERT_TAIL(&ph->ph_items, pi, pi_list);
959 		else
960 			XSIMPLEQ_INSERT_HEAD(&ph->ph_items, pi, pi_list);
961 
962 #ifdef DIAGNOSTIC
963 		if (POOL_PHPOISON(ph))
964 			poison_mem(pi + 1, pp->pr_size - sizeof(*pi));
965 #endif /* DIAGNOSTIC */
966 
967 		addr += pp->pr_size;
968 	}
969 
970 	return (ph);
971 }
972 
973 void
974 pool_p_free(struct pool *pp, struct pool_page_header *ph)
975 {
976 	struct pool_item *pi;
977 
978 	pl_assert_unlocked(pp, &pp->pr_lock);
979 	KASSERT(ph->ph_nmissing == 0);
980 
981 	XSIMPLEQ_FOREACH(pi, &ph->ph_items, pi_list) {
982 		if (__predict_false(pi->pi_magic != POOL_IMAGIC(ph, pi))) {
983 			panic("%s: %s free list modified: "
984 			    "page %p; item addr %p; offset 0x%x=0x%lx",
985 			    __func__, pp->pr_wchan, ph->ph_page, pi,
986 			    0, pi->pi_magic);
987 		}
988 
989 #ifdef DIAGNOSTIC
990 		if (POOL_PHPOISON(ph)) {
991 			size_t pidx;
992 			uint32_t pval;
993 			if (poison_check(pi + 1, pp->pr_size - sizeof(*pi),
994 			    &pidx, &pval)) {
995 				int *ip = (int *)(pi + 1);
996 				panic("%s: %s free list modified: "
997 				    "page %p; item addr %p; offset 0x%zx=0x%x",
998 				    __func__, pp->pr_wchan, ph->ph_page, pi,
999 				    pidx * sizeof(int), ip[pidx]);
1000 			}
1001 		}
1002 #endif
1003 	}
1004 
1005 	pool_allocator_free(pp, ph->ph_page);
1006 
1007 	if (!POOL_INPGHDR(pp))
1008 		pool_put(&phpool, ph);
1009 }
1010 
1011 void
1012 pool_p_insert(struct pool *pp, struct pool_page_header *ph)
1013 {
1014 	pl_assert_locked(pp, &pp->pr_lock);
1015 
1016 	/* If the pool was depleted, point at the new page */
1017 	if (pp->pr_curpage == NULL)
1018 		pp->pr_curpage = ph;
1019 
1020 	TAILQ_INSERT_TAIL(&pp->pr_emptypages, ph, ph_entry);
1021 	if (!POOL_INPGHDR(pp))
1022 		RBT_INSERT(phtree, &pp->pr_phtree, ph);
1023 
1024 	pp->pr_nitems += pp->pr_itemsperpage;
1025 	pp->pr_nidle++;
1026 
1027 	pp->pr_npagealloc++;
1028 	if (++pp->pr_npages > pp->pr_hiwat)
1029 		pp->pr_hiwat = pp->pr_npages;
1030 }
1031 
1032 void
1033 pool_p_remove(struct pool *pp, struct pool_page_header *ph)
1034 {
1035 	pl_assert_locked(pp, &pp->pr_lock);
1036 
1037 	pp->pr_npagefree++;
1038 	pp->pr_npages--;
1039 	pp->pr_nidle--;
1040 	pp->pr_nitems -= pp->pr_itemsperpage;
1041 
1042 	if (!POOL_INPGHDR(pp))
1043 		RBT_REMOVE(phtree, &pp->pr_phtree, ph);
1044 	TAILQ_REMOVE(&pp->pr_emptypages, ph, ph_entry);
1045 
1046 	pool_update_curpage(pp);
1047 }
1048 
1049 void
1050 pool_update_curpage(struct pool *pp)
1051 {
1052 	pp->pr_curpage = TAILQ_LAST(&pp->pr_partpages, pool_pagelist);
1053 	if (pp->pr_curpage == NULL) {
1054 		pp->pr_curpage = TAILQ_LAST(&pp->pr_emptypages, pool_pagelist);
1055 	}
1056 }
1057 
1058 void
1059 pool_setlowat(struct pool *pp, int n)
1060 {
1061 	int prime = 0;
1062 
1063 	pl_enter(pp, &pp->pr_lock);
1064 	pp->pr_minitems = n;
1065 	pp->pr_minpages = (n == 0)
1066 		? 0
1067 		: roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1068 
1069 	if (pp->pr_nitems < n)
1070 		prime = n - pp->pr_nitems;
1071 	pl_leave(pp, &pp->pr_lock);
1072 
1073 	if (prime > 0)
1074 		pool_prime(pp, prime);
1075 }
1076 
1077 void
1078 pool_sethiwat(struct pool *pp, int n)
1079 {
1080 	pp->pr_maxpages = (n == 0)
1081 		? 0
1082 		: roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1083 }
1084 
1085 int
1086 pool_sethardlimit(struct pool *pp, u_int n, const char *warnmsg, int ratecap)
1087 {
1088 	int error = 0;
1089 
1090 	if (n < pp->pr_nout) {
1091 		error = EINVAL;
1092 		goto done;
1093 	}
1094 
1095 	pp->pr_hardlimit = n;
1096 	pp->pr_hardlimit_warning = warnmsg;
1097 	pp->pr_hardlimit_ratecap.tv_sec = ratecap;
1098 	pp->pr_hardlimit_warning_last.tv_sec = 0;
1099 	pp->pr_hardlimit_warning_last.tv_usec = 0;
1100 
1101 done:
1102 	return (error);
1103 }
1104 
1105 void
1106 pool_set_constraints(struct pool *pp, const struct kmem_pa_mode *mode)
1107 {
1108 	pp->pr_crange = mode;
1109 }
1110 
1111 /*
1112  * Release all completely empty pages that have not been used recently.
1113  *
1114  * Returns non-zero if any pages have been reclaimed.
1115  */
1116 int
1117 pool_reclaim(struct pool *pp)
1118 {
1119 	struct pool_page_header *ph, *phnext;
1120 	struct pool_pagelist pl = TAILQ_HEAD_INITIALIZER(pl);
1121 
1122 	pl_enter(pp, &pp->pr_lock);
1123 	for (ph = TAILQ_FIRST(&pp->pr_emptypages); ph != NULL; ph = phnext) {
1124 		phnext = TAILQ_NEXT(ph, ph_entry);
1125 
1126 		/* Check our minimum page claim */
1127 		if (pp->pr_npages <= pp->pr_minpages)
1128 			break;
1129 
1130 		/*
1131 		 * If freeing this page would put us below
1132 		 * the low water mark, stop now.
1133 		 */
1134 		if ((pp->pr_nitems - pp->pr_itemsperpage) <
1135 		    pp->pr_minitems)
1136 			break;
1137 
1138 		pool_p_remove(pp, ph);
1139 		TAILQ_INSERT_TAIL(&pl, ph, ph_entry);
1140 	}
1141 	pl_leave(pp, &pp->pr_lock);
1142 
1143 	if (TAILQ_EMPTY(&pl))
1144 		return (0);
1145 
1146 	while ((ph = TAILQ_FIRST(&pl)) != NULL) {
1147 		TAILQ_REMOVE(&pl, ph, ph_entry);
1148 		pool_p_free(pp, ph);
1149 	}
1150 
1151 	return (1);
1152 }
1153 
1154 /*
1155  * Release all complete pages that have not been used recently
1156  * from all pools.
1157  */
1158 void
1159 pool_reclaim_all(void)
1160 {
1161 	struct pool	*pp;
1162 
1163 	rw_enter_read(&pool_lock);
1164 	SIMPLEQ_FOREACH(pp, &pool_head, pr_poollist)
1165 		pool_reclaim(pp);
1166 	rw_exit_read(&pool_lock);
1167 }
1168 
1169 #ifdef DDB
1170 #include <machine/db_machdep.h>
1171 #include <ddb/db_output.h>
1172 
1173 /*
1174  * Diagnostic helpers.
1175  */
1176 void
1177 pool_printit(struct pool *pp, const char *modif,
1178     int (*pr)(const char *, ...) __attribute__((__format__(__kprintf__,1,2))))
1179 {
1180 	pool_print1(pp, modif, pr);
1181 }
1182 
1183 void
1184 pool_print_pagelist(struct pool_pagelist *pl,
1185     int (*pr)(const char *, ...) __attribute__((__format__(__kprintf__,1,2))))
1186 {
1187 	struct pool_page_header *ph;
1188 	struct pool_item *pi;
1189 
1190 	TAILQ_FOREACH(ph, pl, ph_entry) {
1191 		(*pr)("\t\tpage %p, color %p, nmissing %d\n",
1192 		    ph->ph_page, ph->ph_colored, ph->ph_nmissing);
1193 		XSIMPLEQ_FOREACH(pi, &ph->ph_items, pi_list) {
1194 			if (pi->pi_magic != POOL_IMAGIC(ph, pi)) {
1195 				(*pr)("\t\t\titem %p, magic 0x%lx\n",
1196 				    pi, pi->pi_magic);
1197 			}
1198 		}
1199 	}
1200 }
1201 
1202 void
1203 pool_print1(struct pool *pp, const char *modif,
1204     int (*pr)(const char *, ...) __attribute__((__format__(__kprintf__,1,2))))
1205 {
1206 	struct pool_page_header *ph;
1207 	int print_pagelist = 0;
1208 	char c;
1209 
1210 	while ((c = *modif++) != '\0') {
1211 		if (c == 'p')
1212 			print_pagelist = 1;
1214 	}
1215 
1216 	(*pr)("POOL %s: size %u maxcolors %u\n", pp->pr_wchan, pp->pr_size,
1217 	    pp->pr_maxcolors);
1218 	(*pr)("\talloc %p\n", pp->pr_alloc);
1219 	(*pr)("\tminitems %u, minpages %u, maxpages %u, npages %u\n",
1220 	    pp->pr_minitems, pp->pr_minpages, pp->pr_maxpages, pp->pr_npages);
1221 	(*pr)("\titemsperpage %u, nitems %u, nout %u, hardlimit %u\n",
1222 	    pp->pr_itemsperpage, pp->pr_nitems, pp->pr_nout, pp->pr_hardlimit);
1223 
1224 	(*pr)("\n\tnget %lu, nfail %lu, nput %lu\n",
1225 	    pp->pr_nget, pp->pr_nfail, pp->pr_nput);
1226 	(*pr)("\tnpagealloc %lu, npagefree %lu, hiwat %u, nidle %lu\n",
1227 	    pp->pr_npagealloc, pp->pr_npagefree, pp->pr_hiwat, pp->pr_nidle);
1228 
1229 	if (print_pagelist == 0)
1230 		return;
1231 
1232 	if ((ph = TAILQ_FIRST(&pp->pr_emptypages)) != NULL)
1233 		(*pr)("\n\tempty page list:\n");
1234 	pool_print_pagelist(&pp->pr_emptypages, pr);
1235 	if ((ph = TAILQ_FIRST(&pp->pr_fullpages)) != NULL)
1236 		(*pr)("\n\tfull page list:\n");
1237 	pool_print_pagelist(&pp->pr_fullpages, pr);
1238 	if ((ph = TAILQ_FIRST(&pp->pr_partpages)) != NULL)
1239 		(*pr)("\n\tpartial-page list:\n");
1240 	pool_print_pagelist(&pp->pr_partpages, pr);
1241 
1242 	if (pp->pr_curpage == NULL)
1243 		(*pr)("\tno current page\n");
1244 	else
1245 		(*pr)("\tcurpage %p\n", pp->pr_curpage->ph_page);
1246 }
1247 
1248 void
1249 db_show_all_pools(db_expr_t expr, int haddr, db_expr_t count, char *modif)
1250 {
1251 	struct pool *pp;
1252 	char maxp[16];
1253 	int ovflw;
1254 	char mode;
1255 
1256 	mode = modif[0];
1257 	if (mode != '\0' && mode != 'a') {
1258 		db_printf("usage: show all pools [/a]\n");
1259 		return;
1260 	}
1261 
1262 	if (mode == '\0')
1263 		db_printf("%-10s%4s%9s%5s%9s%6s%6s%6s%6s%6s%6s%5s\n",
1264 		    "Name",
1265 		    "Size",
1266 		    "Requests",
1267 		    "Fail",
1268 		    "Releases",
1269 		    "Pgreq",
1270 		    "Pgrel",
1271 		    "Npage",
1272 		    "Hiwat",
1273 		    "Minpg",
1274 		    "Maxpg",
1275 		    "Idle");
1276 	else
1277 		db_printf("%-12s %18s %18s\n",
1278 		    "Name", "Address", "Allocator");
1279 
1280 	SIMPLEQ_FOREACH(pp, &pool_head, pr_poollist) {
1281 		if (mode == 'a') {
1282 			db_printf("%-12s %18p %18p\n", pp->pr_wchan, pp,
1283 			    pp->pr_alloc);
1284 			continue;
1285 		}
1286 
1287 		if (!pp->pr_nget)
1288 			continue;
1289 
1290 		if (pp->pr_maxpages == UINT_MAX)
1291 			snprintf(maxp, sizeof maxp, "inf");
1292 		else
1293 			snprintf(maxp, sizeof maxp, "%u", pp->pr_maxpages);
1294 
1295 #define PRWORD(ovflw, fmt, width, fixed, val) do {	\
1296 	(ovflw) += db_printf((fmt),			\
1297 	    (width) - (fixed) - (ovflw) > 0 ?		\
1298 	    (width) - (fixed) - (ovflw) : 0,		\
1299 	    (val)) - (width);				\
1300 	if ((ovflw) < 0)				\
1301 		(ovflw) = 0;				\
1302 } while (/* CONSTCOND */0)
1303 
1304 		ovflw = 0;
1305 		PRWORD(ovflw, "%-*s", 10, 0, pp->pr_wchan);
1306 		PRWORD(ovflw, " %*u", 4, 1, pp->pr_size);
1307 		PRWORD(ovflw, " %*lu", 9, 1, pp->pr_nget);
1308 		PRWORD(ovflw, " %*lu", 5, 1, pp->pr_nfail);
1309 		PRWORD(ovflw, " %*lu", 9, 1, pp->pr_nput);
1310 		PRWORD(ovflw, " %*lu", 6, 1, pp->pr_npagealloc);
1311 		PRWORD(ovflw, " %*lu", 6, 1, pp->pr_npagefree);
1312 		PRWORD(ovflw, " %*d", 6, 1, pp->pr_npages);
1313 		PRWORD(ovflw, " %*d", 6, 1, pp->pr_hiwat);
1314 		PRWORD(ovflw, " %*d", 6, 1, pp->pr_minpages);
1315 		PRWORD(ovflw, " %*s", 6, 1, maxp);
1316 		PRWORD(ovflw, " %*lu\n", 5, 1, pp->pr_nidle);
1317 
1318 		pool_chk(pp);
1319 	}
1320 }
1321 #endif /* DDB */
1322 
1323 #if defined(POOL_DEBUG) || defined(DDB)
1324 int
1325 pool_chk_page(struct pool *pp, struct pool_page_header *ph, int expected)
1326 {
1327 	struct pool_item *pi;
1328 	caddr_t page;
1329 	int n;
1330 	const char *label = pp->pr_wchan;
1331 
1332 	page = (caddr_t)((u_long)ph & pp->pr_pgmask);
1333 	if (page != ph->ph_page && POOL_INPGHDR(pp)) {
1334 		printf("%s: ", label);
1335 		printf("pool(%p:%s): page inconsistency: page %p; "
1336 		    "at page head addr %p (p %p)\n",
1337 		    pp, pp->pr_wchan, ph->ph_page, ph, page);
1338 		return 1;
1339 	}
1340 
1341 	for (pi = XSIMPLEQ_FIRST(&ph->ph_items), n = 0;
1342 	     pi != NULL;
1343 	     pi = XSIMPLEQ_NEXT(&ph->ph_items, pi, pi_list), n++) {
1344 		if ((caddr_t)pi < ph->ph_page ||
1345 		    (caddr_t)pi >= ph->ph_page + pp->pr_pgsize) {
1346 			printf("%s: ", label);
1347 			printf("pool(%p:%s): page inconsistency: page %p;"
1348 			    " item ordinal %d; addr %p\n", pp,
1349 			    pp->pr_wchan, ph->ph_page, n, pi);
1350 			return (1);
1351 		}
1352 
1353 		if (pi->pi_magic != POOL_IMAGIC(ph, pi)) {
1354 			printf("%s: ", label);
1355 			printf("pool(%p:%s): free list modified: "
1356 			    "page %p; item ordinal %d; addr %p "
1357 			    "(p %p); offset 0x%x=0x%lx\n",
1358 			    pp, pp->pr_wchan, ph->ph_page, n, pi, page,
1359 			    0, pi->pi_magic);
1360 		}
1361 
1362 #ifdef DIAGNOSTIC
1363 		if (POOL_PHPOISON(ph)) {
1364 			size_t pidx;
1365 			uint32_t pval;
1366 			if (poison_check(pi + 1, pp->pr_size - sizeof(*pi),
1367 			    &pidx, &pval)) {
1368 				int *ip = (int *)(pi + 1);
1369 				printf("pool(%s): free list modified: "
1370 				    "page %p; item ordinal %d; addr %p "
1371 				    "(p %p); offset 0x%zx=0x%x\n",
1372 				    pp->pr_wchan, ph->ph_page, n, pi,
1373 				    page, pidx * sizeof(int), ip[pidx]);
1374 			}
1375 		}
1376 #endif /* DIAGNOSTIC */
1377 	}
1378 	if (n + ph->ph_nmissing != pp->pr_itemsperpage) {
1379 		printf("pool(%p:%s): page inconsistency: page %p;"
1380 		    " %d on list, %d missing, %d items per page\n", pp,
1381 		    pp->pr_wchan, ph->ph_page, n, ph->ph_nmissing,
1382 		    pp->pr_itemsperpage);
1383 		return 1;
1384 	}
1385 	if (expected >= 0 && n != expected) {
1386 		printf("pool(%p:%s): page inconsistency: page %p;"
1387 		    " %d on list, %d missing, %d expected\n", pp,
1388 		    pp->pr_wchan, ph->ph_page, n, ph->ph_nmissing,
1389 		    expected);
1390 		return 1;
1391 	}
1392 	return 0;
1393 }
1394 
1395 int
1396 pool_chk(struct pool *pp)
1397 {
1398 	struct pool_page_header *ph;
1399 	int r = 0;
1400 
1401 	TAILQ_FOREACH(ph, &pp->pr_emptypages, ph_entry)
1402 		r += pool_chk_page(pp, ph, pp->pr_itemsperpage);
1403 	TAILQ_FOREACH(ph, &pp->pr_fullpages, ph_entry)
1404 		r += pool_chk_page(pp, ph, 0);
1405 	TAILQ_FOREACH(ph, &pp->pr_partpages, ph_entry)
1406 		r += pool_chk_page(pp, ph, -1);
1407 
1408 	return (r);
1409 }
1410 #endif /* defined(POOL_DEBUG) || defined(DDB) */
1411 
1412 #ifdef DDB
1413 void
1414 pool_walk(struct pool *pp, int full,
1415     int (*pr)(const char *, ...) __attribute__((__format__(__kprintf__,1,2))),
1416     void (*func)(void *, int, int (*)(const char *, ...)
1417 	    __attribute__((__format__(__kprintf__,1,2)))))
1418 {
1419 	struct pool_page_header *ph;
1420 	struct pool_item *pi;
1421 	caddr_t cp;
1422 	int n;
1423 
1424 	TAILQ_FOREACH(ph, &pp->pr_fullpages, ph_entry) {
1425 		cp = ph->ph_colored;
1426 		n = ph->ph_nmissing;
1427 
1428 		while (n--) {
1429 			func(cp, full, pr);
1430 			cp += pp->pr_size;
1431 		}
1432 	}
1433 
1434 	TAILQ_FOREACH(ph, &pp->pr_partpages, ph_entry) {
1435 		cp = ph->ph_colored;
1436 		n = ph->ph_nmissing;
1437 
1438 		do {
1439 			XSIMPLEQ_FOREACH(pi, &ph->ph_items, pi_list) {
1440 				if (cp == (caddr_t)pi)
1441 					break;
1442 			}
1443 			if (cp != (caddr_t)pi) {
1444 				func(cp, full, pr);
1445 				n--;
1446 			}
1447 
1448 			cp += pp->pr_size;
1449 		} while (n > 0);
1450 	}
1451 }
1452 #endif
1453 
1454 /*
1455  * We have three different sysctls.
1456  * kern.pool.npools - the number of pools.
1457  * kern.pool.pool.<pool#> - the pool struct for the pool#.
1458  * kern.pool.name.<pool#> - the name for pool#.
1459  */
1460 int
1461 sysctl_dopool(int *name, u_int namelen, char *oldp, size_t *oldlenp)
1462 {
1463 	struct kinfo_pool pi;
1464 	struct pool *pp;
1465 	int rv = ENOENT;
1466 
1467 	switch (name[0]) {
1468 	case KERN_POOL_NPOOLS:
1469 		if (namelen != 1)
1470 			return (ENOTDIR);
1471 		return (sysctl_rdint(oldp, oldlenp, NULL, pool_count));
1472 
1473 	case KERN_POOL_NAME:
1474 	case KERN_POOL_POOL:
1475 	case KERN_POOL_CACHE:
1476 	case KERN_POOL_CACHE_CPUS:
1477 		break;
1478 	default:
1479 		return (EOPNOTSUPP);
1480 	}
1481 
1482 	if (namelen != 2)
1483 		return (ENOTDIR);
1484 
1485 	rw_enter_read(&pool_lock);
1486 
1487 	SIMPLEQ_FOREACH(pp, &pool_head, pr_poollist) {
1488 		if (name[1] == pp->pr_serial)
1489 			break;
1490 	}
1491 
1492 	if (pp == NULL)
1493 		goto done;
1494 
1495 	switch (name[0]) {
1496 	case KERN_POOL_NAME:
1497 		rv = sysctl_rdstring(oldp, oldlenp, NULL, pp->pr_wchan);
1498 		break;
1499 	case KERN_POOL_POOL:
1500 		memset(&pi, 0, sizeof(pi));
1501 
1502 		pl_enter(pp, &pp->pr_lock);
1503 		pi.pr_size = pp->pr_size;
1504 		pi.pr_pgsize = pp->pr_pgsize;
1505 		pi.pr_itemsperpage = pp->pr_itemsperpage;
1506 		pi.pr_npages = pp->pr_npages;
1507 		pi.pr_minpages = pp->pr_minpages;
1508 		pi.pr_maxpages = pp->pr_maxpages;
1509 		pi.pr_hardlimit = pp->pr_hardlimit;
1510 		pi.pr_nout = pp->pr_nout;
1511 		pi.pr_nitems = pp->pr_nitems;
1512 		pi.pr_nget = pp->pr_nget;
1513 		pi.pr_nput = pp->pr_nput;
1514 		pi.pr_nfail = pp->pr_nfail;
1515 		pi.pr_npagealloc = pp->pr_npagealloc;
1516 		pi.pr_npagefree = pp->pr_npagefree;
1517 		pi.pr_hiwat = pp->pr_hiwat;
1518 		pi.pr_nidle = pp->pr_nidle;
1519 		pl_leave(pp, &pp->pr_lock);
1520 
1521 		pool_cache_pool_info(pp, &pi);
1522 
1523 		rv = sysctl_rdstruct(oldp, oldlenp, NULL, &pi, sizeof(pi));
1524 		break;
1525 
1526 	case KERN_POOL_CACHE:
1527 		rv = pool_cache_info(pp, oldp, oldlenp);
1528 		break;
1529 
1530 	case KERN_POOL_CACHE_CPUS:
1531 		rv = pool_cache_cpus_info(pp, oldp, oldlenp);
1532 		break;
1533 	}
1534 
1535 done:
1536 	rw_exit_read(&pool_lock);
1537 
1538 	return (rv);
1539 }
1540 
1541 void
1542 pool_gc_sched(void *null)
1543 {
1544 	task_add(systqmp, &pool_gc_task);
1545 }
1546 
1547 void
1548 pool_gc_pages(void *null)
1549 {
1550 	struct pool *pp;
1551 	struct pool_page_header *ph, *freeph;
1552 	int s;
1553 
1554 	rw_enter_read(&pool_lock);
1555 	s = splvm(); /* XXX go to splvm until all pools _setipl properly */
1556 	SIMPLEQ_FOREACH(pp, &pool_head, pr_poollist) {
1557 #ifdef MULTIPROCESSOR
1558 		if (pp->pr_cache != NULL)
1559 			pool_cache_gc(pp);
1560 #endif
1561 
1562 		if (pp->pr_nidle <= pp->pr_minpages || /* guess */
1563 		    !pl_enter_try(pp, &pp->pr_lock)) /* try */
1564 			continue;
1565 
1566 		/* is it time to free a page? */
1567 		if (pp->pr_nidle > pp->pr_minpages &&
1568 		    (ph = TAILQ_FIRST(&pp->pr_emptypages)) != NULL &&
1569 		    (ticks - ph->ph_tick) > (hz * pool_wait_gc)) {
1570 			freeph = ph;
1571 			pool_p_remove(pp, freeph);
1572 		} else
1573 			freeph = NULL;
1574 
1575 		pl_leave(pp, &pp->pr_lock);
1576 
1577 		if (freeph != NULL)
1578 			pool_p_free(pp, freeph);
1579 	}
1580 	splx(s);
1581 	rw_exit_read(&pool_lock);
1582 
1583 	timeout_add_sec(&pool_gc_tick, 1);
1584 }
1585 
1586 /*
1587  * Pool backend allocators.
1588  */
1589 
1590 void *
1591 pool_allocator_alloc(struct pool *pp, int flags, int *slowdown)
1592 {
1593 	void *v;
1594 
1595 	v = (*pp->pr_alloc->pa_alloc)(pp, flags, slowdown);
1596 
1597 #ifdef DIAGNOSTIC
1598 	if (v != NULL && POOL_INPGHDR(pp)) {
1599 		vaddr_t addr = (vaddr_t)v;
1600 		if ((addr & pp->pr_pgmask) != addr) {
1601 			panic("%s: %s page address %p isn't aligned to %u",
1602 			    __func__, pp->pr_wchan, v, pp->pr_pgsize);
1603 		}
1604 	}
1605 #endif
1606 
1607 	return (v);
1608 }
1609 
1610 void
1611 pool_allocator_free(struct pool *pp, void *v)
1612 {
1613 	struct pool_allocator *pa = pp->pr_alloc;
1614 
1615 	(*pa->pa_free)(pp, v);
1616 }
1617 
1618 void *
1619 pool_page_alloc(struct pool *pp, int flags, int *slowdown)
1620 {
1621 	struct kmem_dyn_mode kd = KMEM_DYN_INITIALIZER;
1622 
1623 	kd.kd_waitok = ISSET(flags, PR_WAITOK);
1624 	kd.kd_slowdown = slowdown;
1625 
1626 	return (km_alloc(pp->pr_pgsize, &kv_page, pp->pr_crange, &kd));
1627 }
1628 
1629 void
1630 pool_page_free(struct pool *pp, void *v)
1631 {
1632 	km_free(v, pp->pr_pgsize, &kv_page, pp->pr_crange);
1633 }
1634 
1635 void *
1636 pool_multi_alloc(struct pool *pp, int flags, int *slowdown)
1637 {
1638 	struct kmem_va_mode kv = kv_intrsafe;
1639 	struct kmem_dyn_mode kd = KMEM_DYN_INITIALIZER;
1640 	void *v;
1641 	int s;
1642 
1643 	if (POOL_INPGHDR(pp))
1644 		kv.kv_align = pp->pr_pgsize;
1645 
1646 	kd.kd_waitok = ISSET(flags, PR_WAITOK);
1647 	kd.kd_slowdown = slowdown;
1648 
1649 	s = splvm();
1650 	v = km_alloc(pp->pr_pgsize, &kv, pp->pr_crange, &kd);
1651 	splx(s);
1652 
1653 	return (v);
1654 }
1655 
1656 void
1657 pool_multi_free(struct pool *pp, void *v)
1658 {
1659 	struct kmem_va_mode kv = kv_intrsafe;
1660 	int s;
1661 
1662 	if (POOL_INPGHDR(pp))
1663 		kv.kv_align = pp->pr_pgsize;
1664 
1665 	s = splvm();
1666 	km_free(v, pp->pr_pgsize, &kv, pp->pr_crange);
1667 	splx(s);
1668 }
1669 
1670 void *
1671 pool_multi_alloc_ni(struct pool *pp, int flags, int *slowdown)
1672 {
1673 	struct kmem_va_mode kv = kv_any;
1674 	struct kmem_dyn_mode kd = KMEM_DYN_INITIALIZER;
1675 	void *v;
1676 
1677 	if (POOL_INPGHDR(pp))
1678 		kv.kv_align = pp->pr_pgsize;
1679 
1680 	kd.kd_waitok = ISSET(flags, PR_WAITOK);
1681 	kd.kd_slowdown = slowdown;
1682 
1683 	KERNEL_LOCK();
1684 	v = km_alloc(pp->pr_pgsize, &kv, pp->pr_crange, &kd);
1685 	KERNEL_UNLOCK();
1686 
1687 	return (v);
1688 }
1689 
1690 void
1691 pool_multi_free_ni(struct pool *pp, void *v)
1692 {
1693 	struct kmem_va_mode kv = kv_any;
1694 
1695 	if (POOL_INPGHDR(pp))
1696 		kv.kv_align = pp->pr_pgsize;
1697 
1698 	KERNEL_LOCK();
1699 	km_free(v, pp->pr_pgsize, &kv, pp->pr_crange);
1700 	KERNEL_UNLOCK();
1701 }
1702 
1703 #ifdef MULTIPROCESSOR
1704 
1705 struct pool pool_caches; /* per cpu cache entries */
1706 
1707 void
1708 pool_cache_init(struct pool *pp)
1709 {
1710 	struct cpumem *cm;
1711 	struct pool_cache *pc;
1712 	struct cpumem_iter i;
1713 
1714 	if (pool_caches.pr_size == 0) {
1715 		pool_init(&pool_caches, sizeof(struct pool_cache),
1716 		    CACHELINESIZE, IPL_NONE, PR_WAITOK | PR_RWLOCK,
1717 		    "plcache", NULL);
1718 	}
1719 
1720 	/* must be able to use the pool items as cache list items */
1721 	KASSERT(pp->pr_size >= sizeof(struct pool_cache_item));
1722 
1723 	cm = cpumem_get(&pool_caches);
1724 
1725 	pl_init(pp, &pp->pr_cache_lock);
1726 	arc4random_buf(pp->pr_cache_magic, sizeof(pp->pr_cache_magic));
1727 	TAILQ_INIT(&pp->pr_cache_lists);
1728 	pp->pr_cache_nitems = 0;
1729 	pp->pr_cache_tick = ticks;
1730 	pp->pr_cache_items = 8;
1731 	pp->pr_cache_contention = 0;
1732 	pp->pr_cache_ngc = 0;
1733 
1734 	CPUMEM_FOREACH(pc, &i, cm) {
1735 		pc->pc_actv = NULL;
1736 		pc->pc_nactv = 0;
1737 		pc->pc_prev = NULL;
1738 
1739 		pc->pc_nget = 0;
1740 		pc->pc_nfail = 0;
1741 		pc->pc_nput = 0;
1742 		pc->pc_nlget = 0;
1743 		pc->pc_nlfail = 0;
1744 		pc->pc_nlput = 0;
1745 		pc->pc_nout = 0;
1746 	}
1747 
1748 	membar_producer();
1749 
1750 	pp->pr_cache = cm;
1751 }
1752 
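/*
 * While an item sits on a per-cpu free list its TAILQ entry (ci_nextl)
 * is unused, so those two words are recycled as a checksum: each holds
 * a per-pool random pr_cache_magic word XORed with the item and
 * next-item addresses, letting pool_cache_item_magic_check() detect
 * writes to free items.
 */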
1753 static inline void
1754 pool_cache_item_magic(struct pool *pp, struct pool_cache_item *ci)
1755 {
1756 	unsigned long *entry = (unsigned long *)&ci->ci_nextl;
1757 
1758 	entry[0] = pp->pr_cache_magic[0] ^ (u_long)ci;
1759 	entry[1] = pp->pr_cache_magic[1] ^ (u_long)ci->ci_next;
1760 }
1761 
1762 static inline void
1763 pool_cache_item_magic_check(struct pool *pp, struct pool_cache_item *ci)
1764 {
1765 	unsigned long *entry;
1766 	unsigned long val;
1767 
1768 	entry = (unsigned long *)&ci->ci_nextl;
1769 	val = pp->pr_cache_magic[0] ^ (u_long)ci;
1770 	if (*entry != val)
1771 		goto fail;
1772 
1773 	entry++;
1774 	val = pp->pr_cache_magic[1] ^ (u_long)ci->ci_next;
1775 	if (*entry != val)
1776 		goto fail;
1777 
1778 	return;
1779 
1780 fail:
1781 	panic("%s: %s cpu free list modified: item addr %p+%zu 0x%lx!=0x%lx",
1782 	    __func__, pp->pr_wchan, ci, (caddr_t)entry - (caddr_t)ci,
1783 	    *entry, val);
1784 }
1785 
1786 static inline void
1787 pool_list_enter(struct pool *pp)
1788 {
1789 	if (pl_enter_try(pp, &pp->pr_cache_lock) == 0) {
1790 		pl_enter(pp, &pp->pr_cache_lock);
1791 		pp->pr_cache_contention++;
1792 	}
1793 }
1794 
1795 static inline void
1796 pool_list_leave(struct pool *pp)
1797 {
1798 	pl_leave(pp, &pp->pr_cache_lock);
1799 }
1800 
1801 static inline struct pool_cache_item *
1802 pool_cache_list_alloc(struct pool *pp, struct pool_cache *pc)
1803 {
1804 	struct pool_cache_item *pl;
1805 
1806 	pool_list_enter(pp);
1807 	pl = TAILQ_FIRST(&pp->pr_cache_lists);
1808 	if (pl != NULL) {
1809 		TAILQ_REMOVE(&pp->pr_cache_lists, pl, ci_nextl);
1810 		pp->pr_cache_nitems -= POOL_CACHE_ITEM_NITEMS(pl);
1811 
1812 		pool_cache_item_magic(pp, pl);
1813 
1814 		pc->pc_nlget++;
1815 	} else
1816 		pc->pc_nlfail++;
1817 
1818 	/* fold this cpu's nout into the global while we have the lock */
1819 	pp->pr_cache_nout += pc->pc_nout;
1820 	pc->pc_nout = 0;
1821 	pool_list_leave(pp);
1822 
1823 	return (pl);
1824 }
1825 
1826 static inline void
1827 pool_cache_list_free(struct pool *pp, struct pool_cache *pc,
1828     struct pool_cache_item *ci)
1829 {
1830 	pool_list_enter(pp);
1831 	if (TAILQ_EMPTY(&pp->pr_cache_lists))
1832 		pp->pr_cache_tick = ticks;
1833 
1834 	pp->pr_cache_nitems += POOL_CACHE_ITEM_NITEMS(ci);
1835 	TAILQ_INSERT_TAIL(&pp->pr_cache_lists, ci, ci_nextl);
1836 
1837 	pc->pc_nlput++;
1838 
1839 	/* fold this cpu's nout into the global while we have the lock */
1840 	pp->pr_cache_nout += pc->pc_nout;
1841 	pc->pc_nout = 0;
1842 	pool_list_leave(pp);
1843 }
1844 
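/*
 * pc_gen works like a seqlock generation counter: it is odd while the
 * owning CPU is between pool_cache_enter() and pool_cache_leave() and
 * even otherwise, so cross-cpu readers (e.g. pool_cache_pool_info())
 * wait until they read the same even value before and after copying
 * the stats.
 */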
1845 static inline struct pool_cache *
1846 pool_cache_enter(struct pool *pp, int *s)
1847 {
1848 	struct pool_cache *pc;
1849 
1850 	pc = cpumem_enter(pp->pr_cache);
1851 	*s = splraise(pp->pr_ipl);
1852 	pc->pc_gen++;
1853 
1854 	return (pc);
1855 }
1856 
1857 static inline void
1858 pool_cache_leave(struct pool *pp, struct pool_cache *pc, int s)
1859 {
1860 	pc->pc_gen++;
1861 	splx(s);
1862 	cpumem_leave(pp->pr_cache, pc);
1863 }
1864 
1865 void *
1866 pool_cache_get(struct pool *pp)
1867 {
1868 	struct pool_cache *pc;
1869 	struct pool_cache_item *ci;
1870 	int s;
1871 
1872 	pc = pool_cache_enter(pp, &s);
1873 
1874 	if (pc->pc_actv != NULL) {
1875 		ci = pc->pc_actv;
1876 	} else if (pc->pc_prev != NULL) {
1877 		ci = pc->pc_prev;
1878 		pc->pc_prev = NULL;
1879 	} else if ((ci = pool_cache_list_alloc(pp, pc)) == NULL) {
1880 		pc->pc_nfail++;
1881 		goto done;
1882 	}
1883 
1884 	pool_cache_item_magic_check(pp, ci);
1885 #ifdef DIAGNOSTIC
1886 	if (pool_debug && POOL_CACHE_ITEM_POISONED(ci)) {
1887 		size_t pidx;
1888 		uint32_t pval;
1889 
1890 		if (poison_check(ci + 1, pp->pr_size - sizeof(*ci),
1891 		    &pidx, &pval)) {
1892 			int *ip = (int *)(ci + 1);
1893 			ip += pidx;
1894 
1895 			panic("%s: %s cpu free list modified: "
1896 			    "item addr %p+%zu 0x%x!=0x%x",
1897 			    __func__, pp->pr_wchan, ci,
1898 			    (caddr_t)ip - (caddr_t)ci, *ip, pval);
1899 		}
1900 	}
1901 #endif
1902 
1903 	pc->pc_actv = ci->ci_next;
1904 	pc->pc_nactv = POOL_CACHE_ITEM_NITEMS(ci) - 1;
1905 	pc->pc_nget++;
1906 	pc->pc_nout++;
1907 
1908 done:
1909 	pool_cache_leave(pp, pc, s);
1910 
1911 	return (ci);
1912 }
1913 
1914 void
1915 pool_cache_put(struct pool *pp, void *v)
1916 {
1917 	struct pool_cache *pc;
1918 	struct pool_cache_item *ci = v;
1919 	unsigned long nitems;
1920 	int s;
1921 #ifdef DIAGNOSTIC
1922 	int poison = pool_debug && pp->pr_size > sizeof(*ci);
1923 
1924 	if (poison)
1925 		poison_mem(ci + 1, pp->pr_size - sizeof(*ci));
1926 #endif
1927 
1928 	pc = pool_cache_enter(pp, &s);
1929 
1930 	nitems = pc->pc_nactv;
1931 	if (nitems >= pp->pr_cache_items) {
1932 		if (pc->pc_prev != NULL)
1933 			pool_cache_list_free(pp, pc, pc->pc_prev);
1934 
1935 		pc->pc_prev = pc->pc_actv;
1936 
1937 		pc->pc_actv = NULL;
1938 		pc->pc_nactv = 0;
1939 		nitems = 0;
1940 	}
1941 
1942 	ci->ci_next = pc->pc_actv;
1943 	ci->ci_nitems = ++nitems;
1944 #ifdef DIAGNOSTIC
1945 	ci->ci_nitems |= poison ? POOL_CACHE_ITEM_NITEMS_POISON : 0;
1946 #endif
1947 	pool_cache_item_magic(pp, ci);
1948 
1949 	pc->pc_actv = ci;
1950 	pc->pc_nactv = nitems;
1951 
1952 	pc->pc_nput++;
1953 	pc->pc_nout--;
1954 
1955 	pool_cache_leave(pp, pc, s);
1956 }
1957 
1958 struct pool_cache_item *
1959 pool_cache_list_put(struct pool *pp, struct pool_cache_item *pl)
1960 {
1961 	struct pool_cache_item *rpl, *next;
1962 
1963 	if (pl == NULL)
1964 		return (NULL);
1965 
1966 	rpl = TAILQ_NEXT(pl, ci_nextl);
1967 
1968 	pl_enter(pp, &pp->pr_lock);
1969 	do {
1970 		next = pl->ci_next;
1971 		pool_do_put(pp, pl);
1972 		pl = next;
1973 	} while (pl != NULL);
1974 	pl_leave(pp, &pp->pr_lock);
1975 
1976 	return (rpl);
1977 }
1978 
1979 void
1980 pool_cache_destroy(struct pool *pp)
1981 {
1982 	struct pool_cache *pc;
1983 	struct pool_cache_item *pl;
1984 	struct cpumem_iter i;
1985 	struct cpumem *cm;
1986 
1987 	rw_enter_write(&pool_lock); /* serialise with the gc */
1988 	cm = pp->pr_cache;
1989 	pp->pr_cache = NULL; /* make pool_put avoid the cache */
1990 	rw_exit_write(&pool_lock);
1991 
1992 	CPUMEM_FOREACH(pc, &i, cm) {
1993 		pool_cache_list_put(pp, pc->pc_actv);
1994 		pool_cache_list_put(pp, pc->pc_prev);
1995 	}
1996 
1997 	cpumem_put(&pool_caches, cm);
1998 
1999 	pl = TAILQ_FIRST(&pp->pr_cache_lists);
2000 	while (pl != NULL)
2001 		pl = pool_cache_list_put(pp, pl);
2002 }
2003 
2004 void
2005 pool_cache_gc(struct pool *pp)
2006 {
2007 	unsigned int contention, delta;
2008 
2009 	if ((ticks - pp->pr_cache_tick) > (hz * pool_wait_gc) &&
2010 	    !TAILQ_EMPTY(&pp->pr_cache_lists) &&
2011 	    pl_enter_try(pp, &pp->pr_cache_lock)) {
2012 		struct pool_cache_item *pl = NULL;
2013 
2014 		pl = TAILQ_FIRST(&pp->pr_cache_lists);
2015 		if (pl != NULL) {
2016 			TAILQ_REMOVE(&pp->pr_cache_lists, pl, ci_nextl);
2017 			pp->pr_cache_nitems -= POOL_CACHE_ITEM_NITEMS(pl);
2018 			pp->pr_cache_tick = ticks;
2019 
2020 			pp->pr_cache_ngc++;
2021 		}
2022 
2023 		pl_leave(pp, &pp->pr_cache_lock);
2024 
2025 		pool_cache_list_put(pp, pl);
2026 	}
2027 
2028 	/*
2029 	 * If there's a lot of contention on pr_cache_lock, consider
2030 	 * growing the length of the per-cpu lists to reduce the need
2031 	 * to access the global pool.
2032 	 */
2033 
2034 	contention = pp->pr_cache_contention;
2035 	delta = contention - pp->pr_cache_contention_prev;
2036 	if (delta > 8 /* magic */) {
2037 		if ((ncpusfound * 8 * 2) <= pp->pr_cache_nitems)
2038 			pp->pr_cache_items += 8;
2039 	} else if (delta == 0) {
2040 		if (pp->pr_cache_items > 8)
2041 			pp->pr_cache_items--;
2042 	}
2043 	pp->pr_cache_contention_prev = contention;
2044 }
2045 
2046 void
2047 pool_cache_pool_info(struct pool *pp, struct kinfo_pool *pi)
2048 {
2049 	struct pool_cache *pc;
2050 	struct cpumem_iter i;
2051 
2052 	if (pp->pr_cache == NULL)
2053 		return;
2054 
2055 	/* loop through the caches twice to collect stats */
2056 
2057 	/* once without the lock so we can yield while reading nget/nput */
2058 	CPUMEM_FOREACH(pc, &i, pp->pr_cache) {
2059 		uint64_t gen, nget, nput;
2060 
2061 		do {
2062 			while ((gen = pc->pc_gen) & 1)
2063 				yield();
2064 
2065 			nget = pc->pc_nget;
2066 			nput = pc->pc_nput;
2067 		} while (gen != pc->pc_gen);
2068 
2069 		pi->pr_nget += nget;
2070 		pi->pr_nput += nput;
2071 	}
2072 
2073 	/* and once with the lock so we can get consistent nout values */
2074 	pl_enter(pp, &pp->pr_cache_lock);
2075 	CPUMEM_FOREACH(pc, &i, pp->pr_cache)
2076 		pi->pr_nout += pc->pc_nout;
2077 
2078 	pi->pr_nout += pp->pr_cache_nout;
2079 	pl_leave(pp, &pp->pr_cache_lock);
2080 }
2081 
2082 int
2083 pool_cache_info(struct pool *pp, void *oldp, size_t *oldlenp)
2084 {
2085 	struct kinfo_pool_cache kpc;
2086 
2087 	if (pp->pr_cache == NULL)
2088 		return (EOPNOTSUPP);
2089 
2090 	memset(&kpc, 0, sizeof(kpc)); /* don't leak padding */
2091 
2092 	pl_enter(pp, &pp->pr_cache_lock);
2093 	kpc.pr_ngc = pp->pr_cache_ngc;
2094 	kpc.pr_len = pp->pr_cache_items;
2095 	kpc.pr_nitems = pp->pr_cache_nitems;
2096 	kpc.pr_contention = pp->pr_cache_contention;
2097 	pl_leave(pp, &pp->pr_cache_lock);
2098 
2099 	return (sysctl_rdstruct(oldp, oldlenp, NULL, &kpc, sizeof(kpc)));
2100 }
2101 
2102 int
2103 pool_cache_cpus_info(struct pool *pp, void *oldp, size_t *oldlenp)
2104 {
2105 	struct pool_cache *pc;
2106 	struct kinfo_pool_cache_cpu *kpcc, *info;
2107 	unsigned int cpu = 0;
2108 	struct cpumem_iter i;
2109 	int error = 0;
2110 	size_t len;
2111 
2112 	if (pp->pr_cache == NULL)
2113 		return (EOPNOTSUPP);
2114 	if (*oldlenp % sizeof(*kpcc))
2115 		return (EINVAL);
2116 
2117 	kpcc = mallocarray(ncpusfound, sizeof(*kpcc), M_TEMP,
2118 	    M_WAITOK|M_CANFAIL|M_ZERO);
2119 	if (kpcc == NULL)
2120 		return (EIO);
2121 
2122 	len = ncpusfound * sizeof(*kpcc);
2123 
2124 	CPUMEM_FOREACH(pc, &i, pp->pr_cache) {
2125 		uint64_t gen;
2126 
2127 		if (cpu >= ncpusfound) {
2128 			error = EIO;
2129 			goto err;
2130 		}
2131 
2132 		info = &kpcc[cpu];
2133 		info->pr_cpu = cpu;
2134 
2135 		do {
2136 			while ((gen = pc->pc_gen) & 1)
2137 				yield();
2138 
2139 			info->pr_nget = pc->pc_nget;
2140 			info->pr_nfail = pc->pc_nfail;
2141 			info->pr_nput = pc->pc_nput;
2142 			info->pr_nlget = pc->pc_nlget;
2143 			info->pr_nlfail = pc->pc_nlfail;
2144 			info->pr_nlput = pc->pc_nlput;
2145 		} while (gen != pc->pc_gen);
2146 
2147 		cpu++;
2148 	}
2149 
2150 	error = sysctl_rdstruct(oldp, oldlenp, NULL, kpcc, len);
2151 err:
2152 	free(kpcc, M_TEMP, len);
2153 
2154 	return (error);
2155 }
2156 #else /* MULTIPROCESSOR */
2157 void
2158 pool_cache_init(struct pool *pp)
2159 {
2160 	/* nop */
2161 }
2162 
2163 void
2164 pool_cache_pool_info(struct pool *pp, struct kinfo_pool *pi)
2165 {
2166 	/* nop */
2167 }
2168 
2169 int
2170 pool_cache_info(struct pool *pp, void *oldp, size_t *oldlenp)
2171 {
2172 	return (EOPNOTSUPP);
2173 }
2174 
2175 int
2176 pool_cache_cpus_info(struct pool *pp, void *oldp, size_t *oldlenp)
2177 {
2178 	return (EOPNOTSUPP);
2179 }
2180 #endif /* MULTIPROCESSOR */
2181 
2182 
2183 void
2184 pool_lock_mtx_init(struct pool *pp, union pool_lock *lock,
2185     const struct lock_type *type)
2186 {
2187 	_mtx_init_flags(&lock->prl_mtx, pp->pr_ipl, pp->pr_wchan, 0, type);
2188 }
2189 
2190 void
2191 pool_lock_mtx_enter(union pool_lock *lock)
2192 {
2193 	mtx_enter(&lock->prl_mtx);
2194 }
2195 
2196 int
2197 pool_lock_mtx_enter_try(union pool_lock *lock)
2198 {
2199 	return (mtx_enter_try(&lock->prl_mtx));
2200 }
2201 
2202 void
2203 pool_lock_mtx_leave(union pool_lock *lock)
2204 {
2205 	mtx_leave(&lock->prl_mtx);
2206 }
2207 
2208 void
2209 pool_lock_mtx_assert_locked(union pool_lock *lock)
2210 {
2211 	MUTEX_ASSERT_LOCKED(&lock->prl_mtx);
2212 }
2213 
2214 void
2215 pool_lock_mtx_assert_unlocked(union pool_lock *lock)
2216 {
2217 	MUTEX_ASSERT_UNLOCKED(&lock->prl_mtx);
2218 }
2219 
2220 int
2221 pool_lock_mtx_sleep(void *ident, union pool_lock *lock, int priority,
2222     const char *wmesg)
2223 {
2224 	return msleep_nsec(ident, &lock->prl_mtx, priority, wmesg, INFSLP);
2225 }
2226 
2227 static const struct pool_lock_ops pool_lock_ops_mtx = {
2228 	pool_lock_mtx_init,
2229 	pool_lock_mtx_enter,
2230 	pool_lock_mtx_enter_try,
2231 	pool_lock_mtx_leave,
2232 	pool_lock_mtx_assert_locked,
2233 	pool_lock_mtx_assert_unlocked,
2234 	pool_lock_mtx_sleep,
2235 };
2236 
2237 void
2238 pool_lock_rw_init(struct pool *pp, union pool_lock *lock,
2239     const struct lock_type *type)
2240 {
2241 	_rw_init_flags(&lock->prl_rwlock, pp->pr_wchan, 0, type);
2242 }
2243 
2244 void
2245 pool_lock_rw_enter(union pool_lock *lock)
2246 {
2247 	rw_enter_write(&lock->prl_rwlock);
2248 }
2249 
2250 int
2251 pool_lock_rw_enter_try(union pool_lock *lock)
2252 {
2253 	return (rw_enter(&lock->prl_rwlock, RW_WRITE | RW_NOSLEEP) == 0);
2254 }
2255 
2256 void
2257 pool_lock_rw_leave(union pool_lock *lock)
2258 {
2259 	rw_exit_write(&lock->prl_rwlock);
2260 }
2261 
2262 void
2263 pool_lock_rw_assert_locked(union pool_lock *lock)
2264 {
2265 	rw_assert_wrlock(&lock->prl_rwlock);
2266 }
2267 
2268 void
2269 pool_lock_rw_assert_unlocked(union pool_lock *lock)
2270 {
2271 	KASSERT(rw_status(&lock->prl_rwlock) != RW_WRITE);
2272 }
2273 
2274 int
2275 pool_lock_rw_sleep(void *ident, union pool_lock *lock, int priority,
2276     const char *wmesg)
2277 {
2278 	return rwsleep_nsec(ident, &lock->prl_rwlock, priority, wmesg, INFSLP);
2279 }
2280 
2281 static const struct pool_lock_ops pool_lock_ops_rw = {
2282 	pool_lock_rw_init,
2283 	pool_lock_rw_enter,
2284 	pool_lock_rw_enter_try,
2285 	pool_lock_rw_leave,
2286 	pool_lock_rw_assert_locked,
2287 	pool_lock_rw_assert_unlocked,
2288 	pool_lock_rw_sleep,
2289 };
2290