1 /*	$NetBSD: subr_pool.c,v 1.118 2006/06/07 22:33:40 kardel Exp $	*/
2 
3 /*-
4  * Copyright (c) 1997, 1999, 2000 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Paul Kranenburg; by Jason R. Thorpe of the Numerical Aerospace
9  * Simulation Facility, NASA Ames Research Center.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  * 3. All advertising materials mentioning features or use of this software
20  *    must display the following acknowledgement:
21  *	This product includes software developed by the NetBSD
22  *	Foundation, Inc. and its contributors.
23  * 4. Neither the name of The NetBSD Foundation nor the names of its
24  *    contributors may be used to endorse or promote products derived
25  *    from this software without specific prior written permission.
26  *
27  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
28  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37  * POSSIBILITY OF SUCH DAMAGE.
38  */
39 
40 #include <sys/cdefs.h>
41 __KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.118 2006/06/07 22:33:40 kardel Exp $");
42 
43 #include "opt_pool.h"
44 #include "opt_poollog.h"
45 #include "opt_lockdebug.h"
46 
47 #include <sys/param.h>
48 #include <sys/systm.h>
49 #include <sys/proc.h>
50 #include <sys/errno.h>
51 #include <sys/kernel.h>
52 #include <sys/malloc.h>
53 #include <sys/lock.h>
54 #include <sys/pool.h>
55 #include <sys/syslog.h>
56 
57 #include <uvm/uvm.h>
58 
59 /*
60  * Pool resource management utility.
61  *
62  * Memory is allocated in pages which are split into pieces according to
63  * the pool item size. Each page is kept on one of three lists in the
64  * pool structure: `pr_emptypages', `pr_fullpages' and `pr_partpages',
65  * for empty, full and partially-full pages respectively. The individual
66  * pool items are on a linked list headed by `ph_itemlist' in each page
67  * header. The memory for building the page list is either taken from
68  * the allocated pages themselves (for small pool items) or taken from
69  * an internal pool of page headers (`phpool').
70  */
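
/*
 * A minimal usage sketch (not compiled; `struct foo', `foo_pool' and the
 * functions below are hypothetical).  Interrupt-safe pools are accessed
 * at an appropriate spl, here splvm().
 */
#if 0
static struct pool foo_pool;

void
foo_init(void)
{

	/* One pool per object type; items are carved out of whole pages. */
	pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0, "foopl", NULL);
}

struct foo *
foo_alloc(void)
{
	struct foo *f;
	int s;

	s = splvm();
	f = pool_get(&foo_pool, PR_NOWAIT);
	splx(s);
	return f;
}

void
foo_free(struct foo *f)
{
	int s;

	s = splvm();
	pool_put(&foo_pool, f);
	splx(s);
}
#endif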
71 
72 /* List of all pools */
73 LIST_HEAD(,pool) pool_head = LIST_HEAD_INITIALIZER(pool_head);
74 
75 /* Private pool for page header structures */
76 #define	PHPOOL_MAX	8
77 static struct pool phpool[PHPOOL_MAX];
78 #define	PHPOOL_FREELIST_NELEM(idx)	(((idx) == 0) ? 0 : (1 << (idx)))
79 
80 #ifdef POOL_SUBPAGE
81 /* Pool of subpages for use by normal pools. */
82 static struct pool psppool;
83 #endif
84 
85 static SLIST_HEAD(, pool_allocator) pa_deferinitq =
86     SLIST_HEAD_INITIALIZER(pa_deferinitq);
87 
88 static void *pool_page_alloc_meta(struct pool *, int);
89 static void pool_page_free_meta(struct pool *, void *);
90 
91 /* allocator for pool metadata */
92 static struct pool_allocator pool_allocator_meta = {
93 	pool_page_alloc_meta, pool_page_free_meta,
94 	.pa_backingmapptr = &kmem_map,
95 };
96 
97 /* # of seconds to retain page after last use */
98 int pool_inactive_time = 10;
99 
100 /* Next candidate for drainage (see pool_drain()) */
101 static struct pool	*drainpp;
102 
103 /* This spin lock protects both pool_head and drainpp. */
104 struct simplelock pool_head_slock = SIMPLELOCK_INITIALIZER;
105 
106 typedef uint8_t pool_item_freelist_t;
107 
108 struct pool_item_header {
109 	/* Page headers */
110 	LIST_ENTRY(pool_item_header)
111 				ph_pagelist;	/* pool page list */
112 	SPLAY_ENTRY(pool_item_header)
113 				ph_node;	/* Off-page page headers */
114 	caddr_t			ph_page;	/* this page's address */
115 	struct timeval		ph_time;	/* last referenced */
116 	union {
117 		/* !PR_NOTOUCH */
118 		struct {
119 			LIST_HEAD(, pool_item)
120 				phu_itemlist;	/* chunk list for this page */
121 		} phu_normal;
122 		/* PR_NOTOUCH */
123 		struct {
124 			uint16_t
125 				phu_off;	/* start offset in page */
126 			pool_item_freelist_t
127 				phu_firstfree;	/* first free item */
128 			/*
129 			 * XXX it might be better to use
130 			 * a simple bitmap and ffs(3)
131 			 */
132 		} phu_notouch;
133 	} ph_u;
134 	uint16_t		ph_nmissing;	/* # of chunks in use */
135 };
136 #define	ph_itemlist	ph_u.phu_normal.phu_itemlist
137 #define	ph_off		ph_u.phu_notouch.phu_off
138 #define	ph_firstfree	ph_u.phu_notouch.phu_firstfree
139 
140 struct pool_item {
141 #ifdef DIAGNOSTIC
142 	u_int pi_magic;
143 #endif
144 #define	PI_MAGIC 0xdeadbeefU
145 	/* Other entries use only this list entry */
146 	LIST_ENTRY(pool_item)	pi_list;
147 };
148 
149 #define	POOL_NEEDS_CATCHUP(pp)						\
150 	((pp)->pr_nitems < (pp)->pr_minitems)
151 
152 /*
153  * Pool cache management.
154  *
155  * Pool caches provide a way for constructed objects to be cached by the
156  * pool subsystem.  This can lead to performance improvements by avoiding
157  * needless object construction/destruction; destruction is deferred
158  * until absolutely necessary.
159  *
160  * Caches are grouped into cache groups.  Each cache group references
161  * up to 16 constructed objects.  When a cache allocates an object
162  * from the pool, it calls the object's constructor and places it into
163  * a cache group.  When a cache group frees an object back to the pool,
164  * it first calls the object's destructor.  This allows the object to
165  * persist in constructed form while freed to the cache.
166  *
167  * Multiple caches may exist for each pool.  This allows a single
168  * object type to have multiple constructed forms.  The pool references
169  * each cache, so that when a pool is drained by the pagedaemon, it can
170  * drain each individual cache as well.  Each time a cache is drained,
171  * the most idle cache group is freed to the pool in its entirety.
172  *
173  * Pool caches are laid on top of pools.  By layering them, we can avoid
174  * the complexity of cache management for pools which would not benefit
175  * from it.
176  */
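
/*
 * A minimal cache usage sketch (hypothetical names; foo_ctor() and
 * foo_dtor() are sketched near pool_cache_init() below).  In this era
 * pool_cache_get() and pool_cache_put() are thin wrappers around the
 * _paddr variants.
 */
#if 0
static struct pool foo_pool;
static struct pool_cache foo_cache;

	/* ... after pool_init(&foo_pool, ...) and pool_cache_init(): */
	f = pool_cache_get(&foo_cache, PR_WAITOK);	/* constructed object */
	/* ... use f ... */
	pool_cache_put(&foo_cache, f);	/* stays constructed in the cache */
#endif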
177 
178 /* The cache group pool. */
179 static struct pool pcgpool;
180 
181 static void	pool_cache_reclaim(struct pool_cache *, struct pool_pagelist *,
182 				   struct pool_cache_grouplist *);
183 static void	pcg_grouplist_free(struct pool_cache_grouplist *);
184 
185 static int	pool_catchup(struct pool *);
186 static void	pool_prime_page(struct pool *, caddr_t,
187 		    struct pool_item_header *);
188 static void	pool_update_curpage(struct pool *);
189 
190 static int	pool_grow(struct pool *, int);
191 static void	*pool_allocator_alloc(struct pool *, int);
192 static void	pool_allocator_free(struct pool *, void *);
193 
194 static void pool_print_pagelist(struct pool *, struct pool_pagelist *,
195 	void (*)(const char *, ...));
196 static void pool_print1(struct pool *, const char *,
197 	void (*)(const char *, ...));
198 
199 static int pool_chk_page(struct pool *, const char *,
200 			 struct pool_item_header *);
201 
202 /*
203  * Pool log entry. An array of these is allocated in pool_init().
204  */
205 struct pool_log {
206 	const char	*pl_file;
207 	long		pl_line;
208 	int		pl_action;
209 #define	PRLOG_GET	1
210 #define	PRLOG_PUT	2
211 	void		*pl_addr;
212 };
213 
214 #ifdef POOL_DIAGNOSTIC
215 /* Number of entries in pool log buffers */
216 #ifndef POOL_LOGSIZE
217 #define	POOL_LOGSIZE	10
218 #endif
219 
220 int pool_logsize = POOL_LOGSIZE;
221 
222 static inline void
223 pr_log(struct pool *pp, void *v, int action, const char *file, long line)
224 {
225 	int n = pp->pr_curlogentry;
226 	struct pool_log *pl;
227 
228 	if ((pp->pr_roflags & PR_LOGGING) == 0)
229 		return;
230 
231 	/*
232 	 * Fill in the current entry. Wrap around and overwrite
233 	 * the oldest entry if necessary.
234 	 */
235 	pl = &pp->pr_log[n];
236 	pl->pl_file = file;
237 	pl->pl_line = line;
238 	pl->pl_action = action;
239 	pl->pl_addr = v;
240 	if (++n >= pp->pr_logsize)
241 		n = 0;
242 	pp->pr_curlogentry = n;
243 }
244 
245 static void
246 pr_printlog(struct pool *pp, struct pool_item *pi,
247     void (*pr)(const char *, ...))
248 {
249 	int i = pp->pr_logsize;
250 	int n = pp->pr_curlogentry;
251 
252 	if ((pp->pr_roflags & PR_LOGGING) == 0)
253 		return;
254 
255 	/*
256 	 * Print all entries in this pool's log.
257 	 */
258 	while (i-- > 0) {
259 		struct pool_log *pl = &pp->pr_log[n];
260 		if (pl->pl_action != 0) {
261 			if (pi == NULL || pi == pl->pl_addr) {
262 				(*pr)("\tlog entry %d:\n", i);
263 				(*pr)("\t\taction = %s, addr = %p\n",
264 				    pl->pl_action == PRLOG_GET ? "get" : "put",
265 				    pl->pl_addr);
266 				(*pr)("\t\tfile: %s at line %ld\n",
267 				    pl->pl_file, pl->pl_line);
268 			}
269 		}
270 		if (++n >= pp->pr_logsize)
271 			n = 0;
272 	}
273 }
274 
275 static inline void
276 pr_enter(struct pool *pp, const char *file, long line)
277 {
278 
279 	if (__predict_false(pp->pr_entered_file != NULL)) {
280 		printf("pool %s: reentrancy at file %s line %ld\n",
281 		    pp->pr_wchan, file, line);
282 		printf("         previous entry at file %s line %ld\n",
283 		    pp->pr_entered_file, pp->pr_entered_line);
284 		panic("pr_enter");
285 	}
286 
287 	pp->pr_entered_file = file;
288 	pp->pr_entered_line = line;
289 }
290 
291 static inline void
292 pr_leave(struct pool *pp)
293 {
294 
295 	if (__predict_false(pp->pr_entered_file == NULL)) {
296 		printf("pool %s not entered?\n", pp->pr_wchan);
297 		panic("pr_leave");
298 	}
299 
300 	pp->pr_entered_file = NULL;
301 	pp->pr_entered_line = 0;
302 }
303 
304 static inline void
305 pr_enter_check(struct pool *pp, void (*pr)(const char *, ...))
306 {
307 
308 	if (pp->pr_entered_file != NULL)
309 		(*pr)("\n\tcurrently entered from file %s line %ld\n",
310 		    pp->pr_entered_file, pp->pr_entered_line);
311 }
312 #else
313 #define	pr_log(pp, v, action, file, line)
314 #define	pr_printlog(pp, pi, pr)
315 #define	pr_enter(pp, file, line)
316 #define	pr_leave(pp)
317 #define	pr_enter_check(pp, pr)
318 #endif /* POOL_DIAGNOSTIC */
319 
320 static inline int
321 pr_item_notouch_index(const struct pool *pp, const struct pool_item_header *ph,
322     const void *v)
323 {
324 	const char *cp = v;
325 	int idx;
326 
327 	KASSERT(pp->pr_roflags & PR_NOTOUCH);
328 	idx = (cp - ph->ph_page - ph->ph_off) / pp->pr_size;
329 	KASSERT(idx < pp->pr_itemsperpage);
330 	return idx;
331 }
332 
333 #define	PR_FREELIST_ALIGN(p) \
334 	roundup((uintptr_t)(p), sizeof(pool_item_freelist_t))
335 #define	PR_FREELIST(ph)	((pool_item_freelist_t *)PR_FREELIST_ALIGN((ph) + 1))
336 #define	PR_INDEX_USED	((pool_item_freelist_t)-1)
337 #define	PR_INDEX_EOL	((pool_item_freelist_t)-2)
338 
339 static inline void
340 pr_item_notouch_put(const struct pool *pp, struct pool_item_header *ph,
341     void *obj)
342 {
343 	int idx = pr_item_notouch_index(pp, ph, obj);
344 	pool_item_freelist_t *freelist = PR_FREELIST(ph);
345 
346 	KASSERT(freelist[idx] == PR_INDEX_USED);
347 	freelist[idx] = ph->ph_firstfree;
348 	ph->ph_firstfree = idx;
349 }
350 
351 static inline void *
352 pr_item_notouch_get(const struct pool *pp, struct pool_item_header *ph)
353 {
354 	int idx = ph->ph_firstfree;
355 	pool_item_freelist_t *freelist = PR_FREELIST(ph);
356 
357 	KASSERT(freelist[idx] != PR_INDEX_USED);
358 	ph->ph_firstfree = freelist[idx];
359 	freelist[idx] = PR_INDEX_USED;
360 
361 	return ph->ph_page + ph->ph_off + idx * pp->pr_size;
362 }
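
/*
 * Illustrative walk-through: on a freshly primed PR_NOTOUCH page with
 * four items, pool_prime_page() leaves ph_firstfree = 0 and
 * freelist[] = { 1, 2, 3, PR_INDEX_EOL }.  A get returns item 0,
 * setting freelist[0] = PR_INDEX_USED and ph_firstfree = 1; putting
 * that item back restores freelist[0] = 1 and ph_firstfree = 0.
 */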
363 
364 static inline int
365 phtree_compare(struct pool_item_header *a, struct pool_item_header *b)
366 {
367 	if (a->ph_page < b->ph_page)
368 		return (-1);
369 	else if (a->ph_page > b->ph_page)
370 		return (1);
371 	else
372 		return (0);
373 }
374 
375 SPLAY_PROTOTYPE(phtree, pool_item_header, ph_node, phtree_compare);
376 SPLAY_GENERATE(phtree, pool_item_header, ph_node, phtree_compare);
377 
378 /*
379  * Return the pool page header based on page address.
380  */
381 static inline struct pool_item_header *
382 pr_find_pagehead(struct pool *pp, caddr_t page)
383 {
384 	struct pool_item_header *ph, tmp;
385 
386 	if ((pp->pr_roflags & PR_PHINPAGE) != 0)
387 		return ((struct pool_item_header *)(page + pp->pr_phoffset));
388 
389 	tmp.ph_page = page;
390 	ph = SPLAY_FIND(phtree, &pp->pr_phtree, &tmp);
391 	return ph;
392 }
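
/*
 * Illustrative lookup (hypothetical addresses): with a 4 KB page
 * allocator, an item at 0xc12345a0 masks to page 0xc1234000 (the
 * masking is done by callers such as pool_do_put()).  With PR_PHINPAGE
 * the header simply sits at page + pr_phoffset; otherwise the page
 * address is the key that finds the off-page header in the splay tree.
 */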
393 
394 static void
395 pr_pagelist_free(struct pool *pp, struct pool_pagelist *pq)
396 {
397 	struct pool_item_header *ph;
398 	int s;
399 
400 	while ((ph = LIST_FIRST(pq)) != NULL) {
401 		LIST_REMOVE(ph, ph_pagelist);
402 		pool_allocator_free(pp, ph->ph_page);
403 		if ((pp->pr_roflags & PR_PHINPAGE) == 0) {
404 			s = splvm();
405 			pool_put(pp->pr_phpool, ph);
406 			splx(s);
407 		}
408 	}
409 }
410 
411 /*
412  * Remove a page from the pool.
413  */
414 static inline void
415 pr_rmpage(struct pool *pp, struct pool_item_header *ph,
416      struct pool_pagelist *pq)
417 {
418 
419 	LOCK_ASSERT(simple_lock_held(&pp->pr_slock));
420 
421 	/*
422 	 * If the page was idle, decrement the idle page count.
423 	 */
424 	if (ph->ph_nmissing == 0) {
425 #ifdef DIAGNOSTIC
426 		if (pp->pr_nidle == 0)
427 			panic("pr_rmpage: nidle inconsistent");
428 		if (pp->pr_nitems < pp->pr_itemsperpage)
429 			panic("pr_rmpage: nitems inconsistent");
430 #endif
431 		pp->pr_nidle--;
432 	}
433 
434 	pp->pr_nitems -= pp->pr_itemsperpage;
435 
436 	/*
437 	 * Unlink the page from the pool and queue it for release.
438 	 */
439 	LIST_REMOVE(ph, ph_pagelist);
440 	if ((pp->pr_roflags & PR_PHINPAGE) == 0)
441 		SPLAY_REMOVE(phtree, &pp->pr_phtree, ph);
442 	LIST_INSERT_HEAD(pq, ph, ph_pagelist);
443 
444 	pp->pr_npages--;
445 	pp->pr_npagefree++;
446 
447 	pool_update_curpage(pp);
448 }
449 
450 static boolean_t
451 pa_starved_p(struct pool_allocator *pa)
452 {
453 
454 	if (pa->pa_backingmap != NULL) {
455 		return vm_map_starved_p(pa->pa_backingmap);
456 	}
457 	return FALSE;
458 }
459 
460 static int
461 pool_reclaim_callback(struct callback_entry *ce, void *obj, void *arg)
462 {
463 	struct pool *pp = obj;
464 	struct pool_allocator *pa = pp->pr_alloc;
465 
466 	KASSERT(&pp->pr_reclaimerentry == ce);
467 	pool_reclaim(pp);
468 	if (!pa_starved_p(pa)) {
469 		return CALLBACK_CHAIN_ABORT;
470 	}
471 	return CALLBACK_CHAIN_CONTINUE;
472 }
473 
474 static void
475 pool_reclaim_register(struct pool *pp)
476 {
477 	struct vm_map *map = pp->pr_alloc->pa_backingmap;
478 	int s;
479 
480 	if (map == NULL) {
481 		return;
482 	}
483 
484 	s = splvm(); /* not necessary for INTRSAFE maps, but it doesn't hurt. */
485 	callback_register(&vm_map_to_kernel(map)->vmk_reclaim_callback,
486 	    &pp->pr_reclaimerentry, pp, pool_reclaim_callback);
487 	splx(s);
488 }
489 
490 static void
491 pool_reclaim_unregister(struct pool *pp)
492 {
493 	struct vm_map *map = pp->pr_alloc->pa_backingmap;
494 	int s;
495 
496 	if (map == NULL) {
497 		return;
498 	}
499 
500 	s = splvm(); /* not necessary for INTRSAFE maps, but it doesn't hurt. */
501 	callback_unregister(&vm_map_to_kernel(map)->vmk_reclaim_callback,
502 	    &pp->pr_reclaimerentry);
503 	splx(s);
504 }
505 
506 static void
507 pa_reclaim_register(struct pool_allocator *pa)
508 {
509 	struct vm_map *map = *pa->pa_backingmapptr;
510 	struct pool *pp;
511 
512 	KASSERT(pa->pa_backingmap == NULL);
513 	if (map == NULL) {
514 		SLIST_INSERT_HEAD(&pa_deferinitq, pa, pa_q);
515 		return;
516 	}
517 	pa->pa_backingmap = map;
518 	TAILQ_FOREACH(pp, &pa->pa_list, pr_alloc_list) {
519 		pool_reclaim_register(pp);
520 	}
521 }
522 
523 /*
524  * Initialize all the pools listed in the "pools" link set.
525  */
526 void
527 pool_subsystem_init(void)
528 {
529 	struct pool_allocator *pa;
530 	__link_set_decl(pools, struct link_pool_init);
531 	struct link_pool_init * const *pi;
532 
533 	__link_set_foreach(pi, pools)
534 		pool_init((*pi)->pp, (*pi)->size, (*pi)->align,
535 		    (*pi)->align_offset, (*pi)->flags, (*pi)->wchan,
536 		    (*pi)->palloc);
537 
538 	while ((pa = SLIST_FIRST(&pa_deferinitq)) != NULL) {
539 		KASSERT(pa->pa_backingmapptr != NULL);
540 		KASSERT(*pa->pa_backingmapptr != NULL);
541 		SLIST_REMOVE_HEAD(&pa_deferinitq, pa_q);
542 		pa_reclaim_register(pa);
543 	}
544 }
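
/*
 * A sketch of declaring a pool in the link set (assuming the POOL_INIT()
 * helper from <sys/pool.h>; `struct bar' is hypothetical): the macro
 * emits a struct link_pool_init entry in the "pools" link set, and the
 * loop above then calls pool_init() for it during pool_subsystem_init().
 */
#if 0
POOL_INIT(bar_pool, sizeof(struct bar), 0, 0, 0, "barpl", NULL);
#endif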
545 
546 /*
547  * Initialize the given pool resource structure.
548  *
549  * We export this routine to allow other kernel parts to declare
550  * static pools that must be initialized before malloc() is available.
551  */
552 void
553 pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags,
554     const char *wchan, struct pool_allocator *palloc)
555 {
556 #ifdef DEBUG
557 	struct pool *pp1;
558 #endif
559 	size_t trysize, phsize;
560 	int off, slack, s;
561 
562 	KASSERT((1UL << (CHAR_BIT * sizeof(pool_item_freelist_t))) - 2 >=
563 	    PHPOOL_FREELIST_NELEM(PHPOOL_MAX - 1));
564 
565 #ifdef DEBUG
566 	/*
567 	 * Check that the pool hasn't already been initialised and
568 	 * added to the list of all pools.
569 	 */
570 	LIST_FOREACH(pp1, &pool_head, pr_poollist) {
571 		if (pp == pp1)
572 			panic("pool_init: pool %s already initialised",
573 			    wchan);
574 	}
575 #endif
576 
577 #ifdef POOL_DIAGNOSTIC
578 	/*
579 	 * Always log if POOL_DIAGNOSTIC is defined.
580 	 */
581 	if (pool_logsize != 0)
582 		flags |= PR_LOGGING;
583 #endif
584 
585 	if (palloc == NULL)
586 		palloc = &pool_allocator_kmem;
587 #ifdef POOL_SUBPAGE
588 	if (size > palloc->pa_pagesz) {
589 		if (palloc == &pool_allocator_kmem)
590 			palloc = &pool_allocator_kmem_fullpage;
591 		else if (palloc == &pool_allocator_nointr)
592 			palloc = &pool_allocator_nointr_fullpage;
593 	}
594 #endif /* POOL_SUBPAGE */
595 	if ((palloc->pa_flags & PA_INITIALIZED) == 0) {
596 		if (palloc->pa_pagesz == 0)
597 			palloc->pa_pagesz = PAGE_SIZE;
598 
599 		TAILQ_INIT(&palloc->pa_list);
600 
601 		simple_lock_init(&palloc->pa_slock);
602 		palloc->pa_pagemask = ~(palloc->pa_pagesz - 1);
603 		palloc->pa_pageshift = ffs(palloc->pa_pagesz) - 1;
604 
605 		if (palloc->pa_backingmapptr != NULL) {
606 			pa_reclaim_register(palloc);
607 		}
608 		palloc->pa_flags |= PA_INITIALIZED;
609 	}
610 
611 	if (align == 0)
612 		align = ALIGN(1);
613 
614 	if (size < sizeof(struct pool_item))
615 		size = sizeof(struct pool_item);
616 
617 	size = roundup(size, align);
618 #ifdef DIAGNOSTIC
619 	if (size > palloc->pa_pagesz)
620 		panic("pool_init: pool item size (%lu) too large",
621 		      (u_long)size);
622 #endif
623 
624 	/*
625 	 * Initialize the pool structure.
626 	 */
627 	LIST_INIT(&pp->pr_emptypages);
628 	LIST_INIT(&pp->pr_fullpages);
629 	LIST_INIT(&pp->pr_partpages);
630 	LIST_INIT(&pp->pr_cachelist);
631 	pp->pr_curpage = NULL;
632 	pp->pr_npages = 0;
633 	pp->pr_minitems = 0;
634 	pp->pr_minpages = 0;
635 	pp->pr_maxpages = UINT_MAX;
636 	pp->pr_roflags = flags;
637 	pp->pr_flags = 0;
638 	pp->pr_size = size;
639 	pp->pr_align = align;
640 	pp->pr_wchan = wchan;
641 	pp->pr_alloc = palloc;
642 	pp->pr_nitems = 0;
643 	pp->pr_nout = 0;
644 	pp->pr_hardlimit = UINT_MAX;
645 	pp->pr_hardlimit_warning = NULL;
646 	pp->pr_hardlimit_ratecap.tv_sec = 0;
647 	pp->pr_hardlimit_ratecap.tv_usec = 0;
648 	pp->pr_hardlimit_warning_last.tv_sec = 0;
649 	pp->pr_hardlimit_warning_last.tv_usec = 0;
650 	pp->pr_drain_hook = NULL;
651 	pp->pr_drain_hook_arg = NULL;
652 
653 	/*
654 	 * Decide whether to put the page header off-page, either to
655 	 * avoid wasting too large a part of the page on it or because
656 	 * the item itself is too big.  Off-page page headers go on a
657 	 * splay tree, so we can match a returned item with its header
658 	 * based on the page address.  We use 1/16 of the page size and
659 	 * about 8 times the item size as the threshold (XXX: tune).
660 	 *
661 	 * However, we'll put the header into the page if we can put
662 	 * it without wasting any items.
663 	 *
664 	 * Silently enforce `0 <= ioff < align'.
665 	 */
666 	pp->pr_itemoffset = ioff %= align;
667 	/* See the comment below about reserved bytes. */
668 	trysize = palloc->pa_pagesz - ((align - ioff) % align);
669 	phsize = ALIGN(sizeof(struct pool_item_header));
670 	if ((pp->pr_roflags & PR_NOTOUCH) == 0 &&
671 	    (pp->pr_size < MIN(palloc->pa_pagesz / 16, phsize << 3) ||
672 	    trysize / pp->pr_size == (trysize - phsize) / pp->pr_size)) {
673 		/* Use the end of the page for the page header */
674 		pp->pr_roflags |= PR_PHINPAGE;
675 		pp->pr_phoffset = off = palloc->pa_pagesz - phsize;
676 	} else {
677 		/* The page header will be taken from our page header pool */
678 		pp->pr_phoffset = 0;
679 		off = palloc->pa_pagesz;
680 		SPLAY_INIT(&pp->pr_phtree);
681 	}
682 
683 	/*
684 	 * Alignment is to take place at `ioff' within the item. This means
685 	 * we must reserve up to `align - 1' bytes on the page to allow
686 	 * appropriate positioning of each item.
687 	 */
688 	pp->pr_itemsperpage = (off - ((align - ioff) % align)) / pp->pr_size;
689 	KASSERT(pp->pr_itemsperpage != 0);
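	/*
	 * Worked example (illustrative numbers): with a 4 KB page, a
	 * 64-byte item and phsize = 40, pr_size < MIN(4096 / 16, 40 << 3),
	 * so the header lands in-page at off = 4096 - 40 = 4056.  With the
	 * default alignment the reserved-byte adjustment is zero, giving
	 * pr_itemsperpage = 4056 / 64 = 63 and 24 bytes of slack for the
	 * cache coloring set up below.
	 */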
690 	if ((pp->pr_roflags & PR_NOTOUCH)) {
691 		int idx;
692 
693 		for (idx = 0; pp->pr_itemsperpage > PHPOOL_FREELIST_NELEM(idx);
694 		    idx++) {
695 			/* nothing */
696 		}
697 		if (idx >= PHPOOL_MAX) {
698 			/*
699 			 * If you see this panic, consider tweaking
700 			 * PHPOOL_MAX and PHPOOL_FREELIST_NELEM.
701 			 */
702 			panic("%s: too large itemsperpage(%d) for PR_NOTOUCH",
703 			    pp->pr_wchan, pp->pr_itemsperpage);
704 		}
705 		pp->pr_phpool = &phpool[idx];
706 	} else if ((pp->pr_roflags & PR_PHINPAGE) == 0) {
707 		pp->pr_phpool = &phpool[0];
708 	}
709 #if defined(DIAGNOSTIC)
710 	else {
711 		pp->pr_phpool = NULL;
712 	}
713 #endif
714 
715 	/*
716 	 * Use the slack between the chunks and the page header
717 	 * for "cache coloring".
718 	 */
719 	slack = off - pp->pr_itemsperpage * pp->pr_size;
720 	pp->pr_maxcolor = (slack / align) * align;
721 	pp->pr_curcolor = 0;
722 
723 	pp->pr_nget = 0;
724 	pp->pr_nfail = 0;
725 	pp->pr_nput = 0;
726 	pp->pr_npagealloc = 0;
727 	pp->pr_npagefree = 0;
728 	pp->pr_hiwat = 0;
729 	pp->pr_nidle = 0;
730 
731 #ifdef POOL_DIAGNOSTIC
732 	if (flags & PR_LOGGING) {
733 		if (kmem_map == NULL ||
734 		    (pp->pr_log = malloc(pool_logsize * sizeof(struct pool_log),
735 		     M_TEMP, M_NOWAIT)) == NULL)
736 			pp->pr_roflags &= ~PR_LOGGING;
737 		pp->pr_curlogentry = 0;
738 		pp->pr_logsize = pool_logsize;
739 	}
740 #endif
741 
742 	pp->pr_entered_file = NULL;
743 	pp->pr_entered_line = 0;
744 
745 	simple_lock_init(&pp->pr_slock);
746 
747 	/*
748 	 * Initialize private page header pool and cache magazine pool if we
749 	 * haven't done so yet.
750 	 * XXX LOCKING.
751 	 */
752 	if (phpool[0].pr_size == 0) {
753 		int idx;
754 		for (idx = 0; idx < PHPOOL_MAX; idx++) {
755 			static char phpool_names[PHPOOL_MAX][6+1+6+1];
756 			int nelem;
757 			size_t sz;
758 
759 			nelem = PHPOOL_FREELIST_NELEM(idx);
760 			snprintf(phpool_names[idx], sizeof(phpool_names[idx]),
761 			    "phpool-%d", nelem);
762 			sz = sizeof(struct pool_item_header);
763 			if (nelem) {
764 				sz = PR_FREELIST_ALIGN(sz)
765 				    + nelem * sizeof(pool_item_freelist_t);
766 			}
767 			pool_init(&phpool[idx], sz, 0, 0, 0,
768 			    phpool_names[idx], &pool_allocator_meta);
769 		}
770 #ifdef POOL_SUBPAGE
771 		pool_init(&psppool, POOL_SUBPAGE, POOL_SUBPAGE, 0,
772 		    PR_RECURSIVE, "psppool", &pool_allocator_meta);
773 #endif
774 		pool_init(&pcgpool, sizeof(struct pool_cache_group), 0, 0,
775 		    0, "pcgpool", &pool_allocator_meta);
776 	}
777 
778 	/* Insert into the list of all pools. */
779 	simple_lock(&pool_head_slock);
780 	LIST_INSERT_HEAD(&pool_head, pp, pr_poollist);
781 	simple_unlock(&pool_head_slock);
782 
783 	/* Insert this into the list of pools using this allocator. */
784 	s = splvm();
785 	simple_lock(&palloc->pa_slock);
786 	TAILQ_INSERT_TAIL(&palloc->pa_list, pp, pr_alloc_list);
787 	simple_unlock(&palloc->pa_slock);
788 	splx(s);
789 	pool_reclaim_register(pp);
790 }
791 
792 /*
793  * De-commission a pool resource.
794  */
795 void
796 pool_destroy(struct pool *pp)
797 {
798 	struct pool_pagelist pq;
799 	struct pool_item_header *ph;
800 	int s;
801 
802 	/* Remove from global pool list */
803 	simple_lock(&pool_head_slock);
804 	LIST_REMOVE(pp, pr_poollist);
805 	if (drainpp == pp)
806 		drainpp = NULL;
807 	simple_unlock(&pool_head_slock);
808 
809 	/* Remove this pool from its allocator's list of pools. */
810 	pool_reclaim_unregister(pp);
811 	s = splvm();
812 	simple_lock(&pp->pr_alloc->pa_slock);
813 	TAILQ_REMOVE(&pp->pr_alloc->pa_list, pp, pr_alloc_list);
814 	simple_unlock(&pp->pr_alloc->pa_slock);
815 	splx(s);
816 
817 	s = splvm();
818 	simple_lock(&pp->pr_slock);
819 
820 	KASSERT(LIST_EMPTY(&pp->pr_cachelist));
821 
822 #ifdef DIAGNOSTIC
823 	if (pp->pr_nout != 0) {
824 		pr_printlog(pp, NULL, printf);
825 		panic("pool_destroy: pool busy: still out: %u",
826 		    pp->pr_nout);
827 	}
828 #endif
829 
830 	KASSERT(LIST_EMPTY(&pp->pr_fullpages));
831 	KASSERT(LIST_EMPTY(&pp->pr_partpages));
832 
833 	/* Remove all pages */
834 	LIST_INIT(&pq);
835 	while ((ph = LIST_FIRST(&pp->pr_emptypages)) != NULL)
836 		pr_rmpage(pp, ph, &pq);
837 
838 	simple_unlock(&pp->pr_slock);
839 	splx(s);
840 
841 	pr_pagelist_free(pp, &pq);
842 
843 #ifdef POOL_DIAGNOSTIC
844 	if ((pp->pr_roflags & PR_LOGGING) != 0)
845 		free(pp->pr_log, M_TEMP);
846 #endif
847 }
848 
849 void
850 pool_set_drain_hook(struct pool *pp, void (*fn)(void *, int), void *arg)
851 {
852 
853 	/* XXX no locking -- must be used just after pool_init() */
854 #ifdef DIAGNOSTIC
855 	if (pp->pr_drain_hook != NULL)
856 		panic("pool_set_drain_hook(%s): already set", pp->pr_wchan);
857 #endif
858 	pp->pr_drain_hook = fn;
859 	pp->pr_drain_hook_arg = arg;
860 }
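
/*
 * A sketch of registering a drain hook (names hypothetical).  pool_get()
 * calls the hook, with the pool unlocked, when the hard limit is hit,
 * and pool_reclaim() calls it with PR_NOWAIT, so the hook must not rely
 * on being able to sleep.
 */
#if 0
static struct pool frag_pool;

static void
frag_drain(void *arg, int flags)
{

	/* Release cached fragments so pool_get() can make progress. */
	frag_purge_cache();		/* hypothetical helper */
}

void
frag_init(void)
{

	pool_init(&frag_pool, sizeof(struct frag), 0, 0, 0, "fragpl", NULL);
	pool_set_drain_hook(&frag_pool, frag_drain, NULL);
}
#endif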
861 
862 static struct pool_item_header *
863 pool_alloc_item_header(struct pool *pp, caddr_t storage, int flags)
864 {
865 	struct pool_item_header *ph;
866 	int s;
867 
868 	LOCK_ASSERT(simple_lock_held(&pp->pr_slock) == 0);
869 
870 	if ((pp->pr_roflags & PR_PHINPAGE) != 0)
871 		ph = (struct pool_item_header *) (storage + pp->pr_phoffset);
872 	else {
873 		s = splvm();
874 		ph = pool_get(pp->pr_phpool, flags);
875 		splx(s);
876 	}
877 
878 	return (ph);
879 }
880 
881 /*
882  * Grab an item from the pool; must be called at appropriate spl level
883  */
884 void *
885 #ifdef POOL_DIAGNOSTIC
886 _pool_get(struct pool *pp, int flags, const char *file, long line)
887 #else
888 pool_get(struct pool *pp, int flags)
889 #endif
890 {
891 	struct pool_item *pi;
892 	struct pool_item_header *ph;
893 	void *v;
894 
895 #ifdef DIAGNOSTIC
896 	if (__predict_false(pp->pr_itemsperpage == 0))
897 		panic("pool_get: pool %p: pr_itemsperpage is zero, "
898 		    "pool not initialized?", pp);
899 	if (__predict_false(curlwp == NULL && doing_shutdown == 0 &&
900 			    (flags & PR_WAITOK) != 0))
901 		panic("pool_get: %s: must have NOWAIT", pp->pr_wchan);
902 
903 #endif /* DIAGNOSTIC */
904 #ifdef LOCKDEBUG
905 	if (flags & PR_WAITOK)
906 		simple_lock_only_held(NULL, "pool_get(PR_WAITOK)");
907 	SCHED_ASSERT_UNLOCKED();
908 #endif
909 
910 	simple_lock(&pp->pr_slock);
911 	pr_enter(pp, file, line);
912 
913  startover:
914 	/*
915 	 * Check to see if we've reached the hard limit.  If we have,
916 	 * and we can wait, then wait until an item has been returned to
917 	 * the pool.
918 	 */
919 #ifdef DIAGNOSTIC
920 	if (__predict_false(pp->pr_nout > pp->pr_hardlimit)) {
921 		pr_leave(pp);
922 		simple_unlock(&pp->pr_slock);
923 		panic("pool_get: %s: crossed hard limit", pp->pr_wchan);
924 	}
925 #endif
926 	if (__predict_false(pp->pr_nout == pp->pr_hardlimit)) {
927 		if (pp->pr_drain_hook != NULL) {
928 			/*
929 			 * Since the drain hook is going to free things
930 			 * back to the pool, unlock, call the hook, re-lock,
931 			 * and check the hardlimit condition again.
932 			 */
933 			pr_leave(pp);
934 			simple_unlock(&pp->pr_slock);
935 			(*pp->pr_drain_hook)(pp->pr_drain_hook_arg, flags);
936 			simple_lock(&pp->pr_slock);
937 			pr_enter(pp, file, line);
938 			if (pp->pr_nout < pp->pr_hardlimit)
939 				goto startover;
940 		}
941 
942 		if ((flags & PR_WAITOK) && !(flags & PR_LIMITFAIL)) {
943 			/*
944 			 * XXX: A warning isn't logged in this case.  Should
945 			 * it be?
946 			 */
947 			pp->pr_flags |= PR_WANTED;
948 			pr_leave(pp);
949 			ltsleep(pp, PSWP, pp->pr_wchan, 0, &pp->pr_slock);
950 			pr_enter(pp, file, line);
951 			goto startover;
952 		}
953 
954 		/*
955 		 * Log a message that the hard limit has been hit.
956 		 */
957 		if (pp->pr_hardlimit_warning != NULL &&
958 		    ratecheck(&pp->pr_hardlimit_warning_last,
959 			      &pp->pr_hardlimit_ratecap))
960 			log(LOG_ERR, "%s\n", pp->pr_hardlimit_warning);
961 
962 		pp->pr_nfail++;
963 
964 		pr_leave(pp);
965 		simple_unlock(&pp->pr_slock);
966 		return (NULL);
967 	}
968 
969 	/*
970 	 * The convention we use is that if `curpage' is not NULL, then
971 	 * it points at a non-empty bucket. In particular, `curpage'
972 	 * never points at a page header which has PR_PHINPAGE set and
973 	 * has no items in its bucket.
974 	 */
975 	if ((ph = pp->pr_curpage) == NULL) {
976 		int error;
977 
978 #ifdef DIAGNOSTIC
979 		if (pp->pr_nitems != 0) {
980 			simple_unlock(&pp->pr_slock);
981 			printf("pool_get: %s: curpage NULL, nitems %u\n",
982 			    pp->pr_wchan, pp->pr_nitems);
983 			panic("pool_get: nitems inconsistent");
984 		}
985 #endif
986 
987 		/*
988 		 * Call the back-end page allocator for more memory.
989 		 * Release the pool lock, as the back-end page allocator
990 		 * may block.
991 		 */
992 		pr_leave(pp);
993 		error = pool_grow(pp, flags);
994 		pr_enter(pp, file, line);
995 		if (error != 0) {
996 			/*
997 			 * We were unable to allocate a page or item
998 			 * header, but we released the lock during
999 			 * allocation, so perhaps items were freed
1000 			 * back to the pool.  Check for this case.
1001 			 */
1002 			if (pp->pr_curpage != NULL)
1003 				goto startover;
1004 
1005 			pp->pr_nfail++;
1006 			pr_leave(pp);
1007 			simple_unlock(&pp->pr_slock);
1008 			return (NULL);
1009 		}
1010 
1011 		/* Start the allocation process over. */
1012 		goto startover;
1013 	}
1014 	if (pp->pr_roflags & PR_NOTOUCH) {
1015 #ifdef DIAGNOSTIC
1016 		if (__predict_false(ph->ph_nmissing == pp->pr_itemsperpage)) {
1017 			pr_leave(pp);
1018 			simple_unlock(&pp->pr_slock);
1019 			panic("pool_get: %s: page empty", pp->pr_wchan);
1020 		}
1021 #endif
1022 		v = pr_item_notouch_get(pp, ph);
1023 #ifdef POOL_DIAGNOSTIC
1024 		pr_log(pp, v, PRLOG_GET, file, line);
1025 #endif
1026 	} else {
1027 		v = pi = LIST_FIRST(&ph->ph_itemlist);
1028 		if (__predict_false(v == NULL)) {
1029 			pr_leave(pp);
1030 			simple_unlock(&pp->pr_slock);
1031 			panic("pool_get: %s: page empty", pp->pr_wchan);
1032 		}
1033 #ifdef DIAGNOSTIC
1034 		if (__predict_false(pp->pr_nitems == 0)) {
1035 			pr_leave(pp);
1036 			simple_unlock(&pp->pr_slock);
1037 			printf("pool_get: %s: items on itemlist, nitems %u\n",
1038 			    pp->pr_wchan, pp->pr_nitems);
1039 			panic("pool_get: nitems inconsistent");
1040 		}
1041 #endif
1042 
1043 #ifdef POOL_DIAGNOSTIC
1044 		pr_log(pp, v, PRLOG_GET, file, line);
1045 #endif
1046 
1047 #ifdef DIAGNOSTIC
1048 		if (__predict_false(pi->pi_magic != PI_MAGIC)) {
1049 			pr_printlog(pp, pi, printf);
1050 			panic("pool_get(%s): free list modified: "
1051 			    "magic=%x; page %p; item addr %p\n",
1052 			    pp->pr_wchan, pi->pi_magic, ph->ph_page, pi);
1053 		}
1054 #endif
1055 
1056 		/*
1057 		 * Remove from item list.
1058 		 */
1059 		LIST_REMOVE(pi, pi_list);
1060 	}
1061 	pp->pr_nitems--;
1062 	pp->pr_nout++;
1063 	if (ph->ph_nmissing == 0) {
1064 #ifdef DIAGNOSTIC
1065 		if (__predict_false(pp->pr_nidle == 0))
1066 			panic("pool_get: nidle inconsistent");
1067 #endif
1068 		pp->pr_nidle--;
1069 
1070 		/*
1071 		 * This page was previously empty.  Move it to the list of
1072 		 * partially-full pages.  This page is already curpage.
1073 		 */
1074 		LIST_REMOVE(ph, ph_pagelist);
1075 		LIST_INSERT_HEAD(&pp->pr_partpages, ph, ph_pagelist);
1076 	}
1077 	ph->ph_nmissing++;
1078 	if (ph->ph_nmissing == pp->pr_itemsperpage) {
1079 #ifdef DIAGNOSTIC
1080 		if (__predict_false((pp->pr_roflags & PR_NOTOUCH) == 0 &&
1081 		    !LIST_EMPTY(&ph->ph_itemlist))) {
1082 			pr_leave(pp);
1083 			simple_unlock(&pp->pr_slock);
1084 			panic("pool_get: %s: nmissing inconsistent",
1085 			    pp->pr_wchan);
1086 		}
1087 #endif
1088 		/*
1089 		 * This page is now full.  Move it to the full list
1090 		 * and select a new current page.
1091 		 */
1092 		LIST_REMOVE(ph, ph_pagelist);
1093 		LIST_INSERT_HEAD(&pp->pr_fullpages, ph, ph_pagelist);
1094 		pool_update_curpage(pp);
1095 	}
1096 
1097 	pp->pr_nget++;
1098 	pr_leave(pp);
1099 
1100 	/*
1101 	 * If we have a low water mark and we are now below that low
1102 	 * water mark, add more items to the pool.
1103 	 */
1104 	if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
1105 		/*
1106 		 * XXX: Should we log a warning?  Should we set up a timeout
1107 		 * to try again in a second or so?  The latter could break
1108 		 * a caller's assumptions about interrupt protection, etc.
1109 		 */
1110 	}
1111 
1112 	simple_unlock(&pp->pr_slock);
1113 	return (v);
1114 }
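
/*
 * Flag semantics in a sketch (hypothetical caller): PR_WAITOK alone may
 * sleep when the hard limit is hit; adding PR_LIMITFAIL makes pool_get()
 * return NULL instead.
 */
#if 0
	f = pool_get(&foo_pool, PR_WAITOK | PR_LIMITFAIL);
	if (f == NULL) {
		/* Hard limit reached; back off rather than block. */
	}
#endif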
1115 
1116 /*
1117  * Internal version of pool_put().  Pool is already locked/entered.
1118  */
1119 static void
1120 pool_do_put(struct pool *pp, void *v, struct pool_pagelist *pq)
1121 {
1122 	struct pool_item *pi = v;
1123 	struct pool_item_header *ph;
1124 	caddr_t page;
1125 
1126 	LOCK_ASSERT(simple_lock_held(&pp->pr_slock));
1127 	SCHED_ASSERT_UNLOCKED();
1128 
1129 	page = (caddr_t)((u_long)v & pp->pr_alloc->pa_pagemask);
1130 
1131 #ifdef DIAGNOSTIC
1132 	if (__predict_false(pp->pr_nout == 0)) {
1133 		printf("pool %s: putting with none out\n",
1134 		    pp->pr_wchan);
1135 		panic("pool_put");
1136 	}
1137 #endif
1138 
1139 	if (__predict_false((ph = pr_find_pagehead(pp, page)) == NULL)) {
1140 		pr_printlog(pp, NULL, printf);
1141 		panic("pool_put: %s: page header missing", pp->pr_wchan);
1142 	}
1143 
1144 #ifdef LOCKDEBUG
1145 	/*
1146 	 * Check if we're freeing a locked simple lock.
1147 	 */
1148 	simple_lock_freecheck((caddr_t)pi, ((caddr_t)pi) + pp->pr_size);
1149 #endif
1150 
1151 	/*
1152 	 * Return to item list.
1153 	 */
1154 	if (pp->pr_roflags & PR_NOTOUCH) {
1155 		pr_item_notouch_put(pp, ph, v);
1156 	} else {
1157 #ifdef DIAGNOSTIC
1158 		pi->pi_magic = PI_MAGIC;
1159 #endif
1160 #ifdef DEBUG
1161 		{
1162 			int i, *ip = v;
1163 
1164 			for (i = 0; i < pp->pr_size / sizeof(int); i++) {
1165 				*ip++ = PI_MAGIC;
1166 			}
1167 		}
1168 #endif
1169 
1170 		LIST_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
1171 	}
1172 	KDASSERT(ph->ph_nmissing != 0);
1173 	ph->ph_nmissing--;
1174 	pp->pr_nput++;
1175 	pp->pr_nitems++;
1176 	pp->pr_nout--;
1177 
1178 	/* Cancel "pool empty" condition if it exists */
1179 	if (pp->pr_curpage == NULL)
1180 		pp->pr_curpage = ph;
1181 
1182 	if (pp->pr_flags & PR_WANTED) {
1183 		pp->pr_flags &= ~PR_WANTED;
1184 		if (ph->ph_nmissing == 0)
1185 			pp->pr_nidle++;
1186 		wakeup((caddr_t)pp);
1187 		return;
1188 	}
1189 
1190 	/*
1191 	 * If this page is now empty, do one of two things:
1192 	 *
1193 	 *	(1) If we have more pages than the page high water mark,
1194 	 *	    free the page back to the system.  ONLY CONSIDER
1195 	 *	    FREEING BACK A PAGE IF WE HAVE MORE THAN OUR MINIMUM PAGE
1196 	 *	    CLAIM.
1197 	 *
1198 	 *	(2) Otherwise, move the page to the empty page list.
1199 	 *
1200 	 * Either way, select a new current page (so we use a partially-full
1201 	 * page if one is available).
1202 	 */
1203 	if (ph->ph_nmissing == 0) {
1204 		pp->pr_nidle++;
1205 		if (pp->pr_npages > pp->pr_minpages &&
1206 		    (pp->pr_npages > pp->pr_maxpages ||
1207 		     pa_starved_p(pp->pr_alloc))) {
1208 			pr_rmpage(pp, ph, pq);
1209 		} else {
1210 			LIST_REMOVE(ph, ph_pagelist);
1211 			LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist);
1212 
1213 			/*
1214 			 * Update the timestamp on the page.  A page must
1215 			 * be idle for some period of time before it can
1216 			 * be reclaimed by the pagedaemon.  This minimizes
1217 			 * ping-pong'ing for memory.
1218 			 */
1219 			getmicrotime(&ph->ph_time);
1220 		}
1221 		pool_update_curpage(pp);
1222 	}
1223 
1224 	/*
1225 	 * If the page was previously completely full, move it to the
1226 	 * partially-full list and make it the current page.  The next
1227 	 * allocation will get the item from this page, instead of
1228 	 * further fragmenting the pool.
1229 	 */
1230 	else if (ph->ph_nmissing == (pp->pr_itemsperpage - 1)) {
1231 		LIST_REMOVE(ph, ph_pagelist);
1232 		LIST_INSERT_HEAD(&pp->pr_partpages, ph, ph_pagelist);
1233 		pp->pr_curpage = ph;
1234 	}
1235 }
1236 
1237 /*
1238  * Return resource to the pool; must be called at appropriate spl level
1239  */
1240 #ifdef POOL_DIAGNOSTIC
1241 void
1242 _pool_put(struct pool *pp, void *v, const char *file, long line)
1243 {
1244 	struct pool_pagelist pq;
1245 
1246 	LIST_INIT(&pq);
1247 
1248 	simple_lock(&pp->pr_slock);
1249 	pr_enter(pp, file, line);
1250 
1251 	pr_log(pp, v, PRLOG_PUT, file, line);
1252 
1253 	pool_do_put(pp, v, &pq);
1254 
1255 	pr_leave(pp);
1256 	simple_unlock(&pp->pr_slock);
1257 
1258 	pr_pagelist_free(pp, &pq);
1259 }
1260 #undef pool_put
1261 #endif /* POOL_DIAGNOSTIC */
1262 
1263 void
1264 pool_put(struct pool *pp, void *v)
1265 {
1266 	struct pool_pagelist pq;
1267 
1268 	LIST_INIT(&pq);
1269 
1270 	simple_lock(&pp->pr_slock);
1271 	pool_do_put(pp, v, &pq);
1272 	simple_unlock(&pp->pr_slock);
1273 
1274 	pr_pagelist_free(pp, &pq);
1275 }
1276 
1277 #ifdef POOL_DIAGNOSTIC
1278 #define		pool_put(h, v)	_pool_put((h), (v), __FILE__, __LINE__)
1279 #endif
1280 
1281 /*
1282  * pool_grow: grow a pool by a page.
1283  *
1284  * => called with pool locked.
1285  * => unlock and relock the pool.
1286  * => return with pool locked.
1287  */
1288 
1289 static int
1290 pool_grow(struct pool *pp, int flags)
1291 {
1292 	struct pool_item_header *ph = NULL;
1293 	char *cp;
1294 
1295 	simple_unlock(&pp->pr_slock);
1296 	cp = pool_allocator_alloc(pp, flags);
1297 	if (__predict_true(cp != NULL)) {
1298 		ph = pool_alloc_item_header(pp, cp, flags);
1299 	}
1300 	if (__predict_false(cp == NULL || ph == NULL)) {
1301 		if (cp != NULL) {
1302 			pool_allocator_free(pp, cp);
1303 		}
1304 		simple_lock(&pp->pr_slock);
1305 		return ENOMEM;
1306 	}
1307 
1308 	simple_lock(&pp->pr_slock);
1309 	pool_prime_page(pp, cp, ph);
1310 	pp->pr_npagealloc++;
1311 	return 0;
1312 }
1313 
1314 /*
1315  * Add N items to the pool.
1316  */
1317 int
1318 pool_prime(struct pool *pp, int n)
1319 {
1320 	int newpages;
1321 	int error = 0;
1322 
1323 	simple_lock(&pp->pr_slock);
1324 
1325 	newpages = roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1326 
1327 	while (newpages-- > 0) {
1328 		error = pool_grow(pp, PR_NOWAIT);
1329 		if (error) {
1330 			break;
1331 		}
1332 		pp->pr_minpages++;
1333 	}
1334 
1335 	if (pp->pr_minpages >= pp->pr_maxpages)
1336 		pp->pr_maxpages = pp->pr_minpages + 1;	/* XXX */
1337 
1338 	simple_unlock(&pp->pr_slock);
1339 	return error;
1340 }
1341 
1342 /*
1343  * Add a page worth of items to the pool.
1344  *
1345  * Note, we must be called with the pool descriptor LOCKED.
1346  */
1347 static void
1348 pool_prime_page(struct pool *pp, caddr_t storage, struct pool_item_header *ph)
1349 {
1350 	struct pool_item *pi;
1351 	caddr_t cp = storage;
1352 	unsigned int align = pp->pr_align;
1353 	unsigned int ioff = pp->pr_itemoffset;
1354 	int n;
1355 
1356 	LOCK_ASSERT(simple_lock_held(&pp->pr_slock));
1357 
1358 #ifdef DIAGNOSTIC
1359 	if (((u_long)cp & (pp->pr_alloc->pa_pagesz - 1)) != 0)
1360 		panic("pool_prime_page: %s: unaligned page", pp->pr_wchan);
1361 #endif
1362 
1363 	/*
1364 	 * Insert page header.
1365 	 */
1366 	LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist);
1367 	LIST_INIT(&ph->ph_itemlist);
1368 	ph->ph_page = storage;
1369 	ph->ph_nmissing = 0;
1370 	getmicrotime(&ph->ph_time);
1371 	if ((pp->pr_roflags & PR_PHINPAGE) == 0)
1372 		SPLAY_INSERT(phtree, &pp->pr_phtree, ph);
1373 
1374 	pp->pr_nidle++;
1375 
1376 	/*
1377 	 * Color this page.
1378 	 */
1379 	cp = (caddr_t)(cp + pp->pr_curcolor);
1380 	if ((pp->pr_curcolor += align) > pp->pr_maxcolor)
1381 		pp->pr_curcolor = 0;
1382 
1383 	/*
1384 	 * Adjust storage to apply alignment at `pr_itemoffset' within each item.
1385 	 */
1386 	if (ioff != 0)
1387 		cp = (caddr_t)(cp + (align - ioff));
1388 
1389 	/*
1390 	 * Insert remaining chunks on the bucket list.
1391 	 */
1392 	n = pp->pr_itemsperpage;
1393 	pp->pr_nitems += n;
1394 
1395 	if (pp->pr_roflags & PR_NOTOUCH) {
1396 		pool_item_freelist_t *freelist = PR_FREELIST(ph);
1397 		int i;
1398 
1399 		ph->ph_off = cp - storage;
1400 		ph->ph_firstfree = 0;
1401 		for (i = 0; i < n - 1; i++)
1402 			freelist[i] = i + 1;
1403 		freelist[n - 1] = PR_INDEX_EOL;
1404 	} else {
1405 		while (n--) {
1406 			pi = (struct pool_item *)cp;
1407 
1408 			KASSERT(((((vaddr_t)pi) + ioff) & (align - 1)) == 0);
1409 
1410 			/* Insert on page list */
1411 			LIST_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
1412 #ifdef DIAGNOSTIC
1413 			pi->pi_magic = PI_MAGIC;
1414 #endif
1415 			cp = (caddr_t)(cp + pp->pr_size);
1416 		}
1417 	}
1418 
1419 	/*
1420 	 * If the pool was depleted, point at the new page.
1421 	 */
1422 	if (pp->pr_curpage == NULL)
1423 		pp->pr_curpage = ph;
1424 
1425 	if (++pp->pr_npages > pp->pr_hiwat)
1426 		pp->pr_hiwat = pp->pr_npages;
1427 }
1428 
1429 /*
1430  * Used by pool_get() when nitems drops below the low water mark.  This
1431  * is used to catch up pr_nitems with the low water mark.
1432  *
1433  * Note 1, we never wait for memory here, we let the caller decide what to do.
1434  *
1435  * Note 2, we must be called with the pool already locked, and we return
1436  * with it locked.
1437  */
1438 static int
1439 pool_catchup(struct pool *pp)
1440 {
1441 	int error = 0;
1442 
1443 	while (POOL_NEEDS_CATCHUP(pp)) {
1444 		error = pool_grow(pp, PR_NOWAIT);
1445 		if (error) {
1446 			break;
1447 		}
1448 	}
1449 	return error;
1450 }
1451 
1452 static void
1453 pool_update_curpage(struct pool *pp)
1454 {
1455 
1456 	pp->pr_curpage = LIST_FIRST(&pp->pr_partpages);
1457 	if (pp->pr_curpage == NULL) {
1458 		pp->pr_curpage = LIST_FIRST(&pp->pr_emptypages);
1459 	}
1460 }
1461 
1462 void
1463 pool_setlowat(struct pool *pp, int n)
1464 {
1465 
1466 	simple_lock(&pp->pr_slock);
1467 
1468 	pp->pr_minitems = n;
1469 	pp->pr_minpages = (n == 0)
1470 		? 0
1471 		: roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1472 
1473 	/* Make sure we're caught up with the newly-set low water mark. */
1474 	if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
1475 		/*
1476 		 * XXX: Should we log a warning?  Should we set up a timeout
1477 		 * to try again in a second or so?  The latter could break
1478 		 * a caller's assumptions about interrupt protection, etc.
1479 		 */
1480 	}
1481 
1482 	simple_unlock(&pp->pr_slock);
1483 }
1484 
1485 void
1486 pool_sethiwat(struct pool *pp, int n)
1487 {
1488 
1489 	simple_lock(&pp->pr_slock);
1490 
1491 	pp->pr_maxpages = (n == 0)
1492 		? 0
1493 		: roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1494 
1495 	simple_unlock(&pp->pr_slock);
1496 }
1497 
1498 void
1499 pool_sethardlimit(struct pool *pp, int n, const char *warnmess, int ratecap)
1500 {
1501 
1502 	simple_lock(&pp->pr_slock);
1503 
1504 	pp->pr_hardlimit = n;
1505 	pp->pr_hardlimit_warning = warnmess;
1506 	pp->pr_hardlimit_ratecap.tv_sec = ratecap;
1507 	pp->pr_hardlimit_warning_last.tv_sec = 0;
1508 	pp->pr_hardlimit_warning_last.tv_usec = 0;
1509 
1510 	/*
1511 	 * In-line version of pool_sethiwat(), because we don't want to
1512 	 * release the lock.
1513 	 */
1514 	pp->pr_maxpages = (n == 0)
1515 		? 0
1516 		: roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1517 
1518 	simple_unlock(&pp->pr_slock);
1519 }
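
/*
 * A sketch of typical tuning right after pool_init(); the pool and the
 * numbers are illustrative.
 */
#if 0
	pool_init(&mb_pool, MSIZE, 0, 0, 0, "mbpl", NULL);
	pool_prime(&mb_pool, 16);	/* preallocate room for 16 items */
	pool_setlowat(&mb_pool, 16);	/* replenish when below 16 items */
	pool_sethiwat(&mb_pool, 1024);	/* free idle pages above ~1024 items */
	pool_sethardlimit(&mb_pool, 2048,	/* fail gets beyond 2048 out */
	    "WARNING: mb_pool limit reached", 60);	/* warn once a minute */
#endif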
1520 
1521 /*
1522  * Release all complete pages that have not been used recently.
1523  */
1524 int
1525 #ifdef POOL_DIAGNOSTIC
1526 _pool_reclaim(struct pool *pp, const char *file, long line)
1527 #else
1528 pool_reclaim(struct pool *pp)
1529 #endif
1530 {
1531 	struct pool_item_header *ph, *phnext;
1532 	struct pool_cache *pc;
1533 	struct pool_pagelist pq;
1534 	struct pool_cache_grouplist pcgl;
1535 	struct timeval curtime, diff;
1536 
1537 	if (pp->pr_drain_hook != NULL) {
1538 		/*
1539 		 * The drain hook must be called with the pool unlocked.
1540 		 */
1541 		(*pp->pr_drain_hook)(pp->pr_drain_hook_arg, PR_NOWAIT);
1542 	}
1543 
1544 	if (simple_lock_try(&pp->pr_slock) == 0)
1545 		return (0);
1546 	pr_enter(pp, file, line);
1547 
1548 	LIST_INIT(&pq);
1549 	LIST_INIT(&pcgl);
1550 
1551 	/*
1552 	 * Reclaim items from the pool's caches.
1553 	 */
1554 	LIST_FOREACH(pc, &pp->pr_cachelist, pc_poollist)
1555 		pool_cache_reclaim(pc, &pq, &pcgl);
1556 
1557 	getmicrotime(&curtime);
1558 
1559 	for (ph = LIST_FIRST(&pp->pr_emptypages); ph != NULL; ph = phnext) {
1560 		phnext = LIST_NEXT(ph, ph_pagelist);
1561 
1562 		/* Check our minimum page claim */
1563 		if (pp->pr_npages <= pp->pr_minpages)
1564 			break;
1565 
1566 		KASSERT(ph->ph_nmissing == 0);
1567 		timersub(&curtime, &ph->ph_time, &diff);
1568 		if (diff.tv_sec < pool_inactive_time
1569 		    && !pa_starved_p(pp->pr_alloc))
1570 			continue;
1571 
1572 		/*
1573 		 * If freeing this page would put us below
1574 		 * the low water mark, stop now.
1575 		 */
1576 		if ((pp->pr_nitems - pp->pr_itemsperpage) <
1577 		    pp->pr_minitems)
1578 			break;
1579 
1580 		pr_rmpage(pp, ph, &pq);
1581 	}
1582 
1583 	pr_leave(pp);
1584 	simple_unlock(&pp->pr_slock);
1585 	if (LIST_EMPTY(&pq) && LIST_EMPTY(&pcgl))
1586 		return 0;
1587 
1588 	pr_pagelist_free(pp, &pq);
1589 	pcg_grouplist_free(&pcgl);
1590 	return (1);
1591 }
1592 
1593 /*
1594  * Drain pools, one at a time.
1595  *
1596  * Note, we must never be called from an interrupt context.
1597  */
1598 void
1599 pool_drain(void *arg)
1600 {
1601 	struct pool *pp;
1602 	int s;
1603 
1604 	pp = NULL;
1605 	s = splvm();
1606 	simple_lock(&pool_head_slock);
1607 	if (drainpp == NULL) {
1608 		drainpp = LIST_FIRST(&pool_head);
1609 	}
1610 	if (drainpp) {
1611 		pp = drainpp;
1612 		drainpp = LIST_NEXT(pp, pr_poollist);
1613 	}
1614 	simple_unlock(&pool_head_slock);
1615 	if (pp)
1616 		pool_reclaim(pp);
1617 	splx(s);
1618 }
1619 
1620 /*
1621  * Diagnostic helpers.
1622  */
1623 void
1624 pool_print(struct pool *pp, const char *modif)
1625 {
1626 	int s;
1627 
1628 	s = splvm();
1629 	if (simple_lock_try(&pp->pr_slock) == 0) {
1630 		printf("pool %s is locked; try again later\n",
1631 		    pp->pr_wchan);
1632 		splx(s);
1633 		return;
1634 	}
1635 	pool_print1(pp, modif, printf);
1636 	simple_unlock(&pp->pr_slock);
1637 	splx(s);
1638 }
1639 
1640 void
1641 pool_printall(const char *modif, void (*pr)(const char *, ...))
1642 {
1643 	struct pool *pp;
1644 
1645 	if (simple_lock_try(&pool_head_slock) == 0) {
1646 		(*pr)("WARNING: pool_head_slock is locked\n");
1647 	} else {
1648 		simple_unlock(&pool_head_slock);
1649 	}
1650 
1651 	LIST_FOREACH(pp, &pool_head, pr_poollist) {
1652 		pool_printit(pp, modif, pr);
1653 	}
1654 }
1655 
1656 void
1657 pool_printit(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
1658 {
1659 
1660 	if (pp == NULL) {
1661 		(*pr)("Must specify a pool to print.\n");
1662 		return;
1663 	}
1664 
1665 	/*
1666 	 * Called from DDB; interrupts should be blocked, and all
1667 	 * other processors should be paused.  We can skip locking
1668 	 * the pool in this case.
1669 	 *
1670 	 * We do a simple_lock_try() just to print the lock
1671 	 * status, however.
1672 	 */
1673 
1674 	if (simple_lock_try(&pp->pr_slock) == 0)
1675 		(*pr)("WARNING: pool %s is locked\n", pp->pr_wchan);
1676 	else
1677 		simple_unlock(&pp->pr_slock);
1678 
1679 	pool_print1(pp, modif, pr);
1680 }
1681 
1682 static void
1683 pool_print_pagelist(struct pool *pp, struct pool_pagelist *pl,
1684     void (*pr)(const char *, ...))
1685 {
1686 	struct pool_item_header *ph;
1687 #ifdef DIAGNOSTIC
1688 	struct pool_item *pi;
1689 #endif
1690 
1691 	LIST_FOREACH(ph, pl, ph_pagelist) {
1692 		(*pr)("\t\tpage %p, nmissing %d, time %lu,%lu\n",
1693 		    ph->ph_page, ph->ph_nmissing,
1694 		    (u_long)ph->ph_time.tv_sec,
1695 		    (u_long)ph->ph_time.tv_usec);
1696 #ifdef DIAGNOSTIC
1697 		if (!(pp->pr_roflags & PR_NOTOUCH)) {
1698 			LIST_FOREACH(pi, &ph->ph_itemlist, pi_list) {
1699 				if (pi->pi_magic != PI_MAGIC) {
1700 					(*pr)("\t\t\titem %p, magic 0x%x\n",
1701 					    pi, pi->pi_magic);
1702 				}
1703 			}
1704 		}
1705 #endif
1706 	}
1707 }
1708 
1709 static void
1710 pool_print1(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
1711 {
1712 	struct pool_item_header *ph;
1713 	struct pool_cache *pc;
1714 	struct pool_cache_group *pcg;
1715 	int i, print_log = 0, print_pagelist = 0, print_cache = 0;
1716 	char c;
1717 
1718 	while ((c = *modif++) != '\0') {
1719 		if (c == 'l')
1720 			print_log = 1;
1721 		if (c == 'p')
1722 			print_pagelist = 1;
1723 		if (c == 'c')
1724 			print_cache = 1;
1725 	}
1726 
1727 	(*pr)("POOL %s: size %u, align %u, ioff %u, roflags 0x%08x\n",
1728 	    pp->pr_wchan, pp->pr_size, pp->pr_align, pp->pr_itemoffset,
1729 	    pp->pr_roflags);
1730 	(*pr)("\talloc %p\n", pp->pr_alloc);
1731 	(*pr)("\tminitems %u, minpages %u, maxpages %u, npages %u\n",
1732 	    pp->pr_minitems, pp->pr_minpages, pp->pr_maxpages, pp->pr_npages);
1733 	(*pr)("\titemsperpage %u, nitems %u, nout %u, hardlimit %u\n",
1734 	    pp->pr_itemsperpage, pp->pr_nitems, pp->pr_nout, pp->pr_hardlimit);
1735 
1736 	(*pr)("\n\tnget %lu, nfail %lu, nput %lu\n",
1737 	    pp->pr_nget, pp->pr_nfail, pp->pr_nput);
1738 	(*pr)("\tnpagealloc %lu, npagefree %lu, hiwat %u, nidle %lu\n",
1739 	    pp->pr_npagealloc, pp->pr_npagefree, pp->pr_hiwat, pp->pr_nidle);
1740 
1741 	if (print_pagelist == 0)
1742 		goto skip_pagelist;
1743 
1744 	if ((ph = LIST_FIRST(&pp->pr_emptypages)) != NULL)
1745 		(*pr)("\n\tempty page list:\n");
1746 	pool_print_pagelist(pp, &pp->pr_emptypages, pr);
1747 	if ((ph = LIST_FIRST(&pp->pr_fullpages)) != NULL)
1748 		(*pr)("\n\tfull page list:\n");
1749 	pool_print_pagelist(pp, &pp->pr_fullpages, pr);
1750 	if ((ph = LIST_FIRST(&pp->pr_partpages)) != NULL)
1751 		(*pr)("\n\tpartial-page list:\n");
1752 	pool_print_pagelist(pp, &pp->pr_partpages, pr);
1753 
1754 	if (pp->pr_curpage == NULL)
1755 		(*pr)("\tno current page\n");
1756 	else
1757 		(*pr)("\tcurpage %p\n", pp->pr_curpage->ph_page);
1758 
1759  skip_pagelist:
1760 	if (print_log == 0)
1761 		goto skip_log;
1762 
1763 	(*pr)("\n");
1764 	if ((pp->pr_roflags & PR_LOGGING) == 0)
1765 		(*pr)("\tno log\n");
1766 	else
1767 		pr_printlog(pp, NULL, pr);
1768 
1769  skip_log:
1770 	if (print_cache == 0)
1771 		goto skip_cache;
1772 
1773 #define PR_GROUPLIST(pcg)						\
1774 	(*pr)("\t\tgroup %p: avail %d\n", pcg, pcg->pcg_avail);		\
1775 	for (i = 0; i < PCG_NOBJECTS; i++) {				\
1776 		if (pcg->pcg_objects[i].pcgo_pa !=			\
1777 		    POOL_PADDR_INVALID) {				\
1778 			(*pr)("\t\t\t%p, 0x%llx\n",			\
1779 			    pcg->pcg_objects[i].pcgo_va,		\
1780 			    (unsigned long long)			\
1781 			    pcg->pcg_objects[i].pcgo_pa);		\
1782 		} else {						\
1783 			(*pr)("\t\t\t%p\n",				\
1784 			    pcg->pcg_objects[i].pcgo_va);		\
1785 		}							\
1786 	}
1787 
1788 	LIST_FOREACH(pc, &pp->pr_cachelist, pc_poollist) {
1789 		(*pr)("\tcache %p\n", pc);
1790 		(*pr)("\t    hits %lu misses %lu ngroups %lu nitems %lu\n",
1791 		    pc->pc_hits, pc->pc_misses, pc->pc_ngroups, pc->pc_nitems);
1792 		(*pr)("\t    full groups:\n");
1793 		LIST_FOREACH(pcg, &pc->pc_fullgroups, pcg_list) {
1794 			PR_GROUPLIST(pcg);
1795 		}
1796 		(*pr)("\t    partial groups:\n");
1797 		LIST_FOREACH(pcg, &pc->pc_partgroups, pcg_list) {
1798 			PR_GROUPLIST(pcg);
1799 		}
1800 		(*pr)("\t    empty groups:\n");
1801 		LIST_FOREACH(pcg, &pc->pc_emptygroups, pcg_list) {
1802 			PR_GROUPLIST(pcg);
1803 		}
1804 	}
1805 #undef PR_GROUPLIST
1806 
1807  skip_cache:
1808 	pr_enter_check(pp, pr);
1809 }
1810 
1811 static int
1812 pool_chk_page(struct pool *pp, const char *label, struct pool_item_header *ph)
1813 {
1814 	struct pool_item *pi;
1815 	caddr_t page;
1816 	int n;
1817 
1818 	page = (caddr_t)((u_long)ph & pp->pr_alloc->pa_pagemask);
1819 	if (page != ph->ph_page &&
1820 	    (pp->pr_roflags & PR_PHINPAGE) != 0) {
1821 		if (label != NULL)
1822 			printf("%s: ", label);
1823 		printf("pool(%p:%s): page inconsistency: page %p;"
1824 		       " at page head addr %p (p %p)\n", pp,
1825 			pp->pr_wchan, ph->ph_page,
1826 			ph, page);
1827 		return 1;
1828 	}
1829 
1830 	if ((pp->pr_roflags & PR_NOTOUCH) != 0)
1831 		return 0;
1832 
1833 	for (pi = LIST_FIRST(&ph->ph_itemlist), n = 0;
1834 	     pi != NULL;
1835 	     pi = LIST_NEXT(pi,pi_list), n++) {
1836 
1837 #ifdef DIAGNOSTIC
1838 		if (pi->pi_magic != PI_MAGIC) {
1839 			if (label != NULL)
1840 				printf("%s: ", label);
1841 			printf("pool(%s): free list modified: magic=%x;"
1842 			       " page %p; item ordinal %d;"
1843 			       " addr %p (p %p)\n",
1844 				pp->pr_wchan, pi->pi_magic, ph->ph_page,
1845 				n, pi, page);
1846 			panic("pool");
1847 		}
1848 #endif
1849 		page =
1850 		    (caddr_t)((u_long)pi & pp->pr_alloc->pa_pagemask);
1851 		if (page == ph->ph_page)
1852 			continue;
1853 
1854 		if (label != NULL)
1855 			printf("%s: ", label);
1856 		printf("pool(%p:%s): page inconsistency: page %p;"
1857 		       " item ordinal %d; addr %p (p %p)\n", pp,
1858 			pp->pr_wchan, ph->ph_page,
1859 			n, pi, page);
1860 		return 1;
1861 	}
1862 	return 0;
1863 }
1864 
1865 
1866 int
1867 pool_chk(struct pool *pp, const char *label)
1868 {
1869 	struct pool_item_header *ph;
1870 	int r = 0;
1871 
1872 	simple_lock(&pp->pr_slock);
1873 	LIST_FOREACH(ph, &pp->pr_emptypages, ph_pagelist) {
1874 		r = pool_chk_page(pp, label, ph);
1875 		if (r) {
1876 			goto out;
1877 		}
1878 	}
1879 	LIST_FOREACH(ph, &pp->pr_fullpages, ph_pagelist) {
1880 		r = pool_chk_page(pp, label, ph);
1881 		if (r) {
1882 			goto out;
1883 		}
1884 	}
1885 	LIST_FOREACH(ph, &pp->pr_partpages, ph_pagelist) {
1886 		r = pool_chk_page(pp, label, ph);
1887 		if (r) {
1888 			goto out;
1889 		}
1890 	}
1891 
1892 out:
1893 	simple_unlock(&pp->pr_slock);
1894 	return (r);
1895 }
1896 
1897 /*
1898  * pool_cache_init:
1899  *
1900  *	Initialize a pool cache.
1901  *
1902  *	NOTE: If the pool must be protected from interrupts, we expect
1903  *	to be called at the appropriate interrupt priority level.
1904  */
1905 void
1906 pool_cache_init(struct pool_cache *pc, struct pool *pp,
1907     int (*ctor)(void *, void *, int),
1908     void (*dtor)(void *, void *),
1909     void *arg)
1910 {
1911 
1912 	LIST_INIT(&pc->pc_emptygroups);
1913 	LIST_INIT(&pc->pc_fullgroups);
1914 	LIST_INIT(&pc->pc_partgroups);
1915 	simple_lock_init(&pc->pc_slock);
1916 
1917 	pc->pc_pool = pp;
1918 
1919 	pc->pc_ctor = ctor;
1920 	pc->pc_dtor = dtor;
1921 	pc->pc_arg  = arg;
1922 
1923 	pc->pc_hits   = 0;
1924 	pc->pc_misses = 0;
1925 
1926 	pc->pc_ngroups = 0;
1927 
1928 	pc->pc_nitems = 0;
1929 
1930 	simple_lock(&pp->pr_slock);
1931 	LIST_INSERT_HEAD(&pp->pr_cachelist, pc, pc_poollist);
1932 	simple_unlock(&pp->pr_slock);
1933 }
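
/*
 * A sketch of a constructor/destructor pair (names and fields are
 * hypothetical).  The constructor runs only when the cache falls through
 * to pool_get(); a cached object comes back still constructed.
 */
#if 0
static int
foo_ctor(void *arg, void *obj, int flags)
{
	struct foo *f = obj;

	simple_lock_init(&f->f_slock);	/* expensive setup done once */
	f->f_refcnt = 0;
	return 0;		/* nonzero makes pool_cache_get() fail */
}

static void
foo_dtor(void *arg, void *obj)
{

	/* Undo foo_ctor(); called only when the object truly dies. */
}

void
foo_cache_attach(void)
{

	pool_cache_init(&foo_cache, &foo_pool, foo_ctor, foo_dtor, NULL);
}
#endif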
1934 
1935 /*
1936  * pool_cache_destroy:
1937  *
1938  *	Destroy a pool cache.
1939  */
1940 void
1941 pool_cache_destroy(struct pool_cache *pc)
1942 {
1943 	struct pool *pp = pc->pc_pool;
1944 
1945 	/* First, invalidate the entire cache. */
1946 	pool_cache_invalidate(pc);
1947 
1948 	/* ...and remove it from the pool's cache list. */
1949 	simple_lock(&pp->pr_slock);
1950 	LIST_REMOVE(pc, pc_poollist);
1951 	simple_unlock(&pp->pr_slock);
1952 }
1953 
1954 static inline void *
1955 pcg_get(struct pool_cache_group *pcg, paddr_t *pap)
1956 {
1957 	void *object;
1958 	u_int idx;
1959 
1960 	KASSERT(pcg->pcg_avail <= PCG_NOBJECTS);
1961 	KASSERT(pcg->pcg_avail != 0);
1962 	idx = --pcg->pcg_avail;
1963 
1964 	KASSERT(pcg->pcg_objects[idx].pcgo_va != NULL);
1965 	object = pcg->pcg_objects[idx].pcgo_va;
1966 	if (pap != NULL)
1967 		*pap = pcg->pcg_objects[idx].pcgo_pa;
1968 	pcg->pcg_objects[idx].pcgo_va = NULL;
1969 
1970 	return (object);
1971 }
1972 
1973 static inline void
1974 pcg_put(struct pool_cache_group *pcg, void *object, paddr_t pa)
1975 {
1976 	u_int idx;
1977 
1978 	KASSERT(pcg->pcg_avail < PCG_NOBJECTS);
1979 	idx = pcg->pcg_avail++;
1980 
1981 	KASSERT(pcg->pcg_objects[idx].pcgo_va == NULL);
1982 	pcg->pcg_objects[idx].pcgo_va = object;
1983 	pcg->pcg_objects[idx].pcgo_pa = pa;
1984 }
1985 
1986 static void
1987 pcg_grouplist_free(struct pool_cache_grouplist *pcgl)
1988 {
1989 	struct pool_cache_group *pcg;
1990 	int s;
1991 
1992 	s = splvm();
1993 	while ((pcg = LIST_FIRST(pcgl)) != NULL) {
1994 		LIST_REMOVE(pcg, pcg_list);
1995 		pool_put(&pcgpool, pcg);
1996 	}
1997 	splx(s);
1998 }
1999 
2000 /*
2001  * pool_cache_get{,_paddr}:
2002  *
2003  *	Get an object from a pool cache (optionally returning
2004  *	the physical address of the object).
2005  */
2006 void *
2007 pool_cache_get_paddr(struct pool_cache *pc, int flags, paddr_t *pap)
2008 {
2009 	struct pool_cache_group *pcg;
2010 	void *object;
2011 
2012 #ifdef LOCKDEBUG
2013 	if (flags & PR_WAITOK)
2014 		simple_lock_only_held(NULL, "pool_cache_get(PR_WAITOK)");
2015 #endif
2016 
2017 	simple_lock(&pc->pc_slock);
2018 
2019 	pcg = LIST_FIRST(&pc->pc_partgroups);
2020 	if (pcg == NULL) {
2021 		pcg = LIST_FIRST(&pc->pc_fullgroups);
2022 		if (pcg != NULL) {
2023 			LIST_REMOVE(pcg, pcg_list);
2024 			LIST_INSERT_HEAD(&pc->pc_partgroups, pcg, pcg_list);
2025 		}
2026 	}
2027 	if (pcg == NULL) {
2028 
2029 		/*
2030 		 * No groups with any available objects.  Allocate
2031 		 * a new object, construct it, and return it to
2032 		 * the caller.  We will allocate a group, if necessary,
2033 		 * when the object is freed back to the cache.
2034 		 */
2035 		pc->pc_misses++;
2036 		simple_unlock(&pc->pc_slock);
2037 		object = pool_get(pc->pc_pool, flags);
2038 		if (object != NULL && pc->pc_ctor != NULL) {
2039 			if ((*pc->pc_ctor)(pc->pc_arg, object, flags) != 0) {
2040 				pool_put(pc->pc_pool, object);
2041 				return (NULL);
2042 			}
2043 		}
2044 		if (object != NULL && pap != NULL) {
2045 #ifdef POOL_VTOPHYS
2046 			*pap = POOL_VTOPHYS(object);
2047 #else
2048 			*pap = POOL_PADDR_INVALID;
2049 #endif
2050 		}
2051 		return (object);
2052 	}
2053 
2054 	pc->pc_hits++;
2055 	pc->pc_nitems--;
2056 	object = pcg_get(pcg, pap);
2057 
2058 	if (pcg->pcg_avail == 0) {
2059 		LIST_REMOVE(pcg, pcg_list);
2060 		LIST_INSERT_HEAD(&pc->pc_emptygroups, pcg, pcg_list);
2061 	}
2062 	simple_unlock(&pc->pc_slock);
2063 
2064 	return (object);
2065 }
2066 
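/*
 * Allocation sketch, continuing the hypothetical foo example:
 * pool_cache_get() is the usual wrapper that passes a NULL physical
 * address pointer.  Note that even with PR_WAITOK the constructor may
 * fail, so the NULL check is still required.
 */
#if 0	/* example only */
	struct foo *f;

	f = pool_cache_get(&foo_cache, PR_WAITOK);
	if (f == NULL)
		return (ENOMEM);
#endif
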
2067 /*
2068  * pool_cache_put{,_paddr}:
2069  *
2070  *	Put an object back into the pool cache (optionally caching the
2071  *	physical address of the object).
2072  */
2073 void
2074 pool_cache_put_paddr(struct pool_cache *pc, void *object, paddr_t pa)
2075 {
2076 	struct pool_cache_group *pcg;
2077 	int s;
2078 
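	/*
	 * If a thread is sleeping waiting for an item from the
	 * underlying pool, bypass the cache: releasing the object
	 * directly lets the waiter make progress.
	 */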
2079 	if (__predict_false((pc->pc_pool->pr_flags & PR_WANTED) != 0)) {
2080 		goto destruct;
2081 	}
2082 
2083 	simple_lock(&pc->pc_slock);
2084 
2085 	pcg = LIST_FIRST(&pc->pc_partgroups);
2086 	if (pcg == NULL) {
2087 		pcg = LIST_FIRST(&pc->pc_emptygroups);
2088 		if (pcg != NULL) {
2089 			LIST_REMOVE(pcg, pcg_list);
2090 			LIST_INSERT_HEAD(&pc->pc_partgroups, pcg, pcg_list);
2091 		}
2092 	}
2093 	if (pcg == NULL) {
2094 
2095 		/*
2096 		 * No group has a free slot to receive the object.
2097 		 * Attempt to allocate a new group.
2098 		 */
2099 		simple_unlock(&pc->pc_slock);
2100 		s = splvm();
2101 		pcg = pool_get(&pcgpool, PR_NOWAIT);
2102 		splx(s);
2103 		if (pcg == NULL) {
2104 destruct:
2105 
2106 			/*
2107 			 * Unable to allocate a cache group; destruct the object
2108 			 * and free it back to the pool.
2109 			 */
2110 			pool_cache_destruct_object(pc, object);
2111 			return;
2112 		}
2113 		memset(pcg, 0, sizeof(*pcg));
2114 		simple_lock(&pc->pc_slock);
2115 		pc->pc_ngroups++;
2116 		LIST_INSERT_HEAD(&pc->pc_partgroups, pcg, pcg_list);
2117 	}
2118 
2119 	pc->pc_nitems++;
2120 	pcg_put(pcg, object, pa);
2121 
2122 	if (pcg->pcg_avail == PCG_NOBJECTS) {
2123 		LIST_REMOVE(pcg, pcg_list);
2124 		LIST_INSERT_HEAD(&pc->pc_fullgroups, pcg, pcg_list);
2125 	}
2126 	simple_unlock(&pc->pc_slock);
2127 }
2128 
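/*
 * Release sketch, continuing the hypothetical foo example:
 * pool_cache_put() is the wrapper that passes POOL_PADDR_INVALID for
 * callers that do not track physical addresses.  The object stays
 * constructed, ready for the next pool_cache_get().
 */
#if 0	/* example only */
	pool_cache_put(&foo_cache, f);
#endif
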
2129 /*
2130  * pool_cache_destruct_object:
2131  *
2132  *	Force destruction of an object and its release back into
2133  *	the pool.
2134  */
2135 void
2136 pool_cache_destruct_object(struct pool_cache *pc, void *object)
2137 {
2138 
2139 	if (pc->pc_dtor != NULL)
2140 		(*pc->pc_dtor)(pc->pc_arg, object);
2141 	pool_put(pc->pc_pool, object);
2142 }
2143 
2144 static void
2145 pool_do_cache_invalidate_grouplist(struct pool_cache_grouplist *pcgsl,
2146     struct pool_cache *pc, struct pool_pagelist *pq,
2147     struct pool_cache_grouplist *pcgdl)
2148 {
2149 	struct pool_cache_group *pcg, *npcg;
2150 	void *object;
2151 
2152 	for (pcg = LIST_FIRST(pcgsl); pcg != NULL; pcg = npcg) {
2153 		npcg = LIST_NEXT(pcg, pcg_list);
2154 		while (pcg->pcg_avail != 0) {
2155 			pc->pc_nitems--;
2156 			object = pcg_get(pcg, NULL);
2157 			if (pc->pc_dtor != NULL)
2158 				(*pc->pc_dtor)(pc->pc_arg, object);
2159 			pool_do_put(pc->pc_pool, object, pq);
2160 		}
2161 		pc->pc_ngroups--;
2162 		LIST_REMOVE(pcg, pcg_list);
2163 		LIST_INSERT_HEAD(pcgdl, pcg, pcg_list);
2164 	}
2165 }
2166 
2167 static void
2168 pool_do_cache_invalidate(struct pool_cache *pc, struct pool_pagelist *pq,
2169     struct pool_cache_grouplist *pcgl)
2170 {
2171 
2172 	LOCK_ASSERT(simple_lock_held(&pc->pc_slock));
2173 	LOCK_ASSERT(simple_lock_held(&pc->pc_pool->pr_slock));
2174 
2175 	pool_do_cache_invalidate_grouplist(&pc->pc_fullgroups, pc, pq, pcgl);
2176 	pool_do_cache_invalidate_grouplist(&pc->pc_partgroups, pc, pq, pcgl);
2177 
2178 	KASSERT(LIST_EMPTY(&pc->pc_partgroups));
2179 	KASSERT(LIST_EMPTY(&pc->pc_fullgroups));
2180 	KASSERT(pc->pc_nitems == 0);
2181 }
2182 
2183 /*
2184  * pool_cache_invalidate:
2185  *
2186  *	Invalidate a pool cache (destruct and release all of the
2187  *	cached objects).
2188  */
2189 void
2190 pool_cache_invalidate(struct pool_cache *pc)
2191 {
2192 	struct pool_pagelist pq;
2193 	struct pool_cache_grouplist pcgl;
2194 
2195 	LIST_INIT(&pq);
2196 	LIST_INIT(&pcgl);
2197 
2198 	simple_lock(&pc->pc_slock);
2199 	simple_lock(&pc->pc_pool->pr_slock);
2200 
2201 	pool_do_cache_invalidate(pc, &pq, &pcgl);
2202 
2203 	simple_unlock(&pc->pc_pool->pr_slock);
2204 	simple_unlock(&pc->pc_slock);
2205 
2206 	pr_pagelist_free(pc->pc_pool, &pq);
2207 	pcg_grouplist_free(&pcgl);
2208 }
2209 
2210 /*
2211  * pool_cache_reclaim:
2212  *
2213  *	Reclaim a pool cache for pool_reclaim().
2214  */
2215 static void
2216 pool_cache_reclaim(struct pool_cache *pc, struct pool_pagelist *pq,
2217     struct pool_cache_grouplist *pcgl)
2218 {
2219 
2220 	/*
2221 	 * We're locking in the wrong order (normally pool_cache -> pool,
2222 	 * but the pool is already locked when we get here), so we have
2223 	 * to use trylock.  If we can't lock the pool_cache we simply skip
2224 	 * it; reclamation is opportunistic, so missing one cache is harmless.
2225 	 */
2226 	if (simple_lock_try(&pc->pc_slock) == 0)
2227 		return;
2228 
2229 	pool_do_cache_invalidate(pc, pq, pcgl);
2230 
2231 	simple_unlock(&pc->pc_slock);
2232 }
2233 
2234 /*
2235  * Pool backend allocators.
2236  *
2237  * Each pool has a backend allocator that handles allocation, deallocation,
2238  * and any additional draining that might be needed.
2239  *
2240  * We provide two standard allocators:
2241  *
2242  *	pool_allocator_kmem - the default when no allocator is specified
2243  *
2244  *	pool_allocator_nointr - used for pools that will not be accessed
2245  *	in interrupt context.
2246  */
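/*
 * Sketch of a client-supplied allocator: the positional fields are the
 * alloc hook, the free hook and the page size (0 selects the platform
 * default), mirroring the initializers below.  my_alloc(), my_free()
 * and my_map are hypothetical names.
 */
#if 0	/* example only */
static void *
my_alloc(struct pool *pp, int flags)
{
	boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;

	return ((void *) uvm_km_alloc_poolpage(my_map, waitok));
}

static void
my_free(struct pool *pp, void *v)
{

	uvm_km_free_poolpage(my_map, (vaddr_t) v);
}

static struct pool_allocator my_allocator = {
	my_alloc, my_free, 0,
};
#endif
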
2247 void	*pool_page_alloc(struct pool *, int);
2248 void	pool_page_free(struct pool *, void *);
2249 
2250 #ifdef POOL_SUBPAGE
2251 struct pool_allocator pool_allocator_kmem_fullpage = {
2252 	pool_page_alloc, pool_page_free, 0,
2253 	.pa_backingmapptr = &kmem_map,
2254 };
2255 #else
2256 struct pool_allocator pool_allocator_kmem = {
2257 	pool_page_alloc, pool_page_free, 0,
2258 	.pa_backingmapptr = &kmem_map,
2259 };
2260 #endif
2261 
2262 void	*pool_page_alloc_nointr(struct pool *, int);
2263 void	pool_page_free_nointr(struct pool *, void *);
2264 
2265 #ifdef POOL_SUBPAGE
2266 struct pool_allocator pool_allocator_nointr_fullpage = {
2267 	pool_page_alloc_nointr, pool_page_free_nointr, 0,
2268 	.pa_backingmapptr = &kernel_map,
2269 };
2270 #else
2271 struct pool_allocator pool_allocator_nointr = {
2272 	pool_page_alloc_nointr, pool_page_free_nointr, 0,
2273 	.pa_backingmapptr = &kernel_map,
2274 };
2275 #endif
2276 
2277 #ifdef POOL_SUBPAGE
2278 void	*pool_subpage_alloc(struct pool *, int);
2279 void	pool_subpage_free(struct pool *, void *);
2280 
2281 struct pool_allocator pool_allocator_kmem = {
2282 	pool_subpage_alloc, pool_subpage_free, POOL_SUBPAGE,
2283 	.pa_backingmapptr = &kmem_map,
2284 };
2285 
2286 void	*pool_subpage_alloc_nointr(struct pool *, int);
2287 void	pool_subpage_free_nointr(struct pool *, void *);
2288 
2289 struct pool_allocator pool_allocator_nointr = {
2290 	pool_subpage_alloc_nointr, pool_subpage_free_nointr, POOL_SUBPAGE,
2291 	.pa_backingmapptr = &kmem_map,
2292 };
2293 #endif /* POOL_SUBPAGE */
2294 
2295 static void *
2296 pool_allocator_alloc(struct pool *pp, int flags)
2297 {
2298 	struct pool_allocator *pa = pp->pr_alloc;
2299 	void *res;
2300 
2301 	LOCK_ASSERT(!simple_lock_held(&pp->pr_slock));
2302 
2303 	res = (*pa->pa_alloc)(pp, flags);
2304 	if (res == NULL && (flags & PR_WAITOK) == 0) {
2305 		/*
2306 		 * We only run the drain hook here if PR_NOWAIT.
2307 		 * In other cases, the hook will be run in
2308 		 * pool_reclaim().
2309 		 */
2310 		if (pp->pr_drain_hook != NULL) {
2311 			(*pp->pr_drain_hook)(pp->pr_drain_hook_arg, flags);
2312 			res = (*pa->pa_alloc)(pp, flags);
2313 		}
2314 	}
2315 	return res;
2316 }
2317 
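/*
 * Drain-hook sketch: a subsystem that can shed memory on demand
 * registers a hook with pool_set_drain_hook(), and the retry logic
 * above then runs it when a PR_NOWAIT allocation fails.  foo_drain()
 * is hypothetical.
 */
#if 0	/* example only */
static void
foo_drain(void *arg, int flags)
{

	/* Release whatever cached foo memory can be spared. */
}

/* At initialization: pool_set_drain_hook(&foo_pool, foo_drain, NULL); */
#endif
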
2318 static void
2319 pool_allocator_free(struct pool *pp, void *v)
2320 {
2321 	struct pool_allocator *pa = pp->pr_alloc;
2322 
2323 	LOCK_ASSERT(!simple_lock_held(&pp->pr_slock));
2324 
2325 	(*pa->pa_free)(pp, v);
2326 }
2327 
2328 void *
2329 pool_page_alloc(struct pool *pp, int flags)
2330 {
2331 	boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;
2332 
2333 	return ((void *) uvm_km_alloc_poolpage_cache(kmem_map, waitok));
2334 }
2335 
2336 void
2337 pool_page_free(struct pool *pp, void *v)
2338 {
2339 
2340 	uvm_km_free_poolpage_cache(kmem_map, (vaddr_t) v);
2341 }
2342 
2343 static void *
2344 pool_page_alloc_meta(struct pool *pp, int flags)
2345 {
2346 	boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;
2347 
2348 	return ((void *) uvm_km_alloc_poolpage(kmem_map, waitok));
2349 }
2350 
2351 static void
2352 pool_page_free_meta(struct pool *pp, void *v)
2353 {
2354 
2355 	uvm_km_free_poolpage(kmem_map, (vaddr_t) v);
2356 }
2357 
2358 #ifdef POOL_SUBPAGE
2359 /* Sub-page allocator, for machines with large hardware pages. */
2360 void *
2361 pool_subpage_alloc(struct pool *pp, int flags)
2362 {
2363 	void *v;
2364 	int s;
2365 	s = splvm();
2366 	v = pool_get(&psppool, flags);
2367 	splx(s);
2368 	return v;
2369 }
2370 
2371 void
2372 pool_subpage_free(struct pool *pp, void *v)
2373 {
2374 	int s;
2375 	s = splvm();
2376 	pool_put(&psppool, v);
2377 	splx(s);
2378 }
2379 
2380 /* We don't provide a real nointr allocator.  Maybe later. */
2381 void *
2382 pool_subpage_alloc_nointr(struct pool *pp, int flags)
2383 {
2384 
2385 	return (pool_subpage_alloc(pp, flags));
2386 }
2387 
2388 void
2389 pool_subpage_free_nointr(struct pool *pp, void *v)
2390 {
2391 
2392 	pool_subpage_free(pp, v);
2393 }
2394 #endif /* POOL_SUBPAGE */
2395 void *
2396 pool_page_alloc_nointr(struct pool *pp, int flags)
2397 {
2398 	boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;
2399 
2400 	return ((void *) uvm_km_alloc_poolpage_cache(kernel_map, waitok));
2401 }
2402 
2403 void
2404 pool_page_free_nointr(struct pool *pp, void *v)
2405 {
2406 
2407 	uvm_km_free_poolpage_cache(kernel_map, (vaddr_t) v);
2408 }
2409