/*	$NetBSD: subr_pool.c,v 1.95 2004/05/20 05:08:29 atatat Exp $	*/

/*-
 * Copyright (c) 1997, 1999, 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Paul Kranenburg; by Jason R. Thorpe of the Numerical Aerospace
 * Simulation Facility, NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.95 2004/05/20 05:08:29 atatat Exp $");

#include "opt_pool.h"
#include "opt_poollog.h"
#include "opt_lockdebug.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/pool.h>
#include <sys/syslog.h>

#include <uvm/uvm.h>

/*
 * Pool resource management utility.
 *
 * Memory is allocated in pages which are split into pieces according to
 * the pool item size. Each page is kept on one of three lists in the
 * pool structure: `pr_emptypages', `pr_fullpages' and `pr_partpages',
 * for empty, full and partially-full pages respectively. The individual
 * pool items are on a linked list headed by `ph_itemlist' in each page
 * header. The memory for building the page list is either taken from
 * the allocated pages themselves (for small pool items) or taken from
 * an internal pool of page headers (`phpool').
 */
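
/*
 * Example (an illustrative sketch; `struct foo', `foo_pool' and the
 * spl choice are hypothetical): a typical subsystem creates a pool
 * once at attach/init time, then gets and puts items at its usual
 * protection level:
 *
 *	static struct pool foo_pool;
 *
 *	pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0, "foopl", NULL);
 *
 *	s = splvm();
 *	fp = pool_get(&foo_pool, PR_WAITOK);
 *	splx(s);
 *	...
 *	s = splvm();
 *	pool_put(&foo_pool, fp);
 *	splx(s);
 */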

/* List of all pools */
TAILQ_HEAD(,pool) pool_head = TAILQ_HEAD_INITIALIZER(pool_head);

/* Private pool for page header structures */
static struct pool phpool;

#ifdef POOL_SUBPAGE
/* Pool of subpages for use by normal pools. */
static struct pool psppool;
#endif

/* # of seconds to retain page after last use */
int pool_inactive_time = 10;

/* Next candidate for drainage (see pool_drain()) */
static struct pool	*drainpp;

/* This spin lock protects both pool_head and drainpp. */
struct simplelock pool_head_slock = SIMPLELOCK_INITIALIZER;

struct pool_item_header {
	/* Page headers */
	LIST_ENTRY(pool_item_header)
				ph_pagelist;	/* pool page list */
	TAILQ_HEAD(,pool_item)	ph_itemlist;	/* chunk list for this page */
	SPLAY_ENTRY(pool_item_header)
				ph_node;	/* Off-page page headers */
	unsigned int		ph_nmissing;	/* # of chunks in use */
	caddr_t			ph_page;	/* this page's address */
	struct timeval		ph_time;	/* last referenced */
};

struct pool_item {
#ifdef DIAGNOSTIC
	u_int pi_magic;
#endif
#define	PI_MAGIC 0xdeadbeefU
	/* Other entries use only this list entry */
	TAILQ_ENTRY(pool_item)	pi_list;
};

#define	POOL_NEEDS_CATCHUP(pp)						\
	((pp)->pr_nitems < (pp)->pr_minitems)

/*
 * Pool cache management.
 *
 * Pool caches provide a way for constructed objects to be cached by the
 * pool subsystem.  This can lead to performance improvements by avoiding
 * needless object construction/destruction; it is deferred until absolutely
 * necessary.
 *
 * Caches are grouped into cache groups.  Each cache group references
 * up to 16 constructed objects.  When a cache allocates an object
 * from the pool, it calls the object's constructor and places it into
 * a cache group.  When a cache group frees an object back to the pool,
 * it first calls the object's destructor.  This allows the object to
 * persist in constructed form while freed to the cache.
 *
 * Multiple caches may exist for each pool.  This allows a single
 * object type to have multiple constructed forms.  The pool references
 * each cache, so that when a pool is drained by the pagedaemon, it can
 * drain each individual cache as well.  Each time a cache is drained,
 * the most idle cache group is freed to the pool in its entirety.
 *
 * Pool caches are laid on top of pools.  By layering them, we can avoid
 * the complexity of cache management for pools which would not benefit
 * from it.
 */
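
/*
 * Example (an illustrative sketch; the `foo' names are hypothetical):
 * a subsystem layers a cache over an existing pool by supplying a
 * constructor and destructor, then allocates through the cache:
 *
 *	static struct pool foo_pool;
 *	static struct pool_cache foo_cache;
 *
 *	int  foo_ctor(void *arg, void *object, int flags);
 *	void foo_dtor(void *arg, void *object);
 *
 *	pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0, "foopl", NULL);
 *	pool_cache_init(&foo_cache, &foo_pool, foo_ctor, foo_dtor, NULL);
 *
 *	fp = pool_cache_get(&foo_cache, PR_WAITOK);
 *	...
 *	pool_cache_put(&foo_cache, fp);
 *
 * foo_ctor() runs only when the cache must fall back to pool_get();
 * objects recycled through the cache keep their constructed form.
 */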

/* The cache group pool. */
static struct pool pcgpool;

static void	pool_cache_reclaim(struct pool_cache *);

static int	pool_catchup(struct pool *);
static void	pool_prime_page(struct pool *, caddr_t,
		    struct pool_item_header *);
static void	pool_update_curpage(struct pool *);

void		*pool_allocator_alloc(struct pool *, int);
void		pool_allocator_free(struct pool *, void *);

static void pool_print_pagelist(struct pool_pagelist *,
	void (*)(const char *, ...));
static void pool_print1(struct pool *, const char *,
	void (*)(const char *, ...));

static int pool_chk_page(struct pool *, const char *,
			 struct pool_item_header *);

/*
 * Pool log entry. An array of these is allocated in pool_init().
 */
struct pool_log {
	const char	*pl_file;
	long		pl_line;
	int		pl_action;
#define	PRLOG_GET	1
#define	PRLOG_PUT	2
	void		*pl_addr;
};

#ifdef POOL_DIAGNOSTIC
/* Number of entries in pool log buffers */
#ifndef POOL_LOGSIZE
#define	POOL_LOGSIZE	10
#endif

int pool_logsize = POOL_LOGSIZE;

static __inline void
pr_log(struct pool *pp, void *v, int action, const char *file, long line)
{
	int n = pp->pr_curlogentry;
	struct pool_log *pl;

	if ((pp->pr_roflags & PR_LOGGING) == 0)
		return;

	/*
	 * Fill in the current entry. Wrap around and overwrite
	 * the oldest entry if necessary.
	 */
	pl = &pp->pr_log[n];
	pl->pl_file = file;
	pl->pl_line = line;
	pl->pl_action = action;
	pl->pl_addr = v;
	if (++n >= pp->pr_logsize)
		n = 0;
	pp->pr_curlogentry = n;
}

static void
pr_printlog(struct pool *pp, struct pool_item *pi,
    void (*pr)(const char *, ...))
{
	int i = pp->pr_logsize;
	int n = pp->pr_curlogentry;

	if ((pp->pr_roflags & PR_LOGGING) == 0)
		return;

	/*
	 * Print all entries in this pool's log.
	 */
	while (i-- > 0) {
		struct pool_log *pl = &pp->pr_log[n];
		if (pl->pl_action != 0) {
			if (pi == NULL || pi == pl->pl_addr) {
				(*pr)("\tlog entry %d:\n", i);
				(*pr)("\t\taction = %s, addr = %p\n",
				    pl->pl_action == PRLOG_GET ? "get" : "put",
				    pl->pl_addr);
				(*pr)("\t\tfile: %s at line %ld\n",
				    pl->pl_file, pl->pl_line);
			}
		}
		if (++n >= pp->pr_logsize)
			n = 0;
	}
}

static __inline void
pr_enter(struct pool *pp, const char *file, long line)
{

	if (__predict_false(pp->pr_entered_file != NULL)) {
		printf("pool %s: reentrancy at file %s line %ld\n",
		    pp->pr_wchan, file, line);
		printf("         previous entry at file %s line %ld\n",
		    pp->pr_entered_file, pp->pr_entered_line);
		panic("pr_enter");
	}

	pp->pr_entered_file = file;
	pp->pr_entered_line = line;
}

static __inline void
pr_leave(struct pool *pp)
{

	if (__predict_false(pp->pr_entered_file == NULL)) {
		printf("pool %s not entered?\n", pp->pr_wchan);
		panic("pr_leave");
	}

	pp->pr_entered_file = NULL;
	pp->pr_entered_line = 0;
}

static __inline void
pr_enter_check(struct pool *pp, void (*pr)(const char *, ...))
{

	if (pp->pr_entered_file != NULL)
		(*pr)("\n\tcurrently entered from file %s line %ld\n",
		    pp->pr_entered_file, pp->pr_entered_line);
}
#else
#define	pr_log(pp, v, action, file, line)
#define	pr_printlog(pp, pi, pr)
#define	pr_enter(pp, file, line)
#define	pr_leave(pp)
#define	pr_enter_check(pp, pr)
#endif /* POOL_DIAGNOSTIC */

static __inline int
phtree_compare(struct pool_item_header *a, struct pool_item_header *b)
{
	if (a->ph_page < b->ph_page)
		return (-1);
	else if (a->ph_page > b->ph_page)
		return (1);
	else
		return (0);
}

SPLAY_PROTOTYPE(phtree, pool_item_header, ph_node, phtree_compare);
SPLAY_GENERATE(phtree, pool_item_header, ph_node, phtree_compare);

/*
 * Return the pool page header based on page address.
 */
static __inline struct pool_item_header *
pr_find_pagehead(struct pool *pp, caddr_t page)
{
	struct pool_item_header *ph, tmp;

	if ((pp->pr_roflags & PR_PHINPAGE) != 0)
		return ((struct pool_item_header *)(page + pp->pr_phoffset));

	tmp.ph_page = page;
	ph = SPLAY_FIND(phtree, &pp->pr_phtree, &tmp);
	return ph;
}
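
/*
 * For example (illustrative numbers): with 4096-byte pages,
 * pa_pagemask is ~0xfff, so an item at 0xc12345a0 belongs to the
 * page at 0xc1234000.  With PR_PHINPAGE the header then sits at
 * 0xc1234000 + pr_phoffset; otherwise it is found by the splay
 * tree lookup above.
 */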

/*
 * Remove a page from the pool.
 */
static __inline void
pr_rmpage(struct pool *pp, struct pool_item_header *ph,
     struct pool_pagelist *pq)
{
	int s;

	LOCK_ASSERT(!simple_lock_held(&pp->pr_slock) || pq != NULL);

	/*
	 * If the page was idle, decrement the idle page count.
	 */
	if (ph->ph_nmissing == 0) {
#ifdef DIAGNOSTIC
		if (pp->pr_nidle == 0)
			panic("pr_rmpage: nidle inconsistent");
		if (pp->pr_nitems < pp->pr_itemsperpage)
			panic("pr_rmpage: nitems inconsistent");
#endif
		pp->pr_nidle--;
	}

	pp->pr_nitems -= pp->pr_itemsperpage;

	/*
	 * Unlink a page from the pool and release it (or queue it for release).
	 */
	LIST_REMOVE(ph, ph_pagelist);
	if ((pp->pr_roflags & PR_PHINPAGE) == 0)
		SPLAY_REMOVE(phtree, &pp->pr_phtree, ph);
	if (pq) {
		LIST_INSERT_HEAD(pq, ph, ph_pagelist);
	} else {
		pool_allocator_free(pp, ph->ph_page);
		if ((pp->pr_roflags & PR_PHINPAGE) == 0) {
			s = splvm();
			pool_put(&phpool, ph);
			splx(s);
		}
	}
	pp->pr_npages--;
	pp->pr_npagefree++;

	pool_update_curpage(pp);
}

/*
 * Initialize all the pools listed in the "pools" link set.
 */
void
link_pool_init(void)
{
	__link_set_decl(pools, struct link_pool_init);
	struct link_pool_init * const *pi;

	__link_set_foreach(pi, pools)
		pool_init((*pi)->pp, (*pi)->size, (*pi)->align,
		    (*pi)->align_offset, (*pi)->flags, (*pi)->wchan,
		    (*pi)->palloc);
}
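
/*
 * Example (a sketch, assuming the POOL_INIT() convenience macro
 * from <sys/pool.h> that populates this link set): a driver can
 * declare a static pool that link_pool_init() will set up at boot:
 *
 *	POOL_INIT(foo_pool, sizeof(struct foo), 0, 0, 0, "foopl", NULL);
 */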

/*
 * Initialize the given pool resource structure.
 *
 * We export this routine to allow other kernel parts to declare
 * static pools that must be initialized before malloc() is available.
 */
void
pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags,
    const char *wchan, struct pool_allocator *palloc)
{
	int off, slack;
	size_t trysize, phsize;
	int s;

#ifdef POOL_DIAGNOSTIC
	/*
	 * Always log if POOL_DIAGNOSTIC is defined.
	 */
	if (pool_logsize != 0)
		flags |= PR_LOGGING;
#endif

#ifdef POOL_SUBPAGE
	/*
	 * XXX We don't provide a real `nointr' back-end
	 * yet; all sub-pages come from a kmem back-end.
	 * Maybe some day...
	 */
	if (palloc == NULL) {
		extern struct pool_allocator pool_allocator_kmem_subpage;
		palloc = &pool_allocator_kmem_subpage;
	}
	/*
	 * We'll assume any user-specified back-end allocator
	 * will deal with sub-pages, or simply doesn't care.
	 */
#else
	if (palloc == NULL)
		palloc = &pool_allocator_kmem;
#endif /* POOL_SUBPAGE */
	if ((palloc->pa_flags & PA_INITIALIZED) == 0) {
		if (palloc->pa_pagesz == 0) {
#ifdef POOL_SUBPAGE
			if (palloc == &pool_allocator_kmem)
				palloc->pa_pagesz = PAGE_SIZE;
			else
				palloc->pa_pagesz = POOL_SUBPAGE;
#else
			palloc->pa_pagesz = PAGE_SIZE;
#endif /* POOL_SUBPAGE */
		}

		TAILQ_INIT(&palloc->pa_list);

		simple_lock_init(&palloc->pa_slock);
		palloc->pa_pagemask = ~(palloc->pa_pagesz - 1);
		palloc->pa_pageshift = ffs(palloc->pa_pagesz) - 1;
		palloc->pa_flags |= PA_INITIALIZED;
	}

	if (align == 0)
		align = ALIGN(1);

	if (size < sizeof(struct pool_item))
		size = sizeof(struct pool_item);

	size = roundup(size, align);
#ifdef DIAGNOSTIC
	if (size > palloc->pa_pagesz)
		panic("pool_init: pool item size (%lu) too large",
		      (u_long)size);
#endif

	/*
	 * Initialize the pool structure.
	 */
	LIST_INIT(&pp->pr_emptypages);
	LIST_INIT(&pp->pr_fullpages);
	LIST_INIT(&pp->pr_partpages);
	TAILQ_INIT(&pp->pr_cachelist);
	pp->pr_curpage = NULL;
	pp->pr_npages = 0;
	pp->pr_minitems = 0;
	pp->pr_minpages = 0;
	pp->pr_maxpages = UINT_MAX;
	pp->pr_roflags = flags;
	pp->pr_flags = 0;
	pp->pr_size = size;
	pp->pr_align = align;
	pp->pr_wchan = wchan;
	pp->pr_alloc = palloc;
	pp->pr_nitems = 0;
	pp->pr_nout = 0;
	pp->pr_hardlimit = UINT_MAX;
	pp->pr_hardlimit_warning = NULL;
	pp->pr_hardlimit_ratecap.tv_sec = 0;
	pp->pr_hardlimit_ratecap.tv_usec = 0;
	pp->pr_hardlimit_warning_last.tv_sec = 0;
	pp->pr_hardlimit_warning_last.tv_usec = 0;
	pp->pr_drain_hook = NULL;
	pp->pr_drain_hook_arg = NULL;

	/*
	 * Decide whether to put the page header off-page, to avoid
	 * wasting too large a part of the page on the header itself
	 * or, for big items, wasting a whole item slot.  Off-page
	 * page headers go into a splay tree, so we can match a
	 * returned item with its header based on the page address.
	 * We use 1/16 of the page size and about 8 times the item
	 * size as the thresholds (XXX: tune).
	 *
	 * However, we'll put the header into the page if we can put
	 * it there without wasting any items.
	 *
	 * Silently enforce `0 <= ioff < align'.
	 */
	pp->pr_itemoffset = ioff %= align;
	/* See the comment below about reserved bytes. */
	trysize = palloc->pa_pagesz - ((align - ioff) % align);
	phsize = ALIGN(sizeof(struct pool_item_header));
	if (pp->pr_size < MIN(palloc->pa_pagesz / 16, phsize << 3) ||
	    trysize / pp->pr_size == (trysize - phsize) / pp->pr_size) {
		/* Use the end of the page for the page header */
		pp->pr_roflags |= PR_PHINPAGE;
		pp->pr_phoffset = off = palloc->pa_pagesz - phsize;
	} else {
		/* The page header will be taken from our page header pool */
		pp->pr_phoffset = 0;
		off = palloc->pa_pagesz;
		SPLAY_INIT(&pp->pr_phtree);
	}
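
	/*
	 * Worked example (illustrative numbers; 4096-byte pages, ioff 0,
	 * phsize of, say, 48 bytes): a 64-byte item is below the
	 * MIN(4096 / 16, 48 << 3) == 256 byte threshold, so its header
	 * goes in the page.  A 1024-byte item fails the threshold, and
	 * reserving the header in the page would drop the item count
	 * from 4096/1024 == 4 to 4048/1024 == 3, so its header comes
	 * from phpool instead.  A 2000-byte item also fails the
	 * threshold, but 4096/2000 == 4048/2000 == 2, so its header
	 * fits in otherwise-wasted slack and stays in the page.
	 */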

	/*
	 * Alignment is to take place at `ioff' within the item. This means
	 * we must reserve up to `align - 1' bytes on the page to allow
	 * appropriate positioning of each item.
	 */
	pp->pr_itemsperpage = (off - ((align - ioff) % align)) / pp->pr_size;
	KASSERT(pp->pr_itemsperpage != 0);

	/*
	 * Use the slack between the chunks and the page header
	 * for "cache coloring".
	 */
	slack = off - pp->pr_itemsperpage * pp->pr_size;
	pp->pr_maxcolor = (slack / align) * align;
	pp->pr_curcolor = 0;

	pp->pr_nget = 0;
	pp->pr_nfail = 0;
	pp->pr_nput = 0;
	pp->pr_npagealloc = 0;
	pp->pr_npagefree = 0;
	pp->pr_hiwat = 0;
	pp->pr_nidle = 0;

#ifdef POOL_DIAGNOSTIC
	if (flags & PR_LOGGING) {
		if (kmem_map == NULL ||
		    (pp->pr_log = malloc(pool_logsize * sizeof(struct pool_log),
		     M_TEMP, M_NOWAIT)) == NULL)
			pp->pr_roflags &= ~PR_LOGGING;
		pp->pr_curlogentry = 0;
		pp->pr_logsize = pool_logsize;
	}
#endif

	pp->pr_entered_file = NULL;
	pp->pr_entered_line = 0;

	simple_lock_init(&pp->pr_slock);

	/*
	 * Initialize private page header pool and cache magazine pool if we
	 * haven't done so yet.
	 * XXX LOCKING.
	 */
	if (phpool.pr_size == 0) {
#ifdef POOL_SUBPAGE
		pool_init(&phpool, sizeof(struct pool_item_header), 0, 0, 0,
		    "phpool", &pool_allocator_kmem);
		pool_init(&psppool, POOL_SUBPAGE, POOL_SUBPAGE, 0,
		    PR_RECURSIVE, "psppool", &pool_allocator_kmem);
#else
		pool_init(&phpool, sizeof(struct pool_item_header), 0, 0,
		    0, "phpool", NULL);
#endif
		pool_init(&pcgpool, sizeof(struct pool_cache_group), 0, 0,
		    0, "pcgpool", NULL);
	}

	/* Insert into the list of all pools. */
	simple_lock(&pool_head_slock);
	TAILQ_INSERT_TAIL(&pool_head, pp, pr_poollist);
	simple_unlock(&pool_head_slock);

	/* Insert this into the list of pools using this allocator. */
	s = splvm();
	simple_lock(&palloc->pa_slock);
	TAILQ_INSERT_TAIL(&palloc->pa_list, pp, pr_alloc_list);
	simple_unlock(&palloc->pa_slock);
	splx(s);
}

/*
 * Decommission a pool resource.
 */
void
pool_destroy(struct pool *pp)
{
	struct pool_item_header *ph;
	struct pool_cache *pc;
	int s;

	/* Locking order: pool_allocator -> pool */
	s = splvm();
	simple_lock(&pp->pr_alloc->pa_slock);
	TAILQ_REMOVE(&pp->pr_alloc->pa_list, pp, pr_alloc_list);
	simple_unlock(&pp->pr_alloc->pa_slock);
	splx(s);

	/* Destroy all caches for this pool. */
	while ((pc = TAILQ_FIRST(&pp->pr_cachelist)) != NULL)
		pool_cache_destroy(pc);

#ifdef DIAGNOSTIC
	if (pp->pr_nout != 0) {
		pr_printlog(pp, NULL, printf);
		panic("pool_destroy: pool busy: still out: %u",
		    pp->pr_nout);
	}
#endif

	/* Remove all pages */
	while ((ph = LIST_FIRST(&pp->pr_emptypages)) != NULL)
		pr_rmpage(pp, ph, NULL);
	KASSERT(LIST_EMPTY(&pp->pr_fullpages));
	KASSERT(LIST_EMPTY(&pp->pr_partpages));

	/* Remove from global pool list */
	simple_lock(&pool_head_slock);
	TAILQ_REMOVE(&pool_head, pp, pr_poollist);
	if (drainpp == pp) {
		drainpp = NULL;
	}
	simple_unlock(&pool_head_slock);

#ifdef POOL_DIAGNOSTIC
	if ((pp->pr_roflags & PR_LOGGING) != 0)
		free(pp->pr_log, M_TEMP);
#endif
}

void
pool_set_drain_hook(struct pool *pp, void (*fn)(void *, int), void *arg)
{

	/* XXX no locking -- must be used just after pool_init() */
#ifdef DIAGNOSTIC
	if (pp->pr_drain_hook != NULL)
		panic("pool_set_drain_hook(%s): already set", pp->pr_wchan);
#endif
	pp->pr_drain_hook = fn;
	pp->pr_drain_hook_arg = arg;
}
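
/*
 * Example (a sketch; `foo_drain' and `sc' are hypothetical): a
 * subsystem that can release items on demand registers its hook
 * right after pool_init():
 *
 *	static void
 *	foo_drain(void *arg, int flags)
 *	{
 *		struct foo_softc *sc = arg;
 *
 *		(release cached foo items back to foo_pool)
 *	}
 *
 *	pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0, "foopl", NULL);
 *	pool_set_drain_hook(&foo_pool, foo_drain, sc);
 */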

static struct pool_item_header *
pool_alloc_item_header(struct pool *pp, caddr_t storage, int flags)
{
	struct pool_item_header *ph;
	int s;

	LOCK_ASSERT(simple_lock_held(&pp->pr_slock) == 0);

	if ((pp->pr_roflags & PR_PHINPAGE) != 0)
		ph = (struct pool_item_header *) (storage + pp->pr_phoffset);
	else {
		s = splvm();
		ph = pool_get(&phpool, flags);
		splx(s);
	}

	return (ph);
}

/*
 * Grab an item from the pool; must be called at appropriate spl level
 */
void *
#ifdef POOL_DIAGNOSTIC
_pool_get(struct pool *pp, int flags, const char *file, long line)
#else
pool_get(struct pool *pp, int flags)
#endif
{
	struct pool_item *pi;
	struct pool_item_header *ph;
	void *v;

#ifdef DIAGNOSTIC
	if (__predict_false(pp->pr_itemsperpage == 0))
		panic("pool_get: pool %p: pr_itemsperpage is zero, "
		    "pool not initialized?", pp);
	if (__predict_false(curlwp == NULL && doing_shutdown == 0 &&
			    (flags & PR_WAITOK) != 0))
		panic("pool_get: %s: must have NOWAIT", pp->pr_wchan);

#ifdef LOCKDEBUG
	if (flags & PR_WAITOK)
		simple_lock_only_held(NULL, "pool_get(PR_WAITOK)");
#endif
#endif /* DIAGNOSTIC */

	simple_lock(&pp->pr_slock);
	pr_enter(pp, file, line);

 startover:
	/*
	 * Check to see if we've reached the hard limit.  If we have,
	 * and we can wait, then wait until an item has been returned to
	 * the pool.
	 */
#ifdef DIAGNOSTIC
	if (__predict_false(pp->pr_nout > pp->pr_hardlimit)) {
		pr_leave(pp);
		simple_unlock(&pp->pr_slock);
		panic("pool_get: %s: crossed hard limit", pp->pr_wchan);
	}
#endif
	if (__predict_false(pp->pr_nout == pp->pr_hardlimit)) {
		if (pp->pr_drain_hook != NULL) {
			/*
			 * Since the drain hook is going to free things
			 * back to the pool, unlock, call the hook, re-lock,
			 * and check the hardlimit condition again.
			 */
			pr_leave(pp);
			simple_unlock(&pp->pr_slock);
			(*pp->pr_drain_hook)(pp->pr_drain_hook_arg, flags);
			simple_lock(&pp->pr_slock);
			pr_enter(pp, file, line);
			if (pp->pr_nout < pp->pr_hardlimit)
				goto startover;
		}

		if ((flags & PR_WAITOK) && !(flags & PR_LIMITFAIL)) {
			/*
			 * XXX: A warning isn't logged in this case.  Should
			 * it be?
			 */
			pp->pr_flags |= PR_WANTED;
			pr_leave(pp);
			ltsleep(pp, PSWP, pp->pr_wchan, 0, &pp->pr_slock);
			pr_enter(pp, file, line);
			goto startover;
		}

		/*
		 * Log a message that the hard limit has been hit.
		 */
		if (pp->pr_hardlimit_warning != NULL &&
		    ratecheck(&pp->pr_hardlimit_warning_last,
			      &pp->pr_hardlimit_ratecap))
			log(LOG_ERR, "%s\n", pp->pr_hardlimit_warning);

		pp->pr_nfail++;

		pr_leave(pp);
		simple_unlock(&pp->pr_slock);
		return (NULL);
	}

	/*
	 * The convention we use is that if `curpage' is not NULL, then
	 * it points at a non-empty bucket. In particular, `curpage'
	 * never points at a page header which has PR_PHINPAGE set and
	 * has no items in its bucket.
	 */
	if ((ph = pp->pr_curpage) == NULL) {
#ifdef DIAGNOSTIC
		if (pp->pr_nitems != 0) {
			simple_unlock(&pp->pr_slock);
			printf("pool_get: %s: curpage NULL, nitems %u\n",
			    pp->pr_wchan, pp->pr_nitems);
			panic("pool_get: nitems inconsistent");
		}
#endif

		/*
		 * Call the back-end page allocator for more memory.
		 * Release the pool lock, as the back-end page allocator
		 * may block.
		 */
		pr_leave(pp);
		simple_unlock(&pp->pr_slock);
		v = pool_allocator_alloc(pp, flags);
		if (__predict_true(v != NULL))
			ph = pool_alloc_item_header(pp, v, flags);

		if (__predict_false(v == NULL || ph == NULL)) {
			if (v != NULL)
				pool_allocator_free(pp, v);

			simple_lock(&pp->pr_slock);
			pr_enter(pp, file, line);

			/*
			 * We were unable to allocate a page or item
			 * header, but we released the lock during
			 * allocation, so perhaps items were freed
			 * back to the pool.  Check for this case.
			 */
			if (pp->pr_curpage != NULL)
				goto startover;

			if ((flags & PR_WAITOK) == 0) {
				pp->pr_nfail++;
				pr_leave(pp);
				simple_unlock(&pp->pr_slock);
				return (NULL);
			}

			/*
			 * Wait for items to be returned to this pool.
			 *
			 * XXX: maybe we should wake up once a second and
			 * try again?
			 */
			pp->pr_flags |= PR_WANTED;
			/* PA_WANTED is already set on the allocator. */
			pr_leave(pp);
			ltsleep(pp, PSWP, pp->pr_wchan, 0, &pp->pr_slock);
			pr_enter(pp, file, line);
			goto startover;
		}

		/* We have more memory; add it to the pool */
		simple_lock(&pp->pr_slock);
		pr_enter(pp, file, line);
		pool_prime_page(pp, v, ph);
		pp->pr_npagealloc++;

		/* Start the allocation process over. */
		goto startover;
	}
	if (__predict_false((v = pi = TAILQ_FIRST(&ph->ph_itemlist)) == NULL)) {
		pr_leave(pp);
		simple_unlock(&pp->pr_slock);
		panic("pool_get: %s: page empty", pp->pr_wchan);
	}
#ifdef DIAGNOSTIC
	if (__predict_false(pp->pr_nitems == 0)) {
		pr_leave(pp);
		simple_unlock(&pp->pr_slock);
		printf("pool_get: %s: items on itemlist, nitems %u\n",
		    pp->pr_wchan, pp->pr_nitems);
		panic("pool_get: nitems inconsistent");
	}
#endif

#ifdef POOL_DIAGNOSTIC
	pr_log(pp, v, PRLOG_GET, file, line);
#endif

#ifdef DIAGNOSTIC
	if (__predict_false(pi->pi_magic != PI_MAGIC)) {
		pr_printlog(pp, pi, printf);
		panic("pool_get(%s): free list modified: magic=%x; page %p;"
		       " item addr %p\n",
			pp->pr_wchan, pi->pi_magic, ph->ph_page, pi);
	}
#endif

	/*
	 * Remove from item list.
	 */
	TAILQ_REMOVE(&ph->ph_itemlist, pi, pi_list);
	pp->pr_nitems--;
	pp->pr_nout++;
	if (ph->ph_nmissing == 0) {
#ifdef DIAGNOSTIC
		if (__predict_false(pp->pr_nidle == 0))
			panic("pool_get: nidle inconsistent");
#endif
		pp->pr_nidle--;

		/*
		 * This page was previously empty.  Move it to the list of
		 * partially-full pages.  This page is already curpage.
		 */
		LIST_REMOVE(ph, ph_pagelist);
		LIST_INSERT_HEAD(&pp->pr_partpages, ph, ph_pagelist);
	}
	ph->ph_nmissing++;
	if (TAILQ_EMPTY(&ph->ph_itemlist)) {
#ifdef DIAGNOSTIC
		if (__predict_false(ph->ph_nmissing != pp->pr_itemsperpage)) {
			pr_leave(pp);
			simple_unlock(&pp->pr_slock);
			panic("pool_get: %s: nmissing inconsistent",
			    pp->pr_wchan);
		}
#endif
		/*
		 * This page is now full.  Move it to the full list
		 * and select a new current page.
		 */
		LIST_REMOVE(ph, ph_pagelist);
		LIST_INSERT_HEAD(&pp->pr_fullpages, ph, ph_pagelist);
		pool_update_curpage(pp);
	}

	pp->pr_nget++;

	/*
	 * If we have a low water mark and we are now below that low
	 * water mark, add more items to the pool.
	 */
	if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
		/*
		 * XXX: Should we log a warning?  Should we set up a timeout
		 * to try again in a second or so?  The latter could break
		 * a caller's assumptions about interrupt protection, etc.
		 */
	}

	pr_leave(pp);
	simple_unlock(&pp->pr_slock);
	return (v);
}

/*
 * Internal version of pool_put().  Pool is already locked/entered.
 */
static void
pool_do_put(struct pool *pp, void *v)
{
	struct pool_item *pi = v;
	struct pool_item_header *ph;
	caddr_t page;
	int s;

	LOCK_ASSERT(simple_lock_held(&pp->pr_slock));

	page = (caddr_t)((u_long)v & pp->pr_alloc->pa_pagemask);

#ifdef DIAGNOSTIC
	if (__predict_false(pp->pr_nout == 0)) {
		printf("pool %s: putting with none out\n",
		    pp->pr_wchan);
		panic("pool_put");
	}
#endif

	if (__predict_false((ph = pr_find_pagehead(pp, page)) == NULL)) {
		pr_printlog(pp, NULL, printf);
		panic("pool_put: %s: page header missing", pp->pr_wchan);
	}

#ifdef LOCKDEBUG
	/*
	 * Check if we're freeing a locked simple lock.
	 */
	simple_lock_freecheck((caddr_t)pi, ((caddr_t)pi) + pp->pr_size);
#endif

	/*
	 * Return to item list.
	 */
#ifdef DIAGNOSTIC
	pi->pi_magic = PI_MAGIC;
#endif
#ifdef DEBUG
	{
		int i, *ip = v;

		for (i = 0; i < pp->pr_size / sizeof(int); i++) {
			*ip++ = PI_MAGIC;
		}
	}
#endif

	TAILQ_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
	KDASSERT(ph->ph_nmissing != 0);
	ph->ph_nmissing--;
	pp->pr_nput++;
	pp->pr_nitems++;
	pp->pr_nout--;

	/* Cancel "pool empty" condition if it exists */
	if (pp->pr_curpage == NULL)
		pp->pr_curpage = ph;

	if (pp->pr_flags & PR_WANTED) {
		pp->pr_flags &= ~PR_WANTED;
		if (ph->ph_nmissing == 0)
			pp->pr_nidle++;
		wakeup((caddr_t)pp);
		return;
	}

	/*
	 * If this page is now empty, do one of two things:
	 *
	 *	(1) If we have more pages than the page high water mark,
	 *	    or if we are flagged as immediately freeing back idle
	 *	    pages, free the page back to the system.  ONLY CONSIDER
	 *	    FREEING BACK A PAGE IF WE HAVE MORE THAN OUR MINIMUM PAGE
	 *	    CLAIM.
	 *
	 *	(2) Otherwise, move the page to the empty page list.
	 *
	 * Either way, select a new current page (so we use a partially-full
	 * page if one is available).
	 */
	if (ph->ph_nmissing == 0) {
		pp->pr_nidle++;
		if (pp->pr_npages > pp->pr_minpages &&
		    (pp->pr_npages > pp->pr_maxpages ||
		     (pp->pr_roflags & PR_IMMEDRELEASE) != 0 ||
		     (pp->pr_alloc->pa_flags & PA_WANT) != 0)) {
			simple_unlock(&pp->pr_slock);
			pr_rmpage(pp, ph, NULL);
			simple_lock(&pp->pr_slock);
		} else {
			LIST_REMOVE(ph, ph_pagelist);
			LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist);

			/*
			 * Update the timestamp on the page.  A page must
			 * be idle for some period of time before it can
			 * be reclaimed by the pagedaemon.  This minimizes
			 * ping-pong'ing for memory.
			 */
			s = splclock();
			ph->ph_time = mono_time;
			splx(s);
		}
		pool_update_curpage(pp);
	}

	/*
	 * If the page was previously completely full, move it to the
	 * partially-full list and make it the current page.  The next
	 * allocation will get the item from this page, instead of
	 * further fragmenting the pool.
	 */
	else if (ph->ph_nmissing == (pp->pr_itemsperpage - 1)) {
		LIST_REMOVE(ph, ph_pagelist);
		LIST_INSERT_HEAD(&pp->pr_partpages, ph, ph_pagelist);
		pp->pr_curpage = ph;
	}
}

/*
 * Return resource to the pool; must be called at appropriate spl level
 */
#ifdef POOL_DIAGNOSTIC
void
_pool_put(struct pool *pp, void *v, const char *file, long line)
{

	simple_lock(&pp->pr_slock);
	pr_enter(pp, file, line);

	pr_log(pp, v, PRLOG_PUT, file, line);

	pool_do_put(pp, v);

	pr_leave(pp);
	simple_unlock(&pp->pr_slock);
}
#undef pool_put
#endif /* POOL_DIAGNOSTIC */

void
pool_put(struct pool *pp, void *v)
{

	simple_lock(&pp->pr_slock);

	pool_do_put(pp, v);

	simple_unlock(&pp->pr_slock);
}

#ifdef POOL_DIAGNOSTIC
#define		pool_put(h, v)	_pool_put((h), (v), __FILE__, __LINE__)
#endif

/*
 * Add N items to the pool.
 */
int
pool_prime(struct pool *pp, int n)
{
	struct pool_item_header *ph = NULL;
	caddr_t cp;
	int newpages;

	simple_lock(&pp->pr_slock);

	newpages = roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;

	while (newpages-- > 0) {
		simple_unlock(&pp->pr_slock);
		cp = pool_allocator_alloc(pp, PR_NOWAIT);
		if (__predict_true(cp != NULL))
			ph = pool_alloc_item_header(pp, cp, PR_NOWAIT);

		if (__predict_false(cp == NULL || ph == NULL)) {
			if (cp != NULL)
				pool_allocator_free(pp, cp);
			simple_lock(&pp->pr_slock);
			break;
		}

		simple_lock(&pp->pr_slock);
		pool_prime_page(pp, cp, ph);
		pp->pr_npagealloc++;
		pp->pr_minpages++;
	}

	if (pp->pr_minpages >= pp->pr_maxpages)
		pp->pr_maxpages = pp->pr_minpages + 1;	/* XXX */

	simple_unlock(&pp->pr_slock);
	return (0);
}

/*
 * Add a page worth of items to the pool.
 *
 * Note, we must be called with the pool descriptor LOCKED.
 */
static void
pool_prime_page(struct pool *pp, caddr_t storage, struct pool_item_header *ph)
{
	struct pool_item *pi;
	caddr_t cp = storage;
	unsigned int align = pp->pr_align;
	unsigned int ioff = pp->pr_itemoffset;
	int n;
	int s;

	LOCK_ASSERT(simple_lock_held(&pp->pr_slock));

#ifdef DIAGNOSTIC
	if (((u_long)cp & (pp->pr_alloc->pa_pagesz - 1)) != 0)
		panic("pool_prime_page: %s: unaligned page", pp->pr_wchan);
#endif

	/*
	 * Insert page header.
	 */
	LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist);
	TAILQ_INIT(&ph->ph_itemlist);
	ph->ph_page = storage;
	ph->ph_nmissing = 0;
	s = splclock();
	ph->ph_time = mono_time;
	splx(s);
	if ((pp->pr_roflags & PR_PHINPAGE) == 0)
		SPLAY_INSERT(phtree, &pp->pr_phtree, ph);

	pp->pr_nidle++;

	/*
	 * Color this page.
	 */
	cp = (caddr_t)(cp + pp->pr_curcolor);
	if ((pp->pr_curcolor += align) > pp->pr_maxcolor)
		pp->pr_curcolor = 0;

	/*
	 * Adjust storage to apply alignment to `pr_itemoffset' in each item.
	 */
	if (ioff != 0)
		cp = (caddr_t)(cp + (align - ioff));

	/*
	 * Insert remaining chunks on the bucket list.
	 */
	n = pp->pr_itemsperpage;
	pp->pr_nitems += n;

	while (n--) {
		pi = (struct pool_item *)cp;

		KASSERT(((((vaddr_t)pi) + ioff) & (align - 1)) == 0);

		/* Insert on page list */
		TAILQ_INSERT_TAIL(&ph->ph_itemlist, pi, pi_list);
#ifdef DIAGNOSTIC
		pi->pi_magic = PI_MAGIC;
#endif
		cp = (caddr_t)(cp + pp->pr_size);
	}

	/*
	 * If the pool was depleted, point at the new page.
	 */
	if (pp->pr_curpage == NULL)
		pp->pr_curpage = ph;

	if (++pp->pr_npages > pp->pr_hiwat)
		pp->pr_hiwat = pp->pr_npages;
}

/*
 * Used by pool_get() when nitems drops below the low water mark.  This
 * is used to catch up pr_nitems with the low water mark.
 *
 * Note 1, we never wait for memory here, we let the caller decide what to do.
 *
 * Note 2, we must be called with the pool already locked, and we return
 * with it locked.
 */
static int
pool_catchup(struct pool *pp)
{
	struct pool_item_header *ph = NULL;
	caddr_t cp;
	int error = 0;

	while (POOL_NEEDS_CATCHUP(pp)) {
		/*
		 * Call the page back-end allocator for more memory.
		 *
		 * XXX: We never wait, so should we bother unlocking
		 * the pool descriptor?
		 */
		simple_unlock(&pp->pr_slock);
		cp = pool_allocator_alloc(pp, PR_NOWAIT);
		if (__predict_true(cp != NULL))
			ph = pool_alloc_item_header(pp, cp, PR_NOWAIT);
		if (__predict_false(cp == NULL || ph == NULL)) {
			if (cp != NULL)
				pool_allocator_free(pp, cp);
			error = ENOMEM;
			simple_lock(&pp->pr_slock);
			break;
		}
		simple_lock(&pp->pr_slock);
		pool_prime_page(pp, cp, ph);
		pp->pr_npagealloc++;
	}

	return (error);
}

static void
pool_update_curpage(struct pool *pp)
{

	pp->pr_curpage = LIST_FIRST(&pp->pr_partpages);
	if (pp->pr_curpage == NULL) {
		pp->pr_curpage = LIST_FIRST(&pp->pr_emptypages);
	}
}

void
pool_setlowat(struct pool *pp, int n)
{

	simple_lock(&pp->pr_slock);

	pp->pr_minitems = n;
	pp->pr_minpages = (n == 0)
		? 0
		: roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;

	/* Make sure we're caught up with the newly-set low water mark. */
	if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
		/*
		 * XXX: Should we log a warning?  Should we set up a timeout
		 * to try again in a second or so?  The latter could break
		 * a caller's assumptions about interrupt protection, etc.
		 */
	}

	simple_unlock(&pp->pr_slock);
}

void
pool_sethiwat(struct pool *pp, int n)
{

	simple_lock(&pp->pr_slock);

	pp->pr_maxpages = (n == 0)
		? 0
		: roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;

	simple_unlock(&pp->pr_slock);
}

void
pool_sethardlimit(struct pool *pp, int n, const char *warnmess, int ratecap)
{

	simple_lock(&pp->pr_slock);

	pp->pr_hardlimit = n;
	pp->pr_hardlimit_warning = warnmess;
	pp->pr_hardlimit_ratecap.tv_sec = ratecap;
	pp->pr_hardlimit_warning_last.tv_sec = 0;
	pp->pr_hardlimit_warning_last.tv_usec = 0;

	/*
	 * In-line version of pool_sethiwat(), because we don't want to
	 * release the lock.
	 */
	pp->pr_maxpages = (n == 0)
		? 0
		: roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;

	simple_unlock(&pp->pr_slock);
}
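
/*
 * Example (a sketch with illustrative numbers): a subsystem that
 * wants 16 items always ready, backing pages capped at roughly 128
 * items, and a hard limit of 256 outstanding items with a warning
 * rate-limited to once a minute might do:
 *
 *	pool_setlowat(&foo_pool, 16);
 *	pool_sethiwat(&foo_pool, 128);
 *	pool_sethardlimit(&foo_pool, 256,
 *	    "WARNING: foo_pool limit reached", 60);
 */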

/*
 * Release all complete pages that have not been used recently.
 */
int
#ifdef POOL_DIAGNOSTIC
_pool_reclaim(struct pool *pp, const char *file, long line)
#else
pool_reclaim(struct pool *pp)
#endif
{
	struct pool_item_header *ph, *phnext;
	struct pool_cache *pc;
	struct timeval curtime;
	struct pool_pagelist pq;
	struct timeval diff;
	int s;

	if (pp->pr_drain_hook != NULL) {
		/*
		 * The drain hook must be called with the pool unlocked.
		 */
		(*pp->pr_drain_hook)(pp->pr_drain_hook_arg, PR_NOWAIT);
	}

	if (simple_lock_try(&pp->pr_slock) == 0)
		return (0);
	pr_enter(pp, file, line);

	LIST_INIT(&pq);

	/*
	 * Reclaim items from the pool's caches.
	 */
	TAILQ_FOREACH(pc, &pp->pr_cachelist, pc_poollist)
		pool_cache_reclaim(pc);

	s = splclock();
	curtime = mono_time;
	splx(s);

	for (ph = LIST_FIRST(&pp->pr_emptypages); ph != NULL; ph = phnext) {
		phnext = LIST_NEXT(ph, ph_pagelist);

		/* Check our minimum page claim */
		if (pp->pr_npages <= pp->pr_minpages)
			break;

		KASSERT(ph->ph_nmissing == 0);
		timersub(&curtime, &ph->ph_time, &diff);
		if (diff.tv_sec < pool_inactive_time)
			continue;

		/*
		 * If freeing this page would put us below
		 * the low water mark, stop now.
		 */
		if ((pp->pr_nitems - pp->pr_itemsperpage) <
		    pp->pr_minitems)
			break;

		pr_rmpage(pp, ph, &pq);
	}

	pr_leave(pp);
	simple_unlock(&pp->pr_slock);
	if (LIST_EMPTY(&pq))
		return (0);

	while ((ph = LIST_FIRST(&pq)) != NULL) {
		LIST_REMOVE(ph, ph_pagelist);
		pool_allocator_free(pp, ph->ph_page);
		if (pp->pr_roflags & PR_PHINPAGE) {
			continue;
		}
		s = splvm();
		pool_put(&phpool, ph);
		splx(s);
	}

	return (1);
}

/*
 * Drain pools, one at a time.
 *
 * Note, we must never be called from an interrupt context.
 */
void
pool_drain(void *arg)
{
	struct pool *pp;
	int s;

	pp = NULL;
	s = splvm();
	simple_lock(&pool_head_slock);
	if (drainpp == NULL) {
		drainpp = TAILQ_FIRST(&pool_head);
	}
	if (drainpp) {
		pp = drainpp;
		drainpp = TAILQ_NEXT(pp, pr_poollist);
	}
	simple_unlock(&pool_head_slock);
	pool_reclaim(pp);
	splx(s);
}

/*
 * Diagnostic helpers.
 */
void
pool_print(struct pool *pp, const char *modif)
{
	int s;

	s = splvm();
	if (simple_lock_try(&pp->pr_slock) == 0) {
		printf("pool %s is locked; try again later\n",
		    pp->pr_wchan);
		splx(s);
		return;
	}
	pool_print1(pp, modif, printf);
	simple_unlock(&pp->pr_slock);
	splx(s);
}

void
pool_printit(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
{
	int didlock = 0;

	if (pp == NULL) {
		(*pr)("Must specify a pool to print.\n");
		return;
	}

	/*
	 * Called from DDB; interrupts should be blocked, and all
	 * other processors should be paused.  We can skip locking
	 * the pool in this case.
	 *
	 * We do a simple_lock_try() just to print the lock
	 * status, however.
	 */

	if (simple_lock_try(&pp->pr_slock) == 0)
		(*pr)("WARNING: pool %s is locked\n", pp->pr_wchan);
	else
		didlock = 1;

	pool_print1(pp, modif, pr);

	if (didlock)
		simple_unlock(&pp->pr_slock);
}

static void
pool_print_pagelist(struct pool_pagelist *pl, void (*pr)(const char *, ...))
{
	struct pool_item_header *ph;
#ifdef DIAGNOSTIC
	struct pool_item *pi;
#endif

	LIST_FOREACH(ph, pl, ph_pagelist) {
		(*pr)("\t\tpage %p, nmissing %d, time %lu,%lu\n",
		    ph->ph_page, ph->ph_nmissing,
		    (u_long)ph->ph_time.tv_sec,
		    (u_long)ph->ph_time.tv_usec);
#ifdef DIAGNOSTIC
		TAILQ_FOREACH(pi, &ph->ph_itemlist, pi_list) {
			if (pi->pi_magic != PI_MAGIC) {
				(*pr)("\t\t\titem %p, magic 0x%x\n",
				    pi, pi->pi_magic);
			}
		}
#endif
	}
}

static void
pool_print1(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
{
	struct pool_item_header *ph;
	struct pool_cache *pc;
	struct pool_cache_group *pcg;
	int i, print_log = 0, print_pagelist = 0, print_cache = 0;
	char c;

	while ((c = *modif++) != '\0') {
		if (c == 'l')
			print_log = 1;
		if (c == 'p')
			print_pagelist = 1;
		if (c == 'c')
			print_cache = 1;
	}

	(*pr)("POOL %s: size %u, align %u, ioff %u, roflags 0x%08x\n",
	    pp->pr_wchan, pp->pr_size, pp->pr_align, pp->pr_itemoffset,
	    pp->pr_roflags);
	(*pr)("\talloc %p\n", pp->pr_alloc);
	(*pr)("\tminitems %u, minpages %u, maxpages %u, npages %u\n",
	    pp->pr_minitems, pp->pr_minpages, pp->pr_maxpages, pp->pr_npages);
	(*pr)("\titemsperpage %u, nitems %u, nout %u, hardlimit %u\n",
	    pp->pr_itemsperpage, pp->pr_nitems, pp->pr_nout, pp->pr_hardlimit);

	(*pr)("\n\tnget %lu, nfail %lu, nput %lu\n",
	    pp->pr_nget, pp->pr_nfail, pp->pr_nput);
	(*pr)("\tnpagealloc %lu, npagefree %lu, hiwat %u, nidle %lu\n",
	    pp->pr_npagealloc, pp->pr_npagefree, pp->pr_hiwat, pp->pr_nidle);

	if (print_pagelist == 0)
		goto skip_pagelist;

	if ((ph = LIST_FIRST(&pp->pr_emptypages)) != NULL)
		(*pr)("\n\tempty page list:\n");
	pool_print_pagelist(&pp->pr_emptypages, pr);
	if ((ph = LIST_FIRST(&pp->pr_fullpages)) != NULL)
		(*pr)("\n\tfull page list:\n");
	pool_print_pagelist(&pp->pr_fullpages, pr);
	if ((ph = LIST_FIRST(&pp->pr_partpages)) != NULL)
		(*pr)("\n\tpartial-page list:\n");
	pool_print_pagelist(&pp->pr_partpages, pr);

	if (pp->pr_curpage == NULL)
		(*pr)("\tno current page\n");
	else
		(*pr)("\tcurpage %p\n", pp->pr_curpage->ph_page);

 skip_pagelist:
	if (print_log == 0)
		goto skip_log;

	(*pr)("\n");
	if ((pp->pr_roflags & PR_LOGGING) == 0)
		(*pr)("\tno log\n");
	else
		pr_printlog(pp, NULL, pr);

 skip_log:
	if (print_cache == 0)
		goto skip_cache;

	TAILQ_FOREACH(pc, &pp->pr_cachelist, pc_poollist) {
		(*pr)("\tcache %p: allocfrom %p freeto %p\n", pc,
		    pc->pc_allocfrom, pc->pc_freeto);
		(*pr)("\t    hits %lu misses %lu ngroups %lu nitems %lu\n",
		    pc->pc_hits, pc->pc_misses, pc->pc_ngroups, pc->pc_nitems);
		TAILQ_FOREACH(pcg, &pc->pc_grouplist, pcg_list) {
			(*pr)("\t\tgroup %p: avail %d\n", pcg, pcg->pcg_avail);
			for (i = 0; i < PCG_NOBJECTS; i++) {
				if (pcg->pcg_objects[i].pcgo_pa !=
				    POOL_PADDR_INVALID) {
					(*pr)("\t\t\t%p, 0x%llx\n",
					    pcg->pcg_objects[i].pcgo_va,
					    (unsigned long long)
					    pcg->pcg_objects[i].pcgo_pa);
				} else {
					(*pr)("\t\t\t%p\n",
					    pcg->pcg_objects[i].pcgo_va);
				}
			}
		}
	}

 skip_cache:
	pr_enter_check(pp, pr);
}

static int
pool_chk_page(struct pool *pp, const char *label, struct pool_item_header *ph)
{
	struct pool_item *pi;
	caddr_t page;
	int n;

	page = (caddr_t)((u_long)ph & pp->pr_alloc->pa_pagemask);
	if (page != ph->ph_page &&
	    (pp->pr_roflags & PR_PHINPAGE) != 0) {
		if (label != NULL)
			printf("%s: ", label);
		printf("pool(%p:%s): page inconsistency: page %p;"
		       " at page head addr %p (p %p)\n", pp,
			pp->pr_wchan, ph->ph_page,
			ph, page);
		return 1;
	}

	for (pi = TAILQ_FIRST(&ph->ph_itemlist), n = 0;
	     pi != NULL;
	     pi = TAILQ_NEXT(pi,pi_list), n++) {

#ifdef DIAGNOSTIC
		if (pi->pi_magic != PI_MAGIC) {
			if (label != NULL)
				printf("%s: ", label);
			printf("pool(%s): free list modified: magic=%x;"
			       " page %p; item ordinal %d;"
			       " addr %p (p %p)\n",
				pp->pr_wchan, pi->pi_magic, ph->ph_page,
				n, pi, page);
			panic("pool");
		}
#endif
		page =
		    (caddr_t)((u_long)pi & pp->pr_alloc->pa_pagemask);
		if (page == ph->ph_page)
			continue;

		if (label != NULL)
			printf("%s: ", label);
		printf("pool(%p:%s): page inconsistency: page %p;"
		       " item ordinal %d; addr %p (p %p)\n", pp,
			pp->pr_wchan, ph->ph_page,
			n, pi, page);
		return 1;
	}
	return 0;
}


int
pool_chk(struct pool *pp, const char *label)
{
	struct pool_item_header *ph;
	int r = 0;

	simple_lock(&pp->pr_slock);
	LIST_FOREACH(ph, &pp->pr_emptypages, ph_pagelist) {
		r = pool_chk_page(pp, label, ph);
		if (r) {
			goto out;
		}
	}
	LIST_FOREACH(ph, &pp->pr_fullpages, ph_pagelist) {
		r = pool_chk_page(pp, label, ph);
		if (r) {
			goto out;
		}
	}
	LIST_FOREACH(ph, &pp->pr_partpages, ph_pagelist) {
		r = pool_chk_page(pp, label, ph);
		if (r) {
			goto out;
		}
	}

out:
	simple_unlock(&pp->pr_slock);
	return (r);
}

/*
 * pool_cache_init:
 *
 *	Initialize a pool cache.
 *
 *	NOTE: If the pool must be protected from interrupts, we expect
 *	to be called at the appropriate interrupt priority level.
 */
void
pool_cache_init(struct pool_cache *pc, struct pool *pp,
    int (*ctor)(void *, void *, int),
    void (*dtor)(void *, void *),
    void *arg)
{

	TAILQ_INIT(&pc->pc_grouplist);
	simple_lock_init(&pc->pc_slock);

	pc->pc_allocfrom = NULL;
	pc->pc_freeto = NULL;
	pc->pc_pool = pp;

	pc->pc_ctor = ctor;
	pc->pc_dtor = dtor;
	pc->pc_arg  = arg;

	pc->pc_hits   = 0;
	pc->pc_misses = 0;

	pc->pc_ngroups = 0;

	pc->pc_nitems = 0;

	simple_lock(&pp->pr_slock);
	TAILQ_INSERT_TAIL(&pp->pr_cachelist, pc, pc_poollist);
	simple_unlock(&pp->pr_slock);
}

/*
 * pool_cache_destroy:
 *
 *	Destroy a pool cache.
 */
void
pool_cache_destroy(struct pool_cache *pc)
{
	struct pool *pp = pc->pc_pool;

	/* First, invalidate the entire cache. */
	pool_cache_invalidate(pc);

	/* ...and remove it from the pool's cache list. */
	simple_lock(&pp->pr_slock);
	TAILQ_REMOVE(&pp->pr_cachelist, pc, pc_poollist);
	simple_unlock(&pp->pr_slock);
}

static __inline void *
pcg_get(struct pool_cache_group *pcg, paddr_t *pap)
{
	void *object;
	u_int idx;

	KASSERT(pcg->pcg_avail <= PCG_NOBJECTS);
	KASSERT(pcg->pcg_avail != 0);
	idx = --pcg->pcg_avail;

	KASSERT(pcg->pcg_objects[idx].pcgo_va != NULL);
	object = pcg->pcg_objects[idx].pcgo_va;
	if (pap != NULL)
		*pap = pcg->pcg_objects[idx].pcgo_pa;
	pcg->pcg_objects[idx].pcgo_va = NULL;

	return (object);
}

static __inline void
pcg_put(struct pool_cache_group *pcg, void *object, paddr_t pa)
{
	u_int idx;

	KASSERT(pcg->pcg_avail < PCG_NOBJECTS);
	idx = pcg->pcg_avail++;

	KASSERT(pcg->pcg_objects[idx].pcgo_va == NULL);
	pcg->pcg_objects[idx].pcgo_va = object;
	pcg->pcg_objects[idx].pcgo_pa = pa;
}

/*
 * pool_cache_get{,_paddr}:
 *
 *	Get an object from a pool cache (optionally returning
 *	the physical address of the object).
 */
void *
pool_cache_get_paddr(struct pool_cache *pc, int flags, paddr_t *pap)
{
	struct pool_cache_group *pcg;
	void *object;

#ifdef LOCKDEBUG
	if (flags & PR_WAITOK)
		simple_lock_only_held(NULL, "pool_cache_get(PR_WAITOK)");
#endif

	simple_lock(&pc->pc_slock);

	if ((pcg = pc->pc_allocfrom) == NULL) {
		TAILQ_FOREACH(pcg, &pc->pc_grouplist, pcg_list) {
			if (pcg->pcg_avail != 0) {
				pc->pc_allocfrom = pcg;
				goto have_group;
			}
		}

		/*
		 * No groups with any available objects.  Allocate
		 * a new object, construct it, and return it to
		 * the caller.  We will allocate a group, if necessary,
		 * when the object is freed back to the cache.
		 */
		pc->pc_misses++;
		simple_unlock(&pc->pc_slock);
		object = pool_get(pc->pc_pool, flags);
		if (object != NULL && pc->pc_ctor != NULL) {
			if ((*pc->pc_ctor)(pc->pc_arg, object, flags) != 0) {
				pool_put(pc->pc_pool, object);
				return (NULL);
			}
		}
		if (object != NULL && pap != NULL) {
#ifdef POOL_VTOPHYS
			*pap = POOL_VTOPHYS(object);
#else
			*pap = POOL_PADDR_INVALID;
#endif
		}
		return (object);
	}

 have_group:
	pc->pc_hits++;
	pc->pc_nitems--;
	object = pcg_get(pcg, pap);

	if (pcg->pcg_avail == 0)
		pc->pc_allocfrom = NULL;

	simple_unlock(&pc->pc_slock);

	return (object);
}
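
/*
 * Example (a sketch; the device use is hypothetical): a caller that
 * also needs the physical address, e.g. to hand the buffer to
 * hardware, uses the _paddr variant:
 *
 *	paddr_t pa;
 *	void *va;
 *
 *	va = pool_cache_get_paddr(&foo_cache, PR_NOWAIT, &pa);
 *	if (va != NULL) {
 *		(program the device with pa)
 *		pool_cache_put_paddr(&foo_cache, va, pa);
 *	}
 */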

/*
 * pool_cache_put{,_paddr}:
 *
 *	Put an object back to the pool cache (optionally caching the
 *	physical address of the object).
 */
void
pool_cache_put_paddr(struct pool_cache *pc, void *object, paddr_t pa)
{
	struct pool_cache_group *pcg;
	int s;

	simple_lock(&pc->pc_slock);

	if ((pcg = pc->pc_freeto) == NULL) {
		TAILQ_FOREACH(pcg, &pc->pc_grouplist, pcg_list) {
			if (pcg->pcg_avail != PCG_NOBJECTS) {
				pc->pc_freeto = pcg;
				goto have_group;
			}
		}

		/*
		 * No empty groups to free the object to.  Attempt to
		 * allocate one.
		 */
		simple_unlock(&pc->pc_slock);
		s = splvm();
		pcg = pool_get(&pcgpool, PR_NOWAIT);
		splx(s);
		if (pcg != NULL) {
			memset(pcg, 0, sizeof(*pcg));
			simple_lock(&pc->pc_slock);
			pc->pc_ngroups++;
			TAILQ_INSERT_TAIL(&pc->pc_grouplist, pcg, pcg_list);
			if (pc->pc_freeto == NULL)
				pc->pc_freeto = pcg;
			goto have_group;
		}

		/*
		 * Unable to allocate a cache group; destruct the object
		 * and free it back to the pool.
		 */
		pool_cache_destruct_object(pc, object);
		return;
	}

 have_group:
	pc->pc_nitems++;
	pcg_put(pcg, object, pa);

	if (pcg->pcg_avail == PCG_NOBJECTS)
		pc->pc_freeto = NULL;

	simple_unlock(&pc->pc_slock);
}

/*
 * pool_cache_destruct_object:
 *
 *	Force destruction of an object and its release back into
 *	the pool.
 */
void
pool_cache_destruct_object(struct pool_cache *pc, void *object)
{

	if (pc->pc_dtor != NULL)
		(*pc->pc_dtor)(pc->pc_arg, object);
	pool_put(pc->pc_pool, object);
}

/*
 * pool_cache_do_invalidate:
 *
 *	This internal function implements pool_cache_invalidate() and
 *	pool_cache_reclaim().
 */
static void
pool_cache_do_invalidate(struct pool_cache *pc, int free_groups,
    void (*putit)(struct pool *, void *))
{
	struct pool_cache_group *pcg, *npcg;
	void *object;
	int s;

	for (pcg = TAILQ_FIRST(&pc->pc_grouplist); pcg != NULL;
	     pcg = npcg) {
		npcg = TAILQ_NEXT(pcg, pcg_list);
		while (pcg->pcg_avail != 0) {
			pc->pc_nitems--;
			object = pcg_get(pcg, NULL);
			if (pcg->pcg_avail == 0 && pc->pc_allocfrom == pcg)
				pc->pc_allocfrom = NULL;
			if (pc->pc_dtor != NULL)
				(*pc->pc_dtor)(pc->pc_arg, object);
			(*putit)(pc->pc_pool, object);
		}
		if (free_groups) {
			pc->pc_ngroups--;
			TAILQ_REMOVE(&pc->pc_grouplist, pcg, pcg_list);
			if (pc->pc_freeto == pcg)
				pc->pc_freeto = NULL;
			s = splvm();
			pool_put(&pcgpool, pcg);
			splx(s);
		}
	}
}

/*
 * pool_cache_invalidate:
 *
 *	Invalidate a pool cache (destruct and release all of the
 *	cached objects).
 */
void
pool_cache_invalidate(struct pool_cache *pc)
{

	simple_lock(&pc->pc_slock);
	pool_cache_do_invalidate(pc, 0, pool_put);
	simple_unlock(&pc->pc_slock);
}

/*
 * pool_cache_reclaim:
 *
 *	Reclaim a pool cache for pool_reclaim().
 */
static void
pool_cache_reclaim(struct pool_cache *pc)
{

	simple_lock(&pc->pc_slock);
	pool_cache_do_invalidate(pc, 1, pool_do_put);
	simple_unlock(&pc->pc_slock);
}

/*
 * Pool backend allocators.
 *
 * Each pool has a backend allocator that handles allocation, deallocation,
 * and any additional draining that might be needed.
 *
 * We provide two standard allocators:
 *
 *	pool_allocator_kmem - the default when no allocator is specified
 *
 *	pool_allocator_nointr - used for pools that will not be accessed
 *	in interrupt context.
 */
void	*pool_page_alloc(struct pool *, int);
void	pool_page_free(struct pool *, void *);

struct pool_allocator pool_allocator_kmem = {
	pool_page_alloc, pool_page_free, 0,
};

void	*pool_page_alloc_nointr(struct pool *, int);
void	pool_page_free_nointr(struct pool *, void *);

struct pool_allocator pool_allocator_nointr = {
	pool_page_alloc_nointr, pool_page_free_nointr, 0,
};

#ifdef POOL_SUBPAGE
void	*pool_subpage_alloc(struct pool *, int);
void	pool_subpage_free(struct pool *, void *);

struct pool_allocator pool_allocator_kmem_subpage = {
	pool_subpage_alloc, pool_subpage_free, 0,
};
#endif /* POOL_SUBPAGE */
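
/*
 * Example (a sketch; `foo_page_alloc'/`foo_page_free' are
 * hypothetical): a pool that must draw its pages from a private
 * submap supplies its own allocator.  pa_pagesz may be left 0 to
 * default to PAGE_SIZE, as with the standard allocators above:
 *
 *	void	*foo_page_alloc(struct pool *, int);
 *	void	foo_page_free(struct pool *, void *);
 *
 *	struct pool_allocator foo_allocator = {
 *		foo_page_alloc, foo_page_free, 0,
 *	};
 *
 *	pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0, "foopl",
 *	    &foo_allocator);
 */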

/*
 * We have at least three different resources for the same allocation and
 * each resource can be depleted.  First, we have the ready elements in the
 * pool.  Then we have the resource (typically a vm_map) for this allocator.
 * Finally, we have physical memory.  Waiting for any of these can be
 * unnecessary when any other is freed, but the kernel doesn't support
 * sleeping on multiple wait channels, so we have to employ another strategy.
 *
 * The caller sleeps on the pool (so that it can be awakened when an item
 * is returned to the pool), but we set PA_WANT on the allocator.  When a
 * page is returned to the allocator and PA_WANT is set, pool_allocator_free
 * will wake up all sleeping pools belonging to this allocator.
 *
 * XXX Thundering herd.
 */
void *
pool_allocator_alloc(struct pool *org, int flags)
{
	struct pool_allocator *pa = org->pr_alloc;
	struct pool *pp, *start;
	int s, freed;
	void *res;

	LOCK_ASSERT(!simple_lock_held(&org->pr_slock));

	do {
		if ((res = (*pa->pa_alloc)(org, flags)) != NULL)
			return (res);
		if ((flags & PR_WAITOK) == 0) {
			/*
			 * We only run the drain hook here if PR_NOWAIT.
			 * In other cases, the hook will be run in
			 * pool_reclaim().
			 */
			if (org->pr_drain_hook != NULL) {
				(*org->pr_drain_hook)(org->pr_drain_hook_arg,
				    flags);
				if ((res = (*pa->pa_alloc)(org, flags)) != NULL)
					return (res);
			}
			break;
		}

		/*
		 * Drain all pools, except "org", that use this
		 * allocator.  We do this to reclaim VA space.
		 * pa_alloc is responsible for waiting for
		 * physical memory.
		 *
		 * XXX We risk looping forever if someone calls
		 * pool_destroy() on "start".  But there is no other
		 * way to have a potentially sleeping pool_reclaim(),
		 * non-sleeping locks on the pool_allocator, and some
		 * stirring of drained pools in the allocator.
		 *
		 * XXX Maybe we should use pool_head_slock for locking
		 * the allocators?
		 */
		freed = 0;

		s = splvm();
		simple_lock(&pa->pa_slock);
		pp = start = TAILQ_FIRST(&pa->pa_list);
		do {
			TAILQ_REMOVE(&pa->pa_list, pp, pr_alloc_list);
			TAILQ_INSERT_TAIL(&pa->pa_list, pp, pr_alloc_list);
			if (pp == org)
				continue;
			simple_unlock(&pa->pa_slock);
			freed = pool_reclaim(pp);
			simple_lock(&pa->pa_slock);
		} while ((pp = TAILQ_FIRST(&pa->pa_list)) != start &&
			 freed == 0);

		if (freed == 0) {
			/*
			 * We set PA_WANT here, the caller will most likely
			 * sleep waiting for pages (if not, this won't hurt
			 * that much), and there is no way to set this in
			 * the caller without violating locking order.
			 */
			pa->pa_flags |= PA_WANT;
		}
		simple_unlock(&pa->pa_slock);
		splx(s);
	} while (freed);
	return (NULL);
}

void
pool_allocator_free(struct pool *pp, void *v)
{
	struct pool_allocator *pa = pp->pr_alloc;
	int s;

	LOCK_ASSERT(!simple_lock_held(&pp->pr_slock));

	(*pa->pa_free)(pp, v);

	s = splvm();
	simple_lock(&pa->pa_slock);
	if ((pa->pa_flags & PA_WANT) == 0) {
		simple_unlock(&pa->pa_slock);
		splx(s);
		return;
	}

	TAILQ_FOREACH(pp, &pa->pa_list, pr_alloc_list) {
		simple_lock(&pp->pr_slock);
		if ((pp->pr_flags & PR_WANTED) != 0) {
			pp->pr_flags &= ~PR_WANTED;
			wakeup(pp);
		}
		simple_unlock(&pp->pr_slock);
	}
	pa->pa_flags &= ~PA_WANT;
	simple_unlock(&pa->pa_slock);
	splx(s);
}

void *
pool_page_alloc(struct pool *pp, int flags)
{
	boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;

	return ((void *) uvm_km_alloc_poolpage(waitok));
}

void
pool_page_free(struct pool *pp, void *v)
{

	uvm_km_free_poolpage((vaddr_t) v);
}

#ifdef POOL_SUBPAGE
/* Sub-page allocator, for machines with large hardware pages. */
void *
pool_subpage_alloc(struct pool *pp, int flags)
{
	void *v;
	int s;
	s = splvm();
	v = pool_get(&psppool, flags);
	splx(s);
	return v;
}

void
pool_subpage_free(struct pool *pp, void *v)
{
	int s;
	s = splvm();
	pool_put(&psppool, v);
	splx(s);
}

/* We don't provide a real nointr allocator.  Maybe later. */
void *
pool_page_alloc_nointr(struct pool *pp, int flags)
{

	return (pool_subpage_alloc(pp, flags));
}

void
pool_page_free_nointr(struct pool *pp, void *v)
{

	pool_subpage_free(pp, v);
}
#else
void *
pool_page_alloc_nointr(struct pool *pp, int flags)
{
	boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;

	return ((void *) uvm_km_alloc_poolpage1(kernel_map,
	    uvm.kernel_object, waitok));
}

void
pool_page_free_nointr(struct pool *pp, void *v)
{

	uvm_km_free_poolpage1(kernel_map, (vaddr_t) v);
}
#endif /* POOL_SUBPAGE */