xref: /netbsd-src/sys/kern/subr_pool.c (revision fc9881cf50d4dfd3ae2c7566421b3b7f28780795)
1 /*	$NetBSD: subr_pool.c,v 1.74 2002/03/09 18:06:55 thorpej Exp $	*/
2 
3 /*-
4  * Copyright (c) 1997, 1999, 2000 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Paul Kranenburg; by Jason R. Thorpe of the Numerical Aerospace
9  * Simulation Facility, NASA Ames Research Center.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  * 3. All advertising materials mentioning features or use of this software
20  *    must display the following acknowledgement:
21  *	This product includes software developed by the NetBSD
22  *	Foundation, Inc. and its contributors.
23  * 4. Neither the name of The NetBSD Foundation nor the names of its
24  *    contributors may be used to endorse or promote products derived
25  *    from this software without specific prior written permission.
26  *
27  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
28  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37  * POSSIBILITY OF SUCH DAMAGE.
38  */
39 
40 #include <sys/cdefs.h>
41 __KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.74 2002/03/09 18:06:55 thorpej Exp $");
42 
43 #include "opt_pool.h"
44 #include "opt_poollog.h"
45 #include "opt_lockdebug.h"
46 
47 #include <sys/param.h>
48 #include <sys/systm.h>
49 #include <sys/proc.h>
50 #include <sys/errno.h>
51 #include <sys/kernel.h>
52 #include <sys/malloc.h>
53 #include <sys/lock.h>
54 #include <sys/pool.h>
55 #include <sys/syslog.h>
56 
57 #include <uvm/uvm.h>
58 
59 /*
60  * Pool resource management utility.
61  *
62  * Memory is allocated in pages which are split into pieces according
63  * to the pool item size. Each page is kept on a list headed by `pr_pagelist'
64  * in the pool structure and the individual pool items are on a linked list
65  * headed by `ph_itemlist' in each page header. The memory for building
66  * the page list is either taken from the allocated pages themselves (for
67  * small pool items) or taken from an internal pool of page headers (`phpool').
68  */
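
/*
 * A minimal usage sketch (illustrative only; `struct foo', `foo_pool' and
 * the wait channel name are hypothetical, the calls match the interface
 * defined below):
 *
 *	static struct pool foo_pool;
 *	struct foo *f;
 *
 *	pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0,
 *	    "foopl", NULL);			(NULL: default back-end)
 *
 *	f = pool_get(&foo_pool, PR_WAITOK);	(or PR_NOWAIT)
 *	...
 *	pool_put(&foo_pool, f);
 *
 *	pool_destroy(&foo_pool);
 */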
69 
70 /* List of all pools */
71 TAILQ_HEAD(,pool) pool_head = TAILQ_HEAD_INITIALIZER(pool_head);
72 
73 /* Private pool for page header structures */
74 static struct pool phpool;
75 
76 #ifdef POOL_SUBPAGE
77 /* Pool of subpages for use by normal pools. */
78 static struct pool psppool;
79 #endif
80 
81 /* # of seconds to retain page after last use */
82 int pool_inactive_time = 10;
83 
84 /* Next candidate for drainage (see pool_drain()) */
85 static struct pool	*drainpp;
86 
87 /* This spin lock protects both pool_head and drainpp. */
88 struct simplelock pool_head_slock = SIMPLELOCK_INITIALIZER;
89 
90 struct pool_item_header {
91 	/* Page headers */
92 	TAILQ_ENTRY(pool_item_header)
93 				ph_pagelist;	/* pool page list */
94 	TAILQ_HEAD(,pool_item)	ph_itemlist;	/* chunk list for this page */
95 	LIST_ENTRY(pool_item_header)
96 				ph_hashlist;	/* Off-page page headers */
97 	int			ph_nmissing;	/* # of chunks in use */
98 	caddr_t			ph_page;	/* this page's address */
99 	struct timeval		ph_time;	/* last referenced */
100 };
101 TAILQ_HEAD(pool_pagelist,pool_item_header);
102 
103 struct pool_item {
104 #ifdef DIAGNOSTIC
105 	int pi_magic;
106 #endif
107 #define	PI_MAGIC 0xdeadbeef
108 	/* Other entries use only this list entry */
109 	TAILQ_ENTRY(pool_item)	pi_list;
110 };
111 
112 #define	PR_HASH_INDEX(pp,addr) \
113 	(((u_long)(addr) >> (pp)->pr_alloc->pa_pageshift) & \
114 	 (PR_HASHTABSIZE - 1))
115 
116 #define	POOL_NEEDS_CATCHUP(pp)						\
117 	((pp)->pr_nitems < (pp)->pr_minitems)
118 
119 /*
120  * Pool cache management.
121  *
122  * Pool caches provide a way for constructed objects to be cached by the
123  * pool subsystem.  This can lead to performance improvements by avoiding
124  * needless object construction/destruction; both are deferred until
125  * absolutely necessary.
126  *
127  * Caches are grouped into cache groups.  Each cache group references
128  * up to 16 constructed objects.  When a cache allocates an object
129  * from the pool, it calls the object's constructor and places it into
130  * a cache group.  When a cache group frees an object back to the pool,
131  * it first calls the object's destructor.  This allows the object to
132  * persist in constructed form while freed to the cache.
133  *
134  * Multiple caches may exist for each pool.  This allows a single
135  * object type to have multiple constructed forms.  The pool references
136  * each cache, so that when a pool is drained by the pagedaemon, it can
137  * drain each individual cache as well.  Each time a cache is drained,
138  * the most idle cache group is freed to the pool in its entirety.
139  *
140  * Pool caches are laid on top of pools.  By layering them, we can avoid
141  * the complexity of cache management for pools which would not benefit
142  * from it.
143  */
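
/*
 * A sketch of typical cache usage (foo_ctor/foo_dtor and the names below
 * are hypothetical; the signatures follow pool_cache_init() below):
 *
 *	int  foo_ctor(void *arg, void *object, int flags);
 *	void foo_dtor(void *arg, void *object);
 *
 *	static struct pool foo_pool;
 *	static struct pool_cache foo_cache;
 *	void *obj;
 *
 *	pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0, "foopl", NULL);
 *	pool_cache_init(&foo_cache, &foo_pool, foo_ctor, foo_dtor, NULL);
 *
 *	obj = pool_cache_get(&foo_cache, PR_WAITOK);
 *	...
 *	pool_cache_put(&foo_cache, obj);
 */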
144 
145 /* The cache group pool. */
146 static struct pool pcgpool;
147 
148 /* The pool cache group. */
149 #define	PCG_NOBJECTS		16
150 struct pool_cache_group {
151 	TAILQ_ENTRY(pool_cache_group)
152 		pcg_list;	/* link in the pool cache's group list */
153 	u_int	pcg_avail;	/* # available objects */
154 				/* pointers to the objects */
155 	void	*pcg_objects[PCG_NOBJECTS];
156 };
157 
158 static void	pool_cache_reclaim(struct pool_cache *);
159 
160 static int	pool_catchup(struct pool *);
161 static void	pool_prime_page(struct pool *, caddr_t,
162 		    struct pool_item_header *);
163 
164 void		*pool_allocator_alloc(struct pool *, int);
165 void		pool_allocator_free(struct pool *, void *);
166 
167 static void pool_print1(struct pool *, const char *,
168 	void (*)(const char *, ...));
169 
170 /*
171  * Pool log entry. An array of these is allocated in pool_init().
172  */
173 struct pool_log {
174 	const char	*pl_file;
175 	long		pl_line;
176 	int		pl_action;
177 #define	PRLOG_GET	1
178 #define	PRLOG_PUT	2
179 	void		*pl_addr;
180 };
181 
182 /* Number of entries in pool log buffers */
183 #ifndef POOL_LOGSIZE
184 #define	POOL_LOGSIZE	10
185 #endif
186 
187 int pool_logsize = POOL_LOGSIZE;
188 
189 #ifdef POOL_DIAGNOSTIC
190 static __inline void
191 pr_log(struct pool *pp, void *v, int action, const char *file, long line)
192 {
193 	int n = pp->pr_curlogentry;
194 	struct pool_log *pl;
195 
196 	if ((pp->pr_roflags & PR_LOGGING) == 0)
197 		return;
198 
199 	/*
200 	 * Fill in the current entry. Wrap around and overwrite
201 	 * the oldest entry if necessary.
202 	 */
203 	pl = &pp->pr_log[n];
204 	pl->pl_file = file;
205 	pl->pl_line = line;
206 	pl->pl_action = action;
207 	pl->pl_addr = v;
208 	if (++n >= pp->pr_logsize)
209 		n = 0;
210 	pp->pr_curlogentry = n;
211 }
212 
213 static void
214 pr_printlog(struct pool *pp, struct pool_item *pi,
215     void (*pr)(const char *, ...))
216 {
217 	int i = pp->pr_logsize;
218 	int n = pp->pr_curlogentry;
219 
220 	if ((pp->pr_roflags & PR_LOGGING) == 0)
221 		return;
222 
223 	/*
224 	 * Print all entries in this pool's log.
225 	 */
226 	while (i-- > 0) {
227 		struct pool_log *pl = &pp->pr_log[n];
228 		if (pl->pl_action != 0) {
229 			if (pi == NULL || pi == pl->pl_addr) {
230 				(*pr)("\tlog entry %d:\n", i);
231 				(*pr)("\t\taction = %s, addr = %p\n",
232 				    pl->pl_action == PRLOG_GET ? "get" : "put",
233 				    pl->pl_addr);
234 				(*pr)("\t\tfile: %s at line %lu\n",
235 				    pl->pl_file, pl->pl_line);
236 			}
237 		}
238 		if (++n >= pp->pr_logsize)
239 			n = 0;
240 	}
241 }
242 
243 static __inline void
244 pr_enter(struct pool *pp, const char *file, long line)
245 {
246 
247 	if (__predict_false(pp->pr_entered_file != NULL)) {
248 		printf("pool %s: reentrancy at file %s line %ld\n",
249 		    pp->pr_wchan, file, line);
250 		printf("         previous entry at file %s line %ld\n",
251 		    pp->pr_entered_file, pp->pr_entered_line);
252 		panic("pr_enter");
253 	}
254 
255 	pp->pr_entered_file = file;
256 	pp->pr_entered_line = line;
257 }
258 
259 static __inline void
260 pr_leave(struct pool *pp)
261 {
262 
263 	if (__predict_false(pp->pr_entered_file == NULL)) {
264 		printf("pool %s not entered?\n", pp->pr_wchan);
265 		panic("pr_leave");
266 	}
267 
268 	pp->pr_entered_file = NULL;
269 	pp->pr_entered_line = 0;
270 }
271 
272 static __inline void
273 pr_enter_check(struct pool *pp, void (*pr)(const char *, ...))
274 {
275 
276 	if (pp->pr_entered_file != NULL)
277 		(*pr)("\n\tcurrently entered from file %s line %ld\n",
278 		    pp->pr_entered_file, pp->pr_entered_line);
279 }
280 #else
281 #define	pr_log(pp, v, action, file, line)
282 #define	pr_printlog(pp, pi, pr)
283 #define	pr_enter(pp, file, line)
284 #define	pr_leave(pp)
285 #define	pr_enter_check(pp, pr)
286 #endif /* POOL_DIAGNOSTIC */
287 
288 /*
289  * Return the pool page header based on page address.
290  */
291 static __inline struct pool_item_header *
292 pr_find_pagehead(struct pool *pp, caddr_t page)
293 {
294 	struct pool_item_header *ph;
295 
296 	if ((pp->pr_roflags & PR_PHINPAGE) != 0)
297 		return ((struct pool_item_header *)(page + pp->pr_phoffset));
298 
299 	for (ph = LIST_FIRST(&pp->pr_hashtab[PR_HASH_INDEX(pp, page)]);
300 	     ph != NULL;
301 	     ph = LIST_NEXT(ph, ph_hashlist)) {
302 		if (ph->ph_page == page)
303 			return (ph);
304 	}
305 	return (NULL);
306 }
307 
308 /*
309  * Remove a page from the pool.
310  */
311 static __inline void
312 pr_rmpage(struct pool *pp, struct pool_item_header *ph,
313      struct pool_pagelist *pq)
314 {
315 	int s;
316 
317 	/*
318 	 * If the page was idle, decrement the idle page count.
319 	 */
320 	if (ph->ph_nmissing == 0) {
321 #ifdef DIAGNOSTIC
322 		if (pp->pr_nidle == 0)
323 			panic("pr_rmpage: nidle inconsistent");
324 		if (pp->pr_nitems < pp->pr_itemsperpage)
325 			panic("pr_rmpage: nitems inconsistent");
326 #endif
327 		pp->pr_nidle--;
328 	}
329 
330 	pp->pr_nitems -= pp->pr_itemsperpage;
331 
332 	/*
333 	 * Unlink a page from the pool and release it (or queue it for release).
334 	 */
335 	TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist);
336 	if (pq) {
337 		TAILQ_INSERT_HEAD(pq, ph, ph_pagelist);
338 	} else {
339 		pool_allocator_free(pp, ph->ph_page);
340 		if ((pp->pr_roflags & PR_PHINPAGE) == 0) {
341 			LIST_REMOVE(ph, ph_hashlist);
342 			s = splhigh();
343 			pool_put(&phpool, ph);
344 			splx(s);
345 		}
346 	}
347 	pp->pr_npages--;
348 	pp->pr_npagefree++;
349 
350 	if (pp->pr_curpage == ph) {
351 		/*
352 		 * Find a new non-empty page header, if any.
353 		 * Start search from the page head, to increase the
354 		 * chance for "high water" pages to be freed.
355 		 */
356 		TAILQ_FOREACH(ph, &pp->pr_pagelist, ph_pagelist)
357 			if (TAILQ_FIRST(&ph->ph_itemlist) != NULL)
358 				break;
359 
360 		pp->pr_curpage = ph;
361 	}
362 }
363 
364 /*
365  * Initialize the given pool resource structure.
366  *
367  * We export this routine to allow other kernel parts to declare
368  * static pools that must be initialized before malloc() is available.
369  */
370 void
371 pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags,
372     const char *wchan, struct pool_allocator *palloc)
373 {
374 	int off, slack, i;
375 
376 #ifdef POOL_DIAGNOSTIC
377 	/*
378 	 * Always log if POOL_DIAGNOSTIC is defined.
379 	 */
380 	if (pool_logsize != 0)
381 		flags |= PR_LOGGING;
382 #endif
383 
384 #ifdef POOL_SUBPAGE
385 	/*
386 	 * XXX We don't provide a real `nointr' back-end
387 	 * yet; all sub-pages come from a kmem back-end.
388 	 * maybe some day...
389 	 */
390 	if (palloc == NULL) {
391 		extern struct pool_allocator pool_allocator_kmem_subpage;
392 		palloc = &pool_allocator_kmem_subpage;
393 	}
394 	/*
395 	 * We'll assume any user-specified back-end allocator
396 	 * will deal with sub-pages, or simply don't care.
397 	 */
398 #else
399 	if (palloc == NULL)
400 		palloc = &pool_allocator_kmem;
401 #endif /* POOL_SUBPAGE */
402 	if ((palloc->pa_flags & PA_INITIALIZED) == 0) {
403 		if (palloc->pa_pagesz == 0) {
404 #ifdef POOL_SUBPAGE
405 			if (palloc == &pool_allocator_kmem)
406 				palloc->pa_pagesz = PAGE_SIZE;
407 			else
408 				palloc->pa_pagesz = POOL_SUBPAGE;
409 #else
410 			palloc->pa_pagesz = PAGE_SIZE;
411 #endif /* POOL_SUBPAGE */
412 		}
413 
414 		TAILQ_INIT(&palloc->pa_list);
415 
416 		simple_lock_init(&palloc->pa_slock);
417 		palloc->pa_pagemask = ~(palloc->pa_pagesz - 1);
418 		palloc->pa_pageshift = ffs(palloc->pa_pagesz) - 1;
419 		palloc->pa_flags |= PA_INITIALIZED;
420 	}
421 
422 	if (align == 0)
423 		align = ALIGN(1);
424 
425 	if (size < sizeof(struct pool_item))
426 		size = sizeof(struct pool_item);
427 
428 	size = ALIGN(size);
429 #ifdef DIAGNOSTIC
430 	if (size > palloc->pa_pagesz)
431 		panic("pool_init: pool item size (%lu) too large",
432 		      (u_long)size);
433 #endif
434 
435 	/*
436 	 * Initialize the pool structure.
437 	 */
438 	TAILQ_INIT(&pp->pr_pagelist);
439 	TAILQ_INIT(&pp->pr_cachelist);
440 	pp->pr_curpage = NULL;
441 	pp->pr_npages = 0;
442 	pp->pr_minitems = 0;
443 	pp->pr_minpages = 0;
444 	pp->pr_maxpages = UINT_MAX;
445 	pp->pr_roflags = flags;
446 	pp->pr_flags = 0;
447 	pp->pr_size = size;
448 	pp->pr_align = align;
449 	pp->pr_wchan = wchan;
450 	pp->pr_alloc = palloc;
451 	pp->pr_nitems = 0;
452 	pp->pr_nout = 0;
453 	pp->pr_hardlimit = UINT_MAX;
454 	pp->pr_hardlimit_warning = NULL;
455 	pp->pr_hardlimit_ratecap.tv_sec = 0;
456 	pp->pr_hardlimit_ratecap.tv_usec = 0;
457 	pp->pr_hardlimit_warning_last.tv_sec = 0;
458 	pp->pr_hardlimit_warning_last.tv_usec = 0;
459 	pp->pr_drain_hook = NULL;
460 	pp->pr_drain_hook_arg = NULL;
461 
462 	/*
463 	 * Decide whether to put the page header off page to avoid
464 	 * wasting too large a part of the page. Off-page page headers
465 	 * go on a hash table, so we can match a returned item
466 	 * with its header based on the page address.
467 	 * We use 1/16 of the page size as the threshold (XXX: tune)
468 	 */
469 	if (pp->pr_size < palloc->pa_pagesz/16) {
470 		/* Use the end of the page for the page header */
471 		pp->pr_roflags |= PR_PHINPAGE;
472 		pp->pr_phoffset = off = palloc->pa_pagesz -
473 		    ALIGN(sizeof(struct pool_item_header));
474 	} else {
475 		/* The page header will be taken from our page header pool */
476 		pp->pr_phoffset = 0;
477 		off = palloc->pa_pagesz;
478 		for (i = 0; i < PR_HASHTABSIZE; i++) {
479 			LIST_INIT(&pp->pr_hashtab[i]);
480 		}
481 	}
482 
483 	/*
484 	 * Alignment is to take place at `ioff' within the item. This means
485 	 * we must reserve up to `align - 1' bytes on the page to allow
486 	 * appropriate positioning of each item.
487 	 *
488 	 * Silently enforce `0 <= ioff < align'.
489 	 */
490 	pp->pr_itemoffset = ioff = ioff % align;
491 	pp->pr_itemsperpage = (off - ((align - ioff) % align)) / pp->pr_size;
492 	KASSERT(pp->pr_itemsperpage != 0);
493 
494 	/*
495 	 * Use the slack between the chunks and the page header
496 	 * for "cache coloring".
497 	 */
498 	slack = off - pp->pr_itemsperpage * pp->pr_size;
499 	pp->pr_maxcolor = (slack / align) * align;
500 	pp->pr_curcolor = 0;
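	/*
	 * Worked example with hypothetical numbers: a 4096-byte page,
	 * an off-page header (off == 4096), pr_size == 384, pr_align == 8
	 * and ioff == 0 give pr_itemsperpage == 10, so slack == 4096 -
	 * 3840 == 256 and pr_maxcolor == 256.  Successive pages then
	 * start their items at offsets 0, 8, 16, ..., 256 (see
	 * pool_prime_page()), spreading items over different cache lines.
	 */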
501 
502 	pp->pr_nget = 0;
503 	pp->pr_nfail = 0;
504 	pp->pr_nput = 0;
505 	pp->pr_npagealloc = 0;
506 	pp->pr_npagefree = 0;
507 	pp->pr_hiwat = 0;
508 	pp->pr_nidle = 0;
509 
510 #ifdef POOL_DIAGNOSTIC
511 	if (flags & PR_LOGGING) {
512 		if (kmem_map == NULL ||
513 		    (pp->pr_log = malloc(pool_logsize * sizeof(struct pool_log),
514 		     M_TEMP, M_NOWAIT)) == NULL)
515 			pp->pr_roflags &= ~PR_LOGGING;
516 		pp->pr_curlogentry = 0;
517 		pp->pr_logsize = pool_logsize;
518 	}
519 #endif
520 
521 	pp->pr_entered_file = NULL;
522 	pp->pr_entered_line = 0;
523 
524 	simple_lock_init(&pp->pr_slock);
525 
526 	/*
527 	 * Initialize private page header pool and cache magazine pool if we
528 	 * haven't done so yet.
529 	 * XXX LOCKING.
530 	 */
531 	if (phpool.pr_size == 0) {
532 #ifdef POOL_SUBPAGE
533 		pool_init(&phpool, sizeof(struct pool_item_header), 0, 0, 0,
534 		    "phpool", &pool_allocator_kmem);
535 		pool_init(&psppool, POOL_SUBPAGE, POOL_SUBPAGE, 0,
536 		    PR_RECURSIVE, "psppool", &pool_allocator_kmem);
537 #else
538 		pool_init(&phpool, sizeof(struct pool_item_header), 0, 0,
539 		    0, "phpool", NULL);
540 #endif
541 		pool_init(&pcgpool, sizeof(struct pool_cache_group), 0, 0,
542 		    0, "pcgpool", NULL);
543 	}
544 
545 	/* Insert into the list of all pools. */
546 	simple_lock(&pool_head_slock);
547 	TAILQ_INSERT_TAIL(&pool_head, pp, pr_poollist);
548 	simple_unlock(&pool_head_slock);
549 
550 	/* Insert this into the list of pools using this allocator. */
551 	simple_lock(&palloc->pa_slock);
552 	TAILQ_INSERT_TAIL(&palloc->pa_list, pp, pr_alloc_list);
553 	simple_unlock(&palloc->pa_slock);
554 }
555 
556 /*
557  * De-commission a pool resource.
558  */
559 void
560 pool_destroy(struct pool *pp)
561 {
562 	struct pool_item_header *ph;
563 	struct pool_cache *pc;
564 
565 	/* Locking order: pool_allocator -> pool */
566 	simple_lock(&pp->pr_alloc->pa_slock);
567 	TAILQ_REMOVE(&pp->pr_alloc->pa_list, pp, pr_alloc_list);
568 	simple_unlock(&pp->pr_alloc->pa_slock);
569 
570 	/* Destroy all caches for this pool. */
571 	while ((pc = TAILQ_FIRST(&pp->pr_cachelist)) != NULL)
572 		pool_cache_destroy(pc);
573 
574 #ifdef DIAGNOSTIC
575 	if (pp->pr_nout != 0) {
576 		pr_printlog(pp, NULL, printf);
577 		panic("pool_destroy: pool busy: still out: %u\n",
578 		    pp->pr_nout);
579 	}
580 #endif
581 
582 	/* Remove all pages */
583 	while ((ph = TAILQ_FIRST(&pp->pr_pagelist)) != NULL)
584 		pr_rmpage(pp, ph, NULL);
585 
586 	/* Remove from global pool list */
587 	simple_lock(&pool_head_slock);
588 	TAILQ_REMOVE(&pool_head, pp, pr_poollist);
589 	if (drainpp == pp) {
590 		drainpp = NULL;
591 	}
592 	simple_unlock(&pool_head_slock);
593 
594 #ifdef POOL_DIAGNOSTIC
595 	if ((pp->pr_roflags & PR_LOGGING) != 0)
596 		free(pp->pr_log, M_TEMP);
597 #endif
598 }
599 
600 void
601 pool_set_drain_hook(struct pool *pp, void (*fn)(void *, int), void *arg)
602 {
603 
604 	/* XXX no locking -- must be used just after pool_init() */
605 #ifdef DIAGNOSTIC
606 	if (pp->pr_drain_hook != NULL)
607 		panic("pool_set_drain_hook(%s): already set", pp->pr_wchan);
608 #endif
609 	pp->pr_drain_hook = fn;
610 	pp->pr_drain_hook_arg = arg;
611 }
612 
613 static __inline struct pool_item_header *
614 pool_alloc_item_header(struct pool *pp, caddr_t storage, int flags)
615 {
616 	struct pool_item_header *ph;
617 	int s;
618 
619 	LOCK_ASSERT(simple_lock_held(&pp->pr_slock) == 0);
620 
621 	if ((pp->pr_roflags & PR_PHINPAGE) != 0)
622 		ph = (struct pool_item_header *) (storage + pp->pr_phoffset);
623 	else {
624 		s = splhigh();
625 		ph = pool_get(&phpool, flags);
626 		splx(s);
627 	}
628 
629 	return (ph);
630 }
631 
632 /*
633  * Grab an item from the pool; must be called at appropriate spl level
634  */
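/*
 * For example (illustrative only; the correct spl level depends on which
 * interrupt handlers also use the pool), a caller might do:
 *
 *	s = splvm();
 *	f = pool_get(&foo_pool, PR_NOWAIT);
 *	splx(s);
 */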
635 void *
636 #ifdef POOL_DIAGNOSTIC
637 _pool_get(struct pool *pp, int flags, const char *file, long line)
638 #else
639 pool_get(struct pool *pp, int flags)
640 #endif
641 {
642 	struct pool_item *pi;
643 	struct pool_item_header *ph;
644 	void *v;
645 
646 #ifdef DIAGNOSTIC
647 	if (__predict_false(curproc == NULL && doing_shutdown == 0 &&
648 			    (flags & PR_WAITOK) != 0))
649 		panic("pool_get: must have NOWAIT");
650 
651 #ifdef LOCKDEBUG
652 	if (flags & PR_WAITOK)
653 		simple_lock_only_held(NULL, "pool_get(PR_WAITOK)");
654 #endif
655 #endif /* DIAGNOSTIC */
656 
657 	simple_lock(&pp->pr_slock);
658 	pr_enter(pp, file, line);
659 
660  startover:
661 	/*
662 	 * Check to see if we've reached the hard limit.  If we have,
663 	 * and we can wait, then wait until an item has been returned to
664 	 * the pool.
665 	 */
666 #ifdef DIAGNOSTIC
667 	if (__predict_false(pp->pr_nout > pp->pr_hardlimit)) {
668 		pr_leave(pp);
669 		simple_unlock(&pp->pr_slock);
670 		panic("pool_get: %s: crossed hard limit", pp->pr_wchan);
671 	}
672 #endif
673 	if (__predict_false(pp->pr_nout == pp->pr_hardlimit)) {
674 		if (pp->pr_drain_hook != NULL) {
675 			/*
676 			 * Since the drain hook is going to free things
677 			 * back to the pool, unlock, call the hook, re-lock,
678 			 * and check the hardlimit condition again.
679 			 */
680 			pr_leave(pp);
681 			simple_unlock(&pp->pr_slock);
682 			(*pp->pr_drain_hook)(pp->pr_drain_hook_arg, flags);
683 			simple_lock(&pp->pr_slock);
684 			pr_enter(pp, file, line);
685 			if (pp->pr_nout < pp->pr_hardlimit)
686 				goto startover;
687 		}
688 
689 		if ((flags & PR_WAITOK) && !(flags & PR_LIMITFAIL)) {
690 			/*
691 			 * XXX: A warning isn't logged in this case.  Should
692 			 * it be?
693 			 */
694 			pp->pr_flags |= PR_WANTED;
695 			pr_leave(pp);
696 			ltsleep(pp, PSWP, pp->pr_wchan, 0, &pp->pr_slock);
697 			pr_enter(pp, file, line);
698 			goto startover;
699 		}
700 
701 		/*
702 		 * Log a message that the hard limit has been hit.
703 		 */
704 		if (pp->pr_hardlimit_warning != NULL &&
705 		    ratecheck(&pp->pr_hardlimit_warning_last,
706 			      &pp->pr_hardlimit_ratecap))
707 			log(LOG_ERR, "%s\n", pp->pr_hardlimit_warning);
708 
709 		pp->pr_nfail++;
710 
711 		pr_leave(pp);
712 		simple_unlock(&pp->pr_slock);
713 		return (NULL);
714 	}
715 
716 	/*
717 	 * The convention we use is that if `curpage' is not NULL, then
718 	 * it points at a non-empty bucket. In particular, `curpage'
719 	 * never points at a page header which has PR_PHINPAGE set and
720 	 * has no items in its bucket.
721 	 */
722 	if ((ph = pp->pr_curpage) == NULL) {
723 #ifdef DIAGNOSTIC
724 		if (pp->pr_nitems != 0) {
725 			simple_unlock(&pp->pr_slock);
726 			printf("pool_get: %s: curpage NULL, nitems %u\n",
727 			    pp->pr_wchan, pp->pr_nitems);
728 			panic("pool_get: nitems inconsistent\n");
729 		}
730 #endif
731 
732 		/*
733 		 * Call the back-end page allocator for more memory.
734 		 * Release the pool lock, as the back-end page allocator
735 		 * may block.
736 		 */
737 		pr_leave(pp);
738 		simple_unlock(&pp->pr_slock);
739 		v = pool_allocator_alloc(pp, flags);
740 		if (__predict_true(v != NULL))
741 			ph = pool_alloc_item_header(pp, v, flags);
742 		simple_lock(&pp->pr_slock);
743 		pr_enter(pp, file, line);
744 
745 		if (__predict_false(v == NULL || ph == NULL)) {
746 			if (v != NULL)
747 				pool_allocator_free(pp, v);
748 
749 			/*
750 			 * We were unable to allocate a page or item
751 			 * header, but we released the lock during
752 			 * allocation, so perhaps items were freed
753 			 * back to the pool.  Check for this case.
754 			 */
755 			if (pp->pr_curpage != NULL)
756 				goto startover;
757 
758 			if ((flags & PR_WAITOK) == 0) {
759 				pp->pr_nfail++;
760 				pr_leave(pp);
761 				simple_unlock(&pp->pr_slock);
762 				return (NULL);
763 			}
764 
765 			/*
766 			 * Wait for items to be returned to this pool.
767 			 *
768 			 * XXX: maybe we should wake up once a second and
769 			 * try again?
770 			 */
771 			pp->pr_flags |= PR_WANTED;
772 			/* PA_WANTED is already set on the allocator. */
773 			pr_leave(pp);
774 			ltsleep(pp, PSWP, pp->pr_wchan, 0, &pp->pr_slock);
775 			pr_enter(pp, file, line);
776 			goto startover;
777 		}
778 
779 		/* We have more memory; add it to the pool */
780 		pool_prime_page(pp, v, ph);
781 		pp->pr_npagealloc++;
782 
783 		/* Start the allocation process over. */
784 		goto startover;
785 	}
786 
787 	if (__predict_false((v = pi = TAILQ_FIRST(&ph->ph_itemlist)) == NULL)) {
788 		pr_leave(pp);
789 		simple_unlock(&pp->pr_slock);
790 		panic("pool_get: %s: page empty", pp->pr_wchan);
791 	}
792 #ifdef DIAGNOSTIC
793 	if (__predict_false(pp->pr_nitems == 0)) {
794 		pr_leave(pp);
795 		simple_unlock(&pp->pr_slock);
796 		printf("pool_get: %s: items on itemlist, nitems %u\n",
797 		    pp->pr_wchan, pp->pr_nitems);
798 		panic("pool_get: nitems inconsistent\n");
799 	}
800 #endif
801 
802 #ifdef POOL_DIAGNOSTIC
803 	pr_log(pp, v, PRLOG_GET, file, line);
804 #endif
805 
806 #ifdef DIAGNOSTIC
807 	if (__predict_false(pi->pi_magic != PI_MAGIC)) {
808 		pr_printlog(pp, pi, printf);
809 		panic("pool_get(%s): free list modified: magic=%x; page %p;"
810 		       " item addr %p\n",
811 			pp->pr_wchan, pi->pi_magic, ph->ph_page, pi);
812 	}
813 #endif
814 
815 	/*
816 	 * Remove from item list.
817 	 */
818 	TAILQ_REMOVE(&ph->ph_itemlist, pi, pi_list);
819 	pp->pr_nitems--;
820 	pp->pr_nout++;
821 	if (ph->ph_nmissing == 0) {
822 #ifdef DIAGNOSTIC
823 		if (__predict_false(pp->pr_nidle == 0))
824 			panic("pool_get: nidle inconsistent");
825 #endif
826 		pp->pr_nidle--;
827 	}
828 	ph->ph_nmissing++;
829 	if (TAILQ_FIRST(&ph->ph_itemlist) == NULL) {
830 #ifdef DIAGNOSTIC
831 		if (__predict_false(ph->ph_nmissing != pp->pr_itemsperpage)) {
832 			pr_leave(pp);
833 			simple_unlock(&pp->pr_slock);
834 			panic("pool_get: %s: nmissing inconsistent",
835 			    pp->pr_wchan);
836 		}
837 #endif
838 		/*
839 		 * Find a new non-empty page header, if any.
840 		 * Start search from the page head, to increase
841 		 * the chance for "high water" pages to be freed.
842 		 *
843 		 * Migrate empty pages to the end of the list.  This
844 		 * will speed the update of curpage as pages become
845 		 * idle.  Empty pages intermingled with idle pages
846 		 * are no big deal.  As soon as a page becomes un-empty,
847 		 * it will move back to the head of the list.
848 		 */
849 		TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist);
850 		TAILQ_INSERT_TAIL(&pp->pr_pagelist, ph, ph_pagelist);
851 		TAILQ_FOREACH(ph, &pp->pr_pagelist, ph_pagelist)
852 			if (TAILQ_FIRST(&ph->ph_itemlist) != NULL)
853 				break;
854 
855 		pp->pr_curpage = ph;
856 	}
857 
858 	pp->pr_nget++;
859 
860 	/*
861 	 * If we have a low water mark and we are now below that low
862 	 * water mark, add more items to the pool.
863 	 */
864 	if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
865 		/*
866 		 * XXX: Should we log a warning?  Should we set up a timeout
867 		 * to try again in a second or so?  The latter could break
868 		 * a caller's assumptions about interrupt protection, etc.
869 		 */
870 	}
871 
872 	pr_leave(pp);
873 	simple_unlock(&pp->pr_slock);
874 	return (v);
875 }
876 
877 /*
878  * Internal version of pool_put().  Pool is already locked/entered.
879  */
880 static void
881 pool_do_put(struct pool *pp, void *v)
882 {
883 	struct pool_item *pi = v;
884 	struct pool_item_header *ph;
885 	caddr_t page;
886 	int s;
887 
888 	LOCK_ASSERT(simple_lock_held(&pp->pr_slock));
889 
890 	page = (caddr_t)((u_long)v & pp->pr_alloc->pa_pagemask);
891 
892 #ifdef DIAGNOSTIC
893 	if (__predict_false(pp->pr_nout == 0)) {
894 		printf("pool %s: putting with none out\n",
895 		    pp->pr_wchan);
896 		panic("pool_put");
897 	}
898 #endif
899 
900 	if (__predict_false((ph = pr_find_pagehead(pp, page)) == NULL)) {
901 		pr_printlog(pp, NULL, printf);
902 		panic("pool_put: %s: page header missing", pp->pr_wchan);
903 	}
904 
905 #ifdef LOCKDEBUG
906 	/*
907 	 * Check if we're freeing a locked simple lock.
908 	 */
909 	simple_lock_freecheck((caddr_t)pi, ((caddr_t)pi) + pp->pr_size);
910 #endif
911 
912 	/*
913 	 * Return to item list.
914 	 */
915 #ifdef DIAGNOSTIC
916 	pi->pi_magic = PI_MAGIC;
917 #endif
918 #ifdef DEBUG
919 	{
920 		int i, *ip = v;
921 
922 		for (i = 0; i < pp->pr_size / sizeof(int); i++) {
923 			*ip++ = PI_MAGIC;
924 		}
925 	}
926 #endif
927 
928 	TAILQ_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
929 	ph->ph_nmissing--;
930 	pp->pr_nput++;
931 	pp->pr_nitems++;
932 	pp->pr_nout--;
933 
934 	/* Cancel "pool empty" condition if it exists */
935 	if (pp->pr_curpage == NULL)
936 		pp->pr_curpage = ph;
937 
938 	if (pp->pr_flags & PR_WANTED) {
939 		pp->pr_flags &= ~PR_WANTED;
940 		if (ph->ph_nmissing == 0)
941 			pp->pr_nidle++;
942 		wakeup((caddr_t)pp);
943 		return;
944 	}
945 
946 	/*
947 	 * If this page is now complete, do one of two things:
948 	 *
949 	 *	(1) If we have more pages than the page high water
950 	 *	    mark, free the page back to the system.
951 	 *
952 	 *	(2) Move it to the end of the page list, so that
953 	 *	    we minimize our chances of fragmenting the
954 	 *	    pool.  Idle pages migrate to the end (along with
955 	 *	    completely empty pages, so that we find un-empty
956 	 *	    pages more quickly when we update curpage) of the
957 	 *	    list so they can be more easily swept up by
958 	 *	    the pagedaemon when pages are scarce.
959 	 */
960 	if (ph->ph_nmissing == 0) {
961 		pp->pr_nidle++;
962 		if (pp->pr_npages > pp->pr_maxpages ||
963 		    (pp->pr_alloc->pa_flags & PA_WANT) != 0) {
964 			pr_rmpage(pp, ph, NULL);
965 		} else {
966 			TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist);
967 			TAILQ_INSERT_TAIL(&pp->pr_pagelist, ph, ph_pagelist);
968 
969 			/*
970 			 * Update the timestamp on the page.  A page must
971 			 * be idle for some period of time before it can
972 			 * be reclaimed by the pagedaemon.  This minimizes
973 			 * ping-pong'ing for memory.
974 			 */
975 			s = splclock();
976 			ph->ph_time = mono_time;
977 			splx(s);
978 
979 			/*
980 			 * Update the current page pointer.  Just look for
981 			 * the first page with any free items.
982 			 *
983 			 * XXX: Maybe we want an option to look for the
984 			 * page with the fewest available items, to minimize
985 			 * fragmentation?
986 			 */
987 			TAILQ_FOREACH(ph, &pp->pr_pagelist, ph_pagelist)
988 				if (TAILQ_FIRST(&ph->ph_itemlist) != NULL)
989 					break;
990 
991 			pp->pr_curpage = ph;
992 		}
993 	}
994 	/*
995 	 * If the page has just become un-empty, move it to the head of
996 	 * the list, and make it the current page.  The next allocation
997 	 * will get the item from this page, instead of further fragmenting
998 	 * the pool.
999 	 */
1000 	else if (ph->ph_nmissing == (pp->pr_itemsperpage - 1)) {
1001 		TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist);
1002 		TAILQ_INSERT_HEAD(&pp->pr_pagelist, ph, ph_pagelist);
1003 		pp->pr_curpage = ph;
1004 	}
1005 }
1006 
1007 /*
1008  * Return resource to the pool; must be called at appropriate spl level
1009  */
1010 #ifdef POOL_DIAGNOSTIC
1011 void
1012 _pool_put(struct pool *pp, void *v, const char *file, long line)
1013 {
1014 
1015 	simple_lock(&pp->pr_slock);
1016 	pr_enter(pp, file, line);
1017 
1018 	pr_log(pp, v, PRLOG_PUT, file, line);
1019 
1020 	pool_do_put(pp, v);
1021 
1022 	pr_leave(pp);
1023 	simple_unlock(&pp->pr_slock);
1024 }
1025 #undef pool_put
1026 #endif /* POOL_DIAGNOSTIC */
1027 
1028 void
1029 pool_put(struct pool *pp, void *v)
1030 {
1031 
1032 	simple_lock(&pp->pr_slock);
1033 
1034 	pool_do_put(pp, v);
1035 
1036 	simple_unlock(&pp->pr_slock);
1037 }
1038 
1039 #ifdef POOL_DIAGNOSTIC
1040 #define		pool_put(h, v)	_pool_put((h), (v), __FILE__, __LINE__)
1041 #endif
1042 
1043 /*
1044  * Add N items to the pool.
1045  */
1046 int
1047 pool_prime(struct pool *pp, int n)
1048 {
1049 	struct pool_item_header *ph;
1050 	caddr_t cp;
1051 	int newpages, error = 0;
1052 
1053 	simple_lock(&pp->pr_slock);
1054 
1055 	newpages = roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1056 
1057 	while (newpages-- > 0) {
1058 		simple_unlock(&pp->pr_slock);
1059 		cp = pool_allocator_alloc(pp, PR_NOWAIT);
1060 		if (__predict_true(cp != NULL))
1061 			ph = pool_alloc_item_header(pp, cp, PR_NOWAIT);
1062 		simple_lock(&pp->pr_slock);
1063 
1064 		if (__predict_false(cp == NULL || ph == NULL)) {
1065 			error = ENOMEM;
1066 			if (cp != NULL)
1067 				pool_allocator_free(pp, cp);
1068 			break;
1069 		}
1070 
1071 		pool_prime_page(pp, cp, ph);
1072 		pp->pr_npagealloc++;
1073 		pp->pr_minpages++;
1074 	}
1075 
1076 	if (pp->pr_minpages >= pp->pr_maxpages)
1077 		pp->pr_maxpages = pp->pr_minpages + 1;	/* XXX */
1078 
1079 	simple_unlock(&pp->pr_slock);
1080 	return (error);
1081 }
1082 
1083 /*
1084  * Add a page worth of items to the pool.
1085  *
1086  * Note, we must be called with the pool descriptor LOCKED.
1087  */
1088 static void
1089 pool_prime_page(struct pool *pp, caddr_t storage, struct pool_item_header *ph)
1090 {
1091 	struct pool_item *pi;
1092 	caddr_t cp = storage;
1093 	unsigned int align = pp->pr_align;
1094 	unsigned int ioff = pp->pr_itemoffset;
1095 	int n;
1096 
1097 #ifdef DIAGNOSTIC
1098 	if (((u_long)cp & (pp->pr_alloc->pa_pagesz - 1)) != 0)
1099 		panic("pool_prime_page: %s: unaligned page", pp->pr_wchan);
1100 #endif
1101 
1102 	if ((pp->pr_roflags & PR_PHINPAGE) == 0)
1103 		LIST_INSERT_HEAD(&pp->pr_hashtab[PR_HASH_INDEX(pp, cp)],
1104 		    ph, ph_hashlist);
1105 
1106 	/*
1107 	 * Insert page header.
1108 	 */
1109 	TAILQ_INSERT_HEAD(&pp->pr_pagelist, ph, ph_pagelist);
1110 	TAILQ_INIT(&ph->ph_itemlist);
1111 	ph->ph_page = storage;
1112 	ph->ph_nmissing = 0;
1113 	memset(&ph->ph_time, 0, sizeof(ph->ph_time));
1114 
1115 	pp->pr_nidle++;
1116 
1117 	/*
1118 	 * Color this page.
1119 	 */
1120 	cp = (caddr_t)(cp + pp->pr_curcolor);
1121 	if ((pp->pr_curcolor += align) > pp->pr_maxcolor)
1122 		pp->pr_curcolor = 0;
1123 
1124 	/*
1125 	 * Adjust storage to apply alignment to `pr_itemoffset' in each item.
1126 	 */
1127 	if (ioff != 0)
1128 		cp = (caddr_t)(cp + (align - ioff));
1129 
1130 	/*
1131 	 * Insert remaining chunks on the bucket list.
1132 	 */
1133 	n = pp->pr_itemsperpage;
1134 	pp->pr_nitems += n;
1135 
1136 	while (n--) {
1137 		pi = (struct pool_item *)cp;
1138 
1139 		/* Insert on page list */
1140 		TAILQ_INSERT_TAIL(&ph->ph_itemlist, pi, pi_list);
1141 #ifdef DIAGNOSTIC
1142 		pi->pi_magic = PI_MAGIC;
1143 #endif
1144 		cp = (caddr_t)(cp + pp->pr_size);
1145 	}
1146 
1147 	/*
1148 	 * If the pool was depleted, point at the new page.
1149 	 */
1150 	if (pp->pr_curpage == NULL)
1151 		pp->pr_curpage = ph;
1152 
1153 	if (++pp->pr_npages > pp->pr_hiwat)
1154 		pp->pr_hiwat = pp->pr_npages;
1155 }
1156 
1157 /*
1158  * Used by pool_get() when nitems drops below the low water mark.  This
1159  * is used to catch up nitems with the low water mark.
1160  *
1161  * Note 1, we never wait for memory here, we let the caller decide what to do.
1162  *
1163  * Note 2, we must be called with the pool already locked, and we return
1164  * with it locked.
1165  */
1166 static int
1167 pool_catchup(struct pool *pp)
1168 {
1169 	struct pool_item_header *ph;
1170 	caddr_t cp;
1171 	int error = 0;
1172 
1173 	while (POOL_NEEDS_CATCHUP(pp)) {
1174 		/*
1175 		 * Call the page back-end allocator for more memory.
1176 		 *
1177 		 * XXX: We never wait, so should we bother unlocking
1178 		 * the pool descriptor?
1179 		 */
1180 		simple_unlock(&pp->pr_slock);
1181 		cp = pool_allocator_alloc(pp, PR_NOWAIT);
1182 		if (__predict_true(cp != NULL))
1183 			ph = pool_alloc_item_header(pp, cp, PR_NOWAIT);
1184 		simple_lock(&pp->pr_slock);
1185 		if (__predict_false(cp == NULL || ph == NULL)) {
1186 			if (cp != NULL)
1187 				pool_allocator_free(pp, cp);
1188 			error = ENOMEM;
1189 			break;
1190 		}
1191 		pool_prime_page(pp, cp, ph);
1192 		pp->pr_npagealloc++;
1193 	}
1194 
1195 	return (error);
1196 }
1197 
1198 void
1199 pool_setlowat(struct pool *pp, int n)
1200 {
1201 	int error;
1202 
1203 	simple_lock(&pp->pr_slock);
1204 
1205 	pp->pr_minitems = n;
1206 	pp->pr_minpages = (n == 0)
1207 		? 0
1208 		: roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1209 
1210 	/* Make sure we're caught up with the newly-set low water mark. */
1211 	if (POOL_NEEDS_CATCHUP(pp) && (error = pool_catchup(pp)) != 0) {
1212 		/*
1213 		 * XXX: Should we log a warning?  Should we set up a timeout
1214 		 * to try again in a second or so?  The latter could break
1215 		 * a caller's assumptions about interrupt protection, etc.
1216 		 */
1217 	}
1218 
1219 	simple_unlock(&pp->pr_slock);
1220 }
1221 
1222 void
1223 pool_sethiwat(struct pool *pp, int n)
1224 {
1225 
1226 	simple_lock(&pp->pr_slock);
1227 
1228 	pp->pr_maxpages = (n == 0)
1229 		? 0
1230 		: roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1231 
1232 	simple_unlock(&pp->pr_slock);
1233 }
1234 
1235 void
1236 pool_sethardlimit(struct pool *pp, int n, const char *warnmess, int ratecap)
1237 {
1238 
1239 	simple_lock(&pp->pr_slock);
1240 
1241 	pp->pr_hardlimit = n;
1242 	pp->pr_hardlimit_warning = warnmess;
1243 	pp->pr_hardlimit_ratecap.tv_sec = ratecap;
1244 	pp->pr_hardlimit_warning_last.tv_sec = 0;
1245 	pp->pr_hardlimit_warning_last.tv_usec = 0;
1246 
1247 	/*
1248 	 * In-line version of pool_sethiwat(), because we don't want to
1249 	 * release the lock.
1250 	 */
1251 	pp->pr_maxpages = (n == 0)
1252 		? 0
1253 		: roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1254 
1255 	simple_unlock(&pp->pr_slock);
1256 }
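
/*
 * An illustrative tuning sequence (the figures are arbitrary): keep at
 * least 16 items primed, let the pool release pages it holds beyond
 * roughly 128 items' worth, and never hand out more than 1024 items,
 * warning at most once every 60 seconds:
 *
 *	pool_setlowat(&foo_pool, 16);
 *	pool_sethiwat(&foo_pool, 128);
 *	pool_sethardlimit(&foo_pool, 1024, "foo_pool: hard limit", 60);
 */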
1257 
1258 /*
1259  * Release all complete pages that have not been used recently.
1260  */
1261 int
1262 #ifdef POOL_DIAGNOSTIC
1263 _pool_reclaim(struct pool *pp, const char *file, long line)
1264 #else
1265 pool_reclaim(struct pool *pp)
1266 #endif
1267 {
1268 	struct pool_item_header *ph, *phnext;
1269 	struct pool_cache *pc;
1270 	struct timeval curtime;
1271 	struct pool_pagelist pq;
1272 	int s;
1273 
1274 	if (pp->pr_drain_hook != NULL) {
1275 		/*
1276 		 * The drain hook must be called with the pool unlocked.
1277 		 */
1278 		(*pp->pr_drain_hook)(pp->pr_drain_hook_arg, PR_NOWAIT);
1279 	}
1280 
1281 	if (simple_lock_try(&pp->pr_slock) == 0)
1282 		return (0);
1283 	pr_enter(pp, file, line);
1284 
1285 	TAILQ_INIT(&pq);
1286 
1287 	/*
1288 	 * Reclaim items from the pool's caches.
1289 	 */
1290 	TAILQ_FOREACH(pc, &pp->pr_cachelist, pc_poollist)
1291 		pool_cache_reclaim(pc);
1292 
1293 	s = splclock();
1294 	curtime = mono_time;
1295 	splx(s);
1296 
1297 	for (ph = TAILQ_FIRST(&pp->pr_pagelist); ph != NULL; ph = phnext) {
1298 		phnext = TAILQ_NEXT(ph, ph_pagelist);
1299 
1300 		/* Check our minimum page claim */
1301 		if (pp->pr_npages <= pp->pr_minpages)
1302 			break;
1303 
1304 		if (ph->ph_nmissing == 0) {
1305 			struct timeval diff;
1306 			timersub(&curtime, &ph->ph_time, &diff);
1307 			if (diff.tv_sec < pool_inactive_time)
1308 				continue;
1309 
1310 			/*
1311 			 * If freeing this page would put us below
1312 			 * the low water mark, stop now.
1313 			 */
1314 			if ((pp->pr_nitems - pp->pr_itemsperpage) <
1315 			    pp->pr_minitems)
1316 				break;
1317 
1318 			pr_rmpage(pp, ph, &pq);
1319 		}
1320 	}
1321 
1322 	pr_leave(pp);
1323 	simple_unlock(&pp->pr_slock);
1324 	if (TAILQ_EMPTY(&pq))
1325 		return (0);
1326 
1327 	while ((ph = TAILQ_FIRST(&pq)) != NULL) {
1328 		TAILQ_REMOVE(&pq, ph, ph_pagelist);
1329 		pool_allocator_free(pp, ph->ph_page);
1330 		if (pp->pr_roflags & PR_PHINPAGE) {
1331 			continue;
1332 		}
1333 		LIST_REMOVE(ph, ph_hashlist);
1334 		s = splhigh();
1335 		pool_put(&phpool, ph);
1336 		splx(s);
1337 	}
1338 
1339 	return (1);
1340 }
1341 
1342 /*
1343  * Drain pools, one at a time.
1344  *
1345  * Note, we must never be called from an interrupt context.
1346  */
1347 void
1348 pool_drain(void *arg)
1349 {
1350 	struct pool *pp;
1351 	int s;
1352 
1353 	pp = NULL;
1354 	s = splvm();
1355 	simple_lock(&pool_head_slock);
1356 	if (drainpp == NULL) {
1357 		drainpp = TAILQ_FIRST(&pool_head);
1358 	}
1359 	if (drainpp) {
1360 		pp = drainpp;
1361 		drainpp = TAILQ_NEXT(pp, pr_poollist);
1362 	}
1363 	simple_unlock(&pool_head_slock);
1364 	pool_reclaim(pp);
1365 	splx(s);
1366 }
1367 
1368 /*
1369  * Diagnostic helpers.
1370  */
1371 void
1372 pool_print(struct pool *pp, const char *modif)
1373 {
1374 	int s;
1375 
1376 	s = splvm();
1377 	if (simple_lock_try(&pp->pr_slock) == 0) {
1378 		printf("pool %s is locked; try again later\n",
1379 		    pp->pr_wchan);
1380 		splx(s);
1381 		return;
1382 	}
1383 	pool_print1(pp, modif, printf);
1384 	simple_unlock(&pp->pr_slock);
1385 	splx(s);
1386 }
1387 
1388 void
1389 pool_printit(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
1390 {
1391 	int didlock = 0;
1392 
1393 	if (pp == NULL) {
1394 		(*pr)("Must specify a pool to print.\n");
1395 		return;
1396 	}
1397 
1398 	/*
1399 	 * Called from DDB; interrupts should be blocked, and all
1400 	 * other processors should be paused.  We can skip locking
1401 	 * the pool in this case.
1402 	 *
1403 	 * We do a simple_lock_try() just to print the lock
1404 	 * status, however.
1405 	 */
1406 
1407 	if (simple_lock_try(&pp->pr_slock) == 0)
1408 		(*pr)("WARNING: pool %s is locked\n", pp->pr_wchan);
1409 	else
1410 		didlock = 1;
1411 
1412 	pool_print1(pp, modif, pr);
1413 
1414 	if (didlock)
1415 		simple_unlock(&pp->pr_slock);
1416 }
1417 
1418 static void
1419 pool_print1(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
1420 {
1421 	struct pool_item_header *ph;
1422 	struct pool_cache *pc;
1423 	struct pool_cache_group *pcg;
1424 #ifdef DIAGNOSTIC
1425 	struct pool_item *pi;
1426 #endif
1427 	int i, print_log = 0, print_pagelist = 0, print_cache = 0;
1428 	char c;
1429 
1430 	while ((c = *modif++) != '\0') {
1431 		if (c == 'l')
1432 			print_log = 1;
1433 		if (c == 'p')
1434 			print_pagelist = 1;
1435 		if (c == 'c')
1436 			print_cache = 1;
1438 	}
1439 
1440 	(*pr)("POOL %s: size %u, align %u, ioff %u, roflags 0x%08x\n",
1441 	    pp->pr_wchan, pp->pr_size, pp->pr_align, pp->pr_itemoffset,
1442 	    pp->pr_roflags);
1443 	(*pr)("\talloc %p\n", pp->pr_alloc);
1444 	(*pr)("\tminitems %u, minpages %u, maxpages %u, npages %u\n",
1445 	    pp->pr_minitems, pp->pr_minpages, pp->pr_maxpages, pp->pr_npages);
1446 	(*pr)("\titemsperpage %u, nitems %u, nout %u, hardlimit %u\n",
1447 	    pp->pr_itemsperpage, pp->pr_nitems, pp->pr_nout, pp->pr_hardlimit);
1448 
1449 	(*pr)("\n\tnget %lu, nfail %lu, nput %lu\n",
1450 	    pp->pr_nget, pp->pr_nfail, pp->pr_nput);
1451 	(*pr)("\tnpagealloc %lu, npagefree %lu, hiwat %u, nidle %lu\n",
1452 	    pp->pr_npagealloc, pp->pr_npagefree, pp->pr_hiwat, pp->pr_nidle);
1453 
1454 	if (print_pagelist == 0)
1455 		goto skip_pagelist;
1456 
1457 	if ((ph = TAILQ_FIRST(&pp->pr_pagelist)) != NULL)
1458 		(*pr)("\n\tpage list:\n");
1459 	for (; ph != NULL; ph = TAILQ_NEXT(ph, ph_pagelist)) {
1460 		(*pr)("\t\tpage %p, nmissing %d, time %lu,%lu\n",
1461 		    ph->ph_page, ph->ph_nmissing,
1462 		    (u_long)ph->ph_time.tv_sec,
1463 		    (u_long)ph->ph_time.tv_usec);
1464 #ifdef DIAGNOSTIC
1465 		TAILQ_FOREACH(pi, &ph->ph_itemlist, pi_list) {
1466 			if (pi->pi_magic != PI_MAGIC) {
1467 				(*pr)("\t\t\titem %p, magic 0x%x\n",
1468 				    pi, pi->pi_magic);
1469 			}
1470 		}
1471 #endif
1472 	}
1473 	if (pp->pr_curpage == NULL)
1474 		(*pr)("\tno current page\n");
1475 	else
1476 		(*pr)("\tcurpage %p\n", pp->pr_curpage->ph_page);
1477 
1478  skip_pagelist:
1479 
1480 	if (print_log == 0)
1481 		goto skip_log;
1482 
1483 	(*pr)("\n");
1484 	if ((pp->pr_roflags & PR_LOGGING) == 0)
1485 		(*pr)("\tno log\n");
1486 	else
1487 		pr_printlog(pp, NULL, pr);
1488 
1489  skip_log:
1490 
1491 	if (print_cache == 0)
1492 		goto skip_cache;
1493 
1494 	TAILQ_FOREACH(pc, &pp->pr_cachelist, pc_poollist) {
1495 		(*pr)("\tcache %p: allocfrom %p freeto %p\n", pc,
1496 		    pc->pc_allocfrom, pc->pc_freeto);
1497 		(*pr)("\t    hits %lu misses %lu ngroups %lu nitems %lu\n",
1498 		    pc->pc_hits, pc->pc_misses, pc->pc_ngroups, pc->pc_nitems);
1499 		TAILQ_FOREACH(pcg, &pc->pc_grouplist, pcg_list) {
1500 			(*pr)("\t\tgroup %p: avail %d\n", pcg, pcg->pcg_avail);
1501 			for (i = 0; i < PCG_NOBJECTS; i++)
1502 				(*pr)("\t\t\t%p\n", pcg->pcg_objects[i]);
1503 		}
1504 	}
1505 
1506  skip_cache:
1507 
1508 	pr_enter_check(pp, pr);
1509 }
1510 
1511 int
1512 pool_chk(struct pool *pp, const char *label)
1513 {
1514 	struct pool_item_header *ph;
1515 	int r = 0;
1516 
1517 	simple_lock(&pp->pr_slock);
1518 
1519 	TAILQ_FOREACH(ph, &pp->pr_pagelist, ph_pagelist) {
1520 		struct pool_item *pi;
1521 		int n;
1522 		caddr_t page;
1523 
1524 		page = (caddr_t)((u_long)ph & pp->pr_alloc->pa_pagemask);
1525 		if (page != ph->ph_page &&
1526 		    (pp->pr_roflags & PR_PHINPAGE) != 0) {
1527 			if (label != NULL)
1528 				printf("%s: ", label);
1529 			printf("pool(%p:%s): page inconsistency: page %p;"
1530 			       " at page head addr %p (p %p)\n", pp,
1531 				pp->pr_wchan, ph->ph_page,
1532 				ph, page);
1533 			r++;
1534 			goto out;
1535 		}
1536 
1537 		for (pi = TAILQ_FIRST(&ph->ph_itemlist), n = 0;
1538 		     pi != NULL;
1539 		     pi = TAILQ_NEXT(pi,pi_list), n++) {
1540 
1541 #ifdef DIAGNOSTIC
1542 			if (pi->pi_magic != PI_MAGIC) {
1543 				if (label != NULL)
1544 					printf("%s: ", label);
1545 				printf("pool(%s): free list modified: magic=%x;"
1546 				       " page %p; item ordinal %d;"
1547 				       " addr %p (p %p)\n",
1548 					pp->pr_wchan, pi->pi_magic, ph->ph_page,
1549 					n, pi, page);
1550 				panic("pool");
1551 			}
1552 #endif
1553 			page =
1554 			    (caddr_t)((u_long)pi & pp->pr_alloc->pa_pagemask);
1555 			if (page == ph->ph_page)
1556 				continue;
1557 
1558 			if (label != NULL)
1559 				printf("%s: ", label);
1560 			printf("pool(%p:%s): page inconsistency: page %p;"
1561 			       " item ordinal %d; addr %p (p %p)\n", pp,
1562 				pp->pr_wchan, ph->ph_page,
1563 				n, pi, page);
1564 			r++;
1565 			goto out;
1566 		}
1567 	}
1568 out:
1569 	simple_unlock(&pp->pr_slock);
1570 	return (r);
1571 }
1572 
1573 /*
1574  * pool_cache_init:
1575  *
1576  *	Initialize a pool cache.
1577  *
1578  *	NOTE: If the pool must be protected from interrupts, we expect
1579  *	to be called at the appropriate interrupt priority level.
1580  */
1581 void
1582 pool_cache_init(struct pool_cache *pc, struct pool *pp,
1583     int (*ctor)(void *, void *, int),
1584     void (*dtor)(void *, void *),
1585     void *arg)
1586 {
1587 
1588 	TAILQ_INIT(&pc->pc_grouplist);
1589 	simple_lock_init(&pc->pc_slock);
1590 
1591 	pc->pc_allocfrom = NULL;
1592 	pc->pc_freeto = NULL;
1593 	pc->pc_pool = pp;
1594 
1595 	pc->pc_ctor = ctor;
1596 	pc->pc_dtor = dtor;
1597 	pc->pc_arg  = arg;
1598 
1599 	pc->pc_hits   = 0;
1600 	pc->pc_misses = 0;
1601 
1602 	pc->pc_ngroups = 0;
1603 
1604 	pc->pc_nitems = 0;
1605 
1606 	simple_lock(&pp->pr_slock);
1607 	TAILQ_INSERT_TAIL(&pp->pr_cachelist, pc, pc_poollist);
1608 	simple_unlock(&pp->pr_slock);
1609 }
1610 
1611 /*
1612  * pool_cache_destroy:
1613  *
1614  *	Destroy a pool cache.
1615  */
1616 void
1617 pool_cache_destroy(struct pool_cache *pc)
1618 {
1619 	struct pool *pp = pc->pc_pool;
1620 
1621 	/* First, invalidate the entire cache. */
1622 	pool_cache_invalidate(pc);
1623 
1624 	/* ...and remove it from the pool's cache list. */
1625 	simple_lock(&pp->pr_slock);
1626 	TAILQ_REMOVE(&pp->pr_cachelist, pc, pc_poollist);
1627 	simple_unlock(&pp->pr_slock);
1628 }
1629 
1630 static __inline void *
1631 pcg_get(struct pool_cache_group *pcg)
1632 {
1633 	void *object;
1634 	u_int idx;
1635 
1636 	KASSERT(pcg->pcg_avail <= PCG_NOBJECTS);
1637 	KASSERT(pcg->pcg_avail != 0);
1638 	idx = --pcg->pcg_avail;
1639 
1640 	KASSERT(pcg->pcg_objects[idx] != NULL);
1641 	object = pcg->pcg_objects[idx];
1642 	pcg->pcg_objects[idx] = NULL;
1643 
1644 	return (object);
1645 }
1646 
1647 static __inline void
1648 pcg_put(struct pool_cache_group *pcg, void *object)
1649 {
1650 	u_int idx;
1651 
1652 	KASSERT(pcg->pcg_avail < PCG_NOBJECTS);
1653 	idx = pcg->pcg_avail++;
1654 
1655 	KASSERT(pcg->pcg_objects[idx] == NULL);
1656 	pcg->pcg_objects[idx] = object;
1657 }
1658 
1659 /*
1660  * pool_cache_get:
1661  *
1662  *	Get an object from a pool cache.
1663  */
1664 void *
1665 pool_cache_get(struct pool_cache *pc, int flags)
1666 {
1667 	struct pool_cache_group *pcg;
1668 	void *object;
1669 
1670 #ifdef LOCKDEBUG
1671 	if (flags & PR_WAITOK)
1672 		simple_lock_only_held(NULL, "pool_cache_get(PR_WAITOK)");
1673 #endif
1674 
1675 	simple_lock(&pc->pc_slock);
1676 
1677 	if ((pcg = pc->pc_allocfrom) == NULL) {
1678 		TAILQ_FOREACH(pcg, &pc->pc_grouplist, pcg_list) {
1679 			if (pcg->pcg_avail != 0) {
1680 				pc->pc_allocfrom = pcg;
1681 				goto have_group;
1682 			}
1683 		}
1684 
1685 		/*
1686 		 * No groups with any available objects.  Allocate
1687 		 * a new object, construct it, and return it to
1688 		 * the caller.  We will allocate a group, if necessary,
1689 		 * when the object is freed back to the cache.
1690 		 */
1691 		pc->pc_misses++;
1692 		simple_unlock(&pc->pc_slock);
1693 		object = pool_get(pc->pc_pool, flags);
1694 		if (object != NULL && pc->pc_ctor != NULL) {
1695 			if ((*pc->pc_ctor)(pc->pc_arg, object, flags) != 0) {
1696 				pool_put(pc->pc_pool, object);
1697 				return (NULL);
1698 			}
1699 		}
1700 		return (object);
1701 	}
1702 
1703  have_group:
1704 	pc->pc_hits++;
1705 	pc->pc_nitems--;
1706 	object = pcg_get(pcg);
1707 
1708 	if (pcg->pcg_avail == 0)
1709 		pc->pc_allocfrom = NULL;
1710 
1711 	simple_unlock(&pc->pc_slock);
1712 
1713 	return (object);
1714 }
1715 
1716 /*
1717  * pool_cache_put:
1718  *
1719  *	Put an object back to the pool cache.
1720  */
1721 void
1722 pool_cache_put(struct pool_cache *pc, void *object)
1723 {
1724 	struct pool_cache_group *pcg;
1725 	int s;
1726 
1727 	simple_lock(&pc->pc_slock);
1728 
1729 	if ((pcg = pc->pc_freeto) == NULL) {
1730 		TAILQ_FOREACH(pcg, &pc->pc_grouplist, pcg_list) {
1731 			if (pcg->pcg_avail != PCG_NOBJECTS) {
1732 				pc->pc_freeto = pcg;
1733 				goto have_group;
1734 			}
1735 		}
1736 
1737 		/*
1738 		 * No group with a free slot.  Attempt to
1739 		 * allocate one.
1740 		 */
1741 		simple_unlock(&pc->pc_slock);
1742 		s = splvm();
1743 		pcg = pool_get(&pcgpool, PR_NOWAIT);
1744 		splx(s);
1745 		if (pcg != NULL) {
1746 			memset(pcg, 0, sizeof(*pcg));
1747 			simple_lock(&pc->pc_slock);
1748 			pc->pc_ngroups++;
1749 			TAILQ_INSERT_TAIL(&pc->pc_grouplist, pcg, pcg_list);
1750 			if (pc->pc_freeto == NULL)
1751 				pc->pc_freeto = pcg;
1752 			goto have_group;
1753 		}
1754 
1755 		/*
1756 		 * Unable to allocate a cache group; destruct the object
1757 		 * and free it back to the pool.
1758 		 */
1759 		pool_cache_destruct_object(pc, object);
1760 		return;
1761 	}
1762 
1763  have_group:
1764 	pc->pc_nitems++;
1765 	pcg_put(pcg, object);
1766 
1767 	if (pcg->pcg_avail == PCG_NOBJECTS)
1768 		pc->pc_freeto = NULL;
1769 
1770 	simple_unlock(&pc->pc_slock);
1771 }
1772 
1773 /*
1774  * pool_cache_destruct_object:
1775  *
1776  *	Force destruction of an object and its release back into
1777  *	the pool.
1778  */
1779 void
1780 pool_cache_destruct_object(struct pool_cache *pc, void *object)
1781 {
1782 
1783 	if (pc->pc_dtor != NULL)
1784 		(*pc->pc_dtor)(pc->pc_arg, object);
1785 	pool_put(pc->pc_pool, object);
1786 }
1787 
1788 /*
1789  * pool_cache_do_invalidate:
1790  *
1791  *	This internal function implements pool_cache_invalidate() and
1792  *	pool_cache_reclaim().
1793  */
1794 static void
1795 pool_cache_do_invalidate(struct pool_cache *pc, int free_groups,
1796     void (*putit)(struct pool *, void *))
1797 {
1798 	struct pool_cache_group *pcg, *npcg;
1799 	void *object;
1800 	int s;
1801 
1802 	for (pcg = TAILQ_FIRST(&pc->pc_grouplist); pcg != NULL;
1803 	     pcg = npcg) {
1804 		npcg = TAILQ_NEXT(pcg, pcg_list);
1805 		while (pcg->pcg_avail != 0) {
1806 			pc->pc_nitems--;
1807 			object = pcg_get(pcg);
1808 			if (pcg->pcg_avail == 0 && pc->pc_allocfrom == pcg)
1809 				pc->pc_allocfrom = NULL;
1810 			if (pc->pc_dtor != NULL)
1811 				(*pc->pc_dtor)(pc->pc_arg, object);
1812 			(*putit)(pc->pc_pool, object);
1813 		}
1814 		if (free_groups) {
1815 			pc->pc_ngroups--;
1816 			TAILQ_REMOVE(&pc->pc_grouplist, pcg, pcg_list);
1817 			if (pc->pc_freeto == pcg)
1818 				pc->pc_freeto = NULL;
1819 			s = splvm();
1820 			pool_put(&pcgpool, pcg);
1821 			splx(s);
1822 		}
1823 	}
1824 }
1825 
1826 /*
1827  * pool_cache_invalidate:
1828  *
1829  *	Invalidate a pool cache (destruct and release all of the
1830  *	cached objects).
1831  */
1832 void
1833 pool_cache_invalidate(struct pool_cache *pc)
1834 {
1835 
1836 	simple_lock(&pc->pc_slock);
1837 	pool_cache_do_invalidate(pc, 0, pool_put);
1838 	simple_unlock(&pc->pc_slock);
1839 }
1840 
1841 /*
1842  * pool_cache_reclaim:
1843  *
1844  *	Reclaim a pool cache for pool_reclaim().
1845  */
1846 static void
1847 pool_cache_reclaim(struct pool_cache *pc)
1848 {
1849 
1850 	simple_lock(&pc->pc_slock);
1851 	pool_cache_do_invalidate(pc, 1, pool_do_put);
1852 	simple_unlock(&pc->pc_slock);
1853 }
1854 
1855 /*
1856  * Pool backend allocators.
1857  *
1858  * Each pool has a backend allocator that handles allocation, deallocation,
1859  * and any additional draining that might be needed.
1860  *
1861  * We provide two standard allocators:
1862  *
1863  *	pool_allocator_kmem - the default when no allocator is specified
1864  *
1865  *	pool_allocator_nointr - used for pools that will not be accessed
1866  *	in interrupt context.
1867  */
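/*
 * A pool may also supply its own back-end by passing a pool_allocator to
 * pool_init().  An illustrative sketch (my_page_alloc/my_page_free are
 * hypothetical; a pa_pagesz of 0 means "use the default page size", as in
 * the initializers below):
 *
 *	void	*my_page_alloc(struct pool *, int);
 *	void	my_page_free(struct pool *, void *);
 *
 *	struct pool_allocator my_allocator = {
 *		my_page_alloc, my_page_free, 0,
 *	};
 *
 *	pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0,
 *	    "foopl", &my_allocator);
 */
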
1868 void	*pool_page_alloc(struct pool *, int);
1869 void	pool_page_free(struct pool *, void *);
1870 
1871 struct pool_allocator pool_allocator_kmem = {
1872 	pool_page_alloc, pool_page_free, 0,
1873 };
1874 
1875 void	*pool_page_alloc_nointr(struct pool *, int);
1876 void	pool_page_free_nointr(struct pool *, void *);
1877 
1878 struct pool_allocator pool_allocator_nointr = {
1879 	pool_page_alloc_nointr, pool_page_free_nointr, 0,
1880 };
1881 
1882 #ifdef POOL_SUBPAGE
1883 void	*pool_subpage_alloc(struct pool *, int);
1884 void	pool_subpage_free(struct pool *, void *);
1885 
1886 struct pool_allocator pool_allocator_kmem_subpage = {
1887 	pool_subpage_alloc, pool_subpage_free, 0,
1888 };
1889 #endif /* POOL_SUBPAGE */
1890 
1891 /*
1892  * We have at least three different resources for the same allocation and
1893  * each resource can be depleted.  First, we have the ready elements in the
1894  * pool.  Then we have the resource (typically a vm_map) for this allocator.
1895  * Finally, we have physical memory.  Waiting for any of these can be
1896  * unnecessary when any other is freed, but the kernel doesn't support
1897  * sleeping on multiple wait channels, so we have to employ another strategy.
1898  *
1899  * The caller sleeps on the pool (so that it can be awakened when an item
1900  * is returned to the pool), but we set PA_WANT on the allocator.  When a
1901  * page is returned to the allocator and PA_WANT is set, pool_allocator_free
1902  * will wake up all sleeping pools belonging to this allocator.
1903  *
1904  * XXX Thundering herd.
1905  */
1906 void *
1907 pool_allocator_alloc(struct pool *org, int flags)
1908 {
1909 	struct pool_allocator *pa = org->pr_alloc;
1910 	struct pool *pp, *start;
1911 	int s, freed;
1912 	void *res;
1913 
1914 	do {
1915 		if ((res = (*pa->pa_alloc)(org, flags)) != NULL)
1916 			return (res);
1917 		if ((flags & PR_WAITOK) == 0) {
1918 			/*
1919 			 * We only run the drain hook here if PR_NOWAIT.
1920 			 * In other cases, the hook will be run in
1921 			 * pool_reclaim().
1922 			 */
1923 			if (org->pr_drain_hook != NULL) {
1924 				(*org->pr_drain_hook)(org->pr_drain_hook_arg,
1925 				    flags);
1926 				if ((res = (*pa->pa_alloc)(org, flags)) != NULL)
1927 					return (res);
1928 			}
1929 			break;
1930 		}
1931 
1932 		/*
1933 		 * Drain all pools, except "org", that use this
1934 		 * allocator.  We do this to reclaim VA space.
1935 		 * pa_alloc is responsible for waiting for
1936 		 * physical memory.
1937 		 *
1938 		 * XXX We risk looping forever if someone
1939 		 * calls pool_destroy on "start".  But there is no
1940 		 * other way to have potentially sleeping pool_reclaim,
1941 		 * non-sleeping locks on pool_allocator, and some
1942 		 * stirring of drained pools in the allocator.
1943 		 *
1944 		 * XXX Maybe we should use pool_head_slock for locking
1945 		 * the allocators?
1946 		 */
1947 		freed = 0;
1948 
1949 		s = splvm();
1950 		simple_lock(&pa->pa_slock);
1951 		pp = start = TAILQ_FIRST(&pa->pa_list);
1952 		do {
1953 			TAILQ_REMOVE(&pa->pa_list, pp, pr_alloc_list);
1954 			TAILQ_INSERT_TAIL(&pa->pa_list, pp, pr_alloc_list);
1955 			if (pp == org)
1956 				continue;
1957 			simple_unlock(&pa->pa_slock);
1958 			freed = pool_reclaim(pp);
1959 			simple_lock(&pa->pa_slock);
1960 		} while ((pp = TAILQ_FIRST(&pa->pa_list)) != start &&
1961 			 freed == 0);
1962 
1963 		if (freed == 0) {
1964 			/*
1965 			 * We set PA_WANT here, the caller will most likely
1966 			 * sleep waiting for pages (if not, this won't hurt
1967 			 * that much), and there is no way to set this in
1968 			 * the caller without violating locking order.
1969 			 */
1970 			pa->pa_flags |= PA_WANT;
1971 		}
1972 		simple_unlock(&pa->pa_slock);
1973 		splx(s);
1974 	} while (freed);
1975 	return (NULL);
1976 }
1977 
1978 void
1979 pool_allocator_free(struct pool *pp, void *v)
1980 {
1981 	struct pool_allocator *pa = pp->pr_alloc;
1982 	int s;
1983 
1984 	(*pa->pa_free)(pp, v);
1985 
1986 	s = splvm();
1987 	simple_lock(&pa->pa_slock);
1988 	if ((pa->pa_flags & PA_WANT) == 0) {
1989 		simple_unlock(&pa->pa_slock);
1990 		splx(s);
1991 		return;
1992 	}
1993 
1994 	TAILQ_FOREACH(pp, &pa->pa_list, pr_alloc_list) {
1995 		simple_lock(&pp->pr_slock);
1996 		if ((pp->pr_flags & PR_WANTED) != 0) {
1997 			pp->pr_flags &= ~PR_WANTED;
1998 			wakeup(pp);
1999 		}
2000 		simple_unlock(&pp->pr_slock);
2001 	}
2002 	pa->pa_flags &= ~PA_WANT;
2003 	simple_unlock(&pa->pa_slock);
2004 	splx(s);
2005 }
2006 
2007 void *
2008 pool_page_alloc(struct pool *pp, int flags)
2009 {
2010 	boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;
2011 
2012 	return ((void *) uvm_km_alloc_poolpage(waitok));
2013 }
2014 
2015 void
2016 pool_page_free(struct pool *pp, void *v)
2017 {
2018 
2019 	uvm_km_free_poolpage((vaddr_t) v);
2020 }
2021 
2022 #ifdef POOL_SUBPAGE
2023 /* Sub-page allocator, for machines with large hardware pages. */
2024 void *
2025 pool_subpage_alloc(struct pool *pp, int flags)
2026 {
2027 
2028 	return (pool_get(&psppool, flags));
2029 }
2030 
2031 void
2032 pool_subpage_free(struct pool *pp, void *v)
2033 {
2034 
2035 	pool_put(&psppool, v);
2036 }
2037 
2038 /* We don't provide a real nointr allocator.  Maybe later. */
2039 void *
2040 pool_page_alloc_nointr(struct pool *pp, int flags)
2041 {
2042 
2043 	return (pool_subpage_alloc(pp, flags));
2044 }
2045 
2046 void
2047 pool_page_free_nointr(struct pool *pp, void *v)
2048 {
2049 
2050 	pool_subpage_free(pp, v);
2051 }
2052 #else
2053 void *
2054 pool_page_alloc_nointr(struct pool *pp, int flags)
2055 {
2056 	boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;
2057 
2058 	return ((void *) uvm_km_alloc_poolpage1(kernel_map,
2059 	    uvm.kernel_object, waitok));
2060 }
2061 
2062 void
2063 pool_page_free_nointr(struct pool *pp, void *v)
2064 {
2065 
2066 	uvm_km_free_poolpage1(kernel_map, (vaddr_t) v);
2067 }
2068 #endif /* POOL_SUBPAGE */
2069