1 /*	$OpenBSD: subr_pool.c,v 1.36 2002/10/27 21:31:56 art Exp $	*/
2 /*	$NetBSD: subr_pool.c,v 1.61 2001/09/26 07:14:56 chs Exp $	*/
3 
4 /*-
5  * Copyright (c) 1997, 1999, 2000 The NetBSD Foundation, Inc.
6  * All rights reserved.
7  *
8  * This code is derived from software contributed to The NetBSD Foundation
9  * by Paul Kranenburg; by Jason R. Thorpe of the Numerical Aerospace
10  * Simulation Facility, NASA Ames Research Center.
11  *
12  * Redistribution and use in source and binary forms, with or without
13  * modification, are permitted provided that the following conditions
14  * are met:
15  * 1. Redistributions of source code must retain the above copyright
16  *    notice, this list of conditions and the following disclaimer.
17  * 2. Redistributions in binary form must reproduce the above copyright
18  *    notice, this list of conditions and the following disclaimer in the
19  *    documentation and/or other materials provided with the distribution.
20  * 3. All advertising materials mentioning features or use of this software
21  *    must display the following acknowledgement:
22  *	This product includes software developed by the NetBSD
23  *	Foundation, Inc. and its contributors.
24  * 4. Neither the name of The NetBSD Foundation nor the names of its
25  *    contributors may be used to endorse or promote products derived
26  *    from this software without specific prior written permission.
27  *
28  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
29  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
30  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
31  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
32  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
33  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
34  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
35  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
36  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
37  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
38  * POSSIBILITY OF SUCH DAMAGE.
39  */
40 
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/proc.h>
44 #include <sys/errno.h>
45 #include <sys/kernel.h>
46 #include <sys/malloc.h>
47 #include <sys/lock.h>
48 #include <sys/pool.h>
49 #include <sys/syslog.h>
50 #include <sys/sysctl.h>
51 
52 #include <uvm/uvm.h>
53 
54 /*
55  * XXX - for now.
56  */
57 #define SIMPLELOCK_INITIALIZER { SLOCK_UNLOCKED }
58 #ifdef LOCKDEBUG
59 #define simple_lock_freecheck(a, s) do { /* nothing */ } while (0)
60 #define simple_lock_only_held(lkp, str) do { /* nothing */ } while (0)
61 #endif
62 
63 /*
64  * Pool resource management utility.
65  *
66  * Memory is allocated in pages which are split into pieces according
67  * to the pool item size. Each page is kept on a list headed by `pr_pagelist'
68  * in the pool structure and the individual pool items are on a linked list
69  * headed by `ph_itemlist' in each page header. The memory for building
70  * the page list is either taken from the allocated pages themselves (for
71  * small pool items) or taken from an internal pool of page headers (`phpool').
72  */
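/*
 * Illustrative usage sketch (not part of this file; "struct foo",
 * "foo_pool" and "foopl" are hypothetical names).  A consumer declares
 * a pool, initializes it once and then gets/puts items, raising the
 * spl first if the pool is also used from interrupt context:
 *
 *	struct pool foo_pool;
 *
 *	pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0, "foopl", NULL);
 *
 *	s = splvm();
 *	f = pool_get(&foo_pool, PR_WAITOK);
 *	...
 *	pool_put(&foo_pool, f);
 *	splx(s);
 */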
73 
74 /* List of all pools */
75 TAILQ_HEAD(,pool) pool_head = TAILQ_HEAD_INITIALIZER(pool_head);
76 
77 /* Private pool for page header structures */
78 static struct pool phpool;
79 
80 /* # of seconds to retain page after last use */
81 int pool_inactive_time = 10;
82 
83 /* Next candidate for drainage (see pool_drain()) */
84 static struct pool	*drainpp;
85 
86 /* This spin lock protects both pool_head and drainpp. */
87 struct simplelock pool_head_slock = SIMPLELOCK_INITIALIZER;
88 
89 struct pool_item_header {
90 	/* Page headers */
91 	TAILQ_ENTRY(pool_item_header)
92 				ph_pagelist;	/* pool page list */
93 	TAILQ_HEAD(,pool_item)	ph_itemlist;	/* chunk list for this page */
94 	LIST_ENTRY(pool_item_header)
95 				ph_hashlist;	/* Off-page page headers */
96 	int			ph_nmissing;	/* # of chunks in use */
97 	caddr_t			ph_page;	/* this page's address */
98 	struct timeval		ph_time;	/* last referenced */
99 };
100 TAILQ_HEAD(pool_pagelist,pool_item_header);
101 
102 struct pool_item {
103 #ifdef DIAGNOSTIC
104 	int pi_magic;
105 #endif
106 #define	PI_MAGIC 0xdeadbeef
107 	/* Other entries use only this list entry */
108 	TAILQ_ENTRY(pool_item)	pi_list;
109 };
110 
111 #define	PR_HASH_INDEX(pp,addr) \
112 	(((u_long)(addr) >> (pp)->pr_alloc->pa_pageshift) & (PR_HASHTABSIZE - 1))
113 
114 #define	POOL_NEEDS_CATCHUP(pp)						\
115 	((pp)->pr_nitems < (pp)->pr_minitems)
116 
117 /*
118  * Every pool gets a unique serial number assigned to it. If this counter
119  * wraps, we're screwed, but we shouldn't create so many pools anyway.
120  */
121 unsigned int pool_serial;
122 
123 /*
124  * Pool cache management.
125  *
126  * Pool caches provide a way for constructed objects to be cached by the
127  * pool subsystem.  This can lead to performance improvements by avoiding
128  * needless object construction/destruction; it is deferred until absolutely
129  * necessary.
130  *
131  * Caches are grouped into cache groups.  Each cache group references
132  * up to 16 constructed objects.  When a cache allocates an object
133  * from the pool, it calls the object's constructor and places it into
134  * a cache group.  When a cache group frees an object back to the pool,
135  * it first calls the object's destructor.  This allows the object to
136  * persist in constructed form while freed to the cache.
137  *
138  * Multiple caches may exist for each pool.  This allows a single
139  * object type to have multiple constructed forms.  The pool references
140  * each cache, so that when a pool is drained by the pagedaemon, it can
141  * drain each individual cache as well.  Each time a cache is drained,
142  * the most idle cache group is freed to the pool in its entirety.
143  *
144  * Pool caches are layered on top of pools.  By layering them, we can avoid
145  * the complexity of cache management for pools which would not benefit
146  * from it.
147  */
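/*
 * Illustrative sketch of a pool cache on top of an existing pool
 * (hypothetical "foo_cache", "foo_ctor" and "foo_dtor"; the constructor
 * returns 0 on success):
 *
 *	struct pool_cache foo_cache;
 *
 *	pool_cache_init(&foo_cache, &foo_pool, foo_ctor, foo_dtor, NULL);
 *	f = pool_cache_get(&foo_cache, PR_WAITOK);
 *	...
 *	pool_cache_put(&foo_cache, f);
 */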
148 
149 /* The cache group pool. */
150 static struct pool pcgpool;
151 
152 /* The pool cache group. */
153 #define	PCG_NOBJECTS		16
154 struct pool_cache_group {
155 	TAILQ_ENTRY(pool_cache_group)
156 		pcg_list;	/* link in the pool cache's group list */
157 	u_int	pcg_avail;	/* # available objects */
158 				/* pointers to the objects */
159 	void	*pcg_objects[PCG_NOBJECTS];
160 };
161 
162 void	pool_cache_reclaim(struct pool_cache *);
163 void	pool_cache_do_invalidate(struct pool_cache *, int,
164     void (*)(struct pool *, void *));
165 
166 int	pool_catchup(struct pool *);
167 void	pool_prime_page(struct pool *, caddr_t, struct pool_item_header *);
168 void	pool_do_put(struct pool *, void *);
169 void	pr_rmpage(struct pool *, struct pool_item_header *,
170     struct pool_pagelist *);
171 
172 void	*pool_allocator_alloc(struct pool *, int);
173 void	pool_allocator_free(struct pool *, void *);
174 
175 void pool_print1(struct pool *, const char *, int (*)(const char *, ...));
176 
177 /*
178  * Pool log entry. An array of these is allocated in pool_init().
179  */
180 struct pool_log {
181 	const char	*pl_file;
182 	long		pl_line;
183 	int		pl_action;
184 #define	PRLOG_GET	1
185 #define	PRLOG_PUT	2
186 	void		*pl_addr;
187 };
188 
189 /* Number of entries in pool log buffers */
190 #ifndef POOL_LOGSIZE
191 #define	POOL_LOGSIZE	10
192 #endif
193 
194 int pool_logsize = POOL_LOGSIZE;
195 
196 #ifdef POOL_DIAGNOSTIC
197 static __inline void
198 pr_log(struct pool *pp, void *v, int action, const char *file, long line)
199 {
200 	int n = pp->pr_curlogentry;
201 	struct pool_log *pl;
202 
203 	if ((pp->pr_roflags & PR_LOGGING) == 0)
204 		return;
205 
206 	/*
207 	 * Fill in the current entry. Wrap around and overwrite
208 	 * the oldest entry if necessary.
209 	 */
210 	pl = &pp->pr_log[n];
211 	pl->pl_file = file;
212 	pl->pl_line = line;
213 	pl->pl_action = action;
214 	pl->pl_addr = v;
215 	if (++n >= pp->pr_logsize)
216 		n = 0;
217 	pp->pr_curlogentry = n;
218 }
219 
220 static void
221 pr_printlog(struct pool *pp, struct pool_item *pi,
222     int (*pr)(const char *, ...))
223 {
224 	int i = pp->pr_logsize;
225 	int n = pp->pr_curlogentry;
226 
227 	if ((pp->pr_roflags & PR_LOGGING) == 0)
228 		return;
229 
230 	/*
231 	 * Print all entries in this pool's log.
232 	 */
233 	while (i-- > 0) {
234 		struct pool_log *pl = &pp->pr_log[n];
235 		if (pl->pl_action != 0) {
236 			if (pi == NULL || pi == pl->pl_addr) {
237 				(*pr)("\tlog entry %d:\n", i);
238 				(*pr)("\t\taction = %s, addr = %p\n",
239 				    pl->pl_action == PRLOG_GET ? "get" : "put",
240 				    pl->pl_addr);
241 				(*pr)("\t\tfile: %s at line %lu\n",
242 				    pl->pl_file, pl->pl_line);
243 			}
244 		}
245 		if (++n >= pp->pr_logsize)
246 			n = 0;
247 	}
248 }
249 
250 static __inline void
251 pr_enter(struct pool *pp, const char *file, long line)
252 {
253 
254 	if (__predict_false(pp->pr_entered_file != NULL)) {
255 		printf("pool %s: reentrancy at file %s line %ld\n",
256 		    pp->pr_wchan, file, line);
257 		printf("         previous entry at file %s line %ld\n",
258 		    pp->pr_entered_file, pp->pr_entered_line);
259 		panic("pr_enter");
260 	}
261 
262 	pp->pr_entered_file = file;
263 	pp->pr_entered_line = line;
264 }
265 
266 static __inline void
267 pr_leave(struct pool *pp)
268 {
269 
270 	if (__predict_false(pp->pr_entered_file == NULL)) {
271 		printf("pool %s not entered?\n", pp->pr_wchan);
272 		panic("pr_leave");
273 	}
274 
275 	pp->pr_entered_file = NULL;
276 	pp->pr_entered_line = 0;
277 }
278 
279 static __inline void
280 pr_enter_check(struct pool *pp, int (*pr)(const char *, ...))
281 {
282 
283 	if (pp->pr_entered_file != NULL)
284 		(*pr)("\n\tcurrently entered from file %s line %ld\n",
285 		    pp->pr_entered_file, pp->pr_entered_line);
286 }
287 #else
288 #define	pr_log(pp, v, action, file, line)
289 #define	pr_printlog(pp, pi, pr)
290 #define	pr_enter(pp, file, line)
291 #define	pr_leave(pp)
292 #define	pr_enter_check(pp, pr)
293 #endif /* POOL_DIAGNOSTIC */
294 
295 /*
296  * Return the pool page header based on page address.
297  */
298 static __inline struct pool_item_header *
299 pr_find_pagehead(struct pool *pp, caddr_t page)
300 {
301 	struct pool_item_header *ph;
302 
303 	if ((pp->pr_roflags & PR_PHINPAGE) != 0)
304 		return ((struct pool_item_header *)(page + pp->pr_phoffset));
305 
306 	for (ph = LIST_FIRST(&pp->pr_hashtab[PR_HASH_INDEX(pp, page)]);
307 	     ph != NULL;
308 	     ph = LIST_NEXT(ph, ph_hashlist)) {
309 		if (ph->ph_page == page)
310 			return (ph);
311 	}
312 	return (NULL);
313 }
314 
315 /*
316  * Remove a page from the pool.
317  */
318 void
319 pr_rmpage(struct pool *pp, struct pool_item_header *ph,
320      struct pool_pagelist *pq)
321 {
322 	int s;
323 
324 	/*
325 	 * If the page was idle, decrement the idle page count.
326 	 */
327 	if (ph->ph_nmissing == 0) {
328 #ifdef DIAGNOSTIC
329 		if (pp->pr_nidle == 0)
330 			panic("pr_rmpage: nidle inconsistent");
331 		if (pp->pr_nitems < pp->pr_itemsperpage)
332 			panic("pr_rmpage: nitems inconsistent");
333 #endif
334 		pp->pr_nidle--;
335 	}
336 
337 	pp->pr_nitems -= pp->pr_itemsperpage;
338 
339 	/*
340 	 * Unlink a page from the pool and release it (or queue it for release).
341 	 */
342 	TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist);
343 	if (pq) {
344 		TAILQ_INSERT_HEAD(pq, ph, ph_pagelist);
345 	} else {
346 		pool_allocator_free(pp, ph->ph_page);
347 		if ((pp->pr_roflags & PR_PHINPAGE) == 0) {
348 			LIST_REMOVE(ph, ph_hashlist);
349 			s = splhigh();
350 			pool_put(&phpool, ph);
351 			splx(s);
352 		}
353 	}
354 	pp->pr_npages--;
355 	pp->pr_npagefree++;
356 
357 	if (pp->pr_curpage == ph) {
358 		/*
359 		 * Find a new non-empty page header, if any.
360 		 * Start search from the page head, to increase the
361 		 * chance for "high water" pages to be freed.
362 		 */
363 		TAILQ_FOREACH(ph, &pp->pr_pagelist, ph_pagelist)
364 			if (TAILQ_FIRST(&ph->ph_itemlist) != NULL)
365 				break;
366 
367 		pp->pr_curpage = ph;
368 	}
369 }
370 
371 /*
372  * Initialize the given pool resource structure.
373  *
374  * We export this routine to allow other kernel parts to declare
375  * static pools that must be initialized before malloc() is available.
376  */
377 void
378 pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags,
379     const char *wchan, struct pool_allocator *palloc)
380 {
381 	int off, slack, i;
382 
383 #ifdef POOL_DIAGNOSTIC
384 	/*
385 	 * Always log if POOL_DIAGNOSTIC is defined.
386 	 */
387 	if (pool_logsize != 0)
388 		flags |= PR_LOGGING;
389 #endif
390 
391 	/*
392 	 * Check arguments and construct default values.
393 	 */
394 	if (palloc == NULL)
395 		palloc = &pool_allocator_kmem;
396 	if ((palloc->pa_flags & PA_INITIALIZED) == 0) {
397 		if (palloc->pa_pagesz == 0)
398 			palloc->pa_pagesz = PAGE_SIZE;
399 
400 		TAILQ_INIT(&palloc->pa_list);
401 
402 		simple_lock_init(&palloc->pa_slock);
403 		palloc->pa_pagemask = ~(palloc->pa_pagesz - 1);
404 		palloc->pa_pageshift = ffs(palloc->pa_pagesz) - 1;
405 		palloc->pa_flags |= PA_INITIALIZED;
406 	}
407 
408 	if (align == 0)
409 		align = ALIGN(1);
410 
411 	if (size < sizeof(struct pool_item))
412 		size = sizeof(struct pool_item);
413 
414 	size = roundup(size, align);
415 #ifdef DIAGNOSTIC
416 	if (size > palloc->pa_pagesz)
417 		panic("pool_init: pool item size (%lu) too large",
418 		      (u_long)size);
419 #endif
420 
421 	/*
422 	 * Initialize the pool structure.
423 	 */
424 	TAILQ_INIT(&pp->pr_pagelist);
425 	TAILQ_INIT(&pp->pr_cachelist);
426 	pp->pr_curpage = NULL;
427 	pp->pr_npages = 0;
428 	pp->pr_minitems = 0;
429 	pp->pr_minpages = 0;
430 	pp->pr_maxpages = UINT_MAX;
431 	pp->pr_roflags = flags;
432 	pp->pr_flags = 0;
433 	pp->pr_size = size;
434 	pp->pr_align = align;
435 	pp->pr_wchan = wchan;
436 	pp->pr_alloc = palloc;
437 	pp->pr_nitems = 0;
438 	pp->pr_nout = 0;
439 	pp->pr_hardlimit = UINT_MAX;
440 	pp->pr_hardlimit_warning = NULL;
441 	pp->pr_hardlimit_ratecap.tv_sec = 0;
442 	pp->pr_hardlimit_ratecap.tv_usec = 0;
443 	pp->pr_hardlimit_warning_last.tv_sec = 0;
444 	pp->pr_hardlimit_warning_last.tv_usec = 0;
445 	pp->pr_drain_hook = NULL;
446 	pp->pr_drain_hook_arg = NULL;
447 	pp->pr_serial = ++pool_serial;
448 	if (pool_serial == 0)
449 		panic("pool_init: too much uptime");
450 
451 	/*
452 	 * Decide whether to put the page header off page to avoid
453 	 * wasting too large a part of the page. Off-page page headers
454 	 * go on a hash table, so we can match a returned item
455 	 * with its header based on the page address.
456 	 * We use 1/16 of the page size as the threshold (XXX: tune)
457 	 */
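	/*
	 * Example (assuming a 4096 byte page): items smaller than 256
	 * bytes keep their header at the end of the page; larger items
	 * get an off-page header allocated from phpool.
	 */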
458 	if (pp->pr_size < palloc->pa_pagesz/16) {
459 		/* Use the end of the page for the page header */
460 		pp->pr_roflags |= PR_PHINPAGE;
461 		pp->pr_phoffset = off = palloc->pa_pagesz -
462 		    ALIGN(sizeof(struct pool_item_header));
463 	} else {
464 		/* The page header will be taken from our page header pool */
465 		pp->pr_phoffset = 0;
466 		off = palloc->pa_pagesz;
467 		for (i = 0; i < PR_HASHTABSIZE; i++) {
468 			LIST_INIT(&pp->pr_hashtab[i]);
469 		}
470 	}
471 
472 	/*
473 	 * Alignment is to take place at `ioff' within the item. This means
474 	 * we must reserve up to `align - 1' bytes on the page to allow
475 	 * appropriate positioning of each item.
476 	 *
477 	 * Silently enforce `0 <= ioff < align'.
478 	 */
479 	pp->pr_itemoffset = ioff = ioff % align;
480 	pp->pr_itemsperpage = (off - ((align - ioff) % align)) / pp->pr_size;
481 	KASSERT(pp->pr_itemsperpage != 0);
482 
483 	/*
484 	 * Use the slack between the chunks and the page header
485 	 * for "cache coloring".
486 	 */
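	/*
	 * Worked example (illustrative numbers only): with off = 4000,
	 * pr_size = 96 and align = 8, itemsperpage = 41, so the slack is
	 * 4000 - 41 * 96 = 64 and pr_maxcolor = 64; successive pages then
	 * start their items at offsets 0, 8, 16, ..., 64 before wrapping.
	 */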
487 	slack = off - pp->pr_itemsperpage * pp->pr_size;
488 	pp->pr_maxcolor = (slack / align) * align;
489 	pp->pr_curcolor = 0;
490 
491 	pp->pr_nget = 0;
492 	pp->pr_nfail = 0;
493 	pp->pr_nput = 0;
494 	pp->pr_npagealloc = 0;
495 	pp->pr_npagefree = 0;
496 	pp->pr_hiwat = 0;
497 	pp->pr_nidle = 0;
498 
499 #ifdef POOL_DIAGNOSTIC
500 	if (flags & PR_LOGGING) {
501 		if (kmem_map == NULL ||
502 		    (pp->pr_log = malloc(pool_logsize * sizeof(struct pool_log),
503 		     M_TEMP, M_NOWAIT)) == NULL)
504 			pp->pr_roflags &= ~PR_LOGGING;
505 		pp->pr_curlogentry = 0;
506 		pp->pr_logsize = pool_logsize;
507 	}
508 #endif
509 
510 	pp->pr_entered_file = NULL;
511 	pp->pr_entered_line = 0;
512 
513 	simple_lock_init(&pp->pr_slock);
514 
515 	/*
516 	 * Initialize private page header pool and cache magazine pool if we
517 	 * haven't done so yet.
518 	 * XXX LOCKING.
519 	 */
520 	if (phpool.pr_size == 0) {
521 		pool_init(&phpool, sizeof(struct pool_item_header), 0, 0,
522 		    0, "phpool", NULL);
523 		pool_init(&pcgpool, sizeof(struct pool_cache_group), 0, 0,
524 		    0, "pcgpool", NULL);
525 	}
526 
527 	/* Insert this into the list of all pools. */
528 	simple_lock(&pool_head_slock);
529 	TAILQ_INSERT_TAIL(&pool_head, pp, pr_poollist);
530 	simple_unlock(&pool_head_slock);
531 
532 	/* Insert into the list of pools using this allocator. */
533 	simple_lock(&palloc->pa_slock);
534 	TAILQ_INSERT_TAIL(&palloc->pa_list, pp, pr_alloc_list);
535 	simple_unlock(&palloc->pa_slock);
536 }
537 
538 /*
539  * De-commission a pool resource.
540  */
541 void
542 pool_destroy(struct pool *pp)
543 {
544 	struct pool_item_header *ph;
545 	struct pool_cache *pc;
546 
547 	/* Locking order: pool_allocator -> pool */
548 	simple_lock(&pp->pr_alloc->pa_slock);
549 	TAILQ_REMOVE(&pp->pr_alloc->pa_list, pp, pr_alloc_list);
550 	simple_unlock(&pp->pr_alloc->pa_slock);
551 
552 	/* Destroy all caches for this pool. */
553 	while ((pc = TAILQ_FIRST(&pp->pr_cachelist)) != NULL)
554 		pool_cache_destroy(pc);
555 
556 #ifdef DIAGNOSTIC
557 	if (pp->pr_nout != 0) {
558 		pr_printlog(pp, NULL, printf);
559 		panic("pool_destroy: pool busy: still out: %u",
560 		    pp->pr_nout);
561 	}
562 #endif
563 
564 	/* Remove all pages */
565 	while ((ph = TAILQ_FIRST(&pp->pr_pagelist)) != NULL)
566 		pr_rmpage(pp, ph, NULL);
567 
568 	/* Remove from global pool list */
569 	simple_lock(&pool_head_slock);
570 	TAILQ_REMOVE(&pool_head, pp, pr_poollist);
571 	if (drainpp == pp) {
572 		drainpp = NULL;
573 	}
574 	simple_unlock(&pool_head_slock);
575 
576 #ifdef POOL_DIAGNOSTIC
577 	if ((pp->pr_roflags & PR_LOGGING) != 0)
578 		free(pp->pr_log, M_TEMP);
579 #endif
580 }
581 
582 void
583 pool_set_drain_hook(struct pool *pp, void (*fn)(void *, int), void *arg)
584 {
585 	/* XXX no locking -- must be used just after pool_init() */
586 #ifdef DIAGNOSTIC
587 	if (pp->pr_drain_hook != NULL)
588 		panic("pool_set_drain_hook(%s): already set", pp->pr_wchan);
589 #endif
590 	pp->pr_drain_hook = fn;
591 	pp->pr_drain_hook_arg = arg;
592 }
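/*
 * Illustrative sketch (hypothetical "foo_drain" callback): a subsystem
 * that can give memory back under pressure registers its hook right
 * after pool_init(); the hook is later invoked as (*fn)(arg, flags)
 * when the pool hits its hard limit or is reclaimed:
 *
 *	pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0, "foopl", NULL);
 *	pool_set_drain_hook(&foo_pool, foo_drain, NULL);
 */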
593 
594 static __inline struct pool_item_header *
595 pool_alloc_item_header(struct pool *pp, caddr_t storage, int flags)
596 {
597 	struct pool_item_header *ph;
598 	int s;
599 
600 	LOCK_ASSERT(simple_lock_held(&pp->pr_slock) == 0);
601 
602 	if ((pp->pr_roflags & PR_PHINPAGE) != 0)
603 		ph = (struct pool_item_header *) (storage + pp->pr_phoffset);
604 	else {
605 		s = splhigh();
606 		ph = pool_get(&phpool, flags);
607 		splx(s);
608 	}
609 
610 	return (ph);
611 }
612 
613 /*
614  * Grab an item from the pool; must be called at appropriate spl level
615  */
616 void *
617 #ifdef POOL_DIAGNOSTIC
618 _pool_get(struct pool *pp, int flags, const char *file, long line)
619 #else
620 pool_get(struct pool *pp, int flags)
621 #endif
622 {
623 	struct pool_item *pi;
624 	struct pool_item_header *ph;
625 	void *v;
626 
627 #ifdef DIAGNOSTIC
628 	if (__predict_false(curproc == NULL && /* doing_shutdown == 0 && XXX*/
629 			    (flags & PR_WAITOK) != 0))
630 		panic("pool_get: %s: must have NOWAIT", pp->pr_wchan);
631 
632 #ifdef LOCKDEBUG
633 	if (flags & PR_WAITOK)
634 		simple_lock_only_held(NULL, "pool_get(PR_WAITOK)");
635 #endif
636 #endif /* DIAGNOSTIC */
637 
638 	simple_lock(&pp->pr_slock);
639 	pr_enter(pp, file, line);
640 
641  startover:
642 	/*
643 	 * Check to see if we've reached the hard limit.  If we have,
644 	 * and we can wait, then wait until an item has been returned to
645 	 * the pool.
646 	 */
647 #ifdef DIAGNOSTIC
648 	if (__predict_false(pp->pr_nout > pp->pr_hardlimit)) {
649 		pr_leave(pp);
650 		simple_unlock(&pp->pr_slock);
651 		panic("pool_get: %s: crossed hard limit", pp->pr_wchan);
652 	}
653 #endif
654 	if (__predict_false(pp->pr_nout == pp->pr_hardlimit)) {
655 		if (pp->pr_drain_hook != NULL) {
656 			/*
657 			 * Since the drain hook is going to free things
658 			 * back to the pool, unlock, call hook, re-lock
659 			 * and check hardlimit condition again.
660 			 */
661 			pr_leave(pp);
662 			simple_unlock(&pp->pr_slock);
663 			(*pp->pr_drain_hook)(pp->pr_drain_hook_arg, flags);
664 			simple_lock(&pp->pr_slock);
665 			pr_enter(pp, file, line);
666 			if (pp->pr_nout < pp->pr_hardlimit)
667 				goto startover;
668 		}
669 
670 		if ((flags & PR_WAITOK) && !(flags & PR_LIMITFAIL)) {
671 			/*
672 			 * XXX: A warning isn't logged in this case.  Should
673 			 * it be?
674 			 */
675 			pp->pr_flags |= PR_WANTED;
676 			pr_leave(pp);
677 			ltsleep(pp, PSWP, pp->pr_wchan, 0, &pp->pr_slock);
678 			pr_enter(pp, file, line);
679 			goto startover;
680 		}
681 
682 		/*
683 		 * Log a message that the hard limit has been hit.
684 		 */
685 		if (pp->pr_hardlimit_warning != NULL &&
686 		    ratecheck(&pp->pr_hardlimit_warning_last,
687 			      &pp->pr_hardlimit_ratecap))
688 			log(LOG_ERR, "%s\n", pp->pr_hardlimit_warning);
689 
690 		pp->pr_nfail++;
691 
692 		pr_leave(pp);
693 		simple_unlock(&pp->pr_slock);
694 		return (NULL);
695 	}
696 
697 	/*
698 	 * The convention we use is that if `curpage' is not NULL, then
699 	 * it points at a non-empty bucket. In particular, `curpage'
700 	 * never points at a page header which has PR_PHINPAGE set and
701 	 * has no items in its bucket.
702 	 */
703 	if ((ph = pp->pr_curpage) == NULL) {
704 #ifdef DIAGNOSTIC
705 		if (pp->pr_nitems != 0) {
706 			simple_unlock(&pp->pr_slock);
707 			printf("pool_get: %s: curpage NULL, nitems %u\n",
708 			    pp->pr_wchan, pp->pr_nitems);
709 			panic("pool_get: nitems inconsistent");
710 		}
711 #endif
712 
713 		/*
714 		 * Call the back-end page allocator for more memory.
715 		 * Release the pool lock, as the back-end page allocator
716 		 * may block.
717 		 */
718 		pr_leave(pp);
719 		simple_unlock(&pp->pr_slock);
720 		v = pool_allocator_alloc(pp, flags);
721 		if (__predict_true(v != NULL))
722 			ph = pool_alloc_item_header(pp, v, flags);
723 		simple_lock(&pp->pr_slock);
724 		pr_enter(pp, file, line);
725 
726 		if (__predict_false(v == NULL || ph == NULL)) {
727 			if (v != NULL)
728 				pool_allocator_free(pp, v);
729 
730 			/*
731 			 * We were unable to allocate a page or item
732 			 * header, but we released the lock during
733 			 * allocation, so perhaps items were freed
734 			 * back to the pool.  Check for this case.
735 			 */
736 			if (pp->pr_curpage != NULL)
737 				goto startover;
738 
739 			if ((flags & PR_WAITOK) == 0) {
740 				pp->pr_nfail++;
741 				pr_leave(pp);
742 				simple_unlock(&pp->pr_slock);
743 				return (NULL);
744 			}
745 
746 			/*
747 			 * Wait for items to be returned to this pool.
748 			 *
749 			 * XXX: maybe we should wake up once a second and
750 			 * try again?
751 			 */
752 			pp->pr_flags |= PR_WANTED;
753 			/* PA_WANTED is already set on the allocator. */
754 			pr_leave(pp);
755 			ltsleep(pp, PSWP, pp->pr_wchan, 0, &pp->pr_slock);
756 			pr_enter(pp, file, line);
757 			goto startover;
758 		}
759 
760 		/* We have more memory; add it to the pool */
761 		pool_prime_page(pp, v, ph);
762 		pp->pr_npagealloc++;
763 
764 		/* Start the allocation process over. */
765 		goto startover;
766 	}
767 
768 	if (__predict_false((v = pi = TAILQ_FIRST(&ph->ph_itemlist)) == NULL)) {
769 		pr_leave(pp);
770 		simple_unlock(&pp->pr_slock);
771 		panic("pool_get: %s: page empty", pp->pr_wchan);
772 	}
773 #ifdef DIAGNOSTIC
774 	if (__predict_false(pp->pr_nitems == 0)) {
775 		pr_leave(pp);
776 		simple_unlock(&pp->pr_slock);
777 		printf("pool_get: %s: items on itemlist, nitems %u\n",
778 		    pp->pr_wchan, pp->pr_nitems);
779 		panic("pool_get: nitems inconsistent");
780 	}
781 #endif
782 
783 #ifdef POOL_DIAGNOSTIC
784 	pr_log(pp, v, PRLOG_GET, file, line);
785 #endif
786 
787 #ifdef DIAGNOSTIC
788 	if (__predict_false(pi->pi_magic != PI_MAGIC)) {
789 		pr_printlog(pp, pi, printf);
790 		panic("pool_get(%s): free list modified: magic=%x; page %p;"
791 		       " item addr %p",
792 			pp->pr_wchan, pi->pi_magic, ph->ph_page, pi);
793 	}
794 #endif
795 
796 	/*
797 	 * Remove from item list.
798 	 */
799 	TAILQ_REMOVE(&ph->ph_itemlist, pi, pi_list);
800 	pp->pr_nitems--;
801 	pp->pr_nout++;
802 	if (ph->ph_nmissing == 0) {
803 #ifdef DIAGNOSTIC
804 		if (__predict_false(pp->pr_nidle == 0))
805 			panic("pool_get: nidle inconsistent");
806 #endif
807 		pp->pr_nidle--;
808 	}
809 	ph->ph_nmissing++;
810 	if (TAILQ_FIRST(&ph->ph_itemlist) == NULL) {
811 #ifdef DIAGNOSTIC
812 		if (__predict_false(ph->ph_nmissing != pp->pr_itemsperpage)) {
813 			pr_leave(pp);
814 			simple_unlock(&pp->pr_slock);
815 			panic("pool_get: %s: nmissing inconsistent",
816 			    pp->pr_wchan);
817 		}
818 #endif
819 		/*
820 		 * Find a new non-empty page header, if any.
821 		 * Start search from the page head, to increase
822 		 * the chance for "high water" pages to be freed.
823 		 *
824 		 * Migrate empty pages to the end of the list.  This
825 		 * will speed the update of curpage as pages become
826 		 * idle.  Empty pages intermingled with idle pages
827 		 * is no big deal.  As soon as a page becomes un-empty,
828 		 * it will move back to the head of the list.
829 		 */
830 		TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist);
831 		TAILQ_INSERT_TAIL(&pp->pr_pagelist, ph, ph_pagelist);
832 		TAILQ_FOREACH(ph, &pp->pr_pagelist, ph_pagelist)
833 			if (TAILQ_FIRST(&ph->ph_itemlist) != NULL)
834 				break;
835 
836 		pp->pr_curpage = ph;
837 	}
838 
839 	pp->pr_nget++;
840 
841 	/*
842 	 * If we have a low water mark and we are now below that low
843 	 * water mark, add more items to the pool.
844 	 */
845 	if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
846 		/*
847 		 * XXX: Should we log a warning?  Should we set up a timeout
848 		 * to try again in a second or so?  The latter could break
849 		 * a caller's assumptions about interrupt protection, etc.
850 		 */
851 	}
852 
853 	pr_leave(pp);
854 	simple_unlock(&pp->pr_slock);
855 	return (v);
856 }
857 
858 /*
859  * Internal version of pool_put().  Pool is already locked/entered.
860  */
861 void
862 pool_do_put(struct pool *pp, void *v)
863 {
864 	struct pool_item *pi = v;
865 	struct pool_item_header *ph;
866 	caddr_t page;
867 	int s;
868 
869 	LOCK_ASSERT(simple_lock_held(&pp->pr_slock));
870 
871 	page = (caddr_t)((vaddr_t)v & pp->pr_alloc->pa_pagemask);
872 
873 #ifdef DIAGNOSTIC
874 	if (__predict_false(pp->pr_nout == 0)) {
875 		printf("pool %s: putting with none out\n",
876 		    pp->pr_wchan);
877 		panic("pool_put");
878 	}
879 #endif
880 
881 	if (__predict_false((ph = pr_find_pagehead(pp, page)) == NULL)) {
882 		pr_printlog(pp, NULL, printf);
883 		panic("pool_put: %s: page header missing", pp->pr_wchan);
884 	}
885 
886 #ifdef LOCKDEBUG
887 	/*
888 	 * Check if we're freeing a locked simple lock.
889 	 */
890 	simple_lock_freecheck((caddr_t)pi, ((caddr_t)pi) + pp->pr_size);
891 #endif
892 
893 	/*
894 	 * Return to item list.
895 	 */
896 #ifdef DIAGNOSTIC
897 	pi->pi_magic = PI_MAGIC;
898 #endif
899 #ifdef DEBUG
900 	{
901 		int i, *ip = v;
902 
903 		for (i = 0; i < pp->pr_size / sizeof(int); i++) {
904 			*ip++ = PI_MAGIC;
905 		}
906 	}
907 #endif
908 
909 	TAILQ_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
910 	ph->ph_nmissing--;
911 	pp->pr_nput++;
912 	pp->pr_nitems++;
913 	pp->pr_nout--;
914 
915 	/* Cancel "pool empty" condition if it exists */
916 	if (pp->pr_curpage == NULL)
917 		pp->pr_curpage = ph;
918 
919 	if (pp->pr_flags & PR_WANTED) {
920 		pp->pr_flags &= ~PR_WANTED;
921 		if (ph->ph_nmissing == 0)
922 			pp->pr_nidle++;
923 		wakeup((caddr_t)pp);
924 		return;
925 	}
926 
927 	/*
928 	 * If this page is now complete, do one of two things:
929 	 *
930 	 *	(1) If we have more pages than the page high water
931 	 *	    mark, free the page back to the system.
932 	 *
933 	 *	(2) Move it to the end of the page list, so that
934 	 *	    we minimize our chances of fragmenting the
935 	 *	    pool.  Idle pages migrate to the end (along with
936 	 *	    completely empty pages, so that we find un-empty
937 	 *	    pages more quickly when we update curpage) of the
938 	 *	    list so they can be more easily swept up by
939 	 *	    the pagedaemon when pages are scarce.
940 	 */
941 	if (ph->ph_nmissing == 0) {
942 		pp->pr_nidle++;
943 		if (pp->pr_npages > pp->pr_maxpages ||
944 		    (pp->pr_alloc->pa_flags & PA_WANT) != 0) {
945 			pr_rmpage(pp, ph, NULL);
946 		} else {
947 			TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist);
948 			TAILQ_INSERT_TAIL(&pp->pr_pagelist, ph, ph_pagelist);
949 
950 			/*
951 			 * Update the timestamp on the page.  A page must
952 			 * be idle for some period of time before it can
953 			 * be reclaimed by the pagedaemon.  This minimizes
954 			 * ping-pong'ing for memory.
955 			 */
956 			s = splclock();
957 			ph->ph_time = mono_time;
958 			splx(s);
959 
960 			/*
961 			 * Update the current page pointer.  Just look for
962 			 * the first page with any free items.
963 			 *
964 			 * XXX: Maybe we want an option to look for the
965 			 * page with the fewest available items, to minimize
966 			 * fragmentation?
967 			 */
968 			TAILQ_FOREACH(ph, &pp->pr_pagelist, ph_pagelist)
969 				if (TAILQ_FIRST(&ph->ph_itemlist) != NULL)
970 					break;
971 
972 			pp->pr_curpage = ph;
973 		}
974 	}
975 	/*
976 	 * If the page has just become un-empty, move it to the head of
977 	 * the list, and make it the current page.  The next allocation
978 	 * will get the item from this page, instead of further fragmenting
979 	 * the pool.
980 	 */
981 	else if (ph->ph_nmissing == (pp->pr_itemsperpage - 1)) {
982 		TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist);
983 		TAILQ_INSERT_HEAD(&pp->pr_pagelist, ph, ph_pagelist);
984 		pp->pr_curpage = ph;
985 	}
986 }
987 
988 /*
989  * Return resource to the pool; must be called at appropriate spl level
990  */
991 #ifdef POOL_DIAGNOSTIC
992 void
993 _pool_put(struct pool *pp, void *v, const char *file, long line)
994 {
995 
996 	simple_lock(&pp->pr_slock);
997 	pr_enter(pp, file, line);
998 
999 	pr_log(pp, v, PRLOG_PUT, file, line);
1000 
1001 	pool_do_put(pp, v);
1002 
1003 	pr_leave(pp);
1004 	simple_unlock(&pp->pr_slock);
1005 }
1006 #undef pool_put
1007 #endif /* POOL_DIAGNOSTIC */
1008 
1009 void
1010 pool_put(struct pool *pp, void *v)
1011 {
1012 
1013 	simple_lock(&pp->pr_slock);
1014 
1015 	pool_do_put(pp, v);
1016 
1017 	simple_unlock(&pp->pr_slock);
1018 }
1019 
1020 #ifdef POOL_DIAGNOSTIC
1021 #define		pool_put(h, v)	_pool_put((h), (v), __FILE__, __LINE__)
1022 #endif
1023 
1024 /*
1025  * Add N items to the pool.
1026  */
1027 int
1028 pool_prime(struct pool *pp, int n)
1029 {
1030 	struct pool_item_header *ph;
1031 	caddr_t cp;
1032 	int newpages;
1033 
1034 	simple_lock(&pp->pr_slock);
1035 
1036 	newpages = roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1037 
1038 	while (newpages-- > 0) {
1039 		simple_unlock(&pp->pr_slock);
1040 		cp = pool_allocator_alloc(pp, PR_NOWAIT);
1041 		if (__predict_true(cp != NULL))
1042 			ph = pool_alloc_item_header(pp, cp, PR_NOWAIT);
1043 		simple_lock(&pp->pr_slock);
1044 
1045 		if (__predict_false(cp == NULL || ph == NULL)) {
1046 			if (cp != NULL)
1047 				pool_allocator_free(pp, cp);
1048 			break;
1049 		}
1050 
1051 		pool_prime_page(pp, cp, ph);
1052 		pp->pr_npagealloc++;
1053 		pp->pr_minpages++;
1054 	}
1055 
1056 	if (pp->pr_minpages >= pp->pr_maxpages)
1057 		pp->pr_maxpages = pp->pr_minpages + 1;	/* XXX */
1058 
1059 	simple_unlock(&pp->pr_slock);
1060 	return (0);
1061 }
1062 
1063 /*
1064  * Add a page worth of items to the pool.
1065  *
1066  * Note, we must be called with the pool descriptor LOCKED.
1067  */
1068 void
1069 pool_prime_page(struct pool *pp, caddr_t storage, struct pool_item_header *ph)
1070 {
1071 	struct pool_item *pi;
1072 	caddr_t cp = storage;
1073 	unsigned int align = pp->pr_align;
1074 	unsigned int ioff = pp->pr_itemoffset;
1075 	int n;
1076 
1077 #ifdef DIAGNOSTIC
1078 	if (((u_long)cp & (pp->pr_alloc->pa_pagesz - 1)) != 0)
1079 		panic("pool_prime_page: %s: unaligned page", pp->pr_wchan);
1080 #endif
1081 
1082 	if ((pp->pr_roflags & PR_PHINPAGE) == 0)
1083 		LIST_INSERT_HEAD(&pp->pr_hashtab[PR_HASH_INDEX(pp, cp)],
1084 		    ph, ph_hashlist);
1085 
1086 	/*
1087 	 * Insert page header.
1088 	 */
1089 	TAILQ_INSERT_HEAD(&pp->pr_pagelist, ph, ph_pagelist);
1090 	TAILQ_INIT(&ph->ph_itemlist);
1091 	ph->ph_page = storage;
1092 	ph->ph_nmissing = 0;
1093 	memset(&ph->ph_time, 0, sizeof(ph->ph_time));
1094 
1095 	pp->pr_nidle++;
1096 
1097 	/*
1098 	 * Color this page.
1099 	 */
1100 	cp = (caddr_t)(cp + pp->pr_curcolor);
1101 	if ((pp->pr_curcolor += align) > pp->pr_maxcolor)
1102 		pp->pr_curcolor = 0;
1103 
1104 	/*
1105 	 * Adjust storage to apply alignment to `pr_itemoffset' in each item.
1106 	 */
1107 	if (ioff != 0)
1108 		cp = (caddr_t)(cp + (align - ioff));
1109 
1110 	/*
1111 	 * Insert remaining chunks on the bucket list.
1112 	 */
1113 	n = pp->pr_itemsperpage;
1114 	pp->pr_nitems += n;
1115 
1116 	while (n--) {
1117 		pi = (struct pool_item *)cp;
1118 
1119 		/* Insert on page list */
1120 		TAILQ_INSERT_TAIL(&ph->ph_itemlist, pi, pi_list);
1121 #ifdef DIAGNOSTIC
1122 		pi->pi_magic = PI_MAGIC;
1123 #endif
1124 		cp = (caddr_t)(cp + pp->pr_size);
1125 	}
1126 
1127 	/*
1128 	 * If the pool was depleted, point at the new page.
1129 	 */
1130 	if (pp->pr_curpage == NULL)
1131 		pp->pr_curpage = ph;
1132 
1133 	if (++pp->pr_npages > pp->pr_hiwat)
1134 		pp->pr_hiwat = pp->pr_npages;
1135 }
1136 
1137 /*
1138  * Used by pool_get() when nitems drops below the low water mark.  This
1139  * is used to catch up nitems with the low water mark.
1140  *
1141  * Note 1, we never wait for memory here, we let the caller decide what to do.
1142  *
1143  * Note 2, we must be called with the pool already locked, and we return
1144  * with it locked.
1145  */
1146 int
1147 pool_catchup(struct pool *pp)
1148 {
1149 	struct pool_item_header *ph;
1150 	caddr_t cp;
1151 	int error = 0;
1152 
1153 	while (POOL_NEEDS_CATCHUP(pp)) {
1154 		/*
1155 		 * Call the page back-end allocator for more memory.
1156 		 *
1157 		 * XXX: We never wait, so should we bother unlocking
1158 		 * the pool descriptor?
1159 		 */
1160 		simple_unlock(&pp->pr_slock);
1161 		cp = pool_allocator_alloc(pp, PR_NOWAIT);
1162 		if (__predict_true(cp != NULL))
1163 			ph = pool_alloc_item_header(pp, cp, PR_NOWAIT);
1164 		simple_lock(&pp->pr_slock);
1165 		if (__predict_false(cp == NULL || ph == NULL)) {
1166 			if (cp != NULL)
1167 				pool_allocator_free(pp, cp);
1168 			error = ENOMEM;
1169 			break;
1170 		}
1171 		pool_prime_page(pp, cp, ph);
1172 		pp->pr_npagealloc++;
1173 	}
1174 
1175 	return (error);
1176 }
1177 
1178 void
1179 pool_setlowat(struct pool *pp, int n)
1180 {
1181 
1182 	simple_lock(&pp->pr_slock);
1183 
1184 	pp->pr_minitems = n;
1185 	pp->pr_minpages = (n == 0)
1186 		? 0
1187 		: roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1188 
1189 	/* Make sure we're caught up with the newly-set low water mark. */
1190 	if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
1191 		/*
1192 		 * XXX: Should we log a warning?  Should we set up a timeout
1193 		 * to try again in a second or so?  The latter could break
1194 		 * a caller's assumptions about interrupt protection, etc.
1195 		 */
1196 	}
1197 
1198 	simple_unlock(&pp->pr_slock);
1199 }
1200 
1201 void
1202 pool_sethiwat(struct pool *pp, int n)
1203 {
1204 
1205 	simple_lock(&pp->pr_slock);
1206 
1207 	pp->pr_maxpages = (n == 0)
1208 		? 0
1209 		: roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1210 
1211 	simple_unlock(&pp->pr_slock);
1212 }
1213 
1214 int
1215 pool_sethardlimit(struct pool *pp, unsigned n, const char *warnmess, int ratecap)
1216 {
1217 	int error = 0;
1218 
1219 	simple_lock(&pp->pr_slock);
1220 
1221 	if (n < pp->pr_nout) {
1222 		error = EINVAL;
1223 		goto done;
1224 	}
1225 
1226 	pp->pr_hardlimit = n;
1227 	pp->pr_hardlimit_warning = warnmess;
1228 	pp->pr_hardlimit_ratecap.tv_sec = ratecap;
1229 	pp->pr_hardlimit_warning_last.tv_sec = 0;
1230 	pp->pr_hardlimit_warning_last.tv_usec = 0;
1231 
1232 	/*
1233 	 * In-line version of pool_sethiwat(), because we don't want to
1234 	 * release the lock.
1235 	 */
1236 	pp->pr_maxpages = (n == 0 || n == UINT_MAX)
1237 		? n
1238 		: roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1239 
1240  done:
1241 	simple_unlock(&pp->pr_slock);
1242 
1243 	return (error);
1244 }
1245 
1246 /*
1247  * Release all complete pages that have not been used recently.
1248  *
1249  * Returns non-zero if any pages have been reclaimed.
1250  */
1251 int
1252 #ifdef POOL_DIAGNOSTIC
1253 _pool_reclaim(struct pool *pp, const char *file, long line)
1254 #else
1255 pool_reclaim(struct pool *pp)
1256 #endif
1257 {
1258 	struct pool_item_header *ph, *phnext;
1259 	struct pool_cache *pc;
1260 	struct timeval curtime;
1261 	struct pool_pagelist pq;
1262 	int s;
1263 
1264 	if (pp->pr_drain_hook != NULL) {
1265 		/*
1266 		 * The drain hook must be called with the pool unlocked.
1267 		 */
1268 		(*pp->pr_drain_hook)(pp->pr_drain_hook_arg, PR_NOWAIT);
1269 	}
1270 
1271 	if (simple_lock_try(&pp->pr_slock) == 0)
1272 		return (0);
1273 	pr_enter(pp, file, line);
1274 
1275 	TAILQ_INIT(&pq);
1276 
1277 	/*
1278 	 * Reclaim items from the pool's caches.
1279 	 */
1280 	TAILQ_FOREACH(pc, &pp->pr_cachelist, pc_poollist)
1281 		pool_cache_reclaim(pc);
1282 
1283 	s = splclock();
1284 	curtime = mono_time;
1285 	splx(s);
1286 
1287 	for (ph = TAILQ_FIRST(&pp->pr_pagelist); ph != NULL; ph = phnext) {
1288 		phnext = TAILQ_NEXT(ph, ph_pagelist);
1289 
1290 		/* Check our minimum page claim */
1291 		if (pp->pr_npages <= pp->pr_minpages)
1292 			break;
1293 
1294 		if (ph->ph_nmissing == 0) {
1295 			struct timeval diff;
1296 			timersub(&curtime, &ph->ph_time, &diff);
1297 			if (diff.tv_sec < pool_inactive_time)
1298 				continue;
1299 
1300 			/*
1301 			 * If freeing this page would put us below
1302 			 * the low water mark, stop now.
1303 			 */
1304 			if ((pp->pr_nitems - pp->pr_itemsperpage) <
1305 			    pp->pr_minitems)
1306 				break;
1307 
1308 			pr_rmpage(pp, ph, &pq);
1309 		}
1310 	}
1311 
1312 	pr_leave(pp);
1313 	simple_unlock(&pp->pr_slock);
1314 	if (TAILQ_EMPTY(&pq))
1315 		return (0);
1316 	while ((ph = TAILQ_FIRST(&pq)) != NULL) {
1317 		TAILQ_REMOVE(&pq, ph, ph_pagelist);
1318 		pool_allocator_free(pp, ph->ph_page);
1319 		if (pp->pr_roflags & PR_PHINPAGE) {
1320 			continue;
1321 		}
1322 		LIST_REMOVE(ph, ph_hashlist);
1323 		s = splhigh();
1324 		pool_put(&phpool, ph);
1325 		splx(s);
1326 	}
1327 
1328 	return (1);
1329 }
1330 
1331 
1332 /*
1333  * Drain pools, one at a time.
1334  *
1335  * Note, we must never be called from an interrupt context.
1336  */
1337 void
1338 pool_drain(void *arg)
1339 {
1340 	struct pool *pp;
1341 	int s;
1342 
1343 	pp = NULL;
1344 	s = splvm();
1345 	simple_lock(&pool_head_slock);
1346 	if (drainpp == NULL) {
1347 		drainpp = TAILQ_FIRST(&pool_head);
1348 	}
1349 	if (drainpp) {
1350 		pp = drainpp;
1351 		drainpp = TAILQ_NEXT(pp, pr_poollist);
1352 	}
1353 	simple_unlock(&pool_head_slock);
1354 	pool_reclaim(pp);
1355 	splx(s);
1356 }
1357 
1358 /*
1359  * Diagnostic helpers.
1360  */
1361 void
1362 pool_printit(struct pool *pp, const char *modif, int (*pr)(const char *, ...))
1363 {
1364 	int s;
1365 
1366 	s = splvm();
1367 	if (simple_lock_try(&pp->pr_slock) == 0) {
1368 		pr("pool %s is locked; try again later\n",
1369 		    pp->pr_wchan);
1370 		splx(s);
1371 		return;
1372 	}
1373 	pool_print1(pp, modif, pr);
1374 	simple_unlock(&pp->pr_slock);
1375 	splx(s);
1376 }
1377 
1378 void
1379 pool_print1(struct pool *pp, const char *modif, int (*pr)(const char *, ...))
1380 {
1381 	struct pool_item_header *ph;
1382 	struct pool_cache *pc;
1383 	struct pool_cache_group *pcg;
1384 #ifdef DIAGNOSTIC
1385 	struct pool_item *pi;
1386 #endif
1387 	int i, print_log = 0, print_pagelist = 0, print_cache = 0;
1388 	char c;
1389 
1390 	while ((c = *modif++) != '\0') {
1391 		if (c == 'l')
1392 			print_log = 1;
1393 		if (c == 'p')
1394 			print_pagelist = 1;
1395 		if (c == 'c')
1396 			print_cache = 1;
1398 	}
1399 
1400 	(*pr)("POOL %s: size %u, align %u, ioff %u, roflags 0x%08x\n",
1401 	    pp->pr_wchan, pp->pr_size, pp->pr_align, pp->pr_itemoffset,
1402 	    pp->pr_roflags);
1403 	(*pr)("\talloc %p\n", pp->pr_alloc);
1404 	(*pr)("\tminitems %u, minpages %u, maxpages %u, npages %u\n",
1405 	    pp->pr_minitems, pp->pr_minpages, pp->pr_maxpages, pp->pr_npages);
1406 	(*pr)("\titemsperpage %u, nitems %u, nout %u, hardlimit %u\n",
1407 	    pp->pr_itemsperpage, pp->pr_nitems, pp->pr_nout, pp->pr_hardlimit);
1408 
1409 	(*pr)("\n\tnget %lu, nfail %lu, nput %lu\n",
1410 	    pp->pr_nget, pp->pr_nfail, pp->pr_nput);
1411 	(*pr)("\tnpagealloc %lu, npagefree %lu, hiwat %u, nidle %lu\n",
1412 	    pp->pr_npagealloc, pp->pr_npagefree, pp->pr_hiwat, pp->pr_nidle);
1413 
1414 	if (print_pagelist == 0)
1415 		goto skip_pagelist;
1416 
1417 	if ((ph = TAILQ_FIRST(&pp->pr_pagelist)) != NULL)
1418 		(*pr)("\n\tpage list:\n");
1419 	for (; ph != NULL; ph = TAILQ_NEXT(ph, ph_pagelist)) {
1420 		(*pr)("\t\tpage %p, nmissing %d, time %lu,%lu\n",
1421 		    ph->ph_page, ph->ph_nmissing,
1422 		    (u_long)ph->ph_time.tv_sec,
1423 		    (u_long)ph->ph_time.tv_usec);
1424 #ifdef DIAGNOSTIC
1425 		TAILQ_FOREACH(pi, &ph->ph_itemlist, pi_list) {
1426 			if (pi->pi_magic != PI_MAGIC) {
1427 				(*pr)("\t\t\titem %p, magic 0x%x\n",
1428 				    pi, pi->pi_magic);
1429 			}
1430 		}
1431 #endif
1432 	}
1433 	if (pp->pr_curpage == NULL)
1434 		(*pr)("\tno current page\n");
1435 	else
1436 		(*pr)("\tcurpage %p\n", pp->pr_curpage->ph_page);
1437 
1438  skip_pagelist:
1439 
1440 	if (print_log == 0)
1441 		goto skip_log;
1442 
1443 	(*pr)("\n");
1444 	if ((pp->pr_roflags & PR_LOGGING) == 0)
1445 		(*pr)("\tno log\n");
1446 	else
1447 		pr_printlog(pp, NULL, pr);
1448 
1449  skip_log:
1450 
1451 	if (print_cache == 0)
1452 		goto skip_cache;
1453 
1454 	TAILQ_FOREACH(pc, &pp->pr_cachelist, pc_poollist) {
1455 		(*pr)("\tcache %p: allocfrom %p freeto %p\n", pc,
1456 		    pc->pc_allocfrom, pc->pc_freeto);
1457 		(*pr)("\t    hits %lu misses %lu ngroups %lu nitems %lu\n",
1458 		    pc->pc_hits, pc->pc_misses, pc->pc_ngroups, pc->pc_nitems);
1459 		TAILQ_FOREACH(pcg, &pc->pc_grouplist, pcg_list) {
1460 			(*pr)("\t\tgroup %p: avail %d\n", pcg, pcg->pcg_avail);
1461 			for (i = 0; i < PCG_NOBJECTS; i++)
1462 				(*pr)("\t\t\t%p\n", pcg->pcg_objects[i]);
1463 		}
1464 	}
1465 
1466  skip_cache:
1467 
1468 	pr_enter_check(pp, pr);
1469 }
1470 
1471 int
1472 pool_chk(struct pool *pp, const char *label)
1473 {
1474 	struct pool_item_header *ph;
1475 	int r = 0;
1476 
1477 	simple_lock(&pp->pr_slock);
1478 
1479 	TAILQ_FOREACH(ph, &pp->pr_pagelist, ph_pagelist) {
1480 		struct pool_item *pi;
1481 		int n;
1482 		caddr_t page;
1483 
1484 		page = (caddr_t)((vaddr_t)ph & pp->pr_alloc->pa_pagemask);
1485 		if (page != ph->ph_page &&
1486 		    (pp->pr_roflags & PR_PHINPAGE) != 0) {
1487 			if (label != NULL)
1488 				printf("%s: ", label);
1489 			printf("pool(%p:%s): page inconsistency: page %p;"
1490 			       " at page head addr %p (p %p)\n", pp,
1491 				pp->pr_wchan, ph->ph_page,
1492 				ph, page);
1493 			r++;
1494 			goto out;
1495 		}
1496 
1497 		for (pi = TAILQ_FIRST(&ph->ph_itemlist), n = 0;
1498 		     pi != NULL;
1499 		     pi = TAILQ_NEXT(pi,pi_list), n++) {
1500 
1501 #ifdef DIAGNOSTIC
1502 			if (pi->pi_magic != PI_MAGIC) {
1503 				if (label != NULL)
1504 					printf("%s: ", label);
1505 				printf("pool(%s): free list modified: magic=%x;"
1506 				       " page %p; item ordinal %d;"
1507 				       " addr %p (p %p)\n",
1508 					pp->pr_wchan, pi->pi_magic, ph->ph_page,
1509 					n, pi, page);
1510 				panic("pool");
1511 			}
1512 #endif
1513 			page = (caddr_t)((vaddr_t)pi & pp->pr_alloc->pa_pagemask);
1514 			if (page == ph->ph_page)
1515 				continue;
1516 
1517 			if (label != NULL)
1518 				printf("%s: ", label);
1519 			printf("pool(%p:%s): page inconsistency: page %p;"
1520 			       " item ordinal %d; addr %p (p %p)\n", pp,
1521 				pp->pr_wchan, ph->ph_page,
1522 				n, pi, page);
1523 			r++;
1524 			goto out;
1525 		}
1526 	}
1527 out:
1528 	simple_unlock(&pp->pr_slock);
1529 	return (r);
1530 }
1531 
1532 /*
1533  * pool_cache_init:
1534  *
1535  *	Initialize a pool cache.
1536  *
1537  *	NOTE: If the pool must be protected from interrupts, we expect
1538  *	to be called at the appropriate interrupt priority level.
1539  */
1540 void
1541 pool_cache_init(struct pool_cache *pc, struct pool *pp,
1542     int (*ctor)(void *, void *, int),
1543     void (*dtor)(void *, void *),
1544     void *arg)
1545 {
1546 
1547 	TAILQ_INIT(&pc->pc_grouplist);
1548 	simple_lock_init(&pc->pc_slock);
1549 
1550 	pc->pc_allocfrom = NULL;
1551 	pc->pc_freeto = NULL;
1552 	pc->pc_pool = pp;
1553 
1554 	pc->pc_ctor = ctor;
1555 	pc->pc_dtor = dtor;
1556 	pc->pc_arg  = arg;
1557 
1558 	pc->pc_hits   = 0;
1559 	pc->pc_misses = 0;
1560 
1561 	pc->pc_ngroups = 0;
1562 
1563 	pc->pc_nitems = 0;
1564 
1565 	simple_lock(&pp->pr_slock);
1566 	TAILQ_INSERT_TAIL(&pp->pr_cachelist, pc, pc_poollist);
1567 	simple_unlock(&pp->pr_slock);
1568 }
1569 
1570 /*
1571  * pool_cache_destroy:
1572  *
1573  *	Destroy a pool cache.
1574  */
1575 void
1576 pool_cache_destroy(struct pool_cache *pc)
1577 {
1578 	struct pool *pp = pc->pc_pool;
1579 
1580 	/* First, invalidate the entire cache. */
1581 	pool_cache_invalidate(pc);
1582 
1583 	/* ...and remove it from the pool's cache list. */
1584 	simple_lock(&pp->pr_slock);
1585 	TAILQ_REMOVE(&pp->pr_cachelist, pc, pc_poollist);
1586 	simple_unlock(&pp->pr_slock);
1587 }
1588 
1589 static __inline void *
1590 pcg_get(struct pool_cache_group *pcg)
1591 {
1592 	void *object;
1593 	u_int idx;
1594 
1595 	KASSERT(pcg->pcg_avail <= PCG_NOBJECTS);
1596 	KASSERT(pcg->pcg_avail != 0);
1597 	idx = --pcg->pcg_avail;
1598 
1599 	KASSERT(pcg->pcg_objects[idx] != NULL);
1600 	object = pcg->pcg_objects[idx];
1601 	pcg->pcg_objects[idx] = NULL;
1602 
1603 	return (object);
1604 }
1605 
1606 static __inline void
1607 pcg_put(struct pool_cache_group *pcg, void *object)
1608 {
1609 	u_int idx;
1610 
1611 	KASSERT(pcg->pcg_avail < PCG_NOBJECTS);
1612 	idx = pcg->pcg_avail++;
1613 
1614 	KASSERT(pcg->pcg_objects[idx] == NULL);
1615 	pcg->pcg_objects[idx] = object;
1616 }
1617 
1618 /*
1619  * pool_cache_get:
1620  *
1621  *	Get an object from a pool cache.
1622  */
1623 void *
1624 pool_cache_get(struct pool_cache *pc, int flags)
1625 {
1626 	struct pool_cache_group *pcg;
1627 	void *object;
1628 
1629 #ifdef LOCKDEBUG
1630 	if (flags & PR_WAITOK)
1631 		simple_lock_only_held(NULL, "pool_cache_get(PR_WAITOK)");
1632 #endif
1633 
1634 	simple_lock(&pc->pc_slock);
1635 
1636 	if ((pcg = pc->pc_allocfrom) == NULL) {
1637 		TAILQ_FOREACH(pcg, &pc->pc_grouplist, pcg_list) {
1638 			if (pcg->pcg_avail != 0) {
1639 				pc->pc_allocfrom = pcg;
1640 				goto have_group;
1641 			}
1642 		}
1643 
1644 		/*
1645 		 * No groups with any available objects.  Allocate
1646 		 * a new object, construct it, and return it to
1647 		 * the caller.  We will allocate a group, if necessary,
1648 		 * when the object is freed back to the cache.
1649 		 */
1650 		pc->pc_misses++;
1651 		simple_unlock(&pc->pc_slock);
1652 		object = pool_get(pc->pc_pool, flags);
1653 		if (object != NULL && pc->pc_ctor != NULL) {
1654 			if ((*pc->pc_ctor)(pc->pc_arg, object, flags) != 0) {
1655 				pool_put(pc->pc_pool, object);
1656 				return (NULL);
1657 			}
1658 		}
1659 		return (object);
1660 	}
1661 
1662  have_group:
1663 	pc->pc_hits++;
1664 	pc->pc_nitems--;
1665 	object = pcg_get(pcg);
1666 
1667 	if (pcg->pcg_avail == 0)
1668 		pc->pc_allocfrom = NULL;
1669 
1670 	simple_unlock(&pc->pc_slock);
1671 
1672 	return (object);
1673 }
1674 
1675 /*
1676  * pool_cache_put:
1677  *
1678  *	Put an object back to the pool cache.
1679  */
1680 void
1681 pool_cache_put(struct pool_cache *pc, void *object)
1682 {
1683 	struct pool_cache_group *pcg;
1684 	int s;
1685 
1686 	simple_lock(&pc->pc_slock);
1687 
1688 	if ((pcg = pc->pc_freeto) == NULL) {
1689 		TAILQ_FOREACH(pcg, &pc->pc_grouplist, pcg_list) {
1690 			if (pcg->pcg_avail != PCG_NOBJECTS) {
1691 				pc->pc_freeto = pcg;
1692 				goto have_group;
1693 			}
1694 		}
1695 
1696 		/*
1697 		 * No empty groups to free the object to.  Attempt to
1698 		 * allocate one.
1699 		 */
1700 		simple_unlock(&pc->pc_slock);
1701 		s = splvm();
1702 		pcg = pool_get(&pcgpool, PR_NOWAIT);
1703 		splx(s);
1704 		if (pcg != NULL) {
1705 			memset(pcg, 0, sizeof(*pcg));
1706 			simple_lock(&pc->pc_slock);
1707 			pc->pc_ngroups++;
1708 			TAILQ_INSERT_TAIL(&pc->pc_grouplist, pcg, pcg_list);
1709 			if (pc->pc_freeto == NULL)
1710 				pc->pc_freeto = pcg;
1711 			goto have_group;
1712 		}
1713 
1714 		/*
1715 		 * Unable to allocate a cache group; destruct the object
1716 		 * and free it back to the pool.
1717 		 */
1718 		pool_cache_destruct_object(pc, object);
1719 		return;
1720 	}
1721 
1722  have_group:
1723 	pc->pc_nitems++;
1724 	pcg_put(pcg, object);
1725 
1726 	if (pcg->pcg_avail == PCG_NOBJECTS)
1727 		pc->pc_freeto = NULL;
1728 
1729 	simple_unlock(&pc->pc_slock);
1730 }
1731 
1732 /*
1733  * pool_cache_destruct_object:
1734  *
1735  *	Force destruction of an object and its release back into
1736  *	the pool.
1737  */
1738 void
1739 pool_cache_destruct_object(struct pool_cache *pc, void *object)
1740 {
1741 
1742 	if (pc->pc_dtor != NULL)
1743 		(*pc->pc_dtor)(pc->pc_arg, object);
1744 	pool_put(pc->pc_pool, object);
1745 }
1746 
1747 /*
1748  * pool_cache_do_invalidate:
1749  *
1750  *	This internal function implements pool_cache_invalidate() and
1751  *	pool_cache_reclaim().
1752  */
1753 void
1754 pool_cache_do_invalidate(struct pool_cache *pc, int free_groups,
1755     void (*putit)(struct pool *, void *))
1756 {
1757 	struct pool_cache_group *pcg, *npcg;
1758 	void *object;
1759 	int s;
1760 
1761 	for (pcg = TAILQ_FIRST(&pc->pc_grouplist); pcg != NULL;
1762 	     pcg = npcg) {
1763 		npcg = TAILQ_NEXT(pcg, pcg_list);
1764 		while (pcg->pcg_avail != 0) {
1765 			pc->pc_nitems--;
1766 			object = pcg_get(pcg);
1767 			if (pcg->pcg_avail == 0 && pc->pc_allocfrom == pcg)
1768 				pc->pc_allocfrom = NULL;
1769 			if (pc->pc_dtor != NULL)
1770 				(*pc->pc_dtor)(pc->pc_arg, object);
1771 			(*putit)(pc->pc_pool, object);
1772 		}
1773 		if (free_groups) {
1774 			pc->pc_ngroups--;
1775 			TAILQ_REMOVE(&pc->pc_grouplist, pcg, pcg_list);
1776 			if (pc->pc_freeto == pcg)
1777 				pc->pc_freeto = NULL;
1778 			s = splvm();
1779 			pool_put(&pcgpool, pcg);
1780 			splx(s);
1781 		}
1782 	}
1783 }
1784 
1785 /*
1786  * pool_cache_invalidate:
1787  *
1788  *	Invalidate a pool cache (destruct and release all of the
1789  *	cached objects).
1790  */
1791 void
1792 pool_cache_invalidate(struct pool_cache *pc)
1793 {
1794 
1795 	simple_lock(&pc->pc_slock);
1796 	pool_cache_do_invalidate(pc, 0, pool_put);
1797 	simple_unlock(&pc->pc_slock);
1798 }
1799 
1800 /*
1801  * pool_cache_reclaim:
1802  *
1803  *	Reclaim a pool cache for pool_reclaim().
1804  */
1805 void
1806 pool_cache_reclaim(struct pool_cache *pc)
1807 {
1808 
1809 	simple_lock(&pc->pc_slock);
1810 	pool_cache_do_invalidate(pc, 1, pool_do_put);
1811 	simple_unlock(&pc->pc_slock);
1812 }
1813 
1814 /*
1815  * We have three different sysctls.
1816  * kern.pool.npools - the number of pools.
1817  * kern.pool.pool.<pool#> - the pool struct for the pool#.
1818  * kern.pool.name.<pool#> - the name for pool#.
1819  */
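/*
 * Illustrative userland query (a sketch, assuming the node is rooted at
 * CTL_KERN/KERN_POOL as defined in <sys/sysctl.h>):
 *
 *	int mib[3] = { CTL_KERN, KERN_POOL, KERN_POOL_NPOOLS };
 *	int npools;
 *	size_t len = sizeof(npools);
 *
 *	if (sysctl(mib, 3, &npools, &len, NULL, 0) == -1)
 *		err(1, "sysctl");
 */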
1820 int
1821 sysctl_dopool(int *name, u_int namelen, char *where, size_t *sizep)
1822 {
1823 	struct pool *pp, *foundpool = NULL;
1824 	size_t buflen = where != NULL ? *sizep : 0;
1825 	int npools = 0, s;
1826 	unsigned int lookfor;
1827 	size_t len;
1828 
1829 	switch (*name) {
1830 	case KERN_POOL_NPOOLS:
1831 		if (namelen != 1 || buflen != sizeof(int))
1832 			return (EINVAL);
1833 		lookfor = 0;
1834 		break;
1835 	case KERN_POOL_NAME:
1836 		if (namelen != 2 || buflen < 1)
1837 			return (EINVAL);
1838 		lookfor = name[1];
1839 		break;
1840 	case KERN_POOL_POOL:
1841 		if (namelen != 2 || buflen != sizeof(struct pool))
1842 			return (EINVAL);
1843 		lookfor = name[1];
1844 		break;
1845 	default:
1846 		return (EINVAL);
1847 	}
1848 
1849 	s = splvm();
1850 	simple_lock(&pool_head_slock);
1851 
1852 	TAILQ_FOREACH(pp, &pool_head, pr_poollist) {
1853 		npools++;
1854 		if (lookfor == pp->pr_serial) {
1855 			foundpool = pp;
1856 			break;
1857 		}
1858 	}
1859 
1860 	simple_unlock(&pool_head_slock);
1861 	splx(s);
1862 
1863 	if (lookfor != 0 && foundpool == NULL)
1864 		return (ENOENT);
1865 
1866 	switch (*name) {
1867 	case KERN_POOL_NPOOLS:
1868 		return copyout(&npools, where, buflen);
1869 	case KERN_POOL_NAME:
1870 		len = strlen(foundpool->pr_wchan) + 1;
1871 		if (*sizep < len)
1872 			return (ENOMEM);
1873 		*sizep = len;
1874 		return copyout(foundpool->pr_wchan, where, len);
1875 	case KERN_POOL_POOL:
1876 		return copyout(foundpool, where, buflen);
1877 	}
1878 	/* NOTREACHED */
1879 	return (0); /* XXX - Stupid gcc */
1880 }
1881 
1882 /*
1883  * Pool backend allocators.
1884  *
1885  * Each pool has a backend allocator that handles allocation, deallocation
1886  * and any additional draining that might be needed.
1887  *
1888  * We provide two standard allocators.
1889  *  pool_allocator_kmem - the default used when no allocator is specified.
1890  *  pool_allocator_nointr - used for pools that will not be accessed in
1891  *   interrupt context.
1892  */
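/*
 * Illustrative sketch of a custom backend allocator (hypothetical
 * "foo_page_alloc" and "foo_page_free", matching the pa_alloc/pa_free
 * signatures used below; a pa_pagesz of 0 selects the default page size):
 *
 *	struct pool_allocator foo_allocator = {
 *		foo_page_alloc, foo_page_free, 0,
 *	};
 *
 *	pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0, "foopl",
 *	    &foo_allocator);
 */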
1893 void	*pool_page_alloc(struct pool *, int);
1894 void	pool_page_free(struct pool *, void *);
1895 void	*pool_page_alloc_nointr(struct pool *, int);
1896 void	pool_page_free_nointr(struct pool *, void *);
1897 
1898 struct pool_allocator pool_allocator_kmem = {
1899 	pool_page_alloc, pool_page_free, 0,
1900 };
1901 struct pool_allocator pool_allocator_nointr = {
1902 	pool_page_alloc_nointr, pool_page_free_nointr, 0,
1903 };
1904 
1905 /*
1906  * XXX - we have at least three different resources for the same allocation
1907  *  and each resource can be depleted. First we have the ready elements in
1908  *  the pool. Then we have the resource (typically a vm_map) for this
1909  *  allocator, then we have physical memory. Waiting for any of these can
1910  *  be unnecessary when any other is freed, but the kernel doesn't support
1911  *  sleeping on multiple addresses, so we have to fake. The caller sleeps on
1912  *  the pool (so that we can be awakened when an item is returned to the pool),
1913  *  but we set PA_WANT on the allocator. When a page is returned to
1914  *  the allocator and PA_WANT is set pool_allocator_free will wakeup all
1915  *  sleeping pools belonging to this allocator. (XXX - thundering herd).
1916  *  We also wake up the allocator in case someone without a pool (malloc)
1917  *  is sleeping waiting for this allocator.
1918  */
1919 
1920 void *
1921 pool_allocator_alloc(struct pool *org, int flags)
1922 {
1923 	struct pool_allocator *pa = org->pr_alloc;
1924 	int freed;
1925 	void *res;
1926 	int s;
1927 
1928 	do {
1929 		if ((res = (*pa->pa_alloc)(org, flags)) != NULL)
1930 			return (res);
1931 		if ((flags & PR_WAITOK) == 0) {
1932 			/*
1933 			 * We only run the drain hook here if PR_NOWAIT.
1934 			 * In other cases the hook will be run in
1935 			 * pool_reclaim.
1936 			 */
1937 			if (org->pr_drain_hook != NULL) {
1938 				(*org->pr_drain_hook)(org->pr_drain_hook_arg,
1939 				    flags);
1940 				if ((res = (*pa->pa_alloc)(org, flags)) != NULL)
1941 					return (res);
1942 			}
1943 			break;
1944 		}
1945 		s = splvm();
1946 		simple_lock(&pa->pa_slock);
1947 		freed = pool_allocator_drain(pa, org, 1);
1948 		simple_unlock(&pa->pa_slock);
1949 		splx(s);
1950 	} while (freed);
1951 	return (NULL);
1952 }
1953 
1954 void
1955 pool_allocator_free(struct pool *pp, void *v)
1956 {
1957 	struct pool_allocator *pa = pp->pr_alloc;
1958 	int s;
1959 
1960 	(*pa->pa_free)(pp, v);
1961 
1962 	s = splvm();
1963 	simple_lock(&pa->pa_slock);
1964 	if ((pa->pa_flags & PA_WANT) == 0) {
1965 		simple_unlock(&pa->pa_slock);
1966 		splx(s);
1967 		return;
1968 	}
1969 
1970 	TAILQ_FOREACH(pp, &pa->pa_list, pr_alloc_list) {
1971 		simple_lock(&pp->pr_slock);
1972 		if ((pp->pr_flags & PR_WANTED) != 0) {
1973 			pp->pr_flags &= ~PR_WANTED;
1974 			wakeup(pp);
1975 		}
1976 		simple_unlock(&pp->pr_slock);
1977 	}
1978 	pa->pa_flags &= ~PA_WANT;
1979 	simple_unlock(&pa->pa_slock);
1980 	splx(s);
1981 }
1982 
1983 /*
1984  * Drain all pools, except 'org', that use this allocator.
1985  *
1986  * Must be called at appropriate spl level and with the allocator locked.
1987  *
1988  * We do this to reclaim va space. pa_alloc is responsible
1989  * for waiting for physical memory.
1990  * XXX - we risk looping forever if someone calls
1991  *  pool_destroy on 'start'. But there is no other way to
1992  *  have potentially sleeping pool_reclaim, non-sleeping
1993  *  locks on pool_allocator and some stirring of drained
1994  *  pools in the allocator.
1995  * XXX - maybe we should use pool_head_slock for locking
1996  *  the allocators?
1997  */
1998 int
1999 pool_allocator_drain(struct pool_allocator *pa, struct pool *org, int need)
2000 {
2001 	struct pool *pp, *start;
2002 	int freed;
2003 
2004 	freed = 0;
2005 
2006 	pp = start = TAILQ_FIRST(&pa->pa_list);
2007 	do {
2008 		TAILQ_REMOVE(&pa->pa_list, pp, pr_alloc_list);
2009 		TAILQ_INSERT_TAIL(&pa->pa_list, pp, pr_alloc_list);
2010 		if (pp == org)
2011 			continue;
2012 		simple_unlock(&pa->pa_slock);
2013 		freed = pool_reclaim(pp);
2014 		simple_lock(&pa->pa_slock);
2015 	} while ((pp = TAILQ_FIRST(&pa->pa_list)) != start && (freed < need));
2016 
2017 	if (!freed) {
2018 		/*
2019 		 * We set PA_WANT here, the caller will most likely
2020 		 * sleep waiting for pages (if not, this won't hurt
2021 		 * that much) and there is no way to set this in the
2022 		 * caller without violating locking order.
2023 		 */
2024 		pa->pa_flags |= PA_WANT;
2025 	}
2026 
2027 	return (freed);
2028 }
2029 
2030 void *
2031 pool_page_alloc(struct pool *pp, int flags)
2032 {
2033 	boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;
2034 	void *ret;
2035 	int s;
2036 
2037 	s = splvm();
2038 	ret = (void *)uvm_km_alloc_poolpage(waitok);
2039 	splx(s);
2040 	return (ret);
2041 }
2042 
2043 void
2044 pool_page_free(struct pool *pp, void *v)
2045 {
2046 	int s;
2047 
2048 	s = splvm();
2049 	uvm_km_free_poolpage((vaddr_t)v);
2050 	splx(s);
2051 }
2052 
2053 void *
2054 pool_page_alloc_nointr(struct pool *pp, int flags)
2055 {
2056 	boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;
2057 
2058 	return ((void *)uvm_km_alloc_poolpage1(kernel_map, uvm.kernel_object,
2059 	    waitok));
2060 }
2061 
2062 void
2063 pool_page_free_nointr(struct pool *pp, void *v)
2064 {
2065 	uvm_km_free_poolpage1(kernel_map, (vaddr_t)v);
2066 }
2067