/*	$NetBSD: subr_pool.c,v 1.65 2001/11/20 06:57:04 enami Exp $	*/

/*-
 * Copyright (c) 1997, 1999, 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Paul Kranenburg; by Jason R. Thorpe of the Numerical Aerospace
 * Simulation Facility, NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.65 2001/11/20 06:57:04 enami Exp $");

#include "opt_pool.h"
#include "opt_poollog.h"
#include "opt_lockdebug.h"
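
/*
 * A hypothetical sketch (not part of this file): the options above are
 * normally set in the kernel configuration file, e.g.
 *
 *	options POOL_DIAGNOSTIC
 *	options POOL_LOGSIZE=32
 *
 * which compiles in the per-pool get/put event log used by pr_log() below.
 */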

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/pool.h>
#include <sys/syslog.h>

#include <uvm/uvm.h>

/*
 * Pool resource management utility.
 *
 * Memory is allocated in pages which are split into pieces according
 * to the pool item size. Each page is kept on a list headed by `pr_pagelist'
 * in the pool structure and the individual pool items are on a linked list
 * headed by `ph_itemlist' in each page header. The memory for building
 * the page list is either taken from the allocated pages themselves (for
 * small pool items) or taken from an internal pool of page headers (`phpool').
 */
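
/*
 * Example (a hypothetical sketch, not part of this file): a subsystem
 * managing fixed-size `struct frobnitz' objects would typically use the
 * pool API as follows; `fnz_pool', `fnzpl' and M_DEVBUF are illustrative
 * choices only.
 *
 *	static struct pool fnz_pool;
 *
 *	pool_init(&fnz_pool, sizeof(struct frobnitz), 0, 0, 0,
 *	    "fnzpl", 0, NULL, NULL, M_DEVBUF);
 *	pool_setlowat(&fnz_pool, 16);
 *
 *	struct frobnitz *f = pool_get(&fnz_pool, PR_WAITOK);
 *	...
 *	pool_put(&fnz_pool, f);
 *
 * Passing NULL for both the alloc and release hooks selects the default
 * page allocator (pool_page_alloc/pool_page_free below).
 */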

/* List of all pools */
TAILQ_HEAD(,pool) pool_head = TAILQ_HEAD_INITIALIZER(pool_head);

/* Private pool for page header structures */
static struct pool phpool;

#ifdef POOL_SUBPAGE
/* Pool of subpages for use by normal pools. */
static struct pool psppool;
#endif

/* # of seconds to retain page after last use */
int pool_inactive_time = 10;

/* Next candidate for drainage (see pool_drain()) */
static struct pool	*drainpp;

/* This spin lock protects both pool_head and drainpp. */
struct simplelock pool_head_slock = SIMPLELOCK_INITIALIZER;

struct pool_item_header {
	/* Page headers */
	TAILQ_ENTRY(pool_item_header)
				ph_pagelist;	/* pool page list */
	TAILQ_HEAD(,pool_item)	ph_itemlist;	/* chunk list for this page */
	LIST_ENTRY(pool_item_header)
				ph_hashlist;	/* Off-page page headers */
	int			ph_nmissing;	/* # of chunks in use */
	caddr_t			ph_page;	/* this page's address */
	struct timeval		ph_time;	/* last referenced */
};
TAILQ_HEAD(pool_pagelist,pool_item_header);

struct pool_item {
#ifdef DIAGNOSTIC
	int pi_magic;
#endif
#define	PI_MAGIC 0xdeadbeef
	/* Other entries use only this list entry */
	TAILQ_ENTRY(pool_item)	pi_list;
};

#define	PR_HASH_INDEX(pp,addr) \
	(((u_long)(addr) >> (pp)->pr_pageshift) & (PR_HASHTABSIZE - 1))
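
/*
 * Example: with 4096-byte pages pr_pageshift is 12, so an item at
 * 0xc0012345 lies on the page at 0xc0012000 and its off-page header is
 * found in hash bucket (0xc0012000 >> 12) & (PR_HASHTABSIZE - 1).
 */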

#define	POOL_NEEDS_CATCHUP(pp)						\
	((pp)->pr_nitems < (pp)->pr_minitems)

/*
 * Pool cache management.
 *
 * Pool caches provide a way for constructed objects to be cached by the
 * pool subsystem.  This can lead to performance improvements by avoiding
 * needless object construction/destruction; it is deferred until absolutely
 * necessary.
 *
 * Caches are grouped into cache groups.  Each cache group references
 * up to 16 constructed objects.  When a cache allocates an object
 * from the pool, it calls the object's constructor and places it into
 * a cache group.  When a cache group frees an object back to the pool,
 * it first calls the object's destructor.  This allows the object to
 * persist in constructed form while freed to the cache.
 *
 * Multiple caches may exist for each pool.  This allows a single
 * object type to have multiple constructed forms.  The pool references
 * each cache, so that when a pool is drained by the pagedaemon, it can
 * drain each individual cache as well.  Each time a cache is drained,
 * the most idle cache group is freed to the pool in its entirety.
 *
 * Pool caches are laid on top of pools.  By layering them, we can avoid
 * the complexity of cache management for pools which would not benefit
 * from it.
 */
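
/*
 * Example (a hypothetical sketch, not part of this file): caching
 * constructed objects on top of the pool sketched above; fnz_ctor and
 * fnz_dtor are illustrative constructor/destructor hooks with the
 * signatures expected by pool_cache_init().
 *
 *	static struct pool_cache fnz_cache;
 *
 *	pool_cache_init(&fnz_cache, &fnz_pool, fnz_ctor, fnz_dtor, NULL);
 *
 *	struct frobnitz *f = pool_cache_get(&fnz_cache, PR_WAITOK);
 *	...
 *	pool_cache_put(&fnz_cache, f);
 *
 * The constructor runs only on a cache miss; a hit returns an
 * already-constructed object from a cache group.
 */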

/* The cache group pool. */
static struct pool pcgpool;

/* The pool cache group. */
#define	PCG_NOBJECTS		16
struct pool_cache_group {
	TAILQ_ENTRY(pool_cache_group)
		pcg_list;	/* link in the pool cache's group list */
	u_int	pcg_avail;	/* # available objects */
				/* pointers to the objects */
	void	*pcg_objects[PCG_NOBJECTS];
};

static void	pool_cache_reclaim(struct pool_cache *);

static int	pool_catchup(struct pool *);
static void	pool_prime_page(struct pool *, caddr_t,
		    struct pool_item_header *);
static void	*pool_page_alloc(unsigned long, int, int);
static void	pool_page_free(void *, unsigned long, int);
#ifdef POOL_SUBPAGE
static void	*pool_subpage_alloc(unsigned long, int, int);
static void	pool_subpage_free(void *, unsigned long, int);
#endif

static void pool_print1(struct pool *, const char *,
	void (*)(const char *, ...));

/*
 * Pool log entry. An array of these is allocated in pool_init().
 */
struct pool_log {
	const char	*pl_file;
	long		pl_line;
	int		pl_action;
#define	PRLOG_GET	1
#define	PRLOG_PUT	2
	void		*pl_addr;
};

/* Number of entries in pool log buffers */
#ifndef POOL_LOGSIZE
#define	POOL_LOGSIZE	10
#endif

int pool_logsize = POOL_LOGSIZE;

#ifdef POOL_DIAGNOSTIC
static __inline void
pr_log(struct pool *pp, void *v, int action, const char *file, long line)
{
	int n = pp->pr_curlogentry;
	struct pool_log *pl;

	if ((pp->pr_roflags & PR_LOGGING) == 0)
		return;

	/*
	 * Fill in the current entry. Wrap around and overwrite
	 * the oldest entry if necessary.
	 */
	pl = &pp->pr_log[n];
	pl->pl_file = file;
	pl->pl_line = line;
	pl->pl_action = action;
	pl->pl_addr = v;
	if (++n >= pp->pr_logsize)
		n = 0;
	pp->pr_curlogentry = n;
}

static void
pr_printlog(struct pool *pp, struct pool_item *pi,
    void (*pr)(const char *, ...))
{
	int i = pp->pr_logsize;
	int n = pp->pr_curlogentry;

	if ((pp->pr_roflags & PR_LOGGING) == 0)
		return;

	/*
	 * Print all entries in this pool's log.
	 */
	while (i-- > 0) {
		struct pool_log *pl = &pp->pr_log[n];
		if (pl->pl_action != 0) {
			if (pi == NULL || pi == pl->pl_addr) {
				(*pr)("\tlog entry %d:\n", i);
				(*pr)("\t\taction = %s, addr = %p\n",
				    pl->pl_action == PRLOG_GET ? "get" : "put",
				    pl->pl_addr);
				(*pr)("\t\tfile: %s at line %lu\n",
				    pl->pl_file, pl->pl_line);
			}
		}
		if (++n >= pp->pr_logsize)
			n = 0;
	}
}

static __inline void
pr_enter(struct pool *pp, const char *file, long line)
{

	if (__predict_false(pp->pr_entered_file != NULL)) {
		printf("pool %s: reentrancy at file %s line %ld\n",
		    pp->pr_wchan, file, line);
		printf("         previous entry at file %s line %ld\n",
		    pp->pr_entered_file, pp->pr_entered_line);
		panic("pr_enter");
	}

	pp->pr_entered_file = file;
	pp->pr_entered_line = line;
}

static __inline void
pr_leave(struct pool *pp)
{

	if (__predict_false(pp->pr_entered_file == NULL)) {
		printf("pool %s not entered?\n", pp->pr_wchan);
		panic("pr_leave");
	}

	pp->pr_entered_file = NULL;
	pp->pr_entered_line = 0;
}

static __inline void
pr_enter_check(struct pool *pp, void (*pr)(const char *, ...))
{

	if (pp->pr_entered_file != NULL)
		(*pr)("\n\tcurrently entered from file %s line %ld\n",
		    pp->pr_entered_file, pp->pr_entered_line);
}
#else
#define	pr_log(pp, v, action, file, line)
#define	pr_printlog(pp, pi, pr)
#define	pr_enter(pp, file, line)
#define	pr_leave(pp)
#define	pr_enter_check(pp, pr)
#endif /* POOL_DIAGNOSTIC */

/*
 * Return the pool page header based on page address.
 */
static __inline struct pool_item_header *
pr_find_pagehead(struct pool *pp, caddr_t page)
{
	struct pool_item_header *ph;

	if ((pp->pr_roflags & PR_PHINPAGE) != 0)
		return ((struct pool_item_header *)(page + pp->pr_phoffset));

	for (ph = LIST_FIRST(&pp->pr_hashtab[PR_HASH_INDEX(pp, page)]);
	     ph != NULL;
	     ph = LIST_NEXT(ph, ph_hashlist)) {
		if (ph->ph_page == page)
			return (ph);
	}
	return (NULL);
}

/*
 * Remove a page from the pool.
 */
static __inline void
pr_rmpage(struct pool *pp, struct pool_item_header *ph,
     struct pool_pagelist *pq)
{
	int s;

	/*
	 * If the page was idle, decrement the idle page count.
	 */
	if (ph->ph_nmissing == 0) {
#ifdef DIAGNOSTIC
		if (pp->pr_nidle == 0)
			panic("pr_rmpage: nidle inconsistent");
		if (pp->pr_nitems < pp->pr_itemsperpage)
			panic("pr_rmpage: nitems inconsistent");
#endif
		pp->pr_nidle--;
	}

	pp->pr_nitems -= pp->pr_itemsperpage;

	/*
	 * Unlink a page from the pool and release it (or queue it for release).
	 */
	TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist);
	if (pq) {
		TAILQ_INSERT_HEAD(pq, ph, ph_pagelist);
	} else {
		(*pp->pr_free)(ph->ph_page, pp->pr_pagesz, pp->pr_mtype);
		if ((pp->pr_roflags & PR_PHINPAGE) == 0) {
			LIST_REMOVE(ph, ph_hashlist);
			s = splhigh();
			pool_put(&phpool, ph);
			splx(s);
		}
	}
	pp->pr_npages--;
	pp->pr_npagefree++;

	if (pp->pr_curpage == ph) {
		/*
		 * Find a new non-empty page header, if any.
		 * Start search from the page head, to increase the
		 * chance for "high water" pages to be freed.
		 */
		TAILQ_FOREACH(ph, &pp->pr_pagelist, ph_pagelist)
			if (TAILQ_FIRST(&ph->ph_itemlist) != NULL)
				break;

		pp->pr_curpage = ph;
	}
}

/*
 * Initialize the given pool resource structure.
 *
 * We export this routine to allow other kernel parts to declare
 * static pools that must be initialized before malloc() is available.
 */
void
pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags,
    const char *wchan, size_t pagesz,
    void *(*alloc)(unsigned long, int, int),
    void (*release)(void *, unsigned long, int),
    int mtype)
{
	int off, slack, i;

#ifdef POOL_DIAGNOSTIC
	/*
	 * Always log if POOL_DIAGNOSTIC is defined.
	 */
	if (pool_logsize != 0)
		flags |= PR_LOGGING;
#endif

	/*
	 * Check arguments and construct default values.
	 */
	if (!powerof2(pagesz))
		panic("pool_init: page size invalid (%lx)\n", (u_long)pagesz);

	if (alloc == NULL && release == NULL) {
#ifdef POOL_SUBPAGE
		alloc = pool_subpage_alloc;
		release = pool_subpage_free;
		pagesz = POOL_SUBPAGE;
#else
		alloc = pool_page_alloc;
		release = pool_page_free;
		pagesz = PAGE_SIZE;	/* Rounds to PAGE_SIZE anyhow. */
#endif
	} else if ((alloc != NULL && release != NULL) == 0) {
		/* If you specify one, you must specify both. */
		panic("pool_init: must specify alloc and release together");
	}
#ifdef POOL_SUBPAGE
	else if (alloc == pool_page_alloc_nointr &&
	    release == pool_page_free_nointr)
		pagesz = POOL_SUBPAGE;
#endif

	if (pagesz == 0)
		pagesz = PAGE_SIZE;

	if (align == 0)
		align = ALIGN(1);

	if (size < sizeof(struct pool_item))
		size = sizeof(struct pool_item);

	size = ALIGN(size);
	if (size > pagesz)
		panic("pool_init: pool item size (%lu) too large",
		      (u_long)size);

	/*
	 * Initialize the pool structure.
	 */
	TAILQ_INIT(&pp->pr_pagelist);
	TAILQ_INIT(&pp->pr_cachelist);
	pp->pr_curpage = NULL;
	pp->pr_npages = 0;
	pp->pr_minitems = 0;
	pp->pr_minpages = 0;
	pp->pr_maxpages = UINT_MAX;
	pp->pr_roflags = flags;
	pp->pr_flags = 0;
	pp->pr_size = size;
	pp->pr_align = align;
	pp->pr_wchan = wchan;
	pp->pr_mtype = mtype;
	pp->pr_alloc = alloc;
	pp->pr_free = release;
	pp->pr_pagesz = pagesz;
	pp->pr_pagemask = ~(pagesz - 1);
	pp->pr_pageshift = ffs(pagesz) - 1;
	pp->pr_nitems = 0;
	pp->pr_nout = 0;
	pp->pr_hardlimit = UINT_MAX;
	pp->pr_hardlimit_warning = NULL;
	pp->pr_hardlimit_ratecap.tv_sec = 0;
	pp->pr_hardlimit_ratecap.tv_usec = 0;
	pp->pr_hardlimit_warning_last.tv_sec = 0;
	pp->pr_hardlimit_warning_last.tv_usec = 0;

	/*
	 * Decide whether to put the page header off page to avoid
	 * wasting too large a part of the page. Off-page page headers
	 * go on a hash table, so we can match a returned item
	 * with its header based on the page address.
	 * We use 1/16 of the page size as the threshold (XXX: tune)
	 */
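	/*
	 * For example, with a 4096-byte page the threshold is 256 bytes:
	 * a 128-byte item keeps its header in-page (PR_PHINPAGE), while
	 * a 512-byte item gets an off-page header allocated from phpool.
	 */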
	if (pp->pr_size < pagesz/16) {
		/* Use the end of the page for the page header */
		pp->pr_roflags |= PR_PHINPAGE;
		pp->pr_phoffset = off =
			pagesz - ALIGN(sizeof(struct pool_item_header));
	} else {
		/* The page header will be taken from our page header pool */
		pp->pr_phoffset = 0;
		off = pagesz;
		for (i = 0; i < PR_HASHTABSIZE; i++) {
			LIST_INIT(&pp->pr_hashtab[i]);
		}
	}

	/*
	 * Alignment is to take place at `ioff' within the item. This means
	 * we must reserve up to `align - 1' bytes on the page to allow
	 * appropriate positioning of each item.
	 *
	 * Silently enforce `0 <= ioff < align'.
	 */
	pp->pr_itemoffset = ioff = ioff % align;
	pp->pr_itemsperpage = (off - ((align - ioff) % align)) / pp->pr_size;
	KASSERT(pp->pr_itemsperpage != 0);

	/*
	 * Use the slack between the chunks and the page header
	 * for "cache coloring".
	 */
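	/*
	 * Example: 4096-byte pages and 400-byte items with off-page
	 * headers (off = 4096) give 10 items per page and slack = 96;
	 * with 8-byte alignment successive pages start their items at
	 * color offsets 0, 8, 16, ... up to pr_maxcolor = 96.
	 */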
	slack = off - pp->pr_itemsperpage * pp->pr_size;
	pp->pr_maxcolor = (slack / align) * align;
	pp->pr_curcolor = 0;

	pp->pr_nget = 0;
	pp->pr_nfail = 0;
	pp->pr_nput = 0;
	pp->pr_npagealloc = 0;
	pp->pr_npagefree = 0;
	pp->pr_hiwat = 0;
	pp->pr_nidle = 0;

#ifdef POOL_DIAGNOSTIC
	if (flags & PR_LOGGING) {
		if (kmem_map == NULL ||
		    (pp->pr_log = malloc(pool_logsize * sizeof(struct pool_log),
		     M_TEMP, M_NOWAIT)) == NULL)
			pp->pr_roflags &= ~PR_LOGGING;
		pp->pr_curlogentry = 0;
		pp->pr_logsize = pool_logsize;
	}
#endif

	pp->pr_entered_file = NULL;
	pp->pr_entered_line = 0;

	simple_lock_init(&pp->pr_slock);

	/*
	 * Initialize private page header pool and cache magazine pool if we
	 * haven't done so yet.
	 * XXX LOCKING.
	 */
	if (phpool.pr_size == 0) {
#ifdef POOL_SUBPAGE
		pool_init(&phpool, sizeof(struct pool_item_header), 0, 0, 0,
		    "phpool", PAGE_SIZE, pool_page_alloc, pool_page_free, 0);
		pool_init(&psppool, POOL_SUBPAGE, POOL_SUBPAGE, 0,
		    PR_RECURSIVE, "psppool", PAGE_SIZE,
		    pool_page_alloc, pool_page_free, 0);
#else
		pool_init(&phpool, sizeof(struct pool_item_header), 0, 0,
		    0, "phpool", 0, 0, 0, 0);
#endif
		pool_init(&pcgpool, sizeof(struct pool_cache_group), 0, 0,
		    0, "pcgpool", 0, 0, 0, 0);
	}

	/* Insert into the list of all pools. */
	simple_lock(&pool_head_slock);
	TAILQ_INSERT_TAIL(&pool_head, pp, pr_poollist);
	simple_unlock(&pool_head_slock);
}

/*
 * De-commission a pool resource.
 */
void
pool_destroy(struct pool *pp)
{
	struct pool_item_header *ph;
	struct pool_cache *pc;

	/* Destroy all caches for this pool. */
	while ((pc = TAILQ_FIRST(&pp->pr_cachelist)) != NULL)
		pool_cache_destroy(pc);

#ifdef DIAGNOSTIC
	if (pp->pr_nout != 0) {
		pr_printlog(pp, NULL, printf);
		panic("pool_destroy: pool busy: still out: %u\n",
		    pp->pr_nout);
	}
#endif

	/* Remove all pages */
	if ((pp->pr_roflags & PR_STATIC) == 0)
		while ((ph = TAILQ_FIRST(&pp->pr_pagelist)) != NULL)
			pr_rmpage(pp, ph, NULL);

	/* Remove from global pool list */
	simple_lock(&pool_head_slock);
	TAILQ_REMOVE(&pool_head, pp, pr_poollist);
	if (drainpp == pp) {
		drainpp = NULL;
	}
	simple_unlock(&pool_head_slock);

#ifdef POOL_DIAGNOSTIC
	if ((pp->pr_roflags & PR_LOGGING) != 0)
		free(pp->pr_log, M_TEMP);
#endif

	if (pp->pr_roflags & PR_FREEHEADER)
		free(pp, M_POOL);
}

static __inline struct pool_item_header *
pool_alloc_item_header(struct pool *pp, caddr_t storage, int flags)
{
	struct pool_item_header *ph;
	int s;

	LOCK_ASSERT(simple_lock_held(&pp->pr_slock) == 0);

	if ((pp->pr_roflags & PR_PHINPAGE) != 0)
		ph = (struct pool_item_header *) (storage + pp->pr_phoffset);
	else {
		s = splhigh();
		ph = pool_get(&phpool, flags);
		splx(s);
	}

	return (ph);
}

/*
 * Grab an item from the pool; must be called at appropriate spl level
 */
void *
#ifdef POOL_DIAGNOSTIC
_pool_get(struct pool *pp, int flags, const char *file, long line)
#else
pool_get(struct pool *pp, int flags)
#endif
{
	struct pool_item *pi;
	struct pool_item_header *ph;
	void *v;

#ifdef DIAGNOSTIC
	if (__predict_false((pp->pr_roflags & PR_STATIC) &&
			    (flags & PR_MALLOCOK))) {
		pr_printlog(pp, NULL, printf);
		panic("pool_get: static");
	}

	if (__predict_false(curproc == NULL && doing_shutdown == 0 &&
			    (flags & PR_WAITOK) != 0))
		panic("pool_get: must have NOWAIT");

#ifdef LOCKDEBUG
	if (flags & PR_WAITOK)
		simple_lock_only_held(NULL, "pool_get(PR_WAITOK)");
#endif
#endif /* DIAGNOSTIC */

	simple_lock(&pp->pr_slock);
	pr_enter(pp, file, line);

 startover:
	/*
	 * Check to see if we've reached the hard limit.  If we have,
	 * and we can wait, then wait until an item has been returned to
	 * the pool.
	 */
#ifdef DIAGNOSTIC
	if (__predict_false(pp->pr_nout > pp->pr_hardlimit)) {
		pr_leave(pp);
		simple_unlock(&pp->pr_slock);
		panic("pool_get: %s: crossed hard limit", pp->pr_wchan);
	}
#endif
	if (__predict_false(pp->pr_nout == pp->pr_hardlimit)) {
		if ((flags & PR_WAITOK) && !(flags & PR_LIMITFAIL)) {
			/*
			 * XXX: A warning isn't logged in this case.  Should
			 * it be?
			 */
			pp->pr_flags |= PR_WANTED;
			pr_leave(pp);
			ltsleep(pp, PSWP, pp->pr_wchan, 0, &pp->pr_slock);
			pr_enter(pp, file, line);
			goto startover;
		}

		/*
		 * Log a message that the hard limit has been hit.
		 */
		if (pp->pr_hardlimit_warning != NULL &&
		    ratecheck(&pp->pr_hardlimit_warning_last,
			      &pp->pr_hardlimit_ratecap))
			log(LOG_ERR, "%s\n", pp->pr_hardlimit_warning);

		if (flags & PR_URGENT)
			panic("pool_get: urgent");

		pp->pr_nfail++;

		pr_leave(pp);
		simple_unlock(&pp->pr_slock);
		return (NULL);
	}

	/*
	 * The convention we use is that if `curpage' is not NULL, then
	 * it points at a non-empty bucket. In particular, `curpage'
	 * never points at a page header which has PR_PHINPAGE set and
	 * has no items in its bucket.
	 */
	if ((ph = pp->pr_curpage) == NULL) {
#ifdef DIAGNOSTIC
		if (pp->pr_nitems != 0) {
			simple_unlock(&pp->pr_slock);
			printf("pool_get: %s: curpage NULL, nitems %u\n",
			    pp->pr_wchan, pp->pr_nitems);
			panic("pool_get: nitems inconsistent\n");
		}
#endif

		/*
		 * Call the back-end page allocator for more memory.
		 * Release the pool lock, as the back-end page allocator
		 * may block.
		 */
		pr_leave(pp);
		simple_unlock(&pp->pr_slock);
		v = (*pp->pr_alloc)(pp->pr_pagesz, flags, pp->pr_mtype);
		if (__predict_true(v != NULL))
			ph = pool_alloc_item_header(pp, v, flags);
		simple_lock(&pp->pr_slock);
		pr_enter(pp, file, line);

		if (__predict_false(v == NULL || ph == NULL)) {
			if (v != NULL)
				(*pp->pr_free)(v, pp->pr_pagesz, pp->pr_mtype);

			/*
			 * We were unable to allocate a page or item
			 * header, but we released the lock during
			 * allocation, so perhaps items were freed
			 * back to the pool.  Check for this case.
			 */
			if (pp->pr_curpage != NULL)
				goto startover;

			if (flags & PR_URGENT)
				panic("pool_get: urgent");

			if ((flags & PR_WAITOK) == 0) {
				pp->pr_nfail++;
				pr_leave(pp);
				simple_unlock(&pp->pr_slock);
				return (NULL);
			}

			/*
			 * Wait for items to be returned to this pool.
			 *
			 * XXX: we actually want to wait just until
			 * the page allocator has memory again. Depending
			 * on this pool's usage, we might get stuck here
			 * for a long time.
			 *
			 * XXX: maybe we should wake up once a second and
			 * try again?
			 */
			pp->pr_flags |= PR_WANTED;
			pr_leave(pp);
			ltsleep(pp, PSWP, pp->pr_wchan, 0, &pp->pr_slock);
			pr_enter(pp, file, line);
			goto startover;
		}

		/* We have more memory; add it to the pool */
		pool_prime_page(pp, v, ph);
		pp->pr_npagealloc++;

		/* Start the allocation process over. */
		goto startover;
	}

	if (__predict_false((v = pi = TAILQ_FIRST(&ph->ph_itemlist)) == NULL)) {
		pr_leave(pp);
		simple_unlock(&pp->pr_slock);
		panic("pool_get: %s: page empty", pp->pr_wchan);
	}
#ifdef DIAGNOSTIC
	if (__predict_false(pp->pr_nitems == 0)) {
		pr_leave(pp);
		simple_unlock(&pp->pr_slock);
		printf("pool_get: %s: items on itemlist, nitems %u\n",
		    pp->pr_wchan, pp->pr_nitems);
		panic("pool_get: nitems inconsistent\n");
	}
#endif

#ifdef POOL_DIAGNOSTIC
	pr_log(pp, v, PRLOG_GET, file, line);
#endif

#ifdef DIAGNOSTIC
	if (__predict_false(pi->pi_magic != PI_MAGIC)) {
		pr_printlog(pp, pi, printf);
		panic("pool_get(%s): free list modified: magic=%x; page %p;"
		       " item addr %p\n",
			pp->pr_wchan, pi->pi_magic, ph->ph_page, pi);
	}
#endif

	/*
	 * Remove from item list.
	 */
	TAILQ_REMOVE(&ph->ph_itemlist, pi, pi_list);
	pp->pr_nitems--;
	pp->pr_nout++;
	if (ph->ph_nmissing == 0) {
#ifdef DIAGNOSTIC
		if (__predict_false(pp->pr_nidle == 0))
			panic("pool_get: nidle inconsistent");
#endif
		pp->pr_nidle--;
	}
	ph->ph_nmissing++;
	if (TAILQ_FIRST(&ph->ph_itemlist) == NULL) {
#ifdef DIAGNOSTIC
		if (__predict_false(ph->ph_nmissing != pp->pr_itemsperpage)) {
			pr_leave(pp);
			simple_unlock(&pp->pr_slock);
			panic("pool_get: %s: nmissing inconsistent",
			    pp->pr_wchan);
		}
#endif
		/*
		 * Find a new non-empty page header, if any.
		 * Start search from the page head, to increase
		 * the chance for "high water" pages to be freed.
		 *
		 * Migrate empty pages to the end of the list.  This
		 * will speed the update of curpage as pages become
		 * idle.  Empty pages intermingled with idle pages
		 * is no big deal.  As soon as a page becomes un-empty,
		 * it will move back to the head of the list.
		 */
		TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist);
		TAILQ_INSERT_TAIL(&pp->pr_pagelist, ph, ph_pagelist);
		TAILQ_FOREACH(ph, &pp->pr_pagelist, ph_pagelist)
			if (TAILQ_FIRST(&ph->ph_itemlist) != NULL)
				break;

		pp->pr_curpage = ph;
	}

	pp->pr_nget++;

	/*
	 * If we have a low water mark and we are now below that low
	 * water mark, add more items to the pool.
	 */
	if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
		/*
		 * XXX: Should we log a warning?  Should we set up a timeout
		 * to try again in a second or so?  The latter could break
		 * a caller's assumptions about interrupt protection, etc.
		 */
	}

	pr_leave(pp);
	simple_unlock(&pp->pr_slock);
	return (v);
}

/*
 * Internal version of pool_put().  Pool is already locked/entered.
 */
static void
pool_do_put(struct pool *pp, void *v)
{
	struct pool_item *pi = v;
	struct pool_item_header *ph;
	caddr_t page;
	int s;

	LOCK_ASSERT(simple_lock_held(&pp->pr_slock));

	page = (caddr_t)((u_long)v & pp->pr_pagemask);

#ifdef DIAGNOSTIC
	if (__predict_false(pp->pr_nout == 0)) {
		printf("pool %s: putting with none out\n",
		    pp->pr_wchan);
		panic("pool_put");
	}
#endif

	if (__predict_false((ph = pr_find_pagehead(pp, page)) == NULL)) {
		pr_printlog(pp, NULL, printf);
		panic("pool_put: %s: page header missing", pp->pr_wchan);
	}

#ifdef LOCKDEBUG
	/*
	 * Check if we're freeing a locked simple lock.
	 */
	simple_lock_freecheck((caddr_t)pi, ((caddr_t)pi) + pp->pr_size);
#endif

	/*
	 * Return to item list.
	 */
#ifdef DIAGNOSTIC
	pi->pi_magic = PI_MAGIC;
#endif
#ifdef DEBUG
	{
		int i, *ip = v;

		for (i = 0; i < pp->pr_size / sizeof(int); i++) {
			*ip++ = PI_MAGIC;
		}
	}
#endif

	TAILQ_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
	ph->ph_nmissing--;
	pp->pr_nput++;
	pp->pr_nitems++;
	pp->pr_nout--;

	/* Cancel "pool empty" condition if it exists */
	if (pp->pr_curpage == NULL)
		pp->pr_curpage = ph;

	if (pp->pr_flags & PR_WANTED) {
		pp->pr_flags &= ~PR_WANTED;
		if (ph->ph_nmissing == 0)
			pp->pr_nidle++;
		wakeup((caddr_t)pp);
		return;
	}

	/*
	 * If this page is now complete, do one of two things:
	 *
	 *	(1) If we have more pages than the page high water
	 *	    mark, free the page back to the system.
	 *
	 *	(2) Move it to the end of the page list, so that
	 *	    we minimize our chances of fragmenting the
	 *	    pool.  Idle pages migrate to the end (along with
	 *	    completely empty pages, so that we find un-empty
	 *	    pages more quickly when we update curpage) of the
	 *	    list so they can be more easily swept up by
	 *	    the pagedaemon when pages are scarce.
	 */
	if (ph->ph_nmissing == 0) {
		pp->pr_nidle++;
		if (pp->pr_npages > pp->pr_maxpages) {
			pr_rmpage(pp, ph, NULL);
		} else {
			TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist);
			TAILQ_INSERT_TAIL(&pp->pr_pagelist, ph, ph_pagelist);

			/*
			 * Update the timestamp on the page.  A page must
			 * be idle for some period of time before it can
			 * be reclaimed by the pagedaemon.  This minimizes
			 * ping-pong'ing for memory.
			 */
			s = splclock();
			ph->ph_time = mono_time;
			splx(s);

			/*
			 * Update the current page pointer.  Just look for
			 * the first page with any free items.
			 *
			 * XXX: Maybe we want an option to look for the
			 * page with the fewest available items, to minimize
			 * fragmentation?
			 */
			TAILQ_FOREACH(ph, &pp->pr_pagelist, ph_pagelist)
				if (TAILQ_FIRST(&ph->ph_itemlist) != NULL)
					break;

			pp->pr_curpage = ph;
		}
	}
	/*
	 * If the page has just become un-empty, move it to the head of
	 * the list, and make it the current page.  The next allocation
	 * will get the item from this page, instead of further fragmenting
	 * the pool.
	 */
	else if (ph->ph_nmissing == (pp->pr_itemsperpage - 1)) {
		TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist);
		TAILQ_INSERT_HEAD(&pp->pr_pagelist, ph, ph_pagelist);
		pp->pr_curpage = ph;
	}
}

/*
 * Return resource to the pool; must be called at appropriate spl level
 */
#ifdef POOL_DIAGNOSTIC
void
_pool_put(struct pool *pp, void *v, const char *file, long line)
{

	simple_lock(&pp->pr_slock);
	pr_enter(pp, file, line);

	pr_log(pp, v, PRLOG_PUT, file, line);

	pool_do_put(pp, v);

	pr_leave(pp);
	simple_unlock(&pp->pr_slock);
}
#undef pool_put
#endif /* POOL_DIAGNOSTIC */

void
pool_put(struct pool *pp, void *v)
{

	simple_lock(&pp->pr_slock);

	pool_do_put(pp, v);

	simple_unlock(&pp->pr_slock);
}

#ifdef POOL_DIAGNOSTIC
#define		pool_put(h, v)	_pool_put((h), (v), __FILE__, __LINE__)
#endif

/*
 * Add N items to the pool.
 */
int
pool_prime(struct pool *pp, int n)
{
	struct pool_item_header *ph;
	caddr_t cp;
	int newpages, error = 0;

	simple_lock(&pp->pr_slock);

	newpages = roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;

	while (newpages-- > 0) {
		simple_unlock(&pp->pr_slock);
		cp = (*pp->pr_alloc)(pp->pr_pagesz, PR_NOWAIT, pp->pr_mtype);
		if (__predict_true(cp != NULL))
			ph = pool_alloc_item_header(pp, cp, PR_NOWAIT);
		simple_lock(&pp->pr_slock);

		if (__predict_false(cp == NULL || ph == NULL)) {
			error = ENOMEM;
			if (cp != NULL)
				(*pp->pr_free)(cp, pp->pr_pagesz, pp->pr_mtype);
			break;
		}

		pool_prime_page(pp, cp, ph);
		pp->pr_npagealloc++;
		pp->pr_minpages++;
	}

	if (pp->pr_minpages >= pp->pr_maxpages)
		pp->pr_maxpages = pp->pr_minpages + 1;	/* XXX */

	simple_unlock(&pp->pr_slock);
	return (error);
}

/*
 * Add a page worth of items to the pool.
 *
 * Note, we must be called with the pool descriptor LOCKED.
 */
static void
pool_prime_page(struct pool *pp, caddr_t storage, struct pool_item_header *ph)
{
	struct pool_item *pi;
	caddr_t cp = storage;
	unsigned int align = pp->pr_align;
	unsigned int ioff = pp->pr_itemoffset;
	int n;

	if (((u_long)cp & (pp->pr_pagesz - 1)) != 0)
		panic("pool_prime_page: %s: unaligned page", pp->pr_wchan);

	if ((pp->pr_roflags & PR_PHINPAGE) == 0)
		LIST_INSERT_HEAD(&pp->pr_hashtab[PR_HASH_INDEX(pp, cp)],
		    ph, ph_hashlist);

	/*
	 * Insert page header.
	 */
	TAILQ_INSERT_HEAD(&pp->pr_pagelist, ph, ph_pagelist);
	TAILQ_INIT(&ph->ph_itemlist);
	ph->ph_page = storage;
	ph->ph_nmissing = 0;
	memset(&ph->ph_time, 0, sizeof(ph->ph_time));

	pp->pr_nidle++;

	/*
	 * Color this page.
	 */
	cp = (caddr_t)(cp + pp->pr_curcolor);
	if ((pp->pr_curcolor += align) > pp->pr_maxcolor)
		pp->pr_curcolor = 0;

	/*
	 * Adjust storage to apply alignment to `pr_itemoffset' in each item.
	 */
	if (ioff != 0)
		cp = (caddr_t)(cp + (align - ioff));

	/*
	 * Insert remaining chunks on the bucket list.
	 */
	n = pp->pr_itemsperpage;
	pp->pr_nitems += n;

	while (n--) {
		pi = (struct pool_item *)cp;

		/* Insert on page list */
		TAILQ_INSERT_TAIL(&ph->ph_itemlist, pi, pi_list);
#ifdef DIAGNOSTIC
		pi->pi_magic = PI_MAGIC;
#endif
		cp = (caddr_t)(cp + pp->pr_size);
	}

	/*
	 * If the pool was depleted, point at the new page.
	 */
	if (pp->pr_curpage == NULL)
		pp->pr_curpage = ph;

	if (++pp->pr_npages > pp->pr_hiwat)
		pp->pr_hiwat = pp->pr_npages;
}

/*
 * Used by pool_get() when nitems drops below the low water mark.  This
 * is used to catch up nitems with the low water mark.
 *
 * Note 1, we never wait for memory here, we let the caller decide what to do.
 *
 * Note 2, this doesn't work with static pools.
 *
 * Note 3, we must be called with the pool already locked, and we return
 * with it locked.
 */
static int
pool_catchup(struct pool *pp)
{
	struct pool_item_header *ph;
	caddr_t cp;
	int error = 0;

	if (pp->pr_roflags & PR_STATIC) {
		/*
		 * We dropped below the low water mark, and this is not a
		 * good thing.  Log a warning.
		 *
		 * XXX: rate-limit this?
		 */
		printf("WARNING: static pool `%s' dropped below low water "
		    "mark\n", pp->pr_wchan);
		return (0);
	}

	while (POOL_NEEDS_CATCHUP(pp)) {
		/*
		 * Call the page back-end allocator for more memory.
		 *
		 * XXX: We never wait, so should we bother unlocking
		 * the pool descriptor?
		 */
		simple_unlock(&pp->pr_slock);
		cp = (*pp->pr_alloc)(pp->pr_pagesz, PR_NOWAIT, pp->pr_mtype);
		if (__predict_true(cp != NULL))
			ph = pool_alloc_item_header(pp, cp, PR_NOWAIT);
		simple_lock(&pp->pr_slock);
		if (__predict_false(cp == NULL || ph == NULL)) {
			if (cp != NULL)
				(*pp->pr_free)(cp, pp->pr_pagesz, pp->pr_mtype);
			error = ENOMEM;
			break;
		}
		pool_prime_page(pp, cp, ph);
		pp->pr_npagealloc++;
	}

	return (error);
}

void
pool_setlowat(struct pool *pp, int n)
{
	int error;

	simple_lock(&pp->pr_slock);

	pp->pr_minitems = n;
	pp->pr_minpages = (n == 0)
		? 0
		: roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;

	/* Make sure we're caught up with the newly-set low water mark. */
	if (POOL_NEEDS_CATCHUP(pp) && (error = pool_catchup(pp)) != 0) {
		/*
		 * XXX: Should we log a warning?  Should we set up a timeout
		 * to try again in a second or so?  The latter could break
		 * a caller's assumptions about interrupt protection, etc.
		 */
	}

	simple_unlock(&pp->pr_slock);
}

void
pool_sethiwat(struct pool *pp, int n)
{

	simple_lock(&pp->pr_slock);

	pp->pr_maxpages = (n == 0)
		? 0
		: roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;

	simple_unlock(&pp->pr_slock);
}

void
pool_sethardlimit(struct pool *pp, int n, const char *warnmess, int ratecap)
{

	simple_lock(&pp->pr_slock);

	pp->pr_hardlimit = n;
	pp->pr_hardlimit_warning = warnmess;
	pp->pr_hardlimit_ratecap.tv_sec = ratecap;
	pp->pr_hardlimit_warning_last.tv_sec = 0;
	pp->pr_hardlimit_warning_last.tv_usec = 0;

	/*
	 * In-line version of pool_sethiwat(), because we don't want to
	 * release the lock.
	 */
	pp->pr_maxpages = (n == 0)
		? 0
		: roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;

	simple_unlock(&pp->pr_slock);
}
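
/*
 * Example (a hypothetical sketch, not part of this file): capping the
 * pool from the earlier sketch at 1024 outstanding items, logging the
 * given message at most once per 60 seconds while the limit holds:
 *
 *	pool_sethardlimit(&fnz_pool, 1024,
 *	    "WARNING: fnz_pool limit reached", 60);
 */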

/*
 * Default page allocator.
 */
static void *
pool_page_alloc(unsigned long sz, int flags, int mtype)
{
	boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;

	return ((void *)uvm_km_alloc_poolpage(waitok));
}

static void
pool_page_free(void *v, unsigned long sz, int mtype)
{

	uvm_km_free_poolpage((vaddr_t)v);
}

#ifdef POOL_SUBPAGE
/*
 * Sub-page allocator, for machines with large hardware pages.
 */
static void *
pool_subpage_alloc(unsigned long sz, int flags, int mtype)
{

	return pool_get(&psppool, flags);
}

static void
pool_subpage_free(void *v, unsigned long sz, int mtype)
{

	pool_put(&psppool, v);
}
#endif

#ifdef POOL_SUBPAGE
/* We don't provide a real nointr allocator.  Maybe later. */
void *
pool_page_alloc_nointr(unsigned long sz, int flags, int mtype)
{

	return pool_subpage_alloc(sz, flags, mtype);
}

void
pool_page_free_nointr(void *v, unsigned long sz, int mtype)
{

	pool_subpage_free(v, sz, mtype);
}
#else
/*
 * Alternate pool page allocator for pools that know they will
 * never be accessed in interrupt context.
 */
void *
pool_page_alloc_nointr(unsigned long sz, int flags, int mtype)
{
	boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;

	return ((void *)uvm_km_alloc_poolpage1(kernel_map, uvm.kernel_object,
	    waitok));
}

void
pool_page_free_nointr(void *v, unsigned long sz, int mtype)
{

	uvm_km_free_poolpage1(kernel_map, (vaddr_t)v);
}
#endif


/*
 * Release all complete pages that have not been used recently.
 */
void
#ifdef POOL_DIAGNOSTIC
_pool_reclaim(struct pool *pp, const char *file, long line)
#else
pool_reclaim(struct pool *pp)
#endif
{
	struct pool_item_header *ph, *phnext;
	struct pool_cache *pc;
	struct timeval curtime;
	struct pool_pagelist pq;
	int s;

	if (pp->pr_roflags & PR_STATIC)
		return;

	if (simple_lock_try(&pp->pr_slock) == 0)
		return;
	pr_enter(pp, file, line);
	TAILQ_INIT(&pq);

	/*
	 * Reclaim items from the pool's caches.
	 */
	TAILQ_FOREACH(pc, &pp->pr_cachelist, pc_poollist)
		pool_cache_reclaim(pc);

	s = splclock();
	curtime = mono_time;
	splx(s);

	for (ph = TAILQ_FIRST(&pp->pr_pagelist); ph != NULL; ph = phnext) {
		phnext = TAILQ_NEXT(ph, ph_pagelist);

		/* Check our minimum page claim */
		if (pp->pr_npages <= pp->pr_minpages)
			break;

		if (ph->ph_nmissing == 0) {
			struct timeval diff;
			timersub(&curtime, &ph->ph_time, &diff);
			if (diff.tv_sec < pool_inactive_time)
				continue;

			/*
			 * If freeing this page would put us below
			 * the low water mark, stop now.
			 */
			if ((pp->pr_nitems - pp->pr_itemsperpage) <
			    pp->pr_minitems)
				break;

			pr_rmpage(pp, ph, &pq);
		}
	}

	pr_leave(pp);
	simple_unlock(&pp->pr_slock);
	if (TAILQ_EMPTY(&pq)) {
		return;
	}
	while ((ph = TAILQ_FIRST(&pq)) != NULL) {
		TAILQ_REMOVE(&pq, ph, ph_pagelist);
		(*pp->pr_free)(ph->ph_page, pp->pr_pagesz, pp->pr_mtype);
		if (pp->pr_roflags & PR_PHINPAGE) {
			continue;
		}
		LIST_REMOVE(ph, ph_hashlist);
		s = splhigh();
		pool_put(&phpool, ph);
		splx(s);
	}
}


/*
 * Drain pools, one at a time.
 *
 * Note, we must never be called from an interrupt context.
 */
void
pool_drain(void *arg)
{
	struct pool *pp;
	int s;

	pp = NULL;
	s = splvm();
	simple_lock(&pool_head_slock);
	if (drainpp == NULL) {
		drainpp = TAILQ_FIRST(&pool_head);
	}
	if (drainpp) {
		pp = drainpp;
		drainpp = TAILQ_NEXT(pp, pr_poollist);
	}
	simple_unlock(&pool_head_slock);
	pool_reclaim(pp);
	splx(s);
}


/*
 * Diagnostic helpers.
 */
void
pool_print(struct pool *pp, const char *modif)
{
	int s;

	s = splvm();
	if (simple_lock_try(&pp->pr_slock) == 0) {
		printf("pool %s is locked; try again later\n",
		    pp->pr_wchan);
		splx(s);
		return;
	}
	pool_print1(pp, modif, printf);
	simple_unlock(&pp->pr_slock);
	splx(s);
}

void
pool_printit(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
{
	int didlock = 0;

	if (pp == NULL) {
		(*pr)("Must specify a pool to print.\n");
		return;
	}

	/*
	 * Called from DDB; interrupts should be blocked, and all
	 * other processors should be paused.  We can skip locking
	 * the pool in this case.
	 *
	 * We do a simple_lock_try() just to print the lock
	 * status, however.
	 */

	if (simple_lock_try(&pp->pr_slock) == 0)
		(*pr)("WARNING: pool %s is locked\n", pp->pr_wchan);
	else
		didlock = 1;

	pool_print1(pp, modif, pr);

	if (didlock)
		simple_unlock(&pp->pr_slock);
}

static void
pool_print1(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
{
	struct pool_item_header *ph;
	struct pool_cache *pc;
	struct pool_cache_group *pcg;
#ifdef DIAGNOSTIC
	struct pool_item *pi;
#endif
	int i, print_log = 0, print_pagelist = 0, print_cache = 0;
	char c;
	while ((c = *modif++) != '\0') {
		if (c == 'l')
			print_log = 1;
		if (c == 'p')
			print_pagelist = 1;
		if (c == 'c')
			print_cache = 1;
	}

	(*pr)("POOL %s: size %u, align %u, ioff %u, roflags 0x%08x\n",
	    pp->pr_wchan, pp->pr_size, pp->pr_align, pp->pr_itemoffset,
	    pp->pr_roflags);
	(*pr)("\tpagesz %u, mtype %d\n", pp->pr_pagesz, pp->pr_mtype);
	(*pr)("\talloc %p, release %p\n", pp->pr_alloc, pp->pr_free);
	(*pr)("\tminitems %u, minpages %u, maxpages %u, npages %u\n",
	    pp->pr_minitems, pp->pr_minpages, pp->pr_maxpages, pp->pr_npages);
	(*pr)("\titemsperpage %u, nitems %u, nout %u, hardlimit %u\n",
	    pp->pr_itemsperpage, pp->pr_nitems, pp->pr_nout, pp->pr_hardlimit);

	(*pr)("\n\tnget %lu, nfail %lu, nput %lu\n",
	    pp->pr_nget, pp->pr_nfail, pp->pr_nput);
	(*pr)("\tnpagealloc %lu, npagefree %lu, hiwat %u, nidle %lu\n",
	    pp->pr_npagealloc, pp->pr_npagefree, pp->pr_hiwat, pp->pr_nidle);

	if (print_pagelist == 0)
		goto skip_pagelist;

	if ((ph = TAILQ_FIRST(&pp->pr_pagelist)) != NULL)
		(*pr)("\n\tpage list:\n");
	for (; ph != NULL; ph = TAILQ_NEXT(ph, ph_pagelist)) {
		(*pr)("\t\tpage %p, nmissing %d, time %lu,%lu\n",
		    ph->ph_page, ph->ph_nmissing,
		    (u_long)ph->ph_time.tv_sec,
		    (u_long)ph->ph_time.tv_usec);
#ifdef DIAGNOSTIC
		TAILQ_FOREACH(pi, &ph->ph_itemlist, pi_list) {
			if (pi->pi_magic != PI_MAGIC) {
				(*pr)("\t\t\titem %p, magic 0x%x\n",
				    pi, pi->pi_magic);
			}
		}
#endif
	}
	if (pp->pr_curpage == NULL)
		(*pr)("\tno current page\n");
	else
		(*pr)("\tcurpage %p\n", pp->pr_curpage->ph_page);

 skip_pagelist:

	if (print_log == 0)
		goto skip_log;

	(*pr)("\n");
	if ((pp->pr_roflags & PR_LOGGING) == 0)
		(*pr)("\tno log\n");
	else
		pr_printlog(pp, NULL, pr);

 skip_log:

	if (print_cache == 0)
		goto skip_cache;

	TAILQ_FOREACH(pc, &pp->pr_cachelist, pc_poollist) {
		(*pr)("\tcache %p: allocfrom %p freeto %p\n", pc,
		    pc->pc_allocfrom, pc->pc_freeto);
		(*pr)("\t    hits %lu misses %lu ngroups %lu nitems %lu\n",
		    pc->pc_hits, pc->pc_misses, pc->pc_ngroups, pc->pc_nitems);
		TAILQ_FOREACH(pcg, &pc->pc_grouplist, pcg_list) {
			(*pr)("\t\tgroup %p: avail %d\n", pcg, pcg->pcg_avail);
			for (i = 0; i < PCG_NOBJECTS; i++)
				(*pr)("\t\t\t%p\n", pcg->pcg_objects[i]);
		}
	}

 skip_cache:

	pr_enter_check(pp, pr);
}

int
pool_chk(struct pool *pp, const char *label)
{
	struct pool_item_header *ph;
	int r = 0;

	simple_lock(&pp->pr_slock);

	TAILQ_FOREACH(ph, &pp->pr_pagelist, ph_pagelist) {
		struct pool_item *pi;
		int n;
		caddr_t page;

		page = (caddr_t)((u_long)ph & pp->pr_pagemask);
		if (page != ph->ph_page &&
		    (pp->pr_roflags & PR_PHINPAGE) != 0) {
			if (label != NULL)
				printf("%s: ", label);
			printf("pool(%p:%s): page inconsistency: page %p;"
			       " at page head addr %p (p %p)\n", pp,
				pp->pr_wchan, ph->ph_page,
				ph, page);
			r++;
			goto out;
		}

		for (pi = TAILQ_FIRST(&ph->ph_itemlist), n = 0;
		     pi != NULL;
		     pi = TAILQ_NEXT(pi,pi_list), n++) {

#ifdef DIAGNOSTIC
			if (pi->pi_magic != PI_MAGIC) {
				if (label != NULL)
					printf("%s: ", label);
				printf("pool(%s): free list modified: magic=%x;"
				       " page %p; item ordinal %d;"
				       " addr %p (p %p)\n",
					pp->pr_wchan, pi->pi_magic, ph->ph_page,
					n, pi, page);
				panic("pool");
			}
#endif
			page = (caddr_t)((u_long)pi & pp->pr_pagemask);
			if (page == ph->ph_page)
				continue;

			if (label != NULL)
				printf("%s: ", label);
			printf("pool(%p:%s): page inconsistency: page %p;"
			       " item ordinal %d; addr %p (p %p)\n", pp,
				pp->pr_wchan, ph->ph_page,
				n, pi, page);
			r++;
			goto out;
		}
	}
out:
	simple_unlock(&pp->pr_slock);
	return (r);
}

/*
 * pool_cache_init:
 *
 *	Initialize a pool cache.
 *
 *	NOTE: If the pool must be protected from interrupts, we expect
 *	to be called at the appropriate interrupt priority level.
 */
void
pool_cache_init(struct pool_cache *pc, struct pool *pp,
    int (*ctor)(void *, void *, int),
    void (*dtor)(void *, void *),
    void *arg)
{

	TAILQ_INIT(&pc->pc_grouplist);
	simple_lock_init(&pc->pc_slock);

	pc->pc_allocfrom = NULL;
	pc->pc_freeto = NULL;
	pc->pc_pool = pp;

	pc->pc_ctor = ctor;
	pc->pc_dtor = dtor;
	pc->pc_arg  = arg;

	pc->pc_hits   = 0;
	pc->pc_misses = 0;

	pc->pc_ngroups = 0;

	pc->pc_nitems = 0;

	simple_lock(&pp->pr_slock);
	TAILQ_INSERT_TAIL(&pp->pr_cachelist, pc, pc_poollist);
	simple_unlock(&pp->pr_slock);
}

/*
 * pool_cache_destroy:
 *
 *	Destroy a pool cache.
 */
void
pool_cache_destroy(struct pool_cache *pc)
{
	struct pool *pp = pc->pc_pool;

	/* First, invalidate the entire cache. */
	pool_cache_invalidate(pc);

	/* ...and remove it from the pool's cache list. */
	simple_lock(&pp->pr_slock);
	TAILQ_REMOVE(&pp->pr_cachelist, pc, pc_poollist);
	simple_unlock(&pp->pr_slock);
}

static __inline void *
pcg_get(struct pool_cache_group *pcg)
{
	void *object;
	u_int idx;

	KASSERT(pcg->pcg_avail <= PCG_NOBJECTS);
	KASSERT(pcg->pcg_avail != 0);
	idx = --pcg->pcg_avail;

	KASSERT(pcg->pcg_objects[idx] != NULL);
	object = pcg->pcg_objects[idx];
	pcg->pcg_objects[idx] = NULL;

	return (object);
}

static __inline void
pcg_put(struct pool_cache_group *pcg, void *object)
{
	u_int idx;

	KASSERT(pcg->pcg_avail < PCG_NOBJECTS);
	idx = pcg->pcg_avail++;

	KASSERT(pcg->pcg_objects[idx] == NULL);
	pcg->pcg_objects[idx] = object;
}

/*
 * pool_cache_get:
 *
 *	Get an object from a pool cache.
 */
void *
pool_cache_get(struct pool_cache *pc, int flags)
{
	struct pool_cache_group *pcg;
	void *object;

#ifdef LOCKDEBUG
	if (flags & PR_WAITOK)
		simple_lock_only_held(NULL, "pool_cache_get(PR_WAITOK)");
#endif

	simple_lock(&pc->pc_slock);

	if ((pcg = pc->pc_allocfrom) == NULL) {
		TAILQ_FOREACH(pcg, &pc->pc_grouplist, pcg_list) {
			if (pcg->pcg_avail != 0) {
				pc->pc_allocfrom = pcg;
				goto have_group;
			}
		}

		/*
		 * No groups with any available objects.  Allocate
		 * a new object, construct it, and return it to
		 * the caller.  We will allocate a group, if necessary,
		 * when the object is freed back to the cache.
		 */
		pc->pc_misses++;
		simple_unlock(&pc->pc_slock);
		object = pool_get(pc->pc_pool, flags);
		if (object != NULL && pc->pc_ctor != NULL) {
			if ((*pc->pc_ctor)(pc->pc_arg, object, flags) != 0) {
				pool_put(pc->pc_pool, object);
				return (NULL);
			}
		}
		return (object);
	}

 have_group:
	pc->pc_hits++;
	pc->pc_nitems--;
	object = pcg_get(pcg);

	if (pcg->pcg_avail == 0)
		pc->pc_allocfrom = NULL;

	simple_unlock(&pc->pc_slock);

	return (object);
}

/*
 * pool_cache_put:
 *
 *	Put an object back to the pool cache.
 */
void
pool_cache_put(struct pool_cache *pc, void *object)
{
	struct pool_cache_group *pcg;
	int s;

	simple_lock(&pc->pc_slock);

	if ((pcg = pc->pc_freeto) == NULL) {
		TAILQ_FOREACH(pcg, &pc->pc_grouplist, pcg_list) {
			if (pcg->pcg_avail != PCG_NOBJECTS) {
				pc->pc_freeto = pcg;
				goto have_group;
			}
		}

		/*
		 * No empty groups to free the object to.  Attempt to
		 * allocate one.
		 */
		simple_unlock(&pc->pc_slock);
		s = splvm();
		pcg = pool_get(&pcgpool, PR_NOWAIT);
		splx(s);
		if (pcg != NULL) {
			memset(pcg, 0, sizeof(*pcg));
			simple_lock(&pc->pc_slock);
			pc->pc_ngroups++;
			TAILQ_INSERT_TAIL(&pc->pc_grouplist, pcg, pcg_list);
			if (pc->pc_freeto == NULL)
				pc->pc_freeto = pcg;
			goto have_group;
		}

		/*
		 * Unable to allocate a cache group; destruct the object
		 * and free it back to the pool.
		 */
		pool_cache_destruct_object(pc, object);
		return;
	}

 have_group:
	pc->pc_nitems++;
	pcg_put(pcg, object);

	if (pcg->pcg_avail == PCG_NOBJECTS)
		pc->pc_freeto = NULL;

	simple_unlock(&pc->pc_slock);
}

/*
 * pool_cache_destruct_object:
 *
 *	Force destruction of an object and its release back into
 *	the pool.
 */
void
pool_cache_destruct_object(struct pool_cache *pc, void *object)
{

	if (pc->pc_dtor != NULL)
		(*pc->pc_dtor)(pc->pc_arg, object);
	pool_put(pc->pc_pool, object);
}

/*
 * pool_cache_do_invalidate:
 *
 *	This internal function implements pool_cache_invalidate() and
 *	pool_cache_reclaim().
 */
static void
pool_cache_do_invalidate(struct pool_cache *pc, int free_groups,
    void (*putit)(struct pool *, void *))
{
	struct pool_cache_group *pcg, *npcg;
	void *object;
	int s;

	for (pcg = TAILQ_FIRST(&pc->pc_grouplist); pcg != NULL;
	     pcg = npcg) {
		npcg = TAILQ_NEXT(pcg, pcg_list);
		while (pcg->pcg_avail != 0) {
			pc->pc_nitems--;
			object = pcg_get(pcg);
			if (pcg->pcg_avail == 0 && pc->pc_allocfrom == pcg)
				pc->pc_allocfrom = NULL;
			if (pc->pc_dtor != NULL)
				(*pc->pc_dtor)(pc->pc_arg, object);
			(*putit)(pc->pc_pool, object);
		}
		if (free_groups) {
			pc->pc_ngroups--;
			TAILQ_REMOVE(&pc->pc_grouplist, pcg, pcg_list);
			if (pc->pc_freeto == pcg)
				pc->pc_freeto = NULL;
			s = splvm();
			pool_put(&pcgpool, pcg);
			splx(s);
		}
	}
}

/*
 * pool_cache_invalidate:
 *
 *	Invalidate a pool cache (destruct and release all of the
 *	cached objects).
 */
void
pool_cache_invalidate(struct pool_cache *pc)
{

	simple_lock(&pc->pc_slock);
	pool_cache_do_invalidate(pc, 0, pool_put);
	simple_unlock(&pc->pc_slock);
}

/*
 * pool_cache_reclaim:
 *
 *	Reclaim a pool cache for pool_reclaim().
 */
static void
pool_cache_reclaim(struct pool_cache *pc)
{

	simple_lock(&pc->pc_slock);
	pool_cache_do_invalidate(pc, 1, pool_do_put);
	simple_unlock(&pc->pc_slock);
}