xref: /netbsd-src/sys/kern/subr_pool.c (revision 06be8101a16cc95f40783b3cb7afd12112103a9a)
1 /*	$NetBSD: subr_pool.c,v 1.64 2001/11/12 15:25:20 lukem Exp $	*/
2 
3 /*-
4  * Copyright (c) 1997, 1999, 2000 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Paul Kranenburg; by Jason R. Thorpe of the Numerical Aerospace
9  * Simulation Facility, NASA Ames Research Center.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  * 3. All advertising materials mentioning features or use of this software
20  *    must display the following acknowledgement:
21  *	This product includes software developed by the NetBSD
22  *	Foundation, Inc. and its contributors.
23  * 4. Neither the name of The NetBSD Foundation nor the names of its
24  *    contributors may be used to endorse or promote products derived
25  *    from this software without specific prior written permission.
26  *
27  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
28  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37  * POSSIBILITY OF SUCH DAMAGE.
38  */
39 
40 #include <sys/cdefs.h>
41 __KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.64 2001/11/12 15:25:20 lukem Exp $");
42 
43 #include "opt_pool.h"
44 #include "opt_poollog.h"
45 #include "opt_lockdebug.h"
46 
47 #include <sys/param.h>
48 #include <sys/systm.h>
49 #include <sys/proc.h>
50 #include <sys/errno.h>
51 #include <sys/kernel.h>
52 #include <sys/malloc.h>
53 #include <sys/lock.h>
54 #include <sys/pool.h>
55 #include <sys/syslog.h>
56 
57 #include <uvm/uvm.h>
58 
59 /*
60  * Pool resource management utility.
61  *
62  * Memory is allocated in pages which are split into pieces according
63  * to the pool item size. Each page is kept on a list headed by `pr_pagelist'
64  * in the pool structure and the individual pool items are on a linked list
65  * headed by `ph_itemlist' in each page header. The memory for building
66  * the page list is either taken from the allocated pages themselves (for
67  * small pool items) or taken from an internal pool of page headers (`phpool').
68  */
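/*
 * Illustrative usage (a sketch, not part of this file; `struct foo'
 * and the pool name are hypothetical):
 *
 *	static struct pool foo_pool;
 *
 *	pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0,
 *	    "foopl", 0, NULL, NULL, M_TEMP);
 *
 *	item = pool_get(&foo_pool, PR_WAITOK);
 *	...
 *	pool_put(&foo_pool, item);
 *
 * Passing NULL allocator hooks and a page size of 0 selects the
 * default back-end allocator and PAGE_SIZE; see pool_init() below.
 */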
69 
70 /* List of all pools */
71 TAILQ_HEAD(,pool) pool_head = TAILQ_HEAD_INITIALIZER(pool_head);
72 
73 /* Private pool for page header structures */
74 static struct pool phpool;
75 
76 #ifdef POOL_SUBPAGE
77 /* Pool of subpages for use by normal pools. */
78 static struct pool psppool;
79 #endif
80 
81 /* # of seconds to retain page after last use */
82 int pool_inactive_time = 10;
83 
84 /* Next candidate for drainage (see pool_drain()) */
85 static struct pool	*drainpp;
86 
87 /* This spin lock protects both pool_head and drainpp. */
88 struct simplelock pool_head_slock = SIMPLELOCK_INITIALIZER;
89 
90 struct pool_item_header {
91 	/* Page headers */
92 	TAILQ_ENTRY(pool_item_header)
93 				ph_pagelist;	/* pool page list */
94 	TAILQ_HEAD(,pool_item)	ph_itemlist;	/* chunk list for this page */
95 	LIST_ENTRY(pool_item_header)
96 				ph_hashlist;	/* Off-page page headers */
97 	int			ph_nmissing;	/* # of chunks in use */
98 	caddr_t			ph_page;	/* this page's address */
99 	struct timeval		ph_time;	/* last referenced */
100 };
101 TAILQ_HEAD(pool_pagelist,pool_item_header);
102 
103 struct pool_item {
104 #ifdef DIAGNOSTIC
105 	int pi_magic;
106 #endif
107 #define	PI_MAGIC 0xdeadbeef
108 	/* Other entries use only this list entry */
109 	TAILQ_ENTRY(pool_item)	pi_list;
110 };
111 
112 #define	PR_HASH_INDEX(pp,addr) \
113 	(((u_long)(addr) >> (pp)->pr_pageshift) & (PR_HASHTABSIZE - 1))
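/*
 * Example (assuming 4 KB pool pages, i.e. pr_pageshift == 12): the
 * page at 0xdeadb000 lands in bucket (0xdeadb000 >> 12) &
 * (PR_HASHTABSIZE - 1); that is, pages hash by page frame number,
 * masked to the table size.
 */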
114 
115 #define	POOL_NEEDS_CATCHUP(pp)						\
116 	((pp)->pr_nitems < (pp)->pr_minitems)
117 
118 /*
119  * Pool cache management.
120  *
121  * Pool caches provide a way for constructed objects to be cached by the
122  * pool subsystem.  This can lead to performance improvements by avoiding
123  * needless object construction/destruction; it is deferred until absolutely
124  * necessary.
125  *
126  * Caches are grouped into cache groups.  Each cache group references
127  * up to 16 constructed objects.  When a cache allocates an object
128  * from the pool, it calls the object's constructor and places it into
129  * a cache group.  When a cache group frees an object back to the pool,
130  * it first calls the object's destructor.  This allows the object to
131  * persist in constructed form while freed to the cache.
132  *
133  * Multiple caches may exist for each pool.  This allows a single
134  * object type to have multiple constructed forms.  The pool references
135  * each cache, so that when a pool is drained by the pagedaemon, it can
136  * drain each individual cache as well.  Each time a cache is drained,
137  * the most idle cache group is freed to the pool in its entirety.
138  *
139  * Pool caches are laid on top of pools.  By layering them, we can avoid
140  * the complexity of cache management for pools which would not benefit
141  * from it.
142  */
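/*
 * Illustrative usage of a cache layered on the pool above (again a
 * sketch; foo_ctor/foo_dtor are hypothetical):
 *
 *	static struct pool_cache foo_cache;
 *
 *	pool_cache_init(&foo_cache, &foo_pool, foo_ctor, foo_dtor, NULL);
 *
 *	obj = pool_cache_get(&foo_cache, PR_WAITOK);
 *	...
 *	pool_cache_put(&foo_cache, obj);
 *
 * pool_cache_get() hands back a constructed object, recycled from a
 * cache group when possible and built with foo_ctor() otherwise;
 * pool_cache_put() stashes it, still constructed, for the next caller.
 */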
143 
144 /* The cache group pool. */
145 static struct pool pcgpool;
146 
147 /* The pool cache group. */
148 #define	PCG_NOBJECTS		16
149 struct pool_cache_group {
150 	TAILQ_ENTRY(pool_cache_group)
151 		pcg_list;	/* link in the pool cache's group list */
152 	u_int	pcg_avail;	/* # available objects */
153 				/* pointers to the objects */
154 	void	*pcg_objects[PCG_NOBJECTS];
155 };
156 
157 static void	pool_cache_reclaim(struct pool_cache *);
158 
159 static int	pool_catchup(struct pool *);
160 static void	pool_prime_page(struct pool *, caddr_t,
161 		    struct pool_item_header *);
162 static void	*pool_page_alloc(unsigned long, int, int);
163 static void	pool_page_free(void *, unsigned long, int);
164 #ifdef POOL_SUBPAGE
165 static void	*pool_subpage_alloc(unsigned long, int, int);
166 static void	pool_subpage_free(void *, unsigned long, int);
167 #endif
168 
169 static void pool_print1(struct pool *, const char *,
170 	void (*)(const char *, ...));
171 
172 /*
173  * Pool log entry. An array of these is allocated in pool_init().
174  */
175 struct pool_log {
176 	const char	*pl_file;
177 	long		pl_line;
178 	int		pl_action;
179 #define	PRLOG_GET	1
180 #define	PRLOG_PUT	2
181 	void		*pl_addr;
182 };
183 
184 /* Number of entries in pool log buffers */
185 #ifndef POOL_LOGSIZE
186 #define	POOL_LOGSIZE	10
187 #endif
188 
189 int pool_logsize = POOL_LOGSIZE;
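/*
 * The log is a fixed-size ring: with the default POOL_LOGSIZE of 10,
 * the eleventh get/put recorded by pr_log() overwrites the oldest
 * entry (note the wrap of pr_curlogentry below).
 */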
190 
191 #ifdef POOL_DIAGNOSTIC
192 static __inline void
193 pr_log(struct pool *pp, void *v, int action, const char *file, long line)
194 {
195 	int n = pp->pr_curlogentry;
196 	struct pool_log *pl;
197 
198 	if ((pp->pr_roflags & PR_LOGGING) == 0)
199 		return;
200 
201 	/*
202 	 * Fill in the current entry. Wrap around and overwrite
203 	 * the oldest entry if necessary.
204 	 */
205 	pl = &pp->pr_log[n];
206 	pl->pl_file = file;
207 	pl->pl_line = line;
208 	pl->pl_action = action;
209 	pl->pl_addr = v;
210 	if (++n >= pp->pr_logsize)
211 		n = 0;
212 	pp->pr_curlogentry = n;
213 }
214 
215 static void
216 pr_printlog(struct pool *pp, struct pool_item *pi,
217     void (*pr)(const char *, ...))
218 {
219 	int i = pp->pr_logsize;
220 	int n = pp->pr_curlogentry;
221 
222 	if ((pp->pr_roflags & PR_LOGGING) == 0)
223 		return;
224 
225 	/*
226 	 * Print all entries in this pool's log.
227 	 */
228 	while (i-- > 0) {
229 		struct pool_log *pl = &pp->pr_log[n];
230 		if (pl->pl_action != 0) {
231 			if (pi == NULL || pi == pl->pl_addr) {
232 				(*pr)("\tlog entry %d:\n", i);
233 				(*pr)("\t\taction = %s, addr = %p\n",
234 				    pl->pl_action == PRLOG_GET ? "get" : "put",
235 				    pl->pl_addr);
236 				(*pr)("\t\tfile: %s at line %ld\n",
237 				    pl->pl_file, pl->pl_line);
238 			}
239 		}
240 		if (++n >= pp->pr_logsize)
241 			n = 0;
242 	}
243 }
244 
245 static __inline void
246 pr_enter(struct pool *pp, const char *file, long line)
247 {
248 
249 	if (__predict_false(pp->pr_entered_file != NULL)) {
250 		printf("pool %s: reentrancy at file %s line %ld\n",
251 		    pp->pr_wchan, file, line);
252 		printf("         previous entry at file %s line %ld\n",
253 		    pp->pr_entered_file, pp->pr_entered_line);
254 		panic("pr_enter");
255 	}
256 
257 	pp->pr_entered_file = file;
258 	pp->pr_entered_line = line;
259 }
260 
261 static __inline void
262 pr_leave(struct pool *pp)
263 {
264 
265 	if (__predict_false(pp->pr_entered_file == NULL)) {
266 		printf("pool %s not entered?\n", pp->pr_wchan);
267 		panic("pr_leave");
268 	}
269 
270 	pp->pr_entered_file = NULL;
271 	pp->pr_entered_line = 0;
272 }
273 
274 static __inline void
275 pr_enter_check(struct pool *pp, void (*pr)(const char *, ...))
276 {
277 
278 	if (pp->pr_entered_file != NULL)
279 		(*pr)("\n\tcurrently entered from file %s line %ld\n",
280 		    pp->pr_entered_file, pp->pr_entered_line);
281 }
282 #else
283 #define	pr_log(pp, v, action, file, line)
284 #define	pr_printlog(pp, pi, pr)
285 #define	pr_enter(pp, file, line)
286 #define	pr_leave(pp)
287 #define	pr_enter_check(pp, pr)
288 #endif /* POOL_DIAGNOSTIC */
289 
290 /*
291  * Return the pool page header based on page address.
292  */
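/*
 * (Callers derive `page' from an item address by masking with
 * pr_pagemask, as pool_do_put() does: (caddr_t)((u_long)v &
 * pp->pr_pagemask).)
 */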
293 static __inline struct pool_item_header *
294 pr_find_pagehead(struct pool *pp, caddr_t page)
295 {
296 	struct pool_item_header *ph;
297 
298 	if ((pp->pr_roflags & PR_PHINPAGE) != 0)
299 		return ((struct pool_item_header *)(page + pp->pr_phoffset));
300 
301 	for (ph = LIST_FIRST(&pp->pr_hashtab[PR_HASH_INDEX(pp, page)]);
302 	     ph != NULL;
303 	     ph = LIST_NEXT(ph, ph_hashlist)) {
304 		if (ph->ph_page == page)
305 			return (ph);
306 	}
307 	return (NULL);
308 }
309 
310 /*
311  * Remove a page from the pool.
312  */
313 static __inline void
314 pr_rmpage(struct pool *pp, struct pool_item_header *ph,
315      struct pool_pagelist *pq)
316 {
317 	int s;
318 
319 	/*
320 	 * If the page was idle, decrement the idle page count.
321 	 */
322 	if (ph->ph_nmissing == 0) {
323 #ifdef DIAGNOSTIC
324 		if (pp->pr_nidle == 0)
325 			panic("pr_rmpage: nidle inconsistent");
326 		if (pp->pr_nitems < pp->pr_itemsperpage)
327 			panic("pr_rmpage: nitems inconsistent");
328 #endif
329 		pp->pr_nidle--;
330 	}
331 
332 	pp->pr_nitems -= pp->pr_itemsperpage;
333 
334 	/*
335 	 * Unlink a page from the pool and release it (or queue it for release).
336 	 */
337 	TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist);
338 	if (pq) {
339 		TAILQ_INSERT_HEAD(pq, ph, ph_pagelist);
340 	} else {
341 		(*pp->pr_free)(ph->ph_page, pp->pr_pagesz, pp->pr_mtype);
342 		if ((pp->pr_roflags & PR_PHINPAGE) == 0) {
343 			LIST_REMOVE(ph, ph_hashlist);
344 			s = splhigh();
345 			pool_put(&phpool, ph);
346 			splx(s);
347 		}
348 	}
349 	pp->pr_npages--;
350 	pp->pr_npagefree++;
351 
352 	if (pp->pr_curpage == ph) {
353 		/*
354 		 * Find a new non-empty page header, if any.
355 		 * Start search from the page head, to increase the
356 		 * chance for "high water" pages to be freed.
357 		 */
358 		TAILQ_FOREACH(ph, &pp->pr_pagelist, ph_pagelist)
359 			if (TAILQ_FIRST(&ph->ph_itemlist) != NULL)
360 				break;
361 
362 		pp->pr_curpage = ph;
363 	}
364 }
365 
366 /*
367  * Initialize the given pool resource structure.
368  *
369  * We export this routine to allow other kernel parts to declare
370  * static pools that must be initialized before malloc() is available.
371  */
372 void
373 pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags,
374     const char *wchan, size_t pagesz,
375     void *(*alloc)(unsigned long, int, int),
376     void (*release)(void *, unsigned long, int),
377     int mtype)
378 {
379 	int off, slack, i;
380 
381 #ifdef POOL_DIAGNOSTIC
382 	/*
383 	 * Always log if POOL_DIAGNOSTIC is defined.
384 	 */
385 	if (pool_logsize != 0)
386 		flags |= PR_LOGGING;
387 #endif
388 
389 	/*
390 	 * Check arguments and construct default values.
391 	 */
392 	if (!powerof2(pagesz))
393 		panic("pool_init: page size invalid (%lx)\n", (u_long)pagesz);
394 
395 	if (alloc == NULL && release == NULL) {
396 #ifdef POOL_SUBPAGE
397 		alloc = pool_subpage_alloc;
398 		release = pool_subpage_free;
399 		pagesz = POOL_SUBPAGE;
400 #else
401 		alloc = pool_page_alloc;
402 		release = pool_page_free;
403 		pagesz = PAGE_SIZE;	/* Rounds to PAGE_SIZE anyhow. */
404 #endif
405 	} else if ((alloc != NULL && release != NULL) == 0) {
406 		/* If you specify one, you must specify both. */
407 		panic("pool_init: must specify alloc and release together");
408 	}
409 #ifdef POOL_SUBPAGE
410 	else if (alloc == pool_page_alloc_nointr &&
411 	    release == pool_page_free_nointr)
412 		pagesz = POOL_SUBPAGE;
413 #endif
414 
415 	if (pagesz == 0)
416 		pagesz = PAGE_SIZE;
417 
418 	if (align == 0)
419 		align = ALIGN(1);
420 
421 	if (size < sizeof(struct pool_item))
422 		size = sizeof(struct pool_item);
423 
424 	size = ALIGN(size);
425 	if (size > pagesz)
426 		panic("pool_init: pool item size (%lu) too large",
427 		      (u_long)size);
428 
429 	/*
430 	 * Initialize the pool structure.
431 	 */
432 	TAILQ_INIT(&pp->pr_pagelist);
433 	TAILQ_INIT(&pp->pr_cachelist);
434 	pp->pr_curpage = NULL;
435 	pp->pr_npages = 0;
436 	pp->pr_minitems = 0;
437 	pp->pr_minpages = 0;
438 	pp->pr_maxpages = UINT_MAX;
439 	pp->pr_roflags = flags;
440 	pp->pr_flags = 0;
441 	pp->pr_size = size;
442 	pp->pr_align = align;
443 	pp->pr_wchan = wchan;
444 	pp->pr_mtype = mtype;
445 	pp->pr_alloc = alloc;
446 	pp->pr_free = release;
447 	pp->pr_pagesz = pagesz;
448 	pp->pr_pagemask = ~(pagesz - 1);
449 	pp->pr_pageshift = ffs(pagesz) - 1;
450 	pp->pr_nitems = 0;
451 	pp->pr_nout = 0;
452 	pp->pr_hardlimit = UINT_MAX;
453 	pp->pr_hardlimit_warning = NULL;
454 	pp->pr_hardlimit_ratecap.tv_sec = 0;
455 	pp->pr_hardlimit_ratecap.tv_usec = 0;
456 	pp->pr_hardlimit_warning_last.tv_sec = 0;
457 	pp->pr_hardlimit_warning_last.tv_usec = 0;
458 
459 	/*
460 	 * Decide whether to put the page header off page to avoid
461 	 * wasting too large a part of the page. Off-page page headers
462 	 * go on a hash table, so we can match a returned item
463 	 * with its header based on the page address.
464 	 * We use 1/16 of the page size as the threshold (XXX: tune)
465 	 * We use 1/16 of the page size as the threshold (XXX: tune)
	 */
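	/*
	 * E.g. with 4 KB pages, items smaller than 256 bytes keep
	 * their header in-page.
	 */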
466 	if (pp->pr_size < pagesz/16) {
467 		/* Use the end of the page for the page header */
468 		pp->pr_roflags |= PR_PHINPAGE;
469 		pp->pr_phoffset = off =
470 			pagesz - ALIGN(sizeof(struct pool_item_header));
471 	} else {
472 		/* The page header will be taken from our page header pool */
473 		pp->pr_phoffset = 0;
474 		off = pagesz;
475 		for (i = 0; i < PR_HASHTABSIZE; i++) {
476 			LIST_INIT(&pp->pr_hashtab[i]);
477 		}
478 	}
479 
480 	/*
481 	 * Alignment is to take place at `ioff' within the item. This means
482 	 * we must reserve up to `align - 1' bytes on the page to allow
483 	 * appropriate positioning of each item.
484 	 *
485 	 * Silently enforce `0 <= ioff < align'.
486 	 */
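	/*
	 * Worked example (illustrative): with align == 8 and ioff == 4,
	 * pool_prime_page() advances the first item by (align - ioff)
	 * == 4 bytes so that (item + ioff) is 8-byte aligned, costing
	 * at most align - 1 bytes of the page.
	 */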
487 	pp->pr_itemoffset = ioff = ioff % align;
488 	pp->pr_itemsperpage = (off - ((align - ioff) % align)) / pp->pr_size;
489 	KASSERT(pp->pr_itemsperpage != 0);
490 
491 	/*
492 	 * Use the slack between the chunks and the page header
493 	 * for "cache coloring".
494 	 */
495 	slack = off - pp->pr_itemsperpage * pp->pr_size;
496 	pp->pr_maxcolor = (slack / align) * align;
497 	pp->pr_curcolor = 0;
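	/*
	 * E.g. 16 bytes of slack with align == 8 yields pr_maxcolor ==
	 * 16, so successive pages start their items at offsets 0, 8
	 * and 16 (see the pr_curcolor advance in pool_prime_page()).
	 */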
498 
499 	pp->pr_nget = 0;
500 	pp->pr_nfail = 0;
501 	pp->pr_nput = 0;
502 	pp->pr_npagealloc = 0;
503 	pp->pr_npagefree = 0;
504 	pp->pr_hiwat = 0;
505 	pp->pr_nidle = 0;
506 
507 #ifdef POOL_DIAGNOSTIC
508 	if (flags & PR_LOGGING) {
509 		if (kmem_map == NULL ||
510 		    (pp->pr_log = malloc(pool_logsize * sizeof(struct pool_log),
511 		     M_TEMP, M_NOWAIT)) == NULL)
512 			pp->pr_roflags &= ~PR_LOGGING;
513 		pp->pr_curlogentry = 0;
514 		pp->pr_logsize = pool_logsize;
515 	}
516 #endif
517 
518 	pp->pr_entered_file = NULL;
519 	pp->pr_entered_line = 0;
520 
521 	simple_lock_init(&pp->pr_slock);
522 
523 	/*
524 	 * Initialize private page header pool and cache magazine pool if we
525 	 * haven't done so yet.
526 	 * XXX LOCKING.
527 	 */
528 	if (phpool.pr_size == 0) {
529 #ifdef POOL_SUBPAGE
530 		pool_init(&phpool, sizeof(struct pool_item_header), 0, 0, 0,
531 		    "phpool", PAGE_SIZE, pool_page_alloc, pool_page_free, 0);
532 		pool_init(&psppool, POOL_SUBPAGE, POOL_SUBPAGE, 0,
533 		    PR_RECURSIVE, "psppool", PAGE_SIZE,
534 		    pool_page_alloc, pool_page_free, 0);
535 #else
536 		pool_init(&phpool, sizeof(struct pool_item_header), 0, 0,
537 		    0, "phpool", 0, 0, 0, 0);
538 #endif
539 		pool_init(&pcgpool, sizeof(struct pool_cache_group), 0, 0,
540 		    0, "pcgpool", 0, 0, 0, 0);
541 	}
542 
543 	/* Insert into the list of all pools. */
544 	simple_lock(&pool_head_slock);
545 	TAILQ_INSERT_TAIL(&pool_head, pp, pr_poollist);
546 	simple_unlock(&pool_head_slock);
547 }
548 
549 /*
550  * Decommission a pool resource.
551  */
552 void
553 pool_destroy(struct pool *pp)
554 {
555 	struct pool_item_header *ph;
556 	struct pool_cache *pc;
557 
558 	/* Destroy all caches for this pool. */
559 	while ((pc = TAILQ_FIRST(&pp->pr_cachelist)) != NULL)
560 		pool_cache_destroy(pc);
561 
562 #ifdef DIAGNOSTIC
563 	if (pp->pr_nout != 0) {
564 		pr_printlog(pp, NULL, printf);
565 		panic("pool_destroy: pool busy: still out: %u\n",
566 		    pp->pr_nout);
567 	}
568 #endif
569 
570 	/* Remove all pages */
571 	if ((pp->pr_roflags & PR_STATIC) == 0)
572 		while ((ph = TAILQ_FIRST(&pp->pr_pagelist)) != NULL)
573 			pr_rmpage(pp, ph, NULL);
574 
575 	/* Remove from global pool list */
576 	simple_lock(&pool_head_slock);
577 	TAILQ_REMOVE(&pool_head, pp, pr_poollist);
578 	if (drainpp == pp) {
579 		drainpp = NULL;
580 	}
581 	simple_unlock(&pool_head_slock);
582 
583 #ifdef POOL_DIAGNOSTIC
584 	if ((pp->pr_roflags & PR_LOGGING) != 0)
585 		free(pp->pr_log, M_TEMP);
586 #endif
587 
588 	if (pp->pr_roflags & PR_FREEHEADER)
589 		free(pp, M_POOL);
590 }
591 
592 static __inline struct pool_item_header *
593 pool_alloc_item_header(struct pool *pp, caddr_t storage, int flags)
594 {
595 	struct pool_item_header *ph;
596 	int s;
597 
598 	LOCK_ASSERT(simple_lock_held(&pp->pr_slock) == 0);
599 
600 	if ((pp->pr_roflags & PR_PHINPAGE) != 0)
601 		ph = (struct pool_item_header *) (storage + pp->pr_phoffset);
602 	else {
603 		s = splhigh();
604 		ph = pool_get(&phpool, flags);
605 		splx(s);
606 	}
607 
608 	return (ph);
609 }
610 
611 /*
612  * Grab an item from the pool; must be called at appropriate spl level
613  */
614 void *
615 #ifdef POOL_DIAGNOSTIC
616 _pool_get(struct pool *pp, int flags, const char *file, long line)
617 #else
618 pool_get(struct pool *pp, int flags)
619 #endif
620 {
621 	struct pool_item *pi;
622 	struct pool_item_header *ph;
623 	void *v;
624 
625 #ifdef DIAGNOSTIC
626 	if (__predict_false((pp->pr_roflags & PR_STATIC) &&
627 			    (flags & PR_MALLOCOK))) {
628 		pr_printlog(pp, NULL, printf);
629 		panic("pool_get: static");
630 	}
631 
632 	if (__predict_false(curproc == NULL && doing_shutdown == 0 &&
633 			    (flags & PR_WAITOK) != 0))
634 		panic("pool_get: must have NOWAIT");
635 
636 #ifdef LOCKDEBUG
637 	if (flags & PR_WAITOK)
638 		simple_lock_only_held(NULL, "pool_get(PR_WAITOK)");
639 #endif
640 #endif /* DIAGNOSTIC */
641 
642 	simple_lock(&pp->pr_slock);
643 	pr_enter(pp, file, line);
644 
645  startover:
646 	/*
647 	 * Check to see if we've reached the hard limit.  If we have,
648 	 * and we can wait, then wait until an item has been returned to
649 	 * the pool.
650 	 */
651 #ifdef DIAGNOSTIC
652 	if (__predict_false(pp->pr_nout > pp->pr_hardlimit)) {
653 		pr_leave(pp);
654 		simple_unlock(&pp->pr_slock);
655 		panic("pool_get: %s: crossed hard limit", pp->pr_wchan);
656 	}
657 #endif
658 	if (__predict_false(pp->pr_nout == pp->pr_hardlimit)) {
659 		if ((flags & PR_WAITOK) && !(flags & PR_LIMITFAIL)) {
660 			/*
661 			 * XXX: A warning isn't logged in this case.  Should
662 			 * it be?
663 			 */
664 			pp->pr_flags |= PR_WANTED;
665 			pr_leave(pp);
666 			ltsleep(pp, PSWP, pp->pr_wchan, 0, &pp->pr_slock);
667 			pr_enter(pp, file, line);
668 			goto startover;
669 		}
670 
671 		/*
672 		 * Log a message that the hard limit has been hit.
673 		 */
674 		if (pp->pr_hardlimit_warning != NULL &&
675 		    ratecheck(&pp->pr_hardlimit_warning_last,
676 			      &pp->pr_hardlimit_ratecap))
677 			log(LOG_ERR, "%s\n", pp->pr_hardlimit_warning);
678 
679 		if (flags & PR_URGENT)
680 			panic("pool_get: urgent");
681 
682 		pp->pr_nfail++;
683 
684 		pr_leave(pp);
685 		simple_unlock(&pp->pr_slock);
686 		return (NULL);
687 	}
688 
689 	/*
690 	 * The convention we use is that if `curpage' is not NULL, then
691 	 * it points at a non-empty bucket. In particular, `curpage'
692 	 * never points at a page header which has PR_PHINPAGE set and
693 	 * has no items in its bucket.
694 	 */
695 	if ((ph = pp->pr_curpage) == NULL) {
696 #ifdef DIAGNOSTIC
697 		if (pp->pr_nitems != 0) {
698 			simple_unlock(&pp->pr_slock);
699 			printf("pool_get: %s: curpage NULL, nitems %u\n",
700 			    pp->pr_wchan, pp->pr_nitems);
701 			panic("pool_get: nitems inconsistent\n");
702 		}
703 #endif
704 
705 		/*
706 		 * Call the back-end page allocator for more memory.
707 		 * Release the pool lock, as the back-end page allocator
708 		 * may block.
709 		 */
710 		pr_leave(pp);
711 		simple_unlock(&pp->pr_slock);
712 		v = (*pp->pr_alloc)(pp->pr_pagesz, flags, pp->pr_mtype);
713 		if (__predict_true(v != NULL))
714 			ph = pool_alloc_item_header(pp, v, flags);
715 		simple_lock(&pp->pr_slock);
716 		pr_enter(pp, file, line);
717 
718 		if (__predict_false(v == NULL || ph == NULL)) {
719 			if (v != NULL)
720 				(*pp->pr_free)(v, pp->pr_pagesz, pp->pr_mtype);
721 
722 			/*
723 			 * We were unable to allocate a page or item
724 			 * header, but we released the lock during
725 			 * allocation, so perhaps items were freed
726 			 * back to the pool.  Check for this case.
727 			 */
728 			if (pp->pr_curpage != NULL)
729 				goto startover;
730 
731 			if (flags & PR_URGENT)
732 				panic("pool_get: urgent");
733 
734 			if ((flags & PR_WAITOK) == 0) {
735 				pp->pr_nfail++;
736 				pr_leave(pp);
737 				simple_unlock(&pp->pr_slock);
738 				return (NULL);
739 			}
740 
741 			/*
742 			 * Wait for items to be returned to this pool.
743 			 *
744 			 * XXX: we actually want to wait just until
745 			 * the page allocator has memory again. Depending
746 			 * on this pool's usage, we might get stuck here
747 			 * for a long time.
748 			 *
749 			 * XXX: maybe we should wake up once a second and
750 			 * try again?
751 			 */
752 			pp->pr_flags |= PR_WANTED;
753 			pr_leave(pp);
754 			ltsleep(pp, PSWP, pp->pr_wchan, 0, &pp->pr_slock);
755 			pr_enter(pp, file, line);
756 			goto startover;
757 		}
758 
759 		/* We have more memory; add it to the pool */
760 		pool_prime_page(pp, v, ph);
761 		pp->pr_npagealloc++;
762 
763 		/* Start the allocation process over. */
764 		goto startover;
765 	}
766 
767 	if (__predict_false((v = pi = TAILQ_FIRST(&ph->ph_itemlist)) == NULL)) {
768 		pr_leave(pp);
769 		simple_unlock(&pp->pr_slock);
770 		panic("pool_get: %s: page empty", pp->pr_wchan);
771 	}
772 #ifdef DIAGNOSTIC
773 	if (__predict_false(pp->pr_nitems == 0)) {
774 		pr_leave(pp);
775 		simple_unlock(&pp->pr_slock);
776 		printf("pool_get: %s: items on itemlist, nitems %u\n",
777 		    pp->pr_wchan, pp->pr_nitems);
778 		panic("pool_get: nitems inconsistent\n");
779 	}
780 
781 	pr_log(pp, v, PRLOG_GET, file, line);
782 
783 	if (__predict_false(pi->pi_magic != PI_MAGIC)) {
784 		pr_printlog(pp, pi, printf);
785 		panic("pool_get(%s): free list modified: magic=%x; page %p;"
786 		       " item addr %p\n",
787 			pp->pr_wchan, pi->pi_magic, ph->ph_page, pi);
788 	}
789 #endif
790 
791 	/*
792 	 * Remove from item list.
793 	 */
794 	TAILQ_REMOVE(&ph->ph_itemlist, pi, pi_list);
795 	pp->pr_nitems--;
796 	pp->pr_nout++;
797 	if (ph->ph_nmissing == 0) {
798 #ifdef DIAGNOSTIC
799 		if (__predict_false(pp->pr_nidle == 0))
800 			panic("pool_get: nidle inconsistent");
801 #endif
802 		pp->pr_nidle--;
803 	}
804 	ph->ph_nmissing++;
805 	if (TAILQ_FIRST(&ph->ph_itemlist) == NULL) {
806 #ifdef DIAGNOSTIC
807 		if (__predict_false(ph->ph_nmissing != pp->pr_itemsperpage)) {
808 			pr_leave(pp);
809 			simple_unlock(&pp->pr_slock);
810 			panic("pool_get: %s: nmissing inconsistent",
811 			    pp->pr_wchan);
812 		}
813 #endif
814 		/*
815 		 * Find a new non-empty page header, if any.
816 		 * Start search from the page head, to increase
817 		 * the chance for "high water" pages to be freed.
818 		 *
819 		 * Migrate empty pages to the end of the list.  This
820 		 * will speed the update of curpage as pages become
821 		 * idle.  Empty pages intermingled with idle pages
822 		 * is no big deal.  As soon as a page becomes un-empty,
823 		 * it will move back to the head of the list.
824 		 */
825 		TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist);
826 		TAILQ_INSERT_TAIL(&pp->pr_pagelist, ph, ph_pagelist);
827 		TAILQ_FOREACH(ph, &pp->pr_pagelist, ph_pagelist)
828 			if (TAILQ_FIRST(&ph->ph_itemlist) != NULL)
829 				break;
830 
831 		pp->pr_curpage = ph;
832 	}
833 
834 	pp->pr_nget++;
835 
836 	/*
837 	 * If we have a low water mark and we are now below that low
838 	 * water mark, add more items to the pool.
839 	 */
840 	if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
841 		/*
842 		 * XXX: Should we log a warning?  Should we set up a timeout
843 		 * to try again in a second or so?  The latter could break
844 		 * a caller's assumptions about interrupt protection, etc.
845 		 */
846 	}
847 
848 	pr_leave(pp);
849 	simple_unlock(&pp->pr_slock);
850 	return (v);
851 }
852 
853 /*
854  * Internal version of pool_put().  Pool is already locked/entered.
855  */
856 static void
857 pool_do_put(struct pool *pp, void *v)
858 {
859 	struct pool_item *pi = v;
860 	struct pool_item_header *ph;
861 	caddr_t page;
862 	int s;
863 
864 	LOCK_ASSERT(simple_lock_held(&pp->pr_slock));
865 
866 	page = (caddr_t)((u_long)v & pp->pr_pagemask);
867 
868 #ifdef DIAGNOSTIC
869 	if (__predict_false(pp->pr_nout == 0)) {
870 		printf("pool %s: putting with none out\n",
871 		    pp->pr_wchan);
872 		panic("pool_put");
873 	}
874 #endif
875 
876 	if (__predict_false((ph = pr_find_pagehead(pp, page)) == NULL)) {
877 		pr_printlog(pp, NULL, printf);
878 		panic("pool_put: %s: page header missing", pp->pr_wchan);
879 	}
880 
881 #ifdef LOCKDEBUG
882 	/*
883 	 * Check if we're freeing a locked simple lock.
884 	 */
885 	simple_lock_freecheck((caddr_t)pi, ((caddr_t)pi) + pp->pr_size);
886 #endif
887 
888 	/*
889 	 * Return to item list.
890 	 */
891 #ifdef DIAGNOSTIC
892 	pi->pi_magic = PI_MAGIC;
893 #endif
894 #ifdef DEBUG
895 	{
896 		int i, *ip = v;
897 
898 		for (i = 0; i < pp->pr_size / sizeof(int); i++) {
899 			*ip++ = PI_MAGIC;
900 		}
901 	}
902 #endif
903 
904 	TAILQ_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
905 	ph->ph_nmissing--;
906 	pp->pr_nput++;
907 	pp->pr_nitems++;
908 	pp->pr_nout--;
909 
910 	/* Cancel "pool empty" condition if it exists */
911 	if (pp->pr_curpage == NULL)
912 		pp->pr_curpage = ph;
913 
914 	if (pp->pr_flags & PR_WANTED) {
915 		pp->pr_flags &= ~PR_WANTED;
916 		if (ph->ph_nmissing == 0)
917 			pp->pr_nidle++;
918 		wakeup((caddr_t)pp);
919 		return;
920 	}
921 
922 	/*
923 	 * If this page is now complete, do one of two things:
924 	 *
925 	 *	(1) If we have more pages than the page high water
926 	 *	    mark, free the page back to the system.
927 	 *
928 	 *	(2) Move it to the end of the page list, so that
929 	 *	    we minimize our chances of fragmenting the
930 	 *	    pool.  Idle and completely empty pages migrate to
931 	 *	    the end of the list (so that we find un-empty pages
932 	 *	    more quickly when we update curpage), where they can
933 	 *	    be more easily swept up by the pagedaemon when pages
934 	 *	    are scarce.
935 	 */
936 	if (ph->ph_nmissing == 0) {
937 		pp->pr_nidle++;
938 		if (pp->pr_npages > pp->pr_maxpages) {
939 			pr_rmpage(pp, ph, NULL);
940 		} else {
941 			TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist);
942 			TAILQ_INSERT_TAIL(&pp->pr_pagelist, ph, ph_pagelist);
943 
944 			/*
945 			 * Update the timestamp on the page.  A page must
946 			 * be idle for some period of time before it can
947 			 * be reclaimed by the pagedaemon.  This minimizes
948 			 * ping-pong'ing for memory.
949 			 */
950 			s = splclock();
951 			ph->ph_time = mono_time;
952 			splx(s);
953 
954 			/*
955 			 * Update the current page pointer.  Just look for
956 			 * the first page with any free items.
957 			 *
958 			 * XXX: Maybe we want an option to look for the
959 			 * page with the fewest available items, to minimize
960 			 * fragmentation?
961 			 */
962 			TAILQ_FOREACH(ph, &pp->pr_pagelist, ph_pagelist)
963 				if (TAILQ_FIRST(&ph->ph_itemlist) != NULL)
964 					break;
965 
966 			pp->pr_curpage = ph;
967 		}
968 	}
969 	/*
970 	 * If the page has just become un-empty, move it to the head of
971 	 * the list, and make it the current page.  The next allocation
972 	 * will get the item from this page, instead of further fragmenting
973 	 * the pool.
974 	 */
975 	else if (ph->ph_nmissing == (pp->pr_itemsperpage - 1)) {
976 		TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist);
977 		TAILQ_INSERT_HEAD(&pp->pr_pagelist, ph, ph_pagelist);
978 		pp->pr_curpage = ph;
979 	}
980 }
981 
982 /*
983  * Return resource to the pool; must be called at appropriate spl level
984  */
985 #ifdef POOL_DIAGNOSTIC
986 void
987 _pool_put(struct pool *pp, void *v, const char *file, long line)
988 {
989 
990 	simple_lock(&pp->pr_slock);
991 	pr_enter(pp, file, line);
992 
993 	pr_log(pp, v, PRLOG_PUT, file, line);
994 
995 	pool_do_put(pp, v);
996 
997 	pr_leave(pp);
998 	simple_unlock(&pp->pr_slock);
999 }
1000 #undef pool_put
1001 #endif /* POOL_DIAGNOSTIC */
1002 
1003 void
1004 pool_put(struct pool *pp, void *v)
1005 {
1006 
1007 	simple_lock(&pp->pr_slock);
1008 
1009 	pool_do_put(pp, v);
1010 
1011 	simple_unlock(&pp->pr_slock);
1012 }
1013 
1014 #ifdef POOL_DIAGNOSTIC
1015 #define		pool_put(h, v)	_pool_put((h), (v), __FILE__, __LINE__)
1016 #endif
1017 
1018 /*
1019  * Add N items to the pool.
1020  */
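/*
 * E.g. (illustrative) pool_prime(&foo_pool, 64) preallocates enough
 * pages to hold at least 64 items and bumps pr_minpages accordingly.
 */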
1021 int
1022 pool_prime(struct pool *pp, int n)
1023 {
1024 	struct pool_item_header *ph;
1025 	caddr_t cp;
1026 	int newpages, error = 0;
1027 
1028 	simple_lock(&pp->pr_slock);
1029 
1030 	newpages = roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1031 
1032 	while (newpages-- > 0) {
1033 		simple_unlock(&pp->pr_slock);
1034 		cp = (*pp->pr_alloc)(pp->pr_pagesz, PR_NOWAIT, pp->pr_mtype);
1035 		if (__predict_true(cp != NULL))
1036 			ph = pool_alloc_item_header(pp, cp, PR_NOWAIT);
1037 		simple_lock(&pp->pr_slock);
1038 
1039 		if (__predict_false(cp == NULL || ph == NULL)) {
1040 			error = ENOMEM;
1041 			if (cp != NULL)
1042 				(*pp->pr_free)(cp, pp->pr_pagesz, pp->pr_mtype);
1043 			break;
1044 		}
1045 
1046 		pool_prime_page(pp, cp, ph);
1047 		pp->pr_npagealloc++;
1048 		pp->pr_minpages++;
1049 	}
1050 
1051 	if (pp->pr_minpages >= pp->pr_maxpages)
1052 		pp->pr_maxpages = pp->pr_minpages + 1;	/* XXX */
1053 
1054 	simple_unlock(&pp->pr_slock);
1055 	return (error);
1056 }
1057 
1058 /*
1059  * Add a page worth of items to the pool.
1060  *
1061  * Note, we must be called with the pool descriptor LOCKED.
1062  */
1063 static void
1064 pool_prime_page(struct pool *pp, caddr_t storage, struct pool_item_header *ph)
1065 {
1066 	struct pool_item *pi;
1067 	caddr_t cp = storage;
1068 	unsigned int align = pp->pr_align;
1069 	unsigned int ioff = pp->pr_itemoffset;
1070 	int n;
1071 
1072 	if (((u_long)cp & (pp->pr_pagesz - 1)) != 0)
1073 		panic("pool_prime_page: %s: unaligned page", pp->pr_wchan);
1074 
1075 	if ((pp->pr_roflags & PR_PHINPAGE) == 0)
1076 		LIST_INSERT_HEAD(&pp->pr_hashtab[PR_HASH_INDEX(pp, cp)],
1077 		    ph, ph_hashlist);
1078 
1079 	/*
1080 	 * Insert page header.
1081 	 */
1082 	TAILQ_INSERT_HEAD(&pp->pr_pagelist, ph, ph_pagelist);
1083 	TAILQ_INIT(&ph->ph_itemlist);
1084 	ph->ph_page = storage;
1085 	ph->ph_nmissing = 0;
1086 	memset(&ph->ph_time, 0, sizeof(ph->ph_time));
1087 
1088 	pp->pr_nidle++;
1089 
1090 	/*
1091 	 * Color this page.
1092 	 */
1093 	cp = (caddr_t)(cp + pp->pr_curcolor);
1094 	if ((pp->pr_curcolor += align) > pp->pr_maxcolor)
1095 		pp->pr_curcolor = 0;
1096 
1097 	/*
1098 	 * Adjust storage to apply alignment to `pr_itemoffset' in each item.
1099 	 */
1100 	if (ioff != 0)
1101 		cp = (caddr_t)(cp + (align - ioff));
1102 
1103 	/*
1104 	 * Insert remaining chunks on the bucket list.
1105 	 */
1106 	n = pp->pr_itemsperpage;
1107 	pp->pr_nitems += n;
1108 
1109 	while (n--) {
1110 		pi = (struct pool_item *)cp;
1111 
1112 		/* Insert on page list */
1113 		TAILQ_INSERT_TAIL(&ph->ph_itemlist, pi, pi_list);
1114 #ifdef DIAGNOSTIC
1115 		pi->pi_magic = PI_MAGIC;
1116 #endif
1117 		cp = (caddr_t)(cp + pp->pr_size);
1118 	}
1119 
1120 	/*
1121 	 * If the pool was depleted, point at the new page.
1122 	 */
1123 	if (pp->pr_curpage == NULL)
1124 		pp->pr_curpage = ph;
1125 
1126 	if (++pp->pr_npages > pp->pr_hiwat)
1127 		pp->pr_hiwat = pp->pr_npages;
1128 }
1129 
1130 /*
1131  * Used by pool_get() when nitems drops below the low water mark; it
1132  * brings nitems back up to the low water mark.
1133  *
1134  * Note 1, we never wait for memory here, we let the caller decide what to do.
1135  *
1136  * Note 2, this doesn't work with static pools.
1137  *
1138  * Note 3, we must be called with the pool already locked, and we return
1139  * with it locked.
1140  */
1141 static int
1142 pool_catchup(struct pool *pp)
1143 {
1144 	struct pool_item_header *ph;
1145 	caddr_t cp;
1146 	int error = 0;
1147 
1148 	if (pp->pr_roflags & PR_STATIC) {
1149 		/*
1150 		 * We dropped below the low water mark, and this is not a
1151 		 * good thing.  Log a warning.
1152 		 *
1153 		 * XXX: rate-limit this?
1154 		 */
1155 		printf("WARNING: static pool `%s' dropped below low water "
1156 		    "mark\n", pp->pr_wchan);
1157 		return (0);
1158 	}
1159 
1160 	while (POOL_NEEDS_CATCHUP(pp)) {
1161 		/*
1162 		 * Call the page back-end allocator for more memory.
1163 		 *
1164 		 * XXX: We never wait, so should we bother unlocking
1165 		 * the pool descriptor?
1166 		 */
1167 		simple_unlock(&pp->pr_slock);
1168 		cp = (*pp->pr_alloc)(pp->pr_pagesz, PR_NOWAIT, pp->pr_mtype);
1169 		if (__predict_true(cp != NULL))
1170 			ph = pool_alloc_item_header(pp, cp, PR_NOWAIT);
1171 		simple_lock(&pp->pr_slock);
1172 		if (__predict_false(cp == NULL || ph == NULL)) {
1173 			if (cp != NULL)
1174 				(*pp->pr_free)(cp, pp->pr_pagesz, pp->pr_mtype);
1175 			error = ENOMEM;
1176 			break;
1177 		}
1178 		pool_prime_page(pp, cp, ph);
1179 		pp->pr_npagealloc++;
1180 	}
1181 
1182 	return (error);
1183 }
1184 
1185 void
1186 pool_setlowat(struct pool *pp, int n)
1187 {
1188 	int error;
1189 
1190 	simple_lock(&pp->pr_slock);
1191 
1192 	pp->pr_minitems = n;
1193 	pp->pr_minpages = (n == 0)
1194 		? 0
1195 		: roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
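	/*
	 * E.g. with 100 items per page, a low-water mark of 250 items
	 * maps to pr_minpages == 3.
	 */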
1196 
1197 	/* Make sure we're caught up with the newly-set low water mark. */
1198 	if (POOL_NEEDS_CATCHUP(pp) && ((error = pool_catchup(pp)) != 0)) {
1199 		/*
1200 		 * XXX: Should we log a warning?  Should we set up a timeout
1201 		 * to try again in a second or so?  The latter could break
1202 		 * a caller's assumptions about interrupt protection, etc.
1203 		 */
1204 	}
1205 
1206 	simple_unlock(&pp->pr_slock);
1207 }
1208 
1209 void
1210 pool_sethiwat(struct pool *pp, int n)
1211 {
1212 
1213 	simple_lock(&pp->pr_slock);
1214 
1215 	pp->pr_maxpages = (n == 0)
1216 		? 0
1217 		: roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1218 
1219 	simple_unlock(&pp->pr_slock);
1220 }
1221 
1222 void
1223 pool_sethardlimit(struct pool *pp, int n, const char *warnmess, int ratecap)
1224 {
1225 
1226 	simple_lock(&pp->pr_slock);
1227 
1228 	pp->pr_hardlimit = n;
1229 	pp->pr_hardlimit_warning = warnmess;
1230 	pp->pr_hardlimit_ratecap.tv_sec = ratecap;
1231 	pp->pr_hardlimit_warning_last.tv_sec = 0;
1232 	pp->pr_hardlimit_warning_last.tv_usec = 0;
1233 
1234 	/*
1235 	 * In-line version of pool_sethiwat(), because we don't want to
1236 	 * release the lock.
1237 	 */
1238 	pp->pr_maxpages = (n == 0)
1239 		? 0
1240 		: roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1241 
1242 	simple_unlock(&pp->pr_slock);
1243 }
1244 
1245 /*
1246  * Default page allocator.
1247  */
1248 static void *
1249 pool_page_alloc(unsigned long sz, int flags, int mtype)
1250 {
1251 	boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;
1252 
1253 	return ((void *)uvm_km_alloc_poolpage(waitok));
1254 }
1255 
1256 static void
1257 pool_page_free(void *v, unsigned long sz, int mtype)
1258 {
1259 
1260 	uvm_km_free_poolpage((vaddr_t)v);
1261 }
1262 
1263 #ifdef POOL_SUBPAGE
1264 /*
1265  * Sub-page allocator, for machines with large hardware pages.
1266  */
1267 static void *
1268 pool_subpage_alloc(unsigned long sz, int flags, int mtype)
1269 {
1270 
1271 	return pool_get(&psppool, flags);
1272 }
1273 
1274 static void
1275 pool_subpage_free(void *v, unsigned long sz, int mtype)
1276 {
1277 
1278 	pool_put(&psppool, v);
1279 }
1280 #endif
1281 
1282 #ifdef POOL_SUBPAGE
1283 /* We don't provide a real nointr allocator.  Maybe later. */
1284 void *
1285 pool_page_alloc_nointr(unsigned long sz, int flags, int mtype)
1286 {
1287 
1288 	return pool_subpage_alloc(sz, flags, mtype);
1289 }
1290 
1291 void
1292 pool_page_free_nointr(void *v, unsigned long sz, int mtype)
1293 {
1294 
1295 	pool_subpage_free(v, sz, mtype);
1296 }
1297 #else
1298 /*
1299  * Alternate pool page allocator for pools that know they will
1300  * never be accessed in interrupt context.
1301  */
1302 void *
1303 pool_page_alloc_nointr(unsigned long sz, int flags, int mtype)
1304 {
1305 	boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;
1306 
1307 	return ((void *)uvm_km_alloc_poolpage1(kernel_map, uvm.kernel_object,
1308 	    waitok));
1309 }
1310 
1311 void
1312 pool_page_free_nointr(void *v, unsigned long sz, int mtype)
1313 {
1314 
1315 	uvm_km_free_poolpage1(kernel_map, (vaddr_t)v);
1316 }
1317 #endif
1318 
1319 
1320 /*
1321  * Release all complete pages that have not been used recently.
1322  */
1323 void
1324 #ifdef POOL_DIAGNOSTIC
1325 _pool_reclaim(struct pool *pp, const char *file, long line)
1326 #else
1327 pool_reclaim(struct pool *pp)
1328 #endif
1329 {
1330 	struct pool_item_header *ph, *phnext;
1331 	struct pool_cache *pc;
1332 	struct timeval curtime;
1333 	struct pool_pagelist pq;
1334 	int s;
1335 
1336 	if (pp->pr_roflags & PR_STATIC)
1337 		return;
1338 
1339 	if (simple_lock_try(&pp->pr_slock) == 0)
1340 		return;
1341 	pr_enter(pp, file, line);
1342 	TAILQ_INIT(&pq);
1343 
1344 	/*
1345 	 * Reclaim items from the pool's caches.
1346 	 */
1347 	TAILQ_FOREACH(pc, &pp->pr_cachelist, pc_poollist)
1348 		pool_cache_reclaim(pc);
1349 
1350 	s = splclock();
1351 	curtime = mono_time;
1352 	splx(s);
1353 
1354 	for (ph = TAILQ_FIRST(&pp->pr_pagelist); ph != NULL; ph = phnext) {
1355 		phnext = TAILQ_NEXT(ph, ph_pagelist);
1356 
1357 		/* Check our minimum page claim */
1358 		if (pp->pr_npages <= pp->pr_minpages)
1359 			break;
1360 
1361 		if (ph->ph_nmissing == 0) {
1362 			struct timeval diff;
1363 			timersub(&curtime, &ph->ph_time, &diff);
1364 			if (diff.tv_sec < pool_inactive_time)
1365 				continue;
1366 
1367 			/*
1368 			 * If freeing this page would put us below
1369 			 * the low water mark, stop now.
1370 			 */
1371 			if ((pp->pr_nitems - pp->pr_itemsperpage) <
1372 			    pp->pr_minitems)
1373 				break;
1374 
1375 			pr_rmpage(pp, ph, &pq);
1376 		}
1377 	}
1378 
1379 	pr_leave(pp);
1380 	simple_unlock(&pp->pr_slock);
1381 	if (TAILQ_EMPTY(&pq)) {
1382 		return;
1383 	}
1384 	while ((ph = TAILQ_FIRST(&pq)) != NULL) {
1385 		TAILQ_REMOVE(&pq, ph, ph_pagelist);
1386 		(*pp->pr_free)(ph->ph_page, pp->pr_pagesz, pp->pr_mtype);
1387 		if (pp->pr_roflags & PR_PHINPAGE) {
1388 			continue;
1389 		}
1390 		LIST_REMOVE(ph, ph_hashlist);
1391 		s = splhigh();
1392 		pool_put(&phpool, ph);
1393 		splx(s);
1394 	}
1395 }
1396 
1397 
1398 /*
1399  * Drain pools, one at a time.
1400  *
1401  * Note, we must never be called from an interrupt context.
1402  */
1403 void
1404 pool_drain(void *arg)
1405 {
1406 	struct pool *pp;
1407 	int s;
1408 
1409 	pp = NULL;
1410 	s = splvm();
1411 	simple_lock(&pool_head_slock);
1412 	if (drainpp == NULL) {
1413 		drainpp = TAILQ_FIRST(&pool_head);
1414 	}
1415 	if (drainpp) {
1416 		pp = drainpp;
1417 		drainpp = TAILQ_NEXT(pp, pr_poollist);
1418 	}
1419 	simple_unlock(&pool_head_slock);
1420 	if (pp != NULL)
		pool_reclaim(pp);
1421 	splx(s);
1422 }
1423 
1424 
1425 /*
1426  * Diagnostic helpers.
1427  */
1428 void
1429 pool_print(struct pool *pp, const char *modif)
1430 {
1431 	int s;
1432 
1433 	s = splvm();
1434 	if (simple_lock_try(&pp->pr_slock) == 0) {
1435 		printf("pool %s is locked; try again later\n",
1436 		    pp->pr_wchan);
1437 		splx(s);
1438 		return;
1439 	}
1440 	pool_print1(pp, modif, printf);
1441 	simple_unlock(&pp->pr_slock);
1442 	splx(s);
1443 }
1444 
1445 void
1446 pool_printit(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
1447 {
1448 	int didlock = 0;
1449 
1450 	if (pp == NULL) {
1451 		(*pr)("Must specify a pool to print.\n");
1452 		return;
1453 	}
1454 
1455 	/*
1456 	 * Called from DDB; interrupts should be blocked, and all
1457 	 * other processors should be paused.  We can skip locking
1458 	 * the pool in this case.
1459 	 *
1460 	 * We do a simple_lock_try() just to print the lock
1461 	 * status, however.
1462 	 */
1463 
1464 	if (simple_lock_try(&pp->pr_slock) == 0)
1465 		(*pr)("WARNING: pool %s is locked\n", pp->pr_wchan);
1466 	else
1467 		didlock = 1;
1468 
1469 	pool_print1(pp, modif, pr);
1470 
1471 	if (didlock)
1472 		simple_unlock(&pp->pr_slock);
1473 }
1474 
1475 static void
1476 pool_print1(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
1477 {
1478 	struct pool_item_header *ph;
1479 	struct pool_cache *pc;
1480 	struct pool_cache_group *pcg;
1481 #ifdef DIAGNOSTIC
1482 	struct pool_item *pi;
1483 #endif
1484 	int i, print_log = 0, print_pagelist = 0, print_cache = 0;
1485 	char c;
1486 
1487 	while ((c = *modif++) != '\0') {
1488 		if (c == 'l')
1489 			print_log = 1;
1490 		if (c == 'p')
1491 			print_pagelist = 1;
1492 		if (c == 'c')
1493 			print_cache = 1;
1495 	}
1496 
1497 	(*pr)("POOL %s: size %u, align %u, ioff %u, roflags 0x%08x\n",
1498 	    pp->pr_wchan, pp->pr_size, pp->pr_align, pp->pr_itemoffset,
1499 	    pp->pr_roflags);
1500 	(*pr)("\tpagesz %u, mtype %d\n", pp->pr_pagesz, pp->pr_mtype);
1501 	(*pr)("\talloc %p, release %p\n", pp->pr_alloc, pp->pr_free);
1502 	(*pr)("\tminitems %u, minpages %u, maxpages %u, npages %u\n",
1503 	    pp->pr_minitems, pp->pr_minpages, pp->pr_maxpages, pp->pr_npages);
1504 	(*pr)("\titemsperpage %u, nitems %u, nout %u, hardlimit %u\n",
1505 	    pp->pr_itemsperpage, pp->pr_nitems, pp->pr_nout, pp->pr_hardlimit);
1506 
1507 	(*pr)("\n\tnget %lu, nfail %lu, nput %lu\n",
1508 	    pp->pr_nget, pp->pr_nfail, pp->pr_nput);
1509 	(*pr)("\tnpagealloc %lu, npagefree %lu, hiwat %u, nidle %lu\n",
1510 	    pp->pr_npagealloc, pp->pr_npagefree, pp->pr_hiwat, pp->pr_nidle);
1511 
1512 	if (print_pagelist == 0)
1513 		goto skip_pagelist;
1514 
1515 	if ((ph = TAILQ_FIRST(&pp->pr_pagelist)) != NULL)
1516 		(*pr)("\n\tpage list:\n");
1517 	for (; ph != NULL; ph = TAILQ_NEXT(ph, ph_pagelist)) {
1518 		(*pr)("\t\tpage %p, nmissing %d, time %lu,%lu\n",
1519 		    ph->ph_page, ph->ph_nmissing,
1520 		    (u_long)ph->ph_time.tv_sec,
1521 		    (u_long)ph->ph_time.tv_usec);
1522 #ifdef DIAGNOSTIC
1523 		TAILQ_FOREACH(pi, &ph->ph_itemlist, pi_list) {
1524 			if (pi->pi_magic != PI_MAGIC) {
1525 				(*pr)("\t\t\titem %p, magic 0x%x\n",
1526 				    pi, pi->pi_magic);
1527 			}
1528 		}
1529 #endif
1530 	}
1531 	if (pp->pr_curpage == NULL)
1532 		(*pr)("\tno current page\n");
1533 	else
1534 		(*pr)("\tcurpage %p\n", pp->pr_curpage->ph_page);
1535 
1536  skip_pagelist:
1537 
1538 	if (print_log == 0)
1539 		goto skip_log;
1540 
1541 	(*pr)("\n");
1542 	if ((pp->pr_roflags & PR_LOGGING) == 0)
1543 		(*pr)("\tno log\n");
1544 	else
1545 		pr_printlog(pp, NULL, pr);
1546 
1547  skip_log:
1548 
1549 	if (print_cache == 0)
1550 		goto skip_cache;
1551 
1552 	TAILQ_FOREACH(pc, &pp->pr_cachelist, pc_poollist) {
1553 		(*pr)("\tcache %p: allocfrom %p freeto %p\n", pc,
1554 		    pc->pc_allocfrom, pc->pc_freeto);
1555 		(*pr)("\t    hits %lu misses %lu ngroups %lu nitems %lu\n",
1556 		    pc->pc_hits, pc->pc_misses, pc->pc_ngroups, pc->pc_nitems);
1557 		TAILQ_FOREACH(pcg, &pc->pc_grouplist, pcg_list) {
1558 			(*pr)("\t\tgroup %p: avail %d\n", pcg, pcg->pcg_avail);
1559 			for (i = 0; i < PCG_NOBJECTS; i++)
1560 				(*pr)("\t\t\t%p\n", pcg->pcg_objects[i]);
1561 		}
1562 	}
1563 
1564  skip_cache:
1565 
1566 	pr_enter_check(pp, pr);
1567 }
1568 
1569 int
1570 pool_chk(struct pool *pp, const char *label)
1571 {
1572 	struct pool_item_header *ph;
1573 	int r = 0;
1574 
1575 	simple_lock(&pp->pr_slock);
1576 
1577 	TAILQ_FOREACH(ph, &pp->pr_pagelist, ph_pagelist) {
1578 		struct pool_item *pi;
1579 		int n;
1580 		caddr_t page;
1581 
1582 		page = (caddr_t)((u_long)ph & pp->pr_pagemask);
1583 		if (page != ph->ph_page &&
1584 		    (pp->pr_roflags & PR_PHINPAGE) != 0) {
1585 			if (label != NULL)
1586 				printf("%s: ", label);
1587 			printf("pool(%p:%s): page inconsistency: page %p;"
1588 			       " at page head addr %p (p %p)\n", pp,
1589 				pp->pr_wchan, ph->ph_page,
1590 				ph, page);
1591 			r++;
1592 			goto out;
1593 		}
1594 
1595 		for (pi = TAILQ_FIRST(&ph->ph_itemlist), n = 0;
1596 		     pi != NULL;
1597 		     pi = TAILQ_NEXT(pi,pi_list), n++) {
1598 
1599 #ifdef DIAGNOSTIC
1600 			if (pi->pi_magic != PI_MAGIC) {
1601 				if (label != NULL)
1602 					printf("%s: ", label);
1603 				printf("pool(%s): free list modified: magic=%x;"
1604 				       " page %p; item ordinal %d;"
1605 				       " addr %p (p %p)\n",
1606 					pp->pr_wchan, pi->pi_magic, ph->ph_page,
1607 					n, pi, page);
1608 				panic("pool");
1609 			}
1610 #endif
1611 			page = (caddr_t)((u_long)pi & pp->pr_pagemask);
1612 			if (page == ph->ph_page)
1613 				continue;
1614 
1615 			if (label != NULL)
1616 				printf("%s: ", label);
1617 			printf("pool(%p:%s): page inconsistency: page %p;"
1618 			       " item ordinal %d; addr %p (p %p)\n", pp,
1619 				pp->pr_wchan, ph->ph_page,
1620 				n, pi, page);
1621 			r++;
1622 			goto out;
1623 		}
1624 	}
1625 out:
1626 	simple_unlock(&pp->pr_slock);
1627 	return (r);
1628 }
1629 
1630 /*
1631  * pool_cache_init:
1632  *
1633  *	Initialize a pool cache.
1634  *
1635  *	NOTE: If the pool must be protected from interrupts, we expect
1636  *	to be called at the appropriate interrupt priority level.
1637  */
1638 void
1639 pool_cache_init(struct pool_cache *pc, struct pool *pp,
1640     int (*ctor)(void *, void *, int),
1641     void (*dtor)(void *, void *),
1642     void *arg)
1643 {
1644 
1645 	TAILQ_INIT(&pc->pc_grouplist);
1646 	simple_lock_init(&pc->pc_slock);
1647 
1648 	pc->pc_allocfrom = NULL;
1649 	pc->pc_freeto = NULL;
1650 	pc->pc_pool = pp;
1651 
1652 	pc->pc_ctor = ctor;
1653 	pc->pc_dtor = dtor;
1654 	pc->pc_arg  = arg;
1655 
1656 	pc->pc_hits   = 0;
1657 	pc->pc_misses = 0;
1658 
1659 	pc->pc_ngroups = 0;
1660 
1661 	pc->pc_nitems = 0;
1662 
1663 	simple_lock(&pp->pr_slock);
1664 	TAILQ_INSERT_TAIL(&pp->pr_cachelist, pc, pc_poollist);
1665 	simple_unlock(&pp->pr_slock);
1666 }
1667 
1668 /*
1669  * pool_cache_destroy:
1670  *
1671  *	Destroy a pool cache.
1672  */
1673 void
1674 pool_cache_destroy(struct pool_cache *pc)
1675 {
1676 	struct pool *pp = pc->pc_pool;
1677 
1678 	/* First, invalidate the entire cache. */
1679 	pool_cache_invalidate(pc);
1680 
1681 	/* ...and remove it from the pool's cache list. */
1682 	simple_lock(&pp->pr_slock);
1683 	TAILQ_REMOVE(&pp->pr_cachelist, pc, pc_poollist);
1684 	simple_unlock(&pp->pr_slock);
1685 }
1686 
1687 static __inline void *
1688 pcg_get(struct pool_cache_group *pcg)
1689 {
1690 	void *object;
1691 	u_int idx;
1692 
1693 	KASSERT(pcg->pcg_avail <= PCG_NOBJECTS);
1694 	KASSERT(pcg->pcg_avail != 0);
1695 	idx = --pcg->pcg_avail;
1696 
1697 	KASSERT(pcg->pcg_objects[idx] != NULL);
1698 	object = pcg->pcg_objects[idx];
1699 	pcg->pcg_objects[idx] = NULL;
1700 
1701 	return (object);
1702 }
1703 
1704 static __inline void
1705 pcg_put(struct pool_cache_group *pcg, void *object)
1706 {
1707 	u_int idx;
1708 
1709 	KASSERT(pcg->pcg_avail < PCG_NOBJECTS);
1710 	idx = pcg->pcg_avail++;
1711 
1712 	KASSERT(pcg->pcg_objects[idx] == NULL);
1713 	pcg->pcg_objects[idx] = object;
1714 }
1715 
1716 /*
1717  * pool_cache_get:
1718  *
1719  *	Get an object from a pool cache.
1720  */
1721 void *
1722 pool_cache_get(struct pool_cache *pc, int flags)
1723 {
1724 	struct pool_cache_group *pcg;
1725 	void *object;
1726 
1727 #ifdef LOCKDEBUG
1728 	if (flags & PR_WAITOK)
1729 		simple_lock_only_held(NULL, "pool_cache_get(PR_WAITOK)");
1730 #endif
1731 
1732 	simple_lock(&pc->pc_slock);
1733 
1734 	if ((pcg = pc->pc_allocfrom) == NULL) {
1735 		TAILQ_FOREACH(pcg, &pc->pc_grouplist, pcg_list) {
1736 			if (pcg->pcg_avail != 0) {
1737 				pc->pc_allocfrom = pcg;
1738 				goto have_group;
1739 			}
1740 		}
1741 
1742 		/*
1743 		 * No groups with any available objects.  Allocate
1744 		 * a new object, construct it, and return it to
1745 		 * the caller.  We will allocate a group, if necessary,
1746 		 * when the object is freed back to the cache.
1747 		 */
1748 		pc->pc_misses++;
1749 		simple_unlock(&pc->pc_slock);
1750 		object = pool_get(pc->pc_pool, flags);
1751 		if (object != NULL && pc->pc_ctor != NULL) {
1752 			if ((*pc->pc_ctor)(pc->pc_arg, object, flags) != 0) {
1753 				pool_put(pc->pc_pool, object);
1754 				return (NULL);
1755 			}
1756 		}
1757 		return (object);
1758 	}
1759 
1760  have_group:
1761 	pc->pc_hits++;
1762 	pc->pc_nitems--;
1763 	object = pcg_get(pcg);
1764 
1765 	if (pcg->pcg_avail == 0)
1766 		pc->pc_allocfrom = NULL;
1767 
1768 	simple_unlock(&pc->pc_slock);
1769 
1770 	return (object);
1771 }
1772 
1773 /*
1774  * pool_cache_put:
1775  *
1776  *	Put an object back to the pool cache.
1777  */
1778 void
1779 pool_cache_put(struct pool_cache *pc, void *object)
1780 {
1781 	struct pool_cache_group *pcg;
1782 	int s;
1783 
1784 	simple_lock(&pc->pc_slock);
1785 
1786 	if ((pcg = pc->pc_freeto) == NULL) {
1787 		TAILQ_FOREACH(pcg, &pc->pc_grouplist, pcg_list) {
1788 			if (pcg->pcg_avail != PCG_NOBJECTS) {
1789 				pc->pc_freeto = pcg;
1790 				goto have_group;
1791 			}
1792 		}
1793 
1794 		/*
1795 		 * No empty groups to free the object to.  Attempt to
1796 		 * allocate one.
1797 		 */
1798 		simple_unlock(&pc->pc_slock);
1799 		s = splvm();
1800 		pcg = pool_get(&pcgpool, PR_NOWAIT);
1801 		splx(s);
1802 		if (pcg != NULL) {
1803 			memset(pcg, 0, sizeof(*pcg));
1804 			simple_lock(&pc->pc_slock);
1805 			pc->pc_ngroups++;
1806 			TAILQ_INSERT_TAIL(&pc->pc_grouplist, pcg, pcg_list);
1807 			if (pc->pc_freeto == NULL)
1808 				pc->pc_freeto = pcg;
1809 			goto have_group;
1810 		}
1811 
1812 		/*
1813 		 * Unable to allocate a cache group; destruct the object
1814 		 * and free it back to the pool.
1815 		 */
1816 		pool_cache_destruct_object(pc, object);
1817 		return;
1818 	}
1819 
1820  have_group:
1821 	pc->pc_nitems++;
1822 	pcg_put(pcg, object);
1823 
1824 	if (pcg->pcg_avail == PCG_NOBJECTS)
1825 		pc->pc_freeto = NULL;
1826 
1827 	simple_unlock(&pc->pc_slock);
1828 }
1829 
1830 /*
1831  * pool_cache_destruct_object:
1832  *
1833  *	Force destruction of an object and its release back into
1834  *	the pool.
1835  */
1836 void
1837 pool_cache_destruct_object(struct pool_cache *pc, void *object)
1838 {
1839 
1840 	if (pc->pc_dtor != NULL)
1841 		(*pc->pc_dtor)(pc->pc_arg, object);
1842 	pool_put(pc->pc_pool, object);
1843 }
1844 
1845 /*
1846  * pool_cache_do_invalidate:
1847  *
1848  *	This internal function implements pool_cache_invalidate() and
1849  *	pool_cache_reclaim().
1850  */
1851 static void
1852 pool_cache_do_invalidate(struct pool_cache *pc, int free_groups,
1853     void (*putit)(struct pool *, void *))
1854 {
1855 	struct pool_cache_group *pcg, *npcg;
1856 	void *object;
1857 	int s;
1858 
1859 	for (pcg = TAILQ_FIRST(&pc->pc_grouplist); pcg != NULL;
1860 	     pcg = npcg) {
1861 		npcg = TAILQ_NEXT(pcg, pcg_list);
1862 		while (pcg->pcg_avail != 0) {
1863 			pc->pc_nitems--;
1864 			object = pcg_get(pcg);
1865 			if (pcg->pcg_avail == 0 && pc->pc_allocfrom == pcg)
1866 				pc->pc_allocfrom = NULL;
1867 			if (pc->pc_dtor != NULL)
1868 				(*pc->pc_dtor)(pc->pc_arg, object);
1869 			(*putit)(pc->pc_pool, object);
1870 		}
1871 		if (free_groups) {
1872 			pc->pc_ngroups--;
1873 			TAILQ_REMOVE(&pc->pc_grouplist, pcg, pcg_list);
1874 			if (pc->pc_freeto == pcg)
1875 				pc->pc_freeto = NULL;
1876 			s = splvm();
1877 			pool_put(&pcgpool, pcg);
1878 			splx(s);
1879 		}
1880 	}
1881 }
1882 
1883 /*
1884  * pool_cache_invalidate:
1885  *
1886  *	Invalidate a pool cache (destruct and release all of the
1887  *	cached objects).
1888  */
1889 void
1890 pool_cache_invalidate(struct pool_cache *pc)
1891 {
1892 
1893 	simple_lock(&pc->pc_slock);
1894 	pool_cache_do_invalidate(pc, 0, pool_put);
1895 	simple_unlock(&pc->pc_slock);
1896 }
1897 
1898 /*
1899  * pool_cache_reclaim:
1900  *
1901  *	Reclaim a pool cache for pool_reclaim().
1902  */
1903 static void
1904 pool_cache_reclaim(struct pool_cache *pc)
1905 {
1906 
1907 	simple_lock(&pc->pc_slock);
1908 	pool_cache_do_invalidate(pc, 1, pool_do_put);
1909 	simple_unlock(&pc->pc_slock);
1910 }
1911