/*	$NetBSD: subr_pool.c,v 1.96 2004/06/20 18:19:27 thorpej Exp $	*/

/*-
 * Copyright (c) 1997, 1999, 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Paul Kranenburg; by Jason R. Thorpe of the Numerical Aerospace
 * Simulation Facility, NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.96 2004/06/20 18:19:27 thorpej Exp $");

#include "opt_pool.h"
#include "opt_poollog.h"
#include "opt_lockdebug.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/pool.h>
#include <sys/syslog.h>

#include <uvm/uvm.h>

/*
 * Pool resource management utility.
 *
 * Memory is allocated in pages which are split into pieces according to
 * the pool item size.  Each page is kept on one of three lists in the
 * pool structure: `pr_emptypages', `pr_fullpages' and `pr_partpages',
 * for empty, full and partially-full pages respectively.  The individual
 * pool items are on a linked list headed by `ph_itemlist' in each page
 * header.  The memory for building the page list is either taken from
 * the allocated pages themselves (for small pool items) or taken from
 * an internal pool of page headers (`phpool').
 */
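/*
 * As a quick orientation, a typical consumer uses the pool API roughly
 * as sketched below.  This is an illustrative example only; `struct foo',
 * `foo_pool' and "foopl" are hypothetical names, not part of this file:
 *
 *	static struct pool foo_pool;
 *
 *	pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0, "foopl", NULL);
 *
 *	fp = pool_get(&foo_pool, PR_WAITOK);
 *	...use fp...
 *	pool_put(&foo_pool, fp);
 *
 * pool_get() and pool_put() must be called at the appropriate spl level
 * for the pool.  PR_NOWAIT may be used instead of PR_WAITOK when sleeping
 * is not permitted, in which case pool_get() can return NULL.
 */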
/* List of all pools */
TAILQ_HEAD(,pool) pool_head = TAILQ_HEAD_INITIALIZER(pool_head);

/* Private pool for page header structures */
static struct pool phpool;

#ifdef POOL_SUBPAGE
/* Pool of subpages for use by normal pools. */
static struct pool psppool;
#endif

/* # of seconds to retain page after last use */
int pool_inactive_time = 10;

/* Next candidate for drainage (see pool_drain()) */
static struct pool *drainpp;

/* This spin lock protects both pool_head and drainpp. */
struct simplelock pool_head_slock = SIMPLELOCK_INITIALIZER;

struct pool_item_header {
	/* Page headers */
	LIST_ENTRY(pool_item_header)
				ph_pagelist;	/* pool page list */
	TAILQ_HEAD(,pool_item)	ph_itemlist;	/* chunk list for this page */
	SPLAY_ENTRY(pool_item_header)
				ph_node;	/* Off-page page headers */
	unsigned int		ph_nmissing;	/* # of chunks in use */
	caddr_t			ph_page;	/* this page's address */
	struct timeval		ph_time;	/* last referenced */
};

struct pool_item {
#ifdef DIAGNOSTIC
	u_int pi_magic;
#endif
#define	PI_MAGIC 0xdeadbeefU
	/* Other entries use only this list entry */
	TAILQ_ENTRY(pool_item)	pi_list;
};

#define	POOL_NEEDS_CATCHUP(pp)						\
	((pp)->pr_nitems < (pp)->pr_minitems)

/*
 * Pool cache management.
 *
 * Pool caches provide a way for constructed objects to be cached by the
 * pool subsystem.  This can lead to performance improvements by avoiding
 * needless object construction/destruction; it is deferred until absolutely
 * necessary.
 *
 * Caches are grouped into cache groups.  Each cache group references
 * up to 16 constructed objects.  When a cache allocates an object
 * from the pool, it calls the object's constructor and places it into
 * a cache group.  When a cache group frees an object back to the pool,
 * it first calls the object's destructor.  This allows the object to
 * persist in constructed form while freed to the cache.
 *
 * Multiple caches may exist for each pool.  This allows a single
 * object type to have multiple constructed forms.  The pool references
 * each cache, so that when a pool is drained by the pagedaemon, it can
 * drain each individual cache as well.  Each time a cache is drained,
 * the most idle cache group is freed to the pool in its entirety.
 *
 * Pool caches are laid on top of pools.  By layering them, we can avoid
 * the complexity of cache management for pools which would not benefit
 * from it.
 */
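/*
 * Sketch of typical pool cache usage, with hypothetical names; the
 * constructor and destructor signatures follow pool_cache_init() below
 * (the constructor returns 0 on success):
 *
 *	static struct pool foo_pool;
 *	static struct pool_cache foo_cache;
 *
 *	int  foo_ctor(void *arg, void *obj, int flags);
 *	void foo_dtor(void *arg, void *obj);
 *
 *	pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0, "foopl", NULL);
 *	pool_cache_init(&foo_cache, &foo_pool, foo_ctor, foo_dtor, NULL);
 *
 *	fp = pool_cache_get(&foo_cache, PR_WAITOK);
 *	...
 *	pool_cache_put(&foo_cache, fp);
 *
 * A cache hit returns an already-constructed object; on a miss the
 * object is taken from the underlying pool and constructed before it
 * is returned to the caller.
 */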
/* The cache group pool. */
static struct pool pcgpool;

static void	pool_cache_reclaim(struct pool_cache *);

static int	pool_catchup(struct pool *);
static void	pool_prime_page(struct pool *, caddr_t,
		    struct pool_item_header *);
static void	pool_update_curpage(struct pool *);

void		*pool_allocator_alloc(struct pool *, int);
void		pool_allocator_free(struct pool *, void *);

static void pool_print_pagelist(struct pool_pagelist *,
	void (*)(const char *, ...));
static void pool_print1(struct pool *, const char *,
	void (*)(const char *, ...));

static int pool_chk_page(struct pool *, const char *,
			 struct pool_item_header *);

/*
 * Pool log entry.  An array of these is allocated in pool_init().
 */
struct pool_log {
	const char	*pl_file;
	long		pl_line;
	int		pl_action;
#define	PRLOG_GET	1
#define	PRLOG_PUT	2
	void		*pl_addr;
};

#ifdef POOL_DIAGNOSTIC
/* Number of entries in pool log buffers */
#ifndef POOL_LOGSIZE
#define	POOL_LOGSIZE	10
#endif

int pool_logsize = POOL_LOGSIZE;

static __inline void
pr_log(struct pool *pp, void *v, int action, const char *file, long line)
{
	int n = pp->pr_curlogentry;
	struct pool_log *pl;

	if ((pp->pr_roflags & PR_LOGGING) == 0)
		return;

	/*
	 * Fill in the current entry.  Wrap around and overwrite
	 * the oldest entry if necessary.
	 */
	pl = &pp->pr_log[n];
	pl->pl_file = file;
	pl->pl_line = line;
	pl->pl_action = action;
	pl->pl_addr = v;
	if (++n >= pp->pr_logsize)
		n = 0;
	pp->pr_curlogentry = n;
}

static void
pr_printlog(struct pool *pp, struct pool_item *pi,
    void (*pr)(const char *, ...))
{
	int i = pp->pr_logsize;
	int n = pp->pr_curlogentry;

	if ((pp->pr_roflags & PR_LOGGING) == 0)
		return;

	/*
	 * Print all entries in this pool's log.
	 */
	while (i-- > 0) {
		struct pool_log *pl = &pp->pr_log[n];
		if (pl->pl_action != 0) {
			if (pi == NULL || pi == pl->pl_addr) {
				(*pr)("\tlog entry %d:\n", i);
				(*pr)("\t\taction = %s, addr = %p\n",
				    pl->pl_action == PRLOG_GET ? "get" : "put",
				    pl->pl_addr);
				(*pr)("\t\tfile: %s at line %lu\n",
				    pl->pl_file, pl->pl_line);
			}
		}
		if (++n >= pp->pr_logsize)
			n = 0;
	}
}

static __inline void
pr_enter(struct pool *pp, const char *file, long line)
{

	if (__predict_false(pp->pr_entered_file != NULL)) {
		printf("pool %s: reentrancy at file %s line %ld\n",
		    pp->pr_wchan, file, line);
		printf("	previous entry at file %s line %ld\n",
		    pp->pr_entered_file, pp->pr_entered_line);
		panic("pr_enter");
	}

	pp->pr_entered_file = file;
	pp->pr_entered_line = line;
}

static __inline void
pr_leave(struct pool *pp)
{

	if (__predict_false(pp->pr_entered_file == NULL)) {
		printf("pool %s not entered?\n", pp->pr_wchan);
		panic("pr_leave");
	}

	pp->pr_entered_file = NULL;
	pp->pr_entered_line = 0;
}

static __inline void
pr_enter_check(struct pool *pp, void (*pr)(const char *, ...))
{

	if (pp->pr_entered_file != NULL)
		(*pr)("\n\tcurrently entered from file %s line %ld\n",
		    pp->pr_entered_file, pp->pr_entered_line);
}
#else
#define	pr_log(pp, v, action, file, line)
#define	pr_printlog(pp, pi, pr)
#define	pr_enter(pp, file, line)
#define	pr_leave(pp)
#define	pr_enter_check(pp, pr)
#endif /* POOL_DIAGNOSTIC */

static __inline int
phtree_compare(struct pool_item_header *a, struct pool_item_header *b)
{
	if (a->ph_page < b->ph_page)
		return (-1);
	else if (a->ph_page > b->ph_page)
		return (1);
	else
		return (0);
}

SPLAY_PROTOTYPE(phtree, pool_item_header, ph_node, phtree_compare);
SPLAY_GENERATE(phtree, pool_item_header, ph_node, phtree_compare);

/*
 * Return the pool page header based on page address.
 */
static __inline struct pool_item_header *
pr_find_pagehead(struct pool *pp, caddr_t page)
{
	struct pool_item_header *ph, tmp;

	if ((pp->pr_roflags & PR_PHINPAGE) != 0)
		return ((struct pool_item_header *)(page + pp->pr_phoffset));

	tmp.ph_page = page;
	ph = SPLAY_FIND(phtree, &pp->pr_phtree, &tmp);
	return ph;
}

/*
 * Remove a page from the pool.
 */
static __inline void
pr_rmpage(struct pool *pp, struct pool_item_header *ph,
     struct pool_pagelist *pq)
{
	int s;

	LOCK_ASSERT(!simple_lock_held(&pp->pr_slock) || pq != NULL);

	/*
	 * If the page was idle, decrement the idle page count.
	 */
	if (ph->ph_nmissing == 0) {
#ifdef DIAGNOSTIC
		if (pp->pr_nidle == 0)
			panic("pr_rmpage: nidle inconsistent");
		if (pp->pr_nitems < pp->pr_itemsperpage)
			panic("pr_rmpage: nitems inconsistent");
#endif
		pp->pr_nidle--;
	}

	pp->pr_nitems -= pp->pr_itemsperpage;

	/*
	 * Unlink a page from the pool and release it (or queue it for
	 * release).
	 */
	LIST_REMOVE(ph, ph_pagelist);
	if ((pp->pr_roflags & PR_PHINPAGE) == 0)
		SPLAY_REMOVE(phtree, &pp->pr_phtree, ph);
	if (pq) {
		LIST_INSERT_HEAD(pq, ph, ph_pagelist);
	} else {
		pool_allocator_free(pp, ph->ph_page);
		if ((pp->pr_roflags & PR_PHINPAGE) == 0) {
			s = splvm();
			pool_put(&phpool, ph);
			splx(s);
		}
	}
	pp->pr_npages--;
	pp->pr_npagefree++;

	pool_update_curpage(pp);
}

/*
 * Initialize all the pools listed in the "pools" link set.
 */
void
link_pool_init(void)
{
	__link_set_decl(pools, struct link_pool_init);
	struct link_pool_init * const *pi;

	__link_set_foreach(pi, pools)
		pool_init((*pi)->pp, (*pi)->size, (*pi)->align,
		    (*pi)->align_offset, (*pi)->flags, (*pi)->wchan,
		    (*pi)->palloc);
}
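/*
 * Entries in the "pools" link set are normally generated by a macro in
 * <sys/pool.h> rather than written by hand.  Conceptually, each entry is
 * a static record like the sketch below (the hypothetical `foo_pool'
 * again; the field order follows the pool_init() call above, and the
 * exact macro spelling lives in the header, not here):
 *
 *	static const struct link_pool_init foo_pool_init = {
 *		&foo_pool, sizeof(struct foo), 0, 0, 0, "foopl", NULL
 *	};
 *	__link_set_add_rodata(pools, foo_pool_init);
 *
 * link_pool_init() then walks the set at boot and calls pool_init() on
 * each record.
 */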
/*
 * Initialize the given pool resource structure.
 *
 * We export this routine to allow other kernel parts to declare
 * static pools that must be initialized before malloc() is available.
 */
void
pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags,
    const char *wchan, struct pool_allocator *palloc)
{
	int off, slack;
	size_t trysize, phsize;
	int s;

#ifdef POOL_DIAGNOSTIC
	/*
	 * Always log if POOL_DIAGNOSTIC is defined.
	 */
	if (pool_logsize != 0)
		flags |= PR_LOGGING;
#endif

#ifdef POOL_SUBPAGE
	/*
	 * XXX We don't provide a real `nointr' back-end
	 * yet; all sub-pages come from a kmem back-end.
	 * maybe some day...
	 */
	if (palloc == NULL) {
		extern struct pool_allocator pool_allocator_kmem_subpage;
		palloc = &pool_allocator_kmem_subpage;
	}
	/*
	 * We'll assume any user-specified back-end allocator
	 * will deal with sub-pages, or simply doesn't care.
	 */
#else
	if (palloc == NULL)
		palloc = &pool_allocator_kmem;
#endif /* POOL_SUBPAGE */
	if ((palloc->pa_flags & PA_INITIALIZED) == 0) {
		if (palloc->pa_pagesz == 0) {
#ifdef POOL_SUBPAGE
			if (palloc == &pool_allocator_kmem)
				palloc->pa_pagesz = PAGE_SIZE;
			else
				palloc->pa_pagesz = POOL_SUBPAGE;
#else
			palloc->pa_pagesz = PAGE_SIZE;
#endif /* POOL_SUBPAGE */
		}

		TAILQ_INIT(&palloc->pa_list);

		simple_lock_init(&palloc->pa_slock);
		palloc->pa_pagemask = ~(palloc->pa_pagesz - 1);
		palloc->pa_pageshift = ffs(palloc->pa_pagesz) - 1;
		palloc->pa_flags |= PA_INITIALIZED;
	}

	if (align == 0)
		align = ALIGN(1);

	if (size < sizeof(struct pool_item))
		size = sizeof(struct pool_item);

	size = roundup(size, align);
#ifdef DIAGNOSTIC
	if (size > palloc->pa_pagesz)
		panic("pool_init: pool item size (%lu) too large",
		    (u_long)size);
#endif

	/*
	 * Initialize the pool structure.
	 */
	LIST_INIT(&pp->pr_emptypages);
	LIST_INIT(&pp->pr_fullpages);
	LIST_INIT(&pp->pr_partpages);
	TAILQ_INIT(&pp->pr_cachelist);
	pp->pr_curpage = NULL;
	pp->pr_npages = 0;
	pp->pr_minitems = 0;
	pp->pr_minpages = 0;
	pp->pr_maxpages = UINT_MAX;
	pp->pr_roflags = flags;
	pp->pr_flags = 0;
	pp->pr_size = size;
	pp->pr_align = align;
	pp->pr_wchan = wchan;
	pp->pr_alloc = palloc;
	pp->pr_nitems = 0;
	pp->pr_nout = 0;
	pp->pr_hardlimit = UINT_MAX;
	pp->pr_hardlimit_warning = NULL;
	pp->pr_hardlimit_ratecap.tv_sec = 0;
	pp->pr_hardlimit_ratecap.tv_usec = 0;
	pp->pr_hardlimit_warning_last.tv_sec = 0;
	pp->pr_hardlimit_warning_last.tv_usec = 0;
	pp->pr_drain_hook = NULL;
	pp->pr_drain_hook_arg = NULL;

	/*
	 * Decide whether to put the page header off-page, to avoid wasting
	 * too large a part of the page or too big an item.  Off-page page
	 * headers go into a splay tree, so we can match a returned item
	 * with its header based on the page address.  We use 1/16 of the
	 * page size and about 8 times the item size as the threshold.
	 * (XXX: tune)
	 *
	 * However, we'll put the header into the page if we can put
	 * it without wasting any items.
	 *
	 * Silently enforce `0 <= ioff < align'.
	 */
	pp->pr_itemoffset = ioff %= align;
	/* See the comment below about reserved bytes. */
	trysize = palloc->pa_pagesz - ((align - ioff) % align);
	phsize = ALIGN(sizeof(struct pool_item_header));
	if (pp->pr_size < MIN(palloc->pa_pagesz / 16, phsize << 3) ||
	    trysize / pp->pr_size == (trysize - phsize) / pp->pr_size) {
		/* Use the end of the page for the page header */
		pp->pr_roflags |= PR_PHINPAGE;
		pp->pr_phoffset = off = palloc->pa_pagesz - phsize;
	} else {
		/* The page header will be taken from our page header pool */
		pp->pr_phoffset = 0;
		off = palloc->pa_pagesz;
		SPLAY_INIT(&pp->pr_phtree);
	}

	/*
	 * Alignment is to take place at `ioff' within the item.  This means
	 * we must reserve up to `align - 1' bytes on the page to allow
	 * appropriate positioning of each item.
	 */
	pp->pr_itemsperpage = (off - ((align - ioff) % align)) / pp->pr_size;
	KASSERT(pp->pr_itemsperpage != 0);
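	/*
	 * A worked example of the header-placement rule above, assuming
	 * a hypothetical 4096-byte page and a 64-byte aligned header: a
	 * 128-byte item is below MIN(4096/16, 64*8) = 256, so the header
	 * lives at the end of the page (PR_PHINPAGE) and 4032/128 = 31
	 * items fit.  A 1200-byte item fails the size test, but 4096/1200
	 * and (4096-64)/1200 both yield 3 items, so the in-page header
	 * costs nothing and PR_PHINPAGE is still used.  A 1024-byte item
	 * would drop from 4 items to 3 if the header were in-page, so its
	 * header is allocated from `phpool' instead.
	 */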
	/*
	 * Use the slack between the chunks and the page header
	 * for "cache coloring".
	 */
	slack = off - pp->pr_itemsperpage * pp->pr_size;
	pp->pr_maxcolor = (slack / align) * align;
	pp->pr_curcolor = 0;

	pp->pr_nget = 0;
	pp->pr_nfail = 0;
	pp->pr_nput = 0;
	pp->pr_npagealloc = 0;
	pp->pr_npagefree = 0;
	pp->pr_hiwat = 0;
	pp->pr_nidle = 0;

#ifdef POOL_DIAGNOSTIC
	if (flags & PR_LOGGING) {
		if (kmem_map == NULL ||
		    (pp->pr_log = malloc(pool_logsize * sizeof(struct pool_log),
		     M_TEMP, M_NOWAIT)) == NULL)
			pp->pr_roflags &= ~PR_LOGGING;
		pp->pr_curlogentry = 0;
		pp->pr_logsize = pool_logsize;
	}
#endif

	pp->pr_entered_file = NULL;
	pp->pr_entered_line = 0;

	simple_lock_init(&pp->pr_slock);

	/*
	 * Initialize private page header pool and cache magazine pool if we
	 * haven't done so yet.
	 * XXX LOCKING.
	 */
	if (phpool.pr_size == 0) {
#ifdef POOL_SUBPAGE
		pool_init(&phpool, sizeof(struct pool_item_header), 0, 0, 0,
		    "phpool", &pool_allocator_kmem);
		pool_init(&psppool, POOL_SUBPAGE, POOL_SUBPAGE, 0,
		    PR_RECURSIVE, "psppool", &pool_allocator_kmem);
#else
		pool_init(&phpool, sizeof(struct pool_item_header), 0, 0,
		    0, "phpool", NULL);
#endif
		pool_init(&pcgpool, sizeof(struct pool_cache_group), 0, 0,
		    0, "pcgpool", NULL);
	}

	/* Insert into the list of all pools. */
	simple_lock(&pool_head_slock);
	TAILQ_INSERT_TAIL(&pool_head, pp, pr_poollist);
	simple_unlock(&pool_head_slock);

	/* Insert this into the list of pools using this allocator. */
	s = splvm();
	simple_lock(&palloc->pa_slock);
	TAILQ_INSERT_TAIL(&palloc->pa_list, pp, pr_alloc_list);
	simple_unlock(&palloc->pa_slock);
	splx(s);
}

/*
 * De-commission a pool resource.
 */
void
pool_destroy(struct pool *pp)
{
	struct pool_item_header *ph;
	struct pool_cache *pc;
	int s;

	/* Locking order: pool_allocator -> pool */
	s = splvm();
	simple_lock(&pp->pr_alloc->pa_slock);
	TAILQ_REMOVE(&pp->pr_alloc->pa_list, pp, pr_alloc_list);
	simple_unlock(&pp->pr_alloc->pa_slock);
	splx(s);

	/* Destroy all caches for this pool. */
	while ((pc = TAILQ_FIRST(&pp->pr_cachelist)) != NULL)
		pool_cache_destroy(pc);

#ifdef DIAGNOSTIC
	if (pp->pr_nout != 0) {
		pr_printlog(pp, NULL, printf);
		panic("pool_destroy: pool busy: still out: %u",
		    pp->pr_nout);
	}
#endif

	/* Remove all pages */
	while ((ph = LIST_FIRST(&pp->pr_emptypages)) != NULL)
		pr_rmpage(pp, ph, NULL);
	KASSERT(LIST_EMPTY(&pp->pr_fullpages));
	KASSERT(LIST_EMPTY(&pp->pr_partpages));

	/* Remove from global pool list */
	simple_lock(&pool_head_slock);
	TAILQ_REMOVE(&pool_head, pp, pr_poollist);
	if (drainpp == pp) {
		drainpp = NULL;
	}
	simple_unlock(&pool_head_slock);

#ifdef POOL_DIAGNOSTIC
	if ((pp->pr_roflags & PR_LOGGING) != 0)
		free(pp->pr_log, M_TEMP);
#endif
}

void
pool_set_drain_hook(struct pool *pp, void (*fn)(void *, int), void *arg)
{

	/* XXX no locking -- must be used just after pool_init() */
#ifdef DIAGNOSTIC
	if (pp->pr_drain_hook != NULL)
		panic("pool_set_drain_hook(%s): already set", pp->pr_wchan);
#endif
	pp->pr_drain_hook = fn;
	pp->pr_drain_hook_arg = arg;
}
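/*
 * Sketch of a drain hook, following the signature taken by
 * pool_set_drain_hook() above (hypothetical `foo' names).  The hook is
 * called with the pool unlocked, when the pool hits its hard limit or
 * is being reclaimed, and should try to release objects back to the
 * pool:
 *
 *	static void
 *	foo_drain(void *arg, int flags)
 *	{
 *		struct foo_softc *sc = arg;
 *
 *		... free cached foo objects, honoring PR_NOWAIT in flags ...
 *	}
 *
 *	pool_set_drain_hook(&foo_pool, foo_drain, sc);
 */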
static struct pool_item_header *
pool_alloc_item_header(struct pool *pp, caddr_t storage, int flags)
{
	struct pool_item_header *ph;
	int s;

	LOCK_ASSERT(simple_lock_held(&pp->pr_slock) == 0);

	if ((pp->pr_roflags & PR_PHINPAGE) != 0)
		ph = (struct pool_item_header *) (storage + pp->pr_phoffset);
	else {
		s = splvm();
		ph = pool_get(&phpool, flags);
		splx(s);
	}

	return (ph);
}

/*
 * Grab an item from the pool; must be called at appropriate spl level
 */
void *
#ifdef POOL_DIAGNOSTIC
_pool_get(struct pool *pp, int flags, const char *file, long line)
#else
pool_get(struct pool *pp, int flags)
#endif
{
	struct pool_item *pi;
	struct pool_item_header *ph;
	void *v;

#ifdef DIAGNOSTIC
	if (__predict_false(pp->pr_itemsperpage == 0))
		panic("pool_get: pool %p: pr_itemsperpage is zero, "
		    "pool not initialized?", pp);
	if (__predict_false(curlwp == NULL && doing_shutdown == 0 &&
	    (flags & PR_WAITOK) != 0))
		panic("pool_get: %s: must have NOWAIT", pp->pr_wchan);

#ifdef LOCKDEBUG
	if (flags & PR_WAITOK)
		simple_lock_only_held(NULL, "pool_get(PR_WAITOK)");
#endif
#endif /* DIAGNOSTIC */

	simple_lock(&pp->pr_slock);
	pr_enter(pp, file, line);

 startover:
	/*
	 * Check to see if we've reached the hard limit.  If we have,
	 * and we can wait, then wait until an item has been returned to
	 * the pool.
	 */
#ifdef DIAGNOSTIC
	if (__predict_false(pp->pr_nout > pp->pr_hardlimit)) {
		pr_leave(pp);
		simple_unlock(&pp->pr_slock);
		panic("pool_get: %s: crossed hard limit", pp->pr_wchan);
	}
#endif
	if (__predict_false(pp->pr_nout == pp->pr_hardlimit)) {
		if (pp->pr_drain_hook != NULL) {
			/*
			 * Since the drain hook is going to free things
			 * back to the pool, unlock, call the hook, re-lock,
			 * and check the hardlimit condition again.
			 */
			pr_leave(pp);
			simple_unlock(&pp->pr_slock);
			(*pp->pr_drain_hook)(pp->pr_drain_hook_arg, flags);
			simple_lock(&pp->pr_slock);
			pr_enter(pp, file, line);
			if (pp->pr_nout < pp->pr_hardlimit)
				goto startover;
		}

		if ((flags & PR_WAITOK) && !(flags & PR_LIMITFAIL)) {
			/*
			 * XXX: A warning isn't logged in this case.  Should
			 * it be?
			 */
			pp->pr_flags |= PR_WANTED;
			pr_leave(pp);
			ltsleep(pp, PSWP, pp->pr_wchan, 0, &pp->pr_slock);
			pr_enter(pp, file, line);
			goto startover;
		}

		/*
		 * Log a message that the hard limit has been hit.
		 */
		if (pp->pr_hardlimit_warning != NULL &&
		    ratecheck(&pp->pr_hardlimit_warning_last,
			      &pp->pr_hardlimit_ratecap))
			log(LOG_ERR, "%s\n", pp->pr_hardlimit_warning);

		pp->pr_nfail++;

		pr_leave(pp);
		simple_unlock(&pp->pr_slock);
		return (NULL);
	}

	/*
	 * The convention we use is that if `curpage' is not NULL, then
	 * it points at a non-empty bucket.  In particular, `curpage'
	 * never points at a page header which has PR_PHINPAGE set and
	 * has no items in its bucket.
	 */
	if ((ph = pp->pr_curpage) == NULL) {
#ifdef DIAGNOSTIC
		if (pp->pr_nitems != 0) {
			simple_unlock(&pp->pr_slock);
			printf("pool_get: %s: curpage NULL, nitems %u\n",
			    pp->pr_wchan, pp->pr_nitems);
			panic("pool_get: nitems inconsistent");
		}
#endif

		/*
		 * Call the back-end page allocator for more memory.
		 * Release the pool lock, as the back-end page allocator
		 * may block.
		 */
		pr_leave(pp);
		simple_unlock(&pp->pr_slock);
		v = pool_allocator_alloc(pp, flags);
		if (__predict_true(v != NULL))
			ph = pool_alloc_item_header(pp, v, flags);

		if (__predict_false(v == NULL || ph == NULL)) {
			if (v != NULL)
				pool_allocator_free(pp, v);

			simple_lock(&pp->pr_slock);
			pr_enter(pp, file, line);

			/*
			 * We were unable to allocate a page or item
			 * header, but we released the lock during
			 * allocation, so perhaps items were freed
			 * back to the pool.  Check for this case.
			 */
			if (pp->pr_curpage != NULL)
				goto startover;

			if ((flags & PR_WAITOK) == 0) {
				pp->pr_nfail++;
				pr_leave(pp);
				simple_unlock(&pp->pr_slock);
				return (NULL);
			}

			/*
			 * Wait for items to be returned to this pool.
			 *
			 * XXX: maybe we should wake up once a second and
			 * try again?
			 */
			pp->pr_flags |= PR_WANTED;
			/* PA_WANTED is already set on the allocator. */
			pr_leave(pp);
			ltsleep(pp, PSWP, pp->pr_wchan, 0, &pp->pr_slock);
			pr_enter(pp, file, line);
			goto startover;
		}

		/* We have more memory; add it to the pool */
		simple_lock(&pp->pr_slock);
		pr_enter(pp, file, line);
		pool_prime_page(pp, v, ph);
		pp->pr_npagealloc++;

		/* Start the allocation process over. */
		goto startover;
	}
	if (__predict_false((v = pi = TAILQ_FIRST(&ph->ph_itemlist)) == NULL)) {
		pr_leave(pp);
		simple_unlock(&pp->pr_slock);
		panic("pool_get: %s: page empty", pp->pr_wchan);
	}
#ifdef DIAGNOSTIC
	if (__predict_false(pp->pr_nitems == 0)) {
		pr_leave(pp);
		simple_unlock(&pp->pr_slock);
		printf("pool_get: %s: items on itemlist, nitems %u\n",
		    pp->pr_wchan, pp->pr_nitems);
		panic("pool_get: nitems inconsistent");
	}
#endif

#ifdef POOL_DIAGNOSTIC
	pr_log(pp, v, PRLOG_GET, file, line);
#endif

#ifdef DIAGNOSTIC
	if (__predict_false(pi->pi_magic != PI_MAGIC)) {
		pr_printlog(pp, pi, printf);
		panic("pool_get(%s): free list modified: magic=%x; page %p;"
		    " item addr %p\n",
		    pp->pr_wchan, pi->pi_magic, ph->ph_page, pi);
	}
#endif

	/*
	 * Remove from item list.
	 */
	TAILQ_REMOVE(&ph->ph_itemlist, pi, pi_list);
	pp->pr_nitems--;
	pp->pr_nout++;
	if (ph->ph_nmissing == 0) {
#ifdef DIAGNOSTIC
		if (__predict_false(pp->pr_nidle == 0))
			panic("pool_get: nidle inconsistent");
#endif
		pp->pr_nidle--;

		/*
		 * This page was previously empty.  Move it to the list of
		 * partially-full pages.  This page is already curpage.
		 */
		LIST_REMOVE(ph, ph_pagelist);
		LIST_INSERT_HEAD(&pp->pr_partpages, ph, ph_pagelist);
	}
	ph->ph_nmissing++;
	if (TAILQ_EMPTY(&ph->ph_itemlist)) {
#ifdef DIAGNOSTIC
		if (__predict_false(ph->ph_nmissing != pp->pr_itemsperpage)) {
			pr_leave(pp);
			simple_unlock(&pp->pr_slock);
			panic("pool_get: %s: nmissing inconsistent",
			    pp->pr_wchan);
		}
#endif
		/*
		 * This page is now full.  Move it to the full list
		 * and select a new current page.
		 */
		LIST_REMOVE(ph, ph_pagelist);
		LIST_INSERT_HEAD(&pp->pr_fullpages, ph, ph_pagelist);
		pool_update_curpage(pp);
	}

	pp->pr_nget++;

	/*
	 * If we have a low water mark and we are now below that low
	 * water mark, add more items to the pool.
	 */
	if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
		/*
		 * XXX: Should we log a warning?  Should we set up a timeout
		 * to try again in a second or so?  The latter could break
		 * a caller's assumptions about interrupt protection, etc.
		 */
	}

	pr_leave(pp);
	simple_unlock(&pp->pr_slock);
	return (v);
}

/*
 * Internal version of pool_put().  Pool is already locked/entered.
 */
static void
pool_do_put(struct pool *pp, void *v)
{
	struct pool_item *pi = v;
	struct pool_item_header *ph;
	caddr_t page;
	int s;

	LOCK_ASSERT(simple_lock_held(&pp->pr_slock));

	page = (caddr_t)((u_long)v & pp->pr_alloc->pa_pagemask);

#ifdef DIAGNOSTIC
	if (__predict_false(pp->pr_nout == 0)) {
		printf("pool %s: putting with none out\n",
		    pp->pr_wchan);
		panic("pool_put");
	}
#endif

	if (__predict_false((ph = pr_find_pagehead(pp, page)) == NULL)) {
		pr_printlog(pp, NULL, printf);
		panic("pool_put: %s: page header missing", pp->pr_wchan);
	}

#ifdef LOCKDEBUG
	/*
	 * Check if we're freeing a locked simple lock.
	 */
	simple_lock_freecheck((caddr_t)pi, ((caddr_t)pi) + pp->pr_size);
#endif

	/*
	 * Return to item list.
	 */
#ifdef DIAGNOSTIC
	pi->pi_magic = PI_MAGIC;
#endif
#ifdef DEBUG
	{
		int i, *ip = v;

		for (i = 0; i < pp->pr_size / sizeof(int); i++) {
			*ip++ = PI_MAGIC;
		}
	}
#endif

	TAILQ_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
	KDASSERT(ph->ph_nmissing != 0);
	ph->ph_nmissing--;
	pp->pr_nput++;
	pp->pr_nitems++;
	pp->pr_nout--;

	/* Cancel "pool empty" condition if it exists */
	if (pp->pr_curpage == NULL)
		pp->pr_curpage = ph;

	if (pp->pr_flags & PR_WANTED) {
		pp->pr_flags &= ~PR_WANTED;
		if (ph->ph_nmissing == 0)
			pp->pr_nidle++;
		wakeup((caddr_t)pp);
		return;
	}

	/*
	 * If this page is now empty, do one of two things:
	 *
	 *	(1) If we have more pages than the page high water mark,
	 *	    free the page back to the system.  ONLY CONSIDER
	 *	    FREEING BACK A PAGE IF WE HAVE MORE THAN OUR MINIMUM PAGE
	 *	    CLAIM.
	 *
	 *	(2) Otherwise, move the page to the empty page list.
	 *
	 * Either way, select a new current page (so we use a partially-full
	 * page if one is available).
	 */
	if (ph->ph_nmissing == 0) {
		pp->pr_nidle++;
		if (pp->pr_npages > pp->pr_minpages &&
		    (pp->pr_npages > pp->pr_maxpages ||
		     (pp->pr_alloc->pa_flags & PA_WANT) != 0)) {
			simple_unlock(&pp->pr_slock);
			pr_rmpage(pp, ph, NULL);
			simple_lock(&pp->pr_slock);
		} else {
			LIST_REMOVE(ph, ph_pagelist);
			LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist);

			/*
			 * Update the timestamp on the page.  A page must
			 * be idle for some period of time before it can
			 * be reclaimed by the pagedaemon.  This minimizes
			 * ping-pong'ing for memory.
			 */
			s = splclock();
			ph->ph_time = mono_time;
			splx(s);
		}
		pool_update_curpage(pp);
	}

	/*
	 * If the page was previously completely full, move it to the
	 * partially-full list and make it the current page.  The next
	 * allocation will get the item from this page, instead of
	 * further fragmenting the pool.
	 */
	else if (ph->ph_nmissing == (pp->pr_itemsperpage - 1)) {
		LIST_REMOVE(ph, ph_pagelist);
		LIST_INSERT_HEAD(&pp->pr_partpages, ph, ph_pagelist);
		pp->pr_curpage = ph;
	}
}

/*
 * Return resource to the pool; must be called at appropriate spl level
 */
#ifdef POOL_DIAGNOSTIC
void
_pool_put(struct pool *pp, void *v, const char *file, long line)
{

	simple_lock(&pp->pr_slock);
	pr_enter(pp, file, line);

	pr_log(pp, v, PRLOG_PUT, file, line);

	pool_do_put(pp, v);

	pr_leave(pp);
	simple_unlock(&pp->pr_slock);
}
#undef pool_put
#endif /* POOL_DIAGNOSTIC */

void
pool_put(struct pool *pp, void *v)
{

	simple_lock(&pp->pr_slock);

	pool_do_put(pp, v);

	simple_unlock(&pp->pr_slock);
}

#ifdef POOL_DIAGNOSTIC
#define	pool_put(h, v)	_pool_put((h), (v), __FILE__, __LINE__)
#endif

/*
 * Add N items to the pool.
 */
int
pool_prime(struct pool *pp, int n)
{
	struct pool_item_header *ph = NULL;
	caddr_t cp;
	int newpages;

	simple_lock(&pp->pr_slock);

	newpages = roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;

	while (newpages-- > 0) {
		simple_unlock(&pp->pr_slock);
		cp = pool_allocator_alloc(pp, PR_NOWAIT);
		if (__predict_true(cp != NULL))
			ph = pool_alloc_item_header(pp, cp, PR_NOWAIT);

		if (__predict_false(cp == NULL || ph == NULL)) {
			if (cp != NULL)
				pool_allocator_free(pp, cp);
			simple_lock(&pp->pr_slock);
			break;
		}

		simple_lock(&pp->pr_slock);
		pool_prime_page(pp, cp, ph);
		pp->pr_npagealloc++;
		pp->pr_minpages++;
	}

	if (pp->pr_minpages >= pp->pr_maxpages)
		pp->pr_maxpages = pp->pr_minpages + 1;	/* XXX */

	simple_unlock(&pp->pr_slock);
	return (0);
}
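/*
 * Illustrative use of the sizing knobs above and below (hypothetical
 * numbers): a driver that knows it will need at least 64 foo objects,
 * and never more than 256, might do
 *
 *	pool_prime(&foo_pool, 64);
 *	pool_setlowat(&foo_pool, 64);
 *	pool_sethiwat(&foo_pool, 256);
 *
 * pool_prime() allocates whole pages up front, pool_setlowat() makes
 * pool_get() replenish the pool when the item count drops below the
 * mark, and pool_sethiwat() bounds how many pages are kept before idle
 * pages become eligible to be freed back to the system.
 */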
/*
 * Add a page worth of items to the pool.
 *
 * Note, we must be called with the pool descriptor LOCKED.
 */
static void
pool_prime_page(struct pool *pp, caddr_t storage, struct pool_item_header *ph)
{
	struct pool_item *pi;
	caddr_t cp = storage;
	unsigned int align = pp->pr_align;
	unsigned int ioff = pp->pr_itemoffset;
	int n;
	int s;

	LOCK_ASSERT(simple_lock_held(&pp->pr_slock));

#ifdef DIAGNOSTIC
	if (((u_long)cp & (pp->pr_alloc->pa_pagesz - 1)) != 0)
		panic("pool_prime_page: %s: unaligned page", pp->pr_wchan);
#endif

	/*
	 * Insert page header.
	 */
	LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist);
	TAILQ_INIT(&ph->ph_itemlist);
	ph->ph_page = storage;
	ph->ph_nmissing = 0;
	s = splclock();
	ph->ph_time = mono_time;
	splx(s);
	if ((pp->pr_roflags & PR_PHINPAGE) == 0)
		SPLAY_INSERT(phtree, &pp->pr_phtree, ph);

	pp->pr_nidle++;

	/*
	 * Color this page.
	 */
	cp = (caddr_t)(cp + pp->pr_curcolor);
	if ((pp->pr_curcolor += align) > pp->pr_maxcolor)
		pp->pr_curcolor = 0;

	/*
	 * Adjust storage to apply alignment to `pr_itemoffset' in each item.
	 */
	if (ioff != 0)
		cp = (caddr_t)(cp + (align - ioff));

	/*
	 * Insert remaining chunks on the bucket list.
	 */
	n = pp->pr_itemsperpage;
	pp->pr_nitems += n;

	while (n--) {
		pi = (struct pool_item *)cp;

		KASSERT(((((vaddr_t)pi) + ioff) & (align - 1)) == 0);

		/* Insert on page list */
		TAILQ_INSERT_TAIL(&ph->ph_itemlist, pi, pi_list);
#ifdef DIAGNOSTIC
		pi->pi_magic = PI_MAGIC;
#endif
		cp = (caddr_t)(cp + pp->pr_size);
	}

	/*
	 * If the pool was depleted, point at the new page.
	 */
	if (pp->pr_curpage == NULL)
		pp->pr_curpage = ph;

	if (++pp->pr_npages > pp->pr_hiwat)
		pp->pr_hiwat = pp->pr_npages;
}

/*
 * Used by pool_get() when nitems drops below the low water mark.  This
 * is used to catch up pr_nitems with the low water mark.
 *
 * Note 1, we never wait for memory here, we let the caller decide what to do.
 *
 * Note 2, we must be called with the pool already locked, and we return
 * with it locked.
 */
static int
pool_catchup(struct pool *pp)
{
	struct pool_item_header *ph = NULL;
	caddr_t cp;
	int error = 0;

	while (POOL_NEEDS_CATCHUP(pp)) {
		/*
		 * Call the page back-end allocator for more memory.
		 *
		 * XXX: We never wait, so should we bother unlocking
		 * the pool descriptor?
		 */
		simple_unlock(&pp->pr_slock);
		cp = pool_allocator_alloc(pp, PR_NOWAIT);
		if (__predict_true(cp != NULL))
			ph = pool_alloc_item_header(pp, cp, PR_NOWAIT);
		if (__predict_false(cp == NULL || ph == NULL)) {
			if (cp != NULL)
				pool_allocator_free(pp, cp);
			error = ENOMEM;
			simple_lock(&pp->pr_slock);
			break;
		}
		simple_lock(&pp->pr_slock);
		pool_prime_page(pp, cp, ph);
		pp->pr_npagealloc++;
	}

	return (error);
}

static void
pool_update_curpage(struct pool *pp)
{

	pp->pr_curpage = LIST_FIRST(&pp->pr_partpages);
	if (pp->pr_curpage == NULL) {
		pp->pr_curpage = LIST_FIRST(&pp->pr_emptypages);
	}
}
void
pool_setlowat(struct pool *pp, int n)
{

	simple_lock(&pp->pr_slock);

	pp->pr_minitems = n;
	pp->pr_minpages = (n == 0)
	    ? 0
	    : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;

	/* Make sure we're caught up with the newly-set low water mark. */
	if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
		/*
		 * XXX: Should we log a warning?  Should we set up a timeout
		 * to try again in a second or so?  The latter could break
		 * a caller's assumptions about interrupt protection, etc.
		 */
	}

	simple_unlock(&pp->pr_slock);
}

void
pool_sethiwat(struct pool *pp, int n)
{

	simple_lock(&pp->pr_slock);

	pp->pr_maxpages = (n == 0)
	    ? 0
	    : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;

	simple_unlock(&pp->pr_slock);
}

void
pool_sethardlimit(struct pool *pp, int n, const char *warnmess, int ratecap)
{

	simple_lock(&pp->pr_slock);

	pp->pr_hardlimit = n;
	pp->pr_hardlimit_warning = warnmess;
	pp->pr_hardlimit_ratecap.tv_sec = ratecap;
	pp->pr_hardlimit_warning_last.tv_sec = 0;
	pp->pr_hardlimit_warning_last.tv_usec = 0;

	/*
	 * In-line version of pool_sethiwat(), because we don't want to
	 * release the lock.
	 */
	pp->pr_maxpages = (n == 0)
	    ? 0
	    : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;

	simple_unlock(&pp->pr_slock);
}

/*
 * Release all complete pages that have not been used recently.
 */
int
#ifdef POOL_DIAGNOSTIC
_pool_reclaim(struct pool *pp, const char *file, long line)
#else
pool_reclaim(struct pool *pp)
#endif
{
	struct pool_item_header *ph, *phnext;
	struct pool_cache *pc;
	struct timeval curtime;
	struct pool_pagelist pq;
	struct timeval diff;
	int s;

	if (pp->pr_drain_hook != NULL) {
		/*
		 * The drain hook must be called with the pool unlocked.
		 */
		(*pp->pr_drain_hook)(pp->pr_drain_hook_arg, PR_NOWAIT);
	}

	if (simple_lock_try(&pp->pr_slock) == 0)
		return (0);
	pr_enter(pp, file, line);

	LIST_INIT(&pq);

	/*
	 * Reclaim items from the pool's caches.
	 */
	TAILQ_FOREACH(pc, &pp->pr_cachelist, pc_poollist)
		pool_cache_reclaim(pc);

	s = splclock();
	curtime = mono_time;
	splx(s);

	for (ph = LIST_FIRST(&pp->pr_emptypages); ph != NULL; ph = phnext) {
		phnext = LIST_NEXT(ph, ph_pagelist);

		/* Check our minimum page claim */
		if (pp->pr_npages <= pp->pr_minpages)
			break;

		KASSERT(ph->ph_nmissing == 0);
		timersub(&curtime, &ph->ph_time, &diff);
		if (diff.tv_sec < pool_inactive_time)
			continue;

		/*
		 * If freeing this page would put us below
		 * the low water mark, stop now.
		 */
		if ((pp->pr_nitems - pp->pr_itemsperpage) <
		    pp->pr_minitems)
			break;

		pr_rmpage(pp, ph, &pq);
	}

	pr_leave(pp);
	simple_unlock(&pp->pr_slock);
	if (LIST_EMPTY(&pq))
		return (0);

	while ((ph = LIST_FIRST(&pq)) != NULL) {
		LIST_REMOVE(ph, ph_pagelist);
		pool_allocator_free(pp, ph->ph_page);
		if (pp->pr_roflags & PR_PHINPAGE) {
			continue;
		}
		s = splvm();
		pool_put(&phpool, ph);
		splx(s);
	}

	return (1);
}

/*
 * Drain pools, one at a time.
 *
 * Note, we must never be called from an interrupt context.
 */
void
pool_drain(void *arg)
{
	struct pool *pp;
	int s;

	pp = NULL;
	s = splvm();
	simple_lock(&pool_head_slock);
	if (drainpp == NULL) {
		drainpp = TAILQ_FIRST(&pool_head);
	}
	if (drainpp) {
		pp = drainpp;
		drainpp = TAILQ_NEXT(pp, pr_poollist);
	}
	simple_unlock(&pool_head_slock);
	pool_reclaim(pp);
	splx(s);
}

/*
 * Diagnostic helpers.
 */
void
pool_print(struct pool *pp, const char *modif)
{
	int s;

	s = splvm();
	if (simple_lock_try(&pp->pr_slock) == 0) {
		printf("pool %s is locked; try again later\n",
		    pp->pr_wchan);
		splx(s);
		return;
	}
	pool_print1(pp, modif, printf);
	simple_unlock(&pp->pr_slock);
	splx(s);
}

void
pool_printit(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
{
	int didlock = 0;

	if (pp == NULL) {
		(*pr)("Must specify a pool to print.\n");
		return;
	}

	/*
	 * Called from DDB; interrupts should be blocked, and all
	 * other processors should be paused.  We can skip locking
	 * the pool in this case.
	 *
	 * We do a simple_lock_try() just to print the lock
	 * status, however.
	 */

	if (simple_lock_try(&pp->pr_slock) == 0)
		(*pr)("WARNING: pool %s is locked\n", pp->pr_wchan);
	else
		didlock = 1;

	pool_print1(pp, modif, pr);

	if (didlock)
		simple_unlock(&pp->pr_slock);
}

static void
pool_print_pagelist(struct pool_pagelist *pl, void (*pr)(const char *, ...))
{
	struct pool_item_header *ph;
#ifdef DIAGNOSTIC
	struct pool_item *pi;
#endif

	LIST_FOREACH(ph, pl, ph_pagelist) {
		(*pr)("\t\tpage %p, nmissing %d, time %lu,%lu\n",
		    ph->ph_page, ph->ph_nmissing,
		    (u_long)ph->ph_time.tv_sec,
		    (u_long)ph->ph_time.tv_usec);
#ifdef DIAGNOSTIC
		TAILQ_FOREACH(pi, &ph->ph_itemlist, pi_list) {
			if (pi->pi_magic != PI_MAGIC) {
				(*pr)("\t\t\titem %p, magic 0x%x\n",
				    pi, pi->pi_magic);
			}
		}
#endif
	}
}

static void
pool_print1(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
{
	struct pool_item_header *ph;
	struct pool_cache *pc;
	struct pool_cache_group *pcg;
	int i, print_log = 0, print_pagelist = 0, print_cache = 0;
	char c;

	while ((c = *modif++) != '\0') {
		if (c == 'l')
			print_log = 1;
		if (c == 'p')
			print_pagelist = 1;
		if (c == 'c')
			print_cache = 1;
	}

	(*pr)("POOL %s: size %u, align %u, ioff %u, roflags 0x%08x\n",
	    pp->pr_wchan, pp->pr_size, pp->pr_align, pp->pr_itemoffset,
	    pp->pr_roflags);
	(*pr)("\talloc %p\n", pp->pr_alloc);
	(*pr)("\tminitems %u, minpages %u, maxpages %u, npages %u\n",
	    pp->pr_minitems, pp->pr_minpages, pp->pr_maxpages, pp->pr_npages);
	(*pr)("\titemsperpage %u, nitems %u, nout %u, hardlimit %u\n",
	    pp->pr_itemsperpage, pp->pr_nitems, pp->pr_nout, pp->pr_hardlimit);

	(*pr)("\n\tnget %lu, nfail %lu, nput %lu\n",
	    pp->pr_nget, pp->pr_nfail, pp->pr_nput);
	(*pr)("\tnpagealloc %lu, npagefree %lu, hiwat %u, nidle %lu\n",
	    pp->pr_npagealloc, pp->pr_npagefree, pp->pr_hiwat, pp->pr_nidle);

	if (print_pagelist == 0)
		goto skip_pagelist;

	if ((ph = LIST_FIRST(&pp->pr_emptypages)) != NULL)
		(*pr)("\n\tempty page list:\n");
	pool_print_pagelist(&pp->pr_emptypages, pr);
	if ((ph = LIST_FIRST(&pp->pr_fullpages)) != NULL)
		(*pr)("\n\tfull page list:\n");
	pool_print_pagelist(&pp->pr_fullpages, pr);
	if ((ph = LIST_FIRST(&pp->pr_partpages)) != NULL)
		(*pr)("\n\tpartial-page list:\n");
	pool_print_pagelist(&pp->pr_partpages, pr);

	if (pp->pr_curpage == NULL)
		(*pr)("\tno current page\n");
	else
		(*pr)("\tcurpage %p\n", pp->pr_curpage->ph_page);

 skip_pagelist:
	if (print_log == 0)
		goto skip_log;

	(*pr)("\n");
	if ((pp->pr_roflags & PR_LOGGING) == 0)
		(*pr)("\tno log\n");
	else
		pr_printlog(pp, NULL, pr);

 skip_log:
	if (print_cache == 0)
		goto skip_cache;

	TAILQ_FOREACH(pc, &pp->pr_cachelist, pc_poollist) {
		(*pr)("\tcache %p: allocfrom %p freeto %p\n", pc,
		    pc->pc_allocfrom, pc->pc_freeto);
		(*pr)("\t    hits %lu misses %lu ngroups %lu nitems %lu\n",
		    pc->pc_hits, pc->pc_misses, pc->pc_ngroups, pc->pc_nitems);
		TAILQ_FOREACH(pcg, &pc->pc_grouplist, pcg_list) {
			(*pr)("\t\tgroup %p: avail %d\n", pcg, pcg->pcg_avail);
			for (i = 0; i < PCG_NOBJECTS; i++) {
				if (pcg->pcg_objects[i].pcgo_pa !=
				    POOL_PADDR_INVALID) {
					(*pr)("\t\t\t%p, 0x%llx\n",
					    pcg->pcg_objects[i].pcgo_va,
					    (unsigned long long)
					    pcg->pcg_objects[i].pcgo_pa);
				} else {
					(*pr)("\t\t\t%p\n",
					    pcg->pcg_objects[i].pcgo_va);
				}
			}
		}
	}

 skip_cache:
	pr_enter_check(pp, pr);
}

static int
pool_chk_page(struct pool *pp, const char *label, struct pool_item_header *ph)
{
	struct pool_item *pi;
	caddr_t page;
	int n;

	page = (caddr_t)((u_long)ph & pp->pr_alloc->pa_pagemask);
	if (page != ph->ph_page &&
	    (pp->pr_roflags & PR_PHINPAGE) != 0) {
		if (label != NULL)
			printf("%s: ", label);
		printf("pool(%p:%s): page inconsistency: page %p;"
		    " at page head addr %p (p %p)\n", pp,
		    pp->pr_wchan, ph->ph_page,
		    ph, page);
		return 1;
	}

	for (pi = TAILQ_FIRST(&ph->ph_itemlist), n = 0;
	     pi != NULL;
	     pi = TAILQ_NEXT(pi, pi_list), n++) {

#ifdef DIAGNOSTIC
		if (pi->pi_magic != PI_MAGIC) {
			if (label != NULL)
				printf("%s: ", label);
			printf("pool(%s): free list modified: magic=%x;"
			    " page %p; item ordinal %d;"
			    " addr %p (p %p)\n",
			    pp->pr_wchan, pi->pi_magic, ph->ph_page,
			    n, pi, page);
			panic("pool");
		}
#endif
		page =
		    (caddr_t)((u_long)pi & pp->pr_alloc->pa_pagemask);
		if (page == ph->ph_page)
			continue;

		if (label != NULL)
			printf("%s: ", label);
		printf("pool(%p:%s): page inconsistency: page %p;"
		    " item ordinal %d; addr %p (p %p)\n", pp,
		    pp->pr_wchan, ph->ph_page,
		    n, pi, page);
		return 1;
	}
	return 0;
}

int
pool_chk(struct pool *pp, const char *label)
{
	struct pool_item_header *ph;
	int r = 0;

	simple_lock(&pp->pr_slock);
	LIST_FOREACH(ph, &pp->pr_emptypages, ph_pagelist) {
		r = pool_chk_page(pp, label, ph);
		if (r) {
			goto out;
		}
	}
	LIST_FOREACH(ph, &pp->pr_fullpages, ph_pagelist) {
		r = pool_chk_page(pp, label, ph);
		if (r) {
			goto out;
		}
	}
	LIST_FOREACH(ph, &pp->pr_partpages, ph_pagelist) {
		r = pool_chk_page(pp, label, ph);
		if (r) {
			goto out;
		}
	}

 out:
	simple_unlock(&pp->pr_slock);
	return (r);
}

/*
 * pool_cache_init:
 *
 *	Initialize a pool cache.
 *
 *	NOTE: If the pool must be protected from interrupts, we expect
 *	to be called at the appropriate interrupt priority level.
 */
void
pool_cache_init(struct pool_cache *pc, struct pool *pp,
    int (*ctor)(void *, void *, int),
    void (*dtor)(void *, void *),
    void *arg)
{

	TAILQ_INIT(&pc->pc_grouplist);
	simple_lock_init(&pc->pc_slock);

	pc->pc_allocfrom = NULL;
	pc->pc_freeto = NULL;
	pc->pc_pool = pp;

	pc->pc_ctor = ctor;
	pc->pc_dtor = dtor;
	pc->pc_arg = arg;

	pc->pc_hits = 0;
	pc->pc_misses = 0;

	pc->pc_ngroups = 0;

	pc->pc_nitems = 0;

	simple_lock(&pp->pr_slock);
	TAILQ_INSERT_TAIL(&pp->pr_cachelist, pc, pc_poollist);
	simple_unlock(&pp->pr_slock);
}

/*
 * pool_cache_destroy:
 *
 *	Destroy a pool cache.
 */
void
pool_cache_destroy(struct pool_cache *pc)
{
	struct pool *pp = pc->pc_pool;

	/* First, invalidate the entire cache. */
	pool_cache_invalidate(pc);

	/* ...and remove it from the pool's cache list. */
	simple_lock(&pp->pr_slock);
	TAILQ_REMOVE(&pp->pr_cachelist, pc, pc_poollist);
	simple_unlock(&pp->pr_slock);
}

static __inline void *
pcg_get(struct pool_cache_group *pcg, paddr_t *pap)
{
	void *object;
	u_int idx;

	KASSERT(pcg->pcg_avail <= PCG_NOBJECTS);
	KASSERT(pcg->pcg_avail != 0);
	idx = --pcg->pcg_avail;

	KASSERT(pcg->pcg_objects[idx].pcgo_va != NULL);
	object = pcg->pcg_objects[idx].pcgo_va;
	if (pap != NULL)
		*pap = pcg->pcg_objects[idx].pcgo_pa;
	pcg->pcg_objects[idx].pcgo_va = NULL;

	return (object);
}

static __inline void
pcg_put(struct pool_cache_group *pcg, void *object, paddr_t pa)
{
	u_int idx;

	KASSERT(pcg->pcg_avail < PCG_NOBJECTS);
	idx = pcg->pcg_avail++;

	KASSERT(pcg->pcg_objects[idx].pcgo_va == NULL);
	pcg->pcg_objects[idx].pcgo_va = object;
	pcg->pcg_objects[idx].pcgo_pa = pa;
}
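/*
 * The _paddr variants below let a caller also retrieve the physical
 * address cached alongside each object, which can save a VA-to-PA
 * translation for DMA users.  A hedged sketch with the hypothetical
 * `foo_cache' from earlier:
 *
 *	paddr_t pa;
 *	void *va = pool_cache_get_paddr(&foo_cache, PR_NOWAIT, &pa);
 *	...
 *	pool_cache_put_paddr(&foo_cache, va, pa);
 *
 * On a cache miss (and without POOL_VTOPHYS) pa is set to
 * POOL_PADDR_INVALID, in which case the caller must translate the
 * virtual address itself before handing it to hardware.
 */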
/*
 * pool_cache_get{,_paddr}:
 *
 *	Get an object from a pool cache (optionally returning
 *	the physical address of the object).
 */
void *
pool_cache_get_paddr(struct pool_cache *pc, int flags, paddr_t *pap)
{
	struct pool_cache_group *pcg;
	void *object;

#ifdef LOCKDEBUG
	if (flags & PR_WAITOK)
		simple_lock_only_held(NULL, "pool_cache_get(PR_WAITOK)");
#endif

	simple_lock(&pc->pc_slock);

	if ((pcg = pc->pc_allocfrom) == NULL) {
		TAILQ_FOREACH(pcg, &pc->pc_grouplist, pcg_list) {
			if (pcg->pcg_avail != 0) {
				pc->pc_allocfrom = pcg;
				goto have_group;
			}
		}

		/*
		 * No groups with any available objects.  Allocate
		 * a new object, construct it, and return it to
		 * the caller.  We will allocate a group, if necessary,
		 * when the object is freed back to the cache.
		 */
		pc->pc_misses++;
		simple_unlock(&pc->pc_slock);
		object = pool_get(pc->pc_pool, flags);
		if (object != NULL && pc->pc_ctor != NULL) {
			if ((*pc->pc_ctor)(pc->pc_arg, object, flags) != 0) {
				pool_put(pc->pc_pool, object);
				return (NULL);
			}
		}
		if (object != NULL && pap != NULL) {
#ifdef POOL_VTOPHYS
			*pap = POOL_VTOPHYS(object);
#else
			*pap = POOL_PADDR_INVALID;
#endif
		}
		return (object);
	}

 have_group:
	pc->pc_hits++;
	pc->pc_nitems--;
	object = pcg_get(pcg, pap);

	if (pcg->pcg_avail == 0)
		pc->pc_allocfrom = NULL;

	simple_unlock(&pc->pc_slock);

	return (object);
}

/*
 * pool_cache_put{,_paddr}:
 *
 *	Put an object back to the pool cache (optionally caching the
 *	physical address of the object).
 */
void
pool_cache_put_paddr(struct pool_cache *pc, void *object, paddr_t pa)
{
	struct pool_cache_group *pcg;
	int s;

	simple_lock(&pc->pc_slock);

	if ((pcg = pc->pc_freeto) == NULL) {
		TAILQ_FOREACH(pcg, &pc->pc_grouplist, pcg_list) {
			if (pcg->pcg_avail != PCG_NOBJECTS) {
				pc->pc_freeto = pcg;
				goto have_group;
			}
		}

		/*
		 * No empty groups to free the object to.  Attempt to
		 * allocate one.
		 */
		simple_unlock(&pc->pc_slock);
		s = splvm();
		pcg = pool_get(&pcgpool, PR_NOWAIT);
		splx(s);
		if (pcg != NULL) {
			memset(pcg, 0, sizeof(*pcg));
			simple_lock(&pc->pc_slock);
			pc->pc_ngroups++;
			TAILQ_INSERT_TAIL(&pc->pc_grouplist, pcg, pcg_list);
			if (pc->pc_freeto == NULL)
				pc->pc_freeto = pcg;
			goto have_group;
		}

		/*
		 * Unable to allocate a cache group; destruct the object
		 * and free it back to the pool.
		 */
		pool_cache_destruct_object(pc, object);
		return;
	}

 have_group:
	pc->pc_nitems++;
	pcg_put(pcg, object, pa);

	if (pcg->pcg_avail == PCG_NOBJECTS)
		pc->pc_freeto = NULL;

	simple_unlock(&pc->pc_slock);
}

/*
 * pool_cache_destruct_object:
 *
 *	Force destruction of an object and its release back into
 *	the pool.
 */
void
pool_cache_destruct_object(struct pool_cache *pc, void *object)
{

	if (pc->pc_dtor != NULL)
		(*pc->pc_dtor)(pc->pc_arg, object);
	pool_put(pc->pc_pool, object);
}

/*
 * pool_cache_do_invalidate:
 *
 *	This internal function implements pool_cache_invalidate() and
 *	pool_cache_reclaim().
 */
static void
pool_cache_do_invalidate(struct pool_cache *pc, int free_groups,
    void (*putit)(struct pool *, void *))
{
	struct pool_cache_group *pcg, *npcg;
	void *object;
	int s;

	for (pcg = TAILQ_FIRST(&pc->pc_grouplist); pcg != NULL;
	     pcg = npcg) {
		npcg = TAILQ_NEXT(pcg, pcg_list);
		while (pcg->pcg_avail != 0) {
			pc->pc_nitems--;
			object = pcg_get(pcg, NULL);
			if (pcg->pcg_avail == 0 && pc->pc_allocfrom == pcg)
				pc->pc_allocfrom = NULL;
			if (pc->pc_dtor != NULL)
				(*pc->pc_dtor)(pc->pc_arg, object);
			(*putit)(pc->pc_pool, object);
		}
		if (free_groups) {
			pc->pc_ngroups--;
			TAILQ_REMOVE(&pc->pc_grouplist, pcg, pcg_list);
			if (pc->pc_freeto == pcg)
				pc->pc_freeto = NULL;
			s = splvm();
			pool_put(&pcgpool, pcg);
			splx(s);
		}
	}
}

/*
 * pool_cache_invalidate:
 *
 *	Invalidate a pool cache (destruct and release all of the
 *	cached objects).
 */
void
pool_cache_invalidate(struct pool_cache *pc)
{

	simple_lock(&pc->pc_slock);
	pool_cache_do_invalidate(pc, 0, pool_put);
	simple_unlock(&pc->pc_slock);
}

/*
 * pool_cache_reclaim:
 *
 *	Reclaim a pool cache for pool_reclaim().
 */
static void
pool_cache_reclaim(struct pool_cache *pc)
{

	simple_lock(&pc->pc_slock);
	pool_cache_do_invalidate(pc, 1, pool_do_put);
	simple_unlock(&pc->pc_slock);
}

/*
 * Pool backend allocators.
 *
 * Each pool has a backend allocator that handles allocation, deallocation,
 * and any additional draining that might be needed.
 *
 * We provide two standard allocators:
 *
 *	pool_allocator_kmem - the default when no allocator is specified
 *
 *	pool_allocator_nointr - used for pools that will not be accessed
 *	in interrupt context.
 */
void	*pool_page_alloc(struct pool *, int);
void	pool_page_free(struct pool *, void *);

struct pool_allocator pool_allocator_kmem = {
	pool_page_alloc, pool_page_free, 0,
};

void	*pool_page_alloc_nointr(struct pool *, int);
void	pool_page_free_nointr(struct pool *, void *);

struct pool_allocator pool_allocator_nointr = {
	pool_page_alloc_nointr, pool_page_free_nointr, 0,
};

#ifdef POOL_SUBPAGE
void	*pool_subpage_alloc(struct pool *, int);
void	pool_subpage_free(struct pool *, void *);

struct pool_allocator pool_allocator_kmem_subpage = {
	pool_subpage_alloc, pool_subpage_free, 0,
};
#endif /* POOL_SUBPAGE */
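/*
 * A custom backend allocator only needs to supply the alloc and free
 * functions; the third initializer (the page size, as in the standard
 * allocators above) may be left 0 to get the default.  A hedged sketch
 * with hypothetical names:
 *
 *	void *foo_page_alloc(struct pool *, int);
 *	void foo_page_free(struct pool *, void *);
 *
 *	struct pool_allocator foo_allocator = {
 *		foo_page_alloc, foo_page_free, 0,
 *	};
 *
 *	pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0, "foopl",
 *	    &foo_allocator);
 *
 * pool_init() fills in the remaining pool_allocator fields (pa_list,
 * pa_slock, pa_pagemask, pa_pageshift) the first time the allocator
 * is used.
 */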
/*
 * We have at least three different resources for the same allocation and
 * each resource can be depleted.  First, we have the ready elements in the
 * pool.  Then we have the resource (typically a vm_map) for this allocator.
 * Finally, we have physical memory.  Waiting for any of these can be
 * unnecessary when any other is freed, but the kernel doesn't support
 * sleeping on multiple wait channels, so we have to employ another strategy.
 *
 * The caller sleeps on the pool (so that it can be awakened when an item
 * is returned to the pool), but we set PA_WANT on the allocator.  When a
 * page is returned to the allocator and PA_WANT is set, pool_allocator_free
 * will wake up all sleeping pools belonging to this allocator.
 *
 * XXX Thundering herd.
 */
void *
pool_allocator_alloc(struct pool *org, int flags)
{
	struct pool_allocator *pa = org->pr_alloc;
	struct pool *pp, *start;
	int s, freed;
	void *res;

	LOCK_ASSERT(!simple_lock_held(&org->pr_slock));

	do {
		if ((res = (*pa->pa_alloc)(org, flags)) != NULL)
			return (res);
		if ((flags & PR_WAITOK) == 0) {
			/*
			 * We only run the drain hook here if PR_NOWAIT.
			 * In other cases, the hook will be run in
			 * pool_reclaim().
			 */
			if (org->pr_drain_hook != NULL) {
				(*org->pr_drain_hook)(org->pr_drain_hook_arg,
				    flags);
				if ((res = (*pa->pa_alloc)(org, flags)) != NULL)
					return (res);
			}
			break;
		}

		/*
		 * Drain all pools, except "org", that use this
		 * allocator.  We do this to reclaim VA space.
		 * pa_alloc is responsible for waiting for
		 * physical memory.
		 *
		 * XXX We risk looping forever if someone calls
		 * pool_destroy on "start".  But there is no other
		 * way to have potentially sleeping pool_reclaim,
		 * non-sleeping locks on pool_allocator, and some
		 * stirring of drained pools in the allocator.
		 *
		 * XXX Maybe we should use pool_head_slock for locking
		 * the allocators?
		 */
		freed = 0;

		s = splvm();
		simple_lock(&pa->pa_slock);
		pp = start = TAILQ_FIRST(&pa->pa_list);
		do {
			TAILQ_REMOVE(&pa->pa_list, pp, pr_alloc_list);
			TAILQ_INSERT_TAIL(&pa->pa_list, pp, pr_alloc_list);
			if (pp == org)
				continue;
			simple_unlock(&pa->pa_slock);
			freed = pool_reclaim(pp);
			simple_lock(&pa->pa_slock);
		} while ((pp = TAILQ_FIRST(&pa->pa_list)) != start &&
			 freed == 0);

		if (freed == 0) {
			/*
			 * We set PA_WANT here, the caller will most likely
			 * sleep waiting for pages (if not, this won't hurt
			 * that much), and there is no way to set this in
			 * the caller without violating locking order.
			 */
			pa->pa_flags |= PA_WANT;
		}
		simple_unlock(&pa->pa_slock);
		splx(s);
	} while (freed);
	return (NULL);
}

void
pool_allocator_free(struct pool *pp, void *v)
{
	struct pool_allocator *pa = pp->pr_alloc;
	int s;

	LOCK_ASSERT(!simple_lock_held(&pp->pr_slock));

	(*pa->pa_free)(pp, v);

	s = splvm();
	simple_lock(&pa->pa_slock);
	if ((pa->pa_flags & PA_WANT) == 0) {
		simple_unlock(&pa->pa_slock);
		splx(s);
		return;
	}

	TAILQ_FOREACH(pp, &pa->pa_list, pr_alloc_list) {
		simple_lock(&pp->pr_slock);
		if ((pp->pr_flags & PR_WANTED) != 0) {
			pp->pr_flags &= ~PR_WANTED;
			wakeup(pp);
		}
		simple_unlock(&pp->pr_slock);
	}
	pa->pa_flags &= ~PA_WANT;
	simple_unlock(&pa->pa_slock);
	splx(s);
}

void *
pool_page_alloc(struct pool *pp, int flags)
{
	boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;

	return ((void *) uvm_km_alloc_poolpage(waitok));
}

void
pool_page_free(struct pool *pp, void *v)
{

	uvm_km_free_poolpage((vaddr_t) v);
}
#ifdef POOL_SUBPAGE
/* Sub-page allocator, for machines with large hardware pages. */
void *
pool_subpage_alloc(struct pool *pp, int flags)
{
	void *v;
	int s;
	s = splvm();
	v = pool_get(&psppool, flags);
	splx(s);
	return v;
}

void
pool_subpage_free(struct pool *pp, void *v)
{
	int s;
	s = splvm();
	pool_put(&psppool, v);
	splx(s);
}

/* We don't provide a real nointr allocator.  Maybe later. */
void *
pool_page_alloc_nointr(struct pool *pp, int flags)
{

	return (pool_subpage_alloc(pp, flags));
}

void
pool_page_free_nointr(struct pool *pp, void *v)
{

	pool_subpage_free(pp, v);
}
#else
void *
pool_page_alloc_nointr(struct pool *pp, int flags)
{
	boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;

	return ((void *) uvm_km_alloc_poolpage1(kernel_map,
	    uvm.kernel_object, waitok));
}

void
pool_page_free_nointr(struct pool *pp, void *v)
{

	uvm_km_free_poolpage1(kernel_map, (vaddr_t) v);
}
#endif /* POOL_SUBPAGE */