/*	$OpenBSD: subr_pool.c,v 1.39 2003/11/18 06:08:18 tedu Exp $	*/
/*	$NetBSD: subr_pool.c,v 1.61 2001/09/26 07:14:56 chs Exp $	*/

/*-
 * Copyright (c) 1997, 1999, 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Paul Kranenburg; by Jason R. Thorpe of the Numerical Aerospace
 * Simulation Facility, NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/pool.h>
#include <sys/syslog.h>
#include <sys/sysctl.h>

#include <uvm/uvm.h>

/*
 * XXX - for now.
 */
#define SIMPLELOCK_INITIALIZER	{ SLOCK_UNLOCKED }
#ifdef LOCKDEBUG
#define simple_lock_freecheck(a, s)	do { /* nothing */ } while (0)
#define simple_lock_only_held(lkp, str)	do { /* nothing */ } while (0)
#endif

/*
 * Pool resource management utility.
 *
 * Memory is allocated in pages which are split into pieces according to
 * the pool item size.  Each page is kept on one of three lists in the
 * pool structure: `pr_emptypages', `pr_fullpages' and `pr_partpages',
 * for empty, full and partially-full pages respectively.  The individual
 * pool items are on a linked list headed by `ph_itemlist' in each page
 * header.  The memory for building the page list is either taken from
 * the allocated pages themselves (for small pool items) or taken from
 * an internal pool of page headers (`phpool').
 */
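/*
 * Example usage (illustrative sketch only): a hypothetical subsystem with a
 * `struct foo' might create and use a pool roughly as follows.  The names
 * `struct foo', `foo_pool' and "foopl" are placeholders; the interfaces are
 * pool_init(), pool_get() and pool_put() defined below.
 *
 *	struct pool foo_pool;
 *	struct foo *f;
 *
 *	pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0, "foopl", NULL);
 *
 *	f = pool_get(&foo_pool, PR_WAITOK);	(may sleep for memory)
 *	...
 *	pool_put(&foo_pool, f);
 *
 * Callers that cannot sleep pass PR_NOWAIT instead and must be prepared for
 * a NULL return.  Pools used from interrupt context must be accessed at the
 * appropriate spl level throughout.
 */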
/* List of all pools */
TAILQ_HEAD(,pool) pool_head = TAILQ_HEAD_INITIALIZER(pool_head);

/* Private pool for page header structures */
static struct pool phpool;

/* # of seconds to retain page after last use */
int pool_inactive_time = 10;

/* Next candidate for drainage (see pool_drain()) */
static struct pool	*drainpp;

/* This spin lock protects both pool_head and drainpp. */
struct simplelock pool_head_slock = SIMPLELOCK_INITIALIZER;

struct pool_item_header {
	/* Page headers */
	LIST_ENTRY(pool_item_header)
				ph_pagelist;	/* pool page list */
	TAILQ_HEAD(,pool_item)	ph_itemlist;	/* chunk list for this page */
	SPLAY_ENTRY(pool_item_header)
				ph_node;	/* Off-page page headers */
	int			ph_nmissing;	/* # of chunks in use */
	caddr_t			ph_page;	/* this page's address */
	struct timeval		ph_time;	/* last referenced */
};

struct pool_item {
#ifdef DIAGNOSTIC
	int pi_magic;
#endif
#define	PI_MAGIC 0xdeadbeef
	/* Other entries use only this list entry */
	TAILQ_ENTRY(pool_item)	pi_list;
};

#define	POOL_NEEDS_CATCHUP(pp)						\
	((pp)->pr_nitems < (pp)->pr_minitems)

/*
 * Every pool gets a unique serial number assigned to it. If this counter
 * wraps, we're screwed, but we shouldn't create so many pools anyway.
 */
unsigned int pool_serial;

/*
 * Pool cache management.
 *
 * Pool caches provide a way for constructed objects to be cached by the
 * pool subsystem.  This can lead to performance improvements by avoiding
 * needless object construction/destruction; it is deferred until absolutely
 * necessary.
 *
 * Caches are grouped into cache groups.  Each cache group references
 * up to 16 constructed objects.  When a cache allocates an object
 * from the pool, it calls the object's constructor and places it into
 * a cache group.  When a cache group frees an object back to the pool,
 * it first calls the object's destructor.  This allows the object to
 * persist in constructed form while freed to the cache.
 *
 * Multiple caches may exist for each pool.  This allows a single
 * object type to have multiple constructed forms.  The pool references
 * each cache, so that when a pool is drained by the pagedaemon, it can
 * drain each individual cache as well.  Each time a cache is drained,
 * the most idle cache group is freed to the pool in its entirety.
 *
 * Pool caches are layered on top of pools.  By layering them, we can avoid
 * the complexity of cache management for pools which would not benefit
 * from it.
 */

/* The cache group pool. */
static struct pool pcgpool;
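/*
 * Example usage of a pool cache (illustrative sketch only): a hypothetical
 * subsystem with an expensive-to-construct `struct foo' might layer a cache
 * over its pool.  `foo_pool', `foo_cache', `foo_ctor' and `foo_dtor' are
 * placeholder names; the interfaces are pool_cache_init(), pool_cache_get()
 * and pool_cache_put() defined below.
 *
 *	struct pool_cache foo_cache;
 *	struct foo *f;
 *
 *	pool_cache_init(&foo_cache, &foo_pool, foo_ctor, foo_dtor, NULL);
 *
 *	f = pool_cache_get(&foo_cache, PR_WAITOK);
 *	...
 *	pool_cache_put(&foo_cache, f);
 *
 * Objects handed back with pool_cache_put() stay constructed; the destructor
 * only runs when the cache is invalidated or drained back to the pool.
 */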
/* The pool cache group. */
#define	PCG_NOBJECTS	16
struct pool_cache_group {
	TAILQ_ENTRY(pool_cache_group)
		pcg_list;	/* link in the pool cache's group list */
	u_int	pcg_avail;	/* # available objects */
			/* pointers to the objects */
	void	*pcg_objects[PCG_NOBJECTS];
};

void	pool_cache_reclaim(struct pool_cache *);
void	pool_cache_do_invalidate(struct pool_cache *, int,
	    void (*)(struct pool *, void *));

int	pool_catchup(struct pool *);
void	pool_prime_page(struct pool *, caddr_t, struct pool_item_header *);
void	pool_update_curpage(struct pool *);
void	pool_do_put(struct pool *, void *);
void	pr_rmpage(struct pool *, struct pool_item_header *,
	    struct pool_pagelist *);
int	pool_chk_page(struct pool *, const char *, struct pool_item_header *);

void	*pool_allocator_alloc(struct pool *, int);
void	pool_allocator_free(struct pool *, void *);

void pool_print_pagelist(struct pool_pagelist *, int (*)(const char *, ...));
void pool_print1(struct pool *, const char *, int (*)(const char *, ...));

/*
 * Pool log entry. An array of these is allocated in pool_init().
 */
struct pool_log {
	const char	*pl_file;
	long		pl_line;
	int		pl_action;
#define	PRLOG_GET	1
#define	PRLOG_PUT	2
	void		*pl_addr;
};

/* Number of entries in pool log buffers */
#ifndef POOL_LOGSIZE
#define	POOL_LOGSIZE	10
#endif

int pool_logsize = POOL_LOGSIZE;

#ifdef POOL_DIAGNOSTIC
static __inline void
pr_log(struct pool *pp, void *v, int action, const char *file, long line)
{
	int n = pp->pr_curlogentry;
	struct pool_log *pl;

	if ((pp->pr_roflags & PR_LOGGING) == 0)
		return;

	/*
	 * Fill in the current entry. Wrap around and overwrite
	 * the oldest entry if necessary.
	 */
	pl = &pp->pr_log[n];
	pl->pl_file = file;
	pl->pl_line = line;
	pl->pl_action = action;
	pl->pl_addr = v;
	if (++n >= pp->pr_logsize)
		n = 0;
	pp->pr_curlogentry = n;
}

static void
pr_printlog(struct pool *pp, struct pool_item *pi,
    int (*pr)(const char *, ...))
{
	int i = pp->pr_logsize;
	int n = pp->pr_curlogentry;

	if ((pp->pr_roflags & PR_LOGGING) == 0)
		return;

	/*
	 * Print all entries in this pool's log.
	 */
	while (i-- > 0) {
		struct pool_log *pl = &pp->pr_log[n];
		if (pl->pl_action != 0) {
			if (pi == NULL || pi == pl->pl_addr) {
				(*pr)("\tlog entry %d:\n", i);
				(*pr)("\t\taction = %s, addr = %p\n",
				    pl->pl_action == PRLOG_GET ?
"get" : "put", 242 pl->pl_addr); 243 (*pr)("\t\tfile: %s at line %lu\n", 244 pl->pl_file, pl->pl_line); 245 } 246 } 247 if (++n >= pp->pr_logsize) 248 n = 0; 249 } 250 } 251 252 static __inline void 253 pr_enter(struct pool *pp, const char *file, long line) 254 { 255 256 if (__predict_false(pp->pr_entered_file != NULL)) { 257 printf("pool %s: reentrancy at file %s line %ld\n", 258 pp->pr_wchan, file, line); 259 printf(" previous entry at file %s line %ld\n", 260 pp->pr_entered_file, pp->pr_entered_line); 261 panic("pr_enter"); 262 } 263 264 pp->pr_entered_file = file; 265 pp->pr_entered_line = line; 266 } 267 268 static __inline void 269 pr_leave(struct pool *pp) 270 { 271 272 if (__predict_false(pp->pr_entered_file == NULL)) { 273 printf("pool %s not entered?\n", pp->pr_wchan); 274 panic("pr_leave"); 275 } 276 277 pp->pr_entered_file = NULL; 278 pp->pr_entered_line = 0; 279 } 280 281 static __inline void 282 pr_enter_check(struct pool *pp, int (*pr)(const char *, ...)) 283 { 284 285 if (pp->pr_entered_file != NULL) 286 (*pr)("\n\tcurrently entered from file %s line %ld\n", 287 pp->pr_entered_file, pp->pr_entered_line); 288 } 289 #else 290 #define pr_log(pp, v, action, file, line) 291 #define pr_printlog(pp, pi, pr) 292 #define pr_enter(pp, file, line) 293 #define pr_leave(pp) 294 #define pr_enter_check(pp, pr) 295 #endif /* POOL_DIAGNOSTIC */ 296 297 static __inline int 298 phtree_compare(struct pool_item_header *a, struct pool_item_header *b) 299 { 300 if (a->ph_page < b->ph_page) 301 return (-1); 302 else if (a->ph_page > b->ph_page) 303 return (1); 304 else 305 return (0); 306 } 307 308 SPLAY_PROTOTYPE(phtree, pool_item_header, ph_node, phtree_compare); 309 SPLAY_GENERATE(phtree, pool_item_header, ph_node, phtree_compare); 310 311 /* 312 * Return the pool page header based on page address. 313 */ 314 static __inline struct pool_item_header * 315 pr_find_pagehead(struct pool *pp, caddr_t page) 316 { 317 struct pool_item_header *ph, tmp; 318 319 if ((pp->pr_roflags & PR_PHINPAGE) != 0) 320 return ((struct pool_item_header *)(page + pp->pr_phoffset)); 321 322 tmp.ph_page = page; 323 ph = SPLAY_FIND(phtree, &pp->pr_phtree, &tmp); 324 return ph; 325 } 326 327 /* 328 * Remove a page from the pool. 329 */ 330 void 331 pr_rmpage(struct pool *pp, struct pool_item_header *ph, 332 struct pool_pagelist *pq) 333 { 334 int s; 335 336 /* 337 * If the page was idle, decrement the idle page count. 338 */ 339 if (ph->ph_nmissing == 0) { 340 #ifdef DIAGNOSTIC 341 if (pp->pr_nidle == 0) 342 panic("pr_rmpage: nidle inconsistent"); 343 if (pp->pr_nitems < pp->pr_itemsperpage) 344 panic("pr_rmpage: nitems inconsistent"); 345 #endif 346 pp->pr_nidle--; 347 } 348 349 pp->pr_nitems -= pp->pr_itemsperpage; 350 351 /* 352 * Unlink a page from the pool and release it (or queue it for release). 353 */ 354 LIST_REMOVE(ph, ph_pagelist); 355 if (pq) { 356 LIST_INSERT_HEAD(pq, ph, ph_pagelist); 357 } else { 358 pool_allocator_free(pp, ph->ph_page); 359 if ((pp->pr_roflags & PR_PHINPAGE) == 0) { 360 SPLAY_REMOVE(phtree, &pp->pr_phtree, ph); 361 s = splhigh(); 362 pool_put(&phpool, ph); 363 splx(s); 364 } 365 } 366 pp->pr_npages--; 367 pp->pr_npagefree++; 368 369 pool_update_curpage(pp); 370 } 371 372 /* 373 * Initialize the given pool resource structure. 374 * 375 * We export this routine to allow other kernel parts to declare 376 * static pools that must be initialized before malloc() is available. 
377 */ 378 void 379 pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags, 380 const char *wchan, struct pool_allocator *palloc) 381 { 382 int off, slack; 383 384 #ifdef POOL_DIAGNOSTIC 385 /* 386 * Always log if POOL_DIAGNOSTIC is defined. 387 */ 388 if (pool_logsize != 0) 389 flags |= PR_LOGGING; 390 #endif 391 392 #ifdef MALLOC_DEBUG 393 if ((flags & PR_DEBUG) && (ioff != 0 || align != 0)) 394 flags &= ~PR_DEBUG; 395 #endif 396 /* 397 * Check arguments and construct default values. 398 */ 399 if (palloc == NULL) 400 palloc = &pool_allocator_kmem; 401 if ((palloc->pa_flags & PA_INITIALIZED) == 0) { 402 if (palloc->pa_pagesz == 0) 403 palloc->pa_pagesz = PAGE_SIZE; 404 405 TAILQ_INIT(&palloc->pa_list); 406 407 simple_lock_init(&palloc->pa_slock); 408 palloc->pa_pagemask = ~(palloc->pa_pagesz - 1); 409 palloc->pa_pageshift = ffs(palloc->pa_pagesz) - 1; 410 palloc->pa_flags |= PA_INITIALIZED; 411 } 412 413 if (align == 0) 414 align = ALIGN(1); 415 416 if (size < sizeof(struct pool_item)) 417 size = sizeof(struct pool_item); 418 419 size = roundup(size, align); 420 #ifdef DIAGNOSTIC 421 if (size > palloc->pa_pagesz) 422 panic("pool_init: pool item size (%lu) too large", 423 (u_long)size); 424 #endif 425 426 /* 427 * Initialize the pool structure. 428 */ 429 LIST_INIT(&pp->pr_emptypages); 430 LIST_INIT(&pp->pr_fullpages); 431 LIST_INIT(&pp->pr_partpages); 432 TAILQ_INIT(&pp->pr_cachelist); 433 pp->pr_curpage = NULL; 434 pp->pr_npages = 0; 435 pp->pr_minitems = 0; 436 pp->pr_minpages = 0; 437 pp->pr_maxpages = UINT_MAX; 438 pp->pr_roflags = flags; 439 pp->pr_flags = 0; 440 pp->pr_size = size; 441 pp->pr_align = align; 442 pp->pr_wchan = wchan; 443 pp->pr_alloc = palloc; 444 pp->pr_nitems = 0; 445 pp->pr_nout = 0; 446 pp->pr_hardlimit = UINT_MAX; 447 pp->pr_hardlimit_warning = NULL; 448 pp->pr_hardlimit_ratecap.tv_sec = 0; 449 pp->pr_hardlimit_ratecap.tv_usec = 0; 450 pp->pr_hardlimit_warning_last.tv_sec = 0; 451 pp->pr_hardlimit_warning_last.tv_usec = 0; 452 pp->pr_drain_hook = NULL; 453 pp->pr_drain_hook_arg = NULL; 454 pp->pr_serial = ++pool_serial; 455 if (pool_serial == 0) 456 panic("pool_init: too much uptime"); 457 458 /* 459 * Decide whether to put the page header off page to avoid 460 * wasting too large a part of the page. Off-page page headers 461 * go on a hash table, so we can match a returned item 462 * with its header based on the page address. 463 * We use 1/16 of the page size as the threshold (XXX: tune) 464 */ 465 if (pp->pr_size < palloc->pa_pagesz/16) { 466 /* Use the end of the page for the page header */ 467 pp->pr_roflags |= PR_PHINPAGE; 468 pp->pr_phoffset = off = palloc->pa_pagesz - 469 ALIGN(sizeof(struct pool_item_header)); 470 } else { 471 /* The page header will be taken from our page header pool */ 472 pp->pr_phoffset = 0; 473 off = palloc->pa_pagesz; 474 SPLAY_INIT(&pp->pr_phtree); 475 } 476 477 /* 478 * Alignment is to take place at `ioff' within the item. This means 479 * we must reserve up to `align - 1' bytes on the page to allow 480 * appropriate positioning of each item. 481 * 482 * Silently enforce `0 <= ioff < align'. 483 */ 484 pp->pr_itemoffset = ioff = ioff % align; 485 pp->pr_itemsperpage = (off - ((align - ioff) % align)) / pp->pr_size; 486 KASSERT(pp->pr_itemsperpage != 0); 487 488 /* 489 * Use the slack between the chunks and the page header 490 * for "cache coloring". 
 */
	slack = off - pp->pr_itemsperpage * pp->pr_size;
	pp->pr_maxcolor = (slack / align) * align;
	pp->pr_curcolor = 0;

	pp->pr_nget = 0;
	pp->pr_nfail = 0;
	pp->pr_nput = 0;
	pp->pr_npagealloc = 0;
	pp->pr_npagefree = 0;
	pp->pr_hiwat = 0;
	pp->pr_nidle = 0;

#ifdef POOL_DIAGNOSTIC
	if (flags & PR_LOGGING) {
		if (kmem_map == NULL ||
		    (pp->pr_log = malloc(pool_logsize * sizeof(struct pool_log),
		     M_TEMP, M_NOWAIT)) == NULL)
			pp->pr_roflags &= ~PR_LOGGING;
		pp->pr_curlogentry = 0;
		pp->pr_logsize = pool_logsize;
	}
#endif

	pp->pr_entered_file = NULL;
	pp->pr_entered_line = 0;

	simple_lock_init(&pp->pr_slock);

	/*
	 * Initialize private page header pool and cache magazine pool if we
	 * haven't done so yet.
	 * XXX LOCKING.
	 */
	if (phpool.pr_size == 0) {
		pool_init(&phpool, sizeof(struct pool_item_header), 0, 0,
		    0, "phpool", NULL);
		pool_init(&pcgpool, sizeof(struct pool_cache_group), 0, 0,
		    0, "pcgpool", NULL);
	}

	/* Insert this into the list of all pools. */
	simple_lock(&pool_head_slock);
	TAILQ_INSERT_TAIL(&pool_head, pp, pr_poollist);
	simple_unlock(&pool_head_slock);

	/* Insert into the list of pools using this allocator. */
	simple_lock(&palloc->pa_slock);
	TAILQ_INSERT_TAIL(&palloc->pa_list, pp, pr_alloc_list);
	simple_unlock(&palloc->pa_slock);
}

/*
 * De-commission a pool resource.
 */
void
pool_destroy(struct pool *pp)
{
	struct pool_item_header *ph;
	struct pool_cache *pc;

	/* Locking order: pool_allocator -> pool */
	simple_lock(&pp->pr_alloc->pa_slock);
	TAILQ_REMOVE(&pp->pr_alloc->pa_list, pp, pr_alloc_list);
	simple_unlock(&pp->pr_alloc->pa_slock);

	/* Destroy all caches for this pool.
*/ 558 while ((pc = TAILQ_FIRST(&pp->pr_cachelist)) != NULL) 559 pool_cache_destroy(pc); 560 561 #ifdef DIAGNOSTIC 562 if (pp->pr_nout != 0) { 563 pr_printlog(pp, NULL, printf); 564 panic("pool_destroy: pool busy: still out: %u", 565 pp->pr_nout); 566 } 567 #endif 568 569 /* Remove all pages */ 570 while ((ph = LIST_FIRST(&pp->pr_emptypages)) != NULL) 571 pr_rmpage(pp, ph, NULL); 572 KASSERT(LIST_EMPTY(&pp->pr_fullpages)); 573 KASSERT(LIST_EMPTY(&pp->pr_partpages)); 574 575 /* Remove from global pool list */ 576 simple_lock(&pool_head_slock); 577 TAILQ_REMOVE(&pool_head, pp, pr_poollist); 578 if (drainpp == pp) { 579 drainpp = NULL; 580 } 581 simple_unlock(&pool_head_slock); 582 583 #ifdef POOL_DIAGNOSTIC 584 if ((pp->pr_roflags & PR_LOGGING) != 0) 585 free(pp->pr_log, M_TEMP); 586 #endif 587 } 588 589 void 590 pool_set_drain_hook(struct pool *pp, void (*fn)(void *, int), void *arg) 591 { 592 /* XXX no locking -- must be used just after pool_init() */ 593 #ifdef DIAGNOSTIC 594 if (pp->pr_drain_hook != NULL) 595 panic("pool_set_drain_hook(%s): already set", pp->pr_wchan); 596 #endif 597 pp->pr_drain_hook = fn; 598 pp->pr_drain_hook_arg = arg; 599 } 600 601 static struct pool_item_header * 602 pool_alloc_item_header(struct pool *pp, caddr_t storage, int flags) 603 { 604 struct pool_item_header *ph; 605 int s; 606 607 LOCK_ASSERT(simple_lock_held(&pp->pr_slock) == 0); 608 609 if ((pp->pr_roflags & PR_PHINPAGE) != 0) 610 ph = (struct pool_item_header *) (storage + pp->pr_phoffset); 611 else { 612 s = splhigh(); 613 ph = pool_get(&phpool, flags); 614 splx(s); 615 } 616 617 return (ph); 618 } 619 620 /* 621 * Grab an item from the pool; must be called at appropriate spl level 622 */ 623 void * 624 #ifdef POOL_DIAGNOSTIC 625 _pool_get(struct pool *pp, int flags, const char *file, long line) 626 #else 627 pool_get(struct pool *pp, int flags) 628 #endif 629 { 630 struct pool_item *pi; 631 struct pool_item_header *ph; 632 void *v; 633 634 #ifdef DIAGNOSTIC 635 if ((flags & PR_WAITOK) != 0) 636 splassert(IPL_NONE); 637 if (__predict_false(curproc == NULL && /* doing_shutdown == 0 && XXX*/ 638 (flags & PR_WAITOK) != 0)) 639 panic("pool_get: %s:must have NOWAIT", pp->pr_wchan); 640 641 #ifdef LOCKDEBUG 642 if (flags & PR_WAITOK) 643 simple_lock_only_held(NULL, "pool_get(PR_WAITOK)"); 644 #endif 645 #endif /* DIAGNOSTIC */ 646 647 #ifdef MALLOC_DEBUG 648 if (pp->pr_roflags & PR_DEBUG) { 649 void *addr; 650 651 addr = NULL; 652 debug_malloc(pp->pr_size, M_DEBUG, 653 (flags & PR_WAITOK) ? M_WAITOK : M_NOWAIT, &addr); 654 return (addr); 655 } 656 #endif 657 658 simple_lock(&pp->pr_slock); 659 pr_enter(pp, file, line); 660 661 startover: 662 /* 663 * Check to see if we've reached the hard limit. If we have, 664 * and we can wait, then wait until an item has been returned to 665 * the pool. 666 */ 667 #ifdef DIAGNOSTIC 668 if (__predict_false(pp->pr_nout > pp->pr_hardlimit)) { 669 pr_leave(pp); 670 simple_unlock(&pp->pr_slock); 671 panic("pool_get: %s: crossed hard limit", pp->pr_wchan); 672 } 673 #endif 674 if (__predict_false(pp->pr_nout == pp->pr_hardlimit)) { 675 if (pp->pr_drain_hook != NULL) { 676 /* 677 * Since the drain hook is going to free things 678 * back to the pool, unlock, call hook, re-lock 679 * and check hardlimit condition again. 
680 */ 681 pr_leave(pp); 682 simple_unlock(&pp->pr_slock); 683 (*pp->pr_drain_hook)(pp->pr_drain_hook_arg, flags); 684 simple_lock(&pp->pr_slock); 685 pr_enter(pp, file, line); 686 if (pp->pr_nout < pp->pr_hardlimit) 687 goto startover; 688 } 689 690 if ((flags & PR_WAITOK) && !(flags & PR_LIMITFAIL)) { 691 /* 692 * XXX: A warning isn't logged in this case. Should 693 * it be? 694 */ 695 pp->pr_flags |= PR_WANTED; 696 pr_leave(pp); 697 ltsleep(pp, PSWP, pp->pr_wchan, 0, &pp->pr_slock); 698 pr_enter(pp, file, line); 699 goto startover; 700 } 701 702 /* 703 * Log a message that the hard limit has been hit. 704 */ 705 if (pp->pr_hardlimit_warning != NULL && 706 ratecheck(&pp->pr_hardlimit_warning_last, 707 &pp->pr_hardlimit_ratecap)) 708 log(LOG_ERR, "%s\n", pp->pr_hardlimit_warning); 709 710 pp->pr_nfail++; 711 712 pr_leave(pp); 713 simple_unlock(&pp->pr_slock); 714 return (NULL); 715 } 716 717 /* 718 * The convention we use is that if `curpage' is not NULL, then 719 * it points at a non-empty bucket. In particular, `curpage' 720 * never points at a page header which has PR_PHINPAGE set and 721 * has no items in its bucket. 722 */ 723 if ((ph = pp->pr_curpage) == NULL) { 724 #ifdef DIAGNOSTIC 725 if (pp->pr_nitems != 0) { 726 simple_unlock(&pp->pr_slock); 727 printf("pool_get: %s: curpage NULL, nitems %u\n", 728 pp->pr_wchan, pp->pr_nitems); 729 panic("pool_get: nitems inconsistent"); 730 } 731 #endif 732 733 /* 734 * Call the back-end page allocator for more memory. 735 * Release the pool lock, as the back-end page allocator 736 * may block. 737 */ 738 pr_leave(pp); 739 simple_unlock(&pp->pr_slock); 740 v = pool_allocator_alloc(pp, flags); 741 if (__predict_true(v != NULL)) 742 ph = pool_alloc_item_header(pp, v, flags); 743 simple_lock(&pp->pr_slock); 744 pr_enter(pp, file, line); 745 746 if (__predict_false(v == NULL || ph == NULL)) { 747 if (v != NULL) 748 pool_allocator_free(pp, v); 749 750 /* 751 * We were unable to allocate a page or item 752 * header, but we released the lock during 753 * allocation, so perhaps items were freed 754 * back to the pool. Check for this case. 755 */ 756 if (pp->pr_curpage != NULL) 757 goto startover; 758 759 if ((flags & PR_WAITOK) == 0) { 760 pp->pr_nfail++; 761 pr_leave(pp); 762 simple_unlock(&pp->pr_slock); 763 return (NULL); 764 } 765 766 /* 767 * Wait for items to be returned to this pool. 768 * 769 * XXX: maybe we should wake up once a second and 770 * try again? 771 */ 772 pp->pr_flags |= PR_WANTED; 773 /* PA_WANTED is already set on the allocator. */ 774 pr_leave(pp); 775 ltsleep(pp, PSWP, pp->pr_wchan, 0, &pp->pr_slock); 776 pr_enter(pp, file, line); 777 goto startover; 778 } 779 780 /* We have more memory; add it to the pool */ 781 pool_prime_page(pp, v, ph); 782 pp->pr_npagealloc++; 783 784 /* Start the allocation process over. 
*/ 785 goto startover; 786 } 787 if (__predict_false((v = pi = TAILQ_FIRST(&ph->ph_itemlist)) == NULL)) { 788 pr_leave(pp); 789 simple_unlock(&pp->pr_slock); 790 panic("pool_get: %s: page empty", pp->pr_wchan); 791 } 792 #ifdef DIAGNOSTIC 793 if (__predict_false(pp->pr_nitems == 0)) { 794 pr_leave(pp); 795 simple_unlock(&pp->pr_slock); 796 printf("pool_get: %s: items on itemlist, nitems %u\n", 797 pp->pr_wchan, pp->pr_nitems); 798 panic("pool_get: nitems inconsistent"); 799 } 800 #endif 801 802 #ifdef POOL_DIAGNOSTIC 803 pr_log(pp, v, PRLOG_GET, file, line); 804 #endif 805 806 #ifdef DIAGNOSTIC 807 if (__predict_false(pi->pi_magic != PI_MAGIC)) { 808 pr_printlog(pp, pi, printf); 809 panic("pool_get(%s): free list modified: magic=%x; page %p;" 810 " item addr %p", 811 pp->pr_wchan, pi->pi_magic, ph->ph_page, pi); 812 } 813 #endif 814 815 /* 816 * Remove from item list. 817 */ 818 TAILQ_REMOVE(&ph->ph_itemlist, pi, pi_list); 819 pp->pr_nitems--; 820 pp->pr_nout++; 821 if (ph->ph_nmissing == 0) { 822 #ifdef DIAGNOSTIC 823 if (__predict_false(pp->pr_nidle == 0)) 824 panic("pool_get: nidle inconsistent"); 825 #endif 826 pp->pr_nidle--; 827 828 /* 829 * This page was previously empty. Move it to the list of 830 * partially-full pages. This page is already curpage. 831 */ 832 LIST_REMOVE(ph, ph_pagelist); 833 LIST_INSERT_HEAD(&pp->pr_partpages, ph, ph_pagelist); 834 } 835 ph->ph_nmissing++; 836 if (TAILQ_EMPTY(&ph->ph_itemlist)) { 837 #ifdef DIAGNOSTIC 838 if (__predict_false(ph->ph_nmissing != pp->pr_itemsperpage)) { 839 pr_leave(pp); 840 simple_unlock(&pp->pr_slock); 841 panic("pool_get: %s: nmissing inconsistent", 842 pp->pr_wchan); 843 } 844 #endif 845 /* 846 * This page is now full. Move it to the full list 847 * and select a new current page. 848 */ 849 LIST_REMOVE(ph, ph_pagelist); 850 LIST_INSERT_HEAD(&pp->pr_fullpages, ph, ph_pagelist); 851 pool_update_curpage(pp); 852 } 853 854 pp->pr_nget++; 855 856 /* 857 * If we have a low water mark and we are now below that low 858 * water mark, add more items to the pool. 859 */ 860 if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) { 861 /* 862 * XXX: Should we log a warning? Should we set up a timeout 863 * to try again in a second or so? The latter could break 864 * a caller's assumptions about interrupt protection, etc. 865 */ 866 } 867 868 pr_leave(pp); 869 simple_unlock(&pp->pr_slock); 870 return (v); 871 } 872 873 /* 874 * Internal version of pool_put(). Pool is already locked/entered. 875 */ 876 void 877 pool_do_put(struct pool *pp, void *v) 878 { 879 struct pool_item *pi = v; 880 struct pool_item_header *ph; 881 caddr_t page; 882 int s; 883 884 #ifdef MALLOC_DEBUG 885 if (pp->pr_roflags & PR_DEBUG) { 886 debug_free(v, M_DEBUG); 887 return; 888 } 889 #endif 890 891 LOCK_ASSERT(simple_lock_held(&pp->pr_slock)); 892 893 page = (caddr_t)((vaddr_t)v & pp->pr_alloc->pa_pagemask); 894 895 #ifdef DIAGNOSTIC 896 if (__predict_false(pp->pr_nout == 0)) { 897 printf("pool %s: putting with none out\n", 898 pp->pr_wchan); 899 panic("pool_put"); 900 } 901 #endif 902 903 if (__predict_false((ph = pr_find_pagehead(pp, page)) == NULL)) { 904 pr_printlog(pp, NULL, printf); 905 panic("pool_put: %s: page header missing", pp->pr_wchan); 906 } 907 908 #ifdef LOCKDEBUG 909 /* 910 * Check if we're freeing a locked simple lock. 911 */ 912 simple_lock_freecheck((caddr_t)pi, ((caddr_t)pi) + pp->pr_size); 913 #endif 914 915 /* 916 * Return to item list. 
917 */ 918 #ifdef DIAGNOSTIC 919 pi->pi_magic = PI_MAGIC; 920 #endif 921 #ifdef DEBUG 922 { 923 int i, *ip = v; 924 925 for (i = 0; i < pp->pr_size / sizeof(int); i++) { 926 *ip++ = PI_MAGIC; 927 } 928 } 929 #endif 930 931 TAILQ_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list); 932 ph->ph_nmissing--; 933 pp->pr_nput++; 934 pp->pr_nitems++; 935 pp->pr_nout--; 936 937 /* Cancel "pool empty" condition if it exists */ 938 if (pp->pr_curpage == NULL) 939 pp->pr_curpage = ph; 940 941 if (pp->pr_flags & PR_WANTED) { 942 pp->pr_flags &= ~PR_WANTED; 943 if (ph->ph_nmissing == 0) 944 pp->pr_nidle++; 945 wakeup((caddr_t)pp); 946 return; 947 } 948 949 /* 950 * If this page is now empty, do one of two things: 951 * 952 * (1) If we have more pages than the page high water mark, 953 * free the page back to the system. 954 * 955 * (2) Otherwise, move the page to the empty page list. 956 * 957 * Either way, select a new current page (so we use a partially-full 958 * page if one is available). 959 */ 960 if (ph->ph_nmissing == 0) { 961 pp->pr_nidle++; 962 if (pp->pr_npages > pp->pr_maxpages || 963 (pp->pr_alloc->pa_flags & PA_WANT) != 0) { 964 pr_rmpage(pp, ph, NULL); 965 } else { 966 LIST_REMOVE(ph, ph_pagelist); 967 LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist); 968 969 /* 970 * Update the timestamp on the page. A page must 971 * be idle for some period of time before it can 972 * be reclaimed by the pagedaemon. This minimizes 973 * ping-pong'ing for memory. 974 */ 975 s = splclock(); 976 ph->ph_time = mono_time; 977 splx(s); 978 } 979 pool_update_curpage(pp); 980 } 981 982 /* 983 * If the page was previously completely full, move it to the 984 * partially-full list and make it the current page. The next 985 * allocation will get the item from this page, instead of 986 * further fragmenting the pool. 987 */ 988 else if (ph->ph_nmissing == (pp->pr_itemsperpage - 1)) { 989 LIST_REMOVE(ph, ph_pagelist); 990 LIST_INSERT_HEAD(&pp->pr_partpages, ph, ph_pagelist); 991 pp->pr_curpage = ph; 992 } 993 } 994 995 /* 996 * Return resource to the pool; must be called at appropriate spl level 997 */ 998 #ifdef POOL_DIAGNOSTIC 999 void 1000 _pool_put(struct pool *pp, void *v, const char *file, long line) 1001 { 1002 1003 simple_lock(&pp->pr_slock); 1004 pr_enter(pp, file, line); 1005 1006 pr_log(pp, v, PRLOG_PUT, file, line); 1007 1008 pool_do_put(pp, v); 1009 1010 pr_leave(pp); 1011 simple_unlock(&pp->pr_slock); 1012 } 1013 #undef pool_put 1014 #endif /* POOL_DIAGNOSTIC */ 1015 1016 void 1017 pool_put(struct pool *pp, void *v) 1018 { 1019 1020 simple_lock(&pp->pr_slock); 1021 1022 pool_do_put(pp, v); 1023 1024 simple_unlock(&pp->pr_slock); 1025 } 1026 1027 #ifdef POOL_DIAGNOSTIC 1028 #define pool_put(h, v) _pool_put((h), (v), __FILE__, __LINE__) 1029 #endif 1030 1031 /* 1032 * Add N items to the pool. 
1033 */ 1034 int 1035 pool_prime(struct pool *pp, int n) 1036 { 1037 struct pool_item_header *ph; 1038 caddr_t cp; 1039 int newpages; 1040 1041 simple_lock(&pp->pr_slock); 1042 1043 newpages = roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage; 1044 1045 while (newpages-- > 0) { 1046 simple_unlock(&pp->pr_slock); 1047 cp = pool_allocator_alloc(pp, PR_NOWAIT); 1048 if (__predict_true(cp != NULL)) 1049 ph = pool_alloc_item_header(pp, cp, PR_NOWAIT); 1050 simple_lock(&pp->pr_slock); 1051 1052 if (__predict_false(cp == NULL || ph == NULL)) { 1053 if (cp != NULL) 1054 pool_allocator_free(pp, cp); 1055 break; 1056 } 1057 1058 pool_prime_page(pp, cp, ph); 1059 pp->pr_npagealloc++; 1060 pp->pr_minpages++; 1061 } 1062 1063 if (pp->pr_minpages >= pp->pr_maxpages) 1064 pp->pr_maxpages = pp->pr_minpages + 1; /* XXX */ 1065 1066 simple_unlock(&pp->pr_slock); 1067 return (0); 1068 } 1069 1070 /* 1071 * Add a page worth of items to the pool. 1072 * 1073 * Note, we must be called with the pool descriptor LOCKED. 1074 */ 1075 void 1076 pool_prime_page(struct pool *pp, caddr_t storage, struct pool_item_header *ph) 1077 { 1078 struct pool_item *pi; 1079 caddr_t cp = storage; 1080 unsigned int align = pp->pr_align; 1081 unsigned int ioff = pp->pr_itemoffset; 1082 int n; 1083 1084 #ifdef DIAGNOSTIC 1085 if (((u_long)cp & (pp->pr_alloc->pa_pagesz - 1)) != 0) 1086 panic("pool_prime_page: %s: unaligned page", pp->pr_wchan); 1087 #endif 1088 1089 /* 1090 * Insert page header. 1091 */ 1092 LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist); 1093 TAILQ_INIT(&ph->ph_itemlist); 1094 ph->ph_page = storage; 1095 ph->ph_nmissing = 0; 1096 memset(&ph->ph_time, 0, sizeof(ph->ph_time)); 1097 if ((pp->pr_roflags & PR_PHINPAGE) == 0) 1098 SPLAY_INSERT(phtree, &pp->pr_phtree, ph); 1099 1100 pp->pr_nidle++; 1101 1102 /* 1103 * Color this page. 1104 */ 1105 cp = (caddr_t)(cp + pp->pr_curcolor); 1106 if ((pp->pr_curcolor += align) > pp->pr_maxcolor) 1107 pp->pr_curcolor = 0; 1108 1109 /* 1110 * Adjust storage to apply aligment to `pr_itemoffset' in each item. 1111 */ 1112 if (ioff != 0) 1113 cp = (caddr_t)(cp + (align - ioff)); 1114 1115 /* 1116 * Insert remaining chunks on the bucket list. 1117 */ 1118 n = pp->pr_itemsperpage; 1119 pp->pr_nitems += n; 1120 1121 while (n--) { 1122 pi = (struct pool_item *)cp; 1123 1124 KASSERT(((((vaddr_t)pi) + ioff) & (align - 1)) == 0); 1125 1126 /* Insert on page list */ 1127 TAILQ_INSERT_TAIL(&ph->ph_itemlist, pi, pi_list); 1128 #ifdef DIAGNOSTIC 1129 pi->pi_magic = PI_MAGIC; 1130 #endif 1131 cp = (caddr_t)(cp + pp->pr_size); 1132 } 1133 1134 /* 1135 * If the pool was depleted, point at the new page. 1136 */ 1137 if (pp->pr_curpage == NULL) 1138 pp->pr_curpage = ph; 1139 1140 if (++pp->pr_npages > pp->pr_hiwat) 1141 pp->pr_hiwat = pp->pr_npages; 1142 } 1143 1144 /* 1145 * Used by pool_get() when nitems drops below the low water mark. This 1146 * is used to catch up pr_nitems with the low water mark. 1147 * 1148 * Note 1, we never wait for memory here, we let the caller decide what to do. 1149 * 1150 * Note 2, we must be called with the pool already locked, and we return 1151 * with it locked. 1152 */ 1153 int 1154 pool_catchup(struct pool *pp) 1155 { 1156 struct pool_item_header *ph; 1157 caddr_t cp; 1158 int error = 0; 1159 1160 while (POOL_NEEDS_CATCHUP(pp)) { 1161 /* 1162 * Call the page back-end allocator for more memory. 1163 * 1164 * XXX: We never wait, so should we bother unlocking 1165 * the pool descriptor? 
1166 */ 1167 simple_unlock(&pp->pr_slock); 1168 cp = pool_allocator_alloc(pp, PR_NOWAIT); 1169 if (__predict_true(cp != NULL)) 1170 ph = pool_alloc_item_header(pp, cp, PR_NOWAIT); 1171 simple_lock(&pp->pr_slock); 1172 if (__predict_false(cp == NULL || ph == NULL)) { 1173 if (cp != NULL) 1174 pool_allocator_free(pp, cp); 1175 error = ENOMEM; 1176 break; 1177 } 1178 pool_prime_page(pp, cp, ph); 1179 pp->pr_npagealloc++; 1180 } 1181 1182 return (error); 1183 } 1184 1185 void 1186 pool_update_curpage(struct pool *pp) 1187 { 1188 1189 pp->pr_curpage = LIST_FIRST(&pp->pr_partpages); 1190 if (pp->pr_curpage == NULL) { 1191 pp->pr_curpage = LIST_FIRST(&pp->pr_emptypages); 1192 } 1193 } 1194 1195 void 1196 pool_setlowat(struct pool *pp, int n) 1197 { 1198 1199 simple_lock(&pp->pr_slock); 1200 1201 pp->pr_minitems = n; 1202 pp->pr_minpages = (n == 0) 1203 ? 0 1204 : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage; 1205 1206 /* Make sure we're caught up with the newly-set low water mark. */ 1207 if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) { 1208 /* 1209 * XXX: Should we log a warning? Should we set up a timeout 1210 * to try again in a second or so? The latter could break 1211 * a caller's assumptions about interrupt protection, etc. 1212 */ 1213 } 1214 1215 simple_unlock(&pp->pr_slock); 1216 } 1217 1218 void 1219 pool_sethiwat(struct pool *pp, int n) 1220 { 1221 1222 simple_lock(&pp->pr_slock); 1223 1224 pp->pr_maxpages = (n == 0) 1225 ? 0 1226 : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage; 1227 1228 simple_unlock(&pp->pr_slock); 1229 } 1230 1231 int 1232 pool_sethardlimit(struct pool *pp, unsigned n, const char *warnmess, int ratecap) 1233 { 1234 int error = 0; 1235 1236 simple_lock(&pp->pr_slock); 1237 1238 if (n < pp->pr_nout) { 1239 error = EINVAL; 1240 goto done; 1241 } 1242 1243 pp->pr_hardlimit = n; 1244 pp->pr_hardlimit_warning = warnmess; 1245 pp->pr_hardlimit_ratecap.tv_sec = ratecap; 1246 pp->pr_hardlimit_warning_last.tv_sec = 0; 1247 pp->pr_hardlimit_warning_last.tv_usec = 0; 1248 1249 /* 1250 * In-line version of pool_sethiwat(), because we don't want to 1251 * release the lock. 1252 */ 1253 pp->pr_maxpages = (n == 0 || n == UINT_MAX) 1254 ? n 1255 : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage; 1256 1257 done: 1258 simple_unlock(&pp->pr_slock); 1259 1260 return (error); 1261 } 1262 1263 /* 1264 * Release all complete pages that have not been used recently. 1265 * 1266 * Returns non-zero if any pages have been reclaimed. 1267 */ 1268 int 1269 #ifdef POOL_DIAGNOSTIC 1270 _pool_reclaim(struct pool *pp, const char *file, long line) 1271 #else 1272 pool_reclaim(struct pool *pp) 1273 #endif 1274 { 1275 struct pool_item_header *ph, *phnext; 1276 struct pool_cache *pc; 1277 struct timeval curtime; 1278 struct pool_pagelist pq; 1279 struct timeval diff; 1280 int s; 1281 1282 if (pp->pr_drain_hook != NULL) { 1283 /* 1284 * The drain hook must be called with the pool unlocked. 1285 */ 1286 (*pp->pr_drain_hook)(pp->pr_drain_hook_arg, PR_NOWAIT); 1287 } 1288 1289 if (simple_lock_try(&pp->pr_slock) == 0) 1290 return (0); 1291 pr_enter(pp, file, line); 1292 1293 LIST_INIT(&pq); 1294 1295 /* 1296 * Reclaim items from the pool's caches. 
1297 */ 1298 TAILQ_FOREACH(pc, &pp->pr_cachelist, pc_poollist) 1299 pool_cache_reclaim(pc); 1300 1301 s = splclock(); 1302 curtime = mono_time; 1303 splx(s); 1304 1305 for (ph = LIST_FIRST(&pp->pr_emptypages); ph != NULL; ph = phnext) { 1306 phnext = LIST_NEXT(ph, ph_pagelist); 1307 1308 /* Check our minimum page claim */ 1309 if (pp->pr_npages <= pp->pr_minpages) 1310 break; 1311 1312 KASSERT(ph->ph_nmissing == 0); 1313 timersub(&curtime, &ph->ph_time, &diff); 1314 if (diff.tv_sec < pool_inactive_time) 1315 continue; 1316 1317 /* 1318 * If freeing this page would put us below 1319 * the low water mark, stop now. 1320 */ 1321 if ((pp->pr_nitems - pp->pr_itemsperpage) < 1322 pp->pr_minitems) 1323 break; 1324 1325 pr_rmpage(pp, ph, &pq); 1326 } 1327 1328 pr_leave(pp); 1329 simple_unlock(&pp->pr_slock); 1330 if (LIST_EMPTY(&pq)) 1331 return (0); 1332 while ((ph = LIST_FIRST(&pq)) != NULL) { 1333 LIST_REMOVE(ph, ph_pagelist); 1334 pool_allocator_free(pp, ph->ph_page); 1335 if (pp->pr_roflags & PR_PHINPAGE) { 1336 continue; 1337 } 1338 SPLAY_REMOVE(phtree, &pp->pr_phtree, ph); 1339 s = splhigh(); 1340 pool_put(&phpool, ph); 1341 splx(s); 1342 } 1343 1344 return (1); 1345 } 1346 1347 1348 /* 1349 * Drain pools, one at a time. 1350 * 1351 * Note, we must never be called from an interrupt context. 1352 */ 1353 void 1354 pool_drain(void *arg) 1355 { 1356 struct pool *pp; 1357 int s; 1358 1359 pp = NULL; 1360 s = splvm(); 1361 simple_lock(&pool_head_slock); 1362 if (drainpp == NULL) { 1363 drainpp = TAILQ_FIRST(&pool_head); 1364 } 1365 if (drainpp) { 1366 pp = drainpp; 1367 drainpp = TAILQ_NEXT(pp, pr_poollist); 1368 } 1369 simple_unlock(&pool_head_slock); 1370 pool_reclaim(pp); 1371 splx(s); 1372 } 1373 1374 /* 1375 * Diagnostic helpers. 1376 */ 1377 void 1378 pool_printit(struct pool *pp, const char *modif, int (*pr)(const char *, ...)) 1379 { 1380 int s; 1381 1382 s = splvm(); 1383 if (simple_lock_try(&pp->pr_slock) == 0) { 1384 pr("pool %s is locked; try again later\n", 1385 pp->pr_wchan); 1386 splx(s); 1387 return; 1388 } 1389 pool_print1(pp, modif, pr); 1390 simple_unlock(&pp->pr_slock); 1391 splx(s); 1392 } 1393 1394 void 1395 pool_print_pagelist(struct pool_pagelist *pl, int (*pr)(const char *, ...)) 1396 { 1397 struct pool_item_header *ph; 1398 #ifdef DIAGNOSTIC 1399 struct pool_item *pi; 1400 #endif 1401 1402 LIST_FOREACH(ph, pl, ph_pagelist) { 1403 (*pr)("\t\tpage %p, nmissing %d, time %lu,%lu\n", 1404 ph->ph_page, ph->ph_nmissing, 1405 (u_long)ph->ph_time.tv_sec, 1406 (u_long)ph->ph_time.tv_usec); 1407 #ifdef DIAGNOSTIC 1408 TAILQ_FOREACH(pi, &ph->ph_itemlist, pi_list) { 1409 if (pi->pi_magic != PI_MAGIC) { 1410 (*pr)("\t\t\titem %p, magic 0x%x\n", 1411 pi, pi->pi_magic); 1412 } 1413 } 1414 #endif 1415 } 1416 } 1417 1418 void 1419 pool_print1(struct pool *pp, const char *modif, int (*pr)(const char *, ...)) 1420 { 1421 struct pool_item_header *ph; 1422 struct pool_cache *pc; 1423 struct pool_cache_group *pcg; 1424 int i, print_log = 0, print_pagelist = 0, print_cache = 0; 1425 char c; 1426 1427 while ((c = *modif++) != '\0') { 1428 if (c == 'l') 1429 print_log = 1; 1430 if (c == 'p') 1431 print_pagelist = 1; 1432 if (c == 'c') 1433 print_cache = 1; 1434 modif++; 1435 } 1436 1437 (*pr)("POOL %s: size %u, align %u, ioff %u, roflags 0x%08x\n", 1438 pp->pr_wchan, pp->pr_size, pp->pr_align, pp->pr_itemoffset, 1439 pp->pr_roflags); 1440 (*pr)("\talloc %p\n", pp->pr_alloc); 1441 (*pr)("\tminitems %u, minpages %u, maxpages %u, npages %u\n", 1442 pp->pr_minitems, pp->pr_minpages, pp->pr_maxpages, 
pp->pr_npages); 1443 (*pr)("\titemsperpage %u, nitems %u, nout %u, hardlimit %u\n", 1444 pp->pr_itemsperpage, pp->pr_nitems, pp->pr_nout, pp->pr_hardlimit); 1445 1446 (*pr)("\n\tnget %lu, nfail %lu, nput %lu\n", 1447 pp->pr_nget, pp->pr_nfail, pp->pr_nput); 1448 (*pr)("\tnpagealloc %lu, npagefree %lu, hiwat %u, nidle %lu\n", 1449 pp->pr_npagealloc, pp->pr_npagefree, pp->pr_hiwat, pp->pr_nidle); 1450 1451 if (print_pagelist == 0) 1452 goto skip_pagelist; 1453 1454 if ((ph = LIST_FIRST(&pp->pr_emptypages)) != NULL) 1455 (*pr)("\n\tempty page list:\n"); 1456 pool_print_pagelist(&pp->pr_emptypages, pr); 1457 if ((ph = LIST_FIRST(&pp->pr_fullpages)) != NULL) 1458 (*pr)("\n\tfull page list:\n"); 1459 pool_print_pagelist(&pp->pr_fullpages, pr); 1460 if ((ph = LIST_FIRST(&pp->pr_partpages)) != NULL) 1461 (*pr)("\n\tpartial-page list:\n"); 1462 pool_print_pagelist(&pp->pr_partpages, pr); 1463 1464 if (pp->pr_curpage == NULL) 1465 (*pr)("\tno current page\n"); 1466 else 1467 (*pr)("\tcurpage %p\n", pp->pr_curpage->ph_page); 1468 1469 skip_pagelist: 1470 if (print_log == 0) 1471 goto skip_log; 1472 1473 (*pr)("\n"); 1474 if ((pp->pr_roflags & PR_LOGGING) == 0) 1475 (*pr)("\tno log\n"); 1476 else 1477 pr_printlog(pp, NULL, pr); 1478 1479 skip_log: 1480 if (print_cache == 0) 1481 goto skip_cache; 1482 1483 TAILQ_FOREACH(pc, &pp->pr_cachelist, pc_poollist) { 1484 (*pr)("\tcache %p: allocfrom %p freeto %p\n", pc, 1485 pc->pc_allocfrom, pc->pc_freeto); 1486 (*pr)("\t hits %lu misses %lu ngroups %lu nitems %lu\n", 1487 pc->pc_hits, pc->pc_misses, pc->pc_ngroups, pc->pc_nitems); 1488 TAILQ_FOREACH(pcg, &pc->pc_grouplist, pcg_list) { 1489 (*pr)("\t\tgroup %p: avail %d\n", pcg, pcg->pcg_avail); 1490 for (i = 0; i < PCG_NOBJECTS; i++) 1491 (*pr)("\t\t\t%p\n", pcg->pcg_objects[i]); 1492 } 1493 } 1494 1495 skip_cache: 1496 pr_enter_check(pp, pr); 1497 } 1498 1499 int 1500 pool_chk_page(struct pool *pp, const char *label, struct pool_item_header *ph) 1501 { 1502 struct pool_item *pi; 1503 caddr_t page; 1504 int n; 1505 1506 page = (caddr_t)((u_long)ph & pp->pr_alloc->pa_pagemask); 1507 if (page != ph->ph_page && 1508 (pp->pr_roflags & PR_PHINPAGE) != 0) { 1509 if (label != NULL) 1510 printf("%s: ", label); 1511 printf("pool(%p:%s): page inconsistency: page %p;" 1512 " at page head addr %p (p %p)\n", pp, 1513 pp->pr_wchan, ph->ph_page, 1514 ph, page); 1515 return 1; 1516 } 1517 1518 for (pi = TAILQ_FIRST(&ph->ph_itemlist), n = 0; 1519 pi != NULL; 1520 pi = TAILQ_NEXT(pi,pi_list), n++) { 1521 1522 #ifdef DIAGNOSTIC 1523 if (pi->pi_magic != PI_MAGIC) { 1524 if (label != NULL) 1525 printf("%s: ", label); 1526 printf("pool(%s): free list modified: magic=%x;" 1527 " page %p; item ordinal %d;" 1528 " addr %p (p %p)\n", 1529 pp->pr_wchan, pi->pi_magic, ph->ph_page, 1530 n, pi, page); 1531 panic("pool"); 1532 } 1533 #endif 1534 page = 1535 (caddr_t)((u_long)pi & pp->pr_alloc->pa_pagemask); 1536 if (page == ph->ph_page) 1537 continue; 1538 1539 if (label != NULL) 1540 printf("%s: ", label); 1541 printf("pool(%p:%s): page inconsistency: page %p;" 1542 " item ordinal %d; addr %p (p %p)\n", pp, 1543 pp->pr_wchan, ph->ph_page, 1544 n, pi, page); 1545 return 1; 1546 } 1547 return 0; 1548 } 1549 1550 int 1551 pool_chk(struct pool *pp, const char *label) 1552 { 1553 struct pool_item_header *ph; 1554 int r = 0; 1555 1556 simple_lock(&pp->pr_slock); 1557 LIST_FOREACH(ph, &pp->pr_emptypages, ph_pagelist) { 1558 r = pool_chk_page(pp, label, ph); 1559 if (r) { 1560 goto out; 1561 } 1562 } 1563 LIST_FOREACH(ph, &pp->pr_fullpages, 
ph_pagelist) { 1564 r = pool_chk_page(pp, label, ph); 1565 if (r) { 1566 goto out; 1567 } 1568 } 1569 LIST_FOREACH(ph, &pp->pr_partpages, ph_pagelist) { 1570 r = pool_chk_page(pp, label, ph); 1571 if (r) { 1572 goto out; 1573 } 1574 } 1575 1576 out: 1577 simple_unlock(&pp->pr_slock); 1578 return (r); 1579 } 1580 1581 /* 1582 * pool_cache_init: 1583 * 1584 * Initialize a pool cache. 1585 * 1586 * NOTE: If the pool must be protected from interrupts, we expect 1587 * to be called at the appropriate interrupt priority level. 1588 */ 1589 void 1590 pool_cache_init(struct pool_cache *pc, struct pool *pp, 1591 int (*ctor)(void *, void *, int), 1592 void (*dtor)(void *, void *), 1593 void *arg) 1594 { 1595 1596 TAILQ_INIT(&pc->pc_grouplist); 1597 simple_lock_init(&pc->pc_slock); 1598 1599 pc->pc_allocfrom = NULL; 1600 pc->pc_freeto = NULL; 1601 pc->pc_pool = pp; 1602 1603 pc->pc_ctor = ctor; 1604 pc->pc_dtor = dtor; 1605 pc->pc_arg = arg; 1606 1607 pc->pc_hits = 0; 1608 pc->pc_misses = 0; 1609 1610 pc->pc_ngroups = 0; 1611 1612 pc->pc_nitems = 0; 1613 1614 simple_lock(&pp->pr_slock); 1615 TAILQ_INSERT_TAIL(&pp->pr_cachelist, pc, pc_poollist); 1616 simple_unlock(&pp->pr_slock); 1617 } 1618 1619 /* 1620 * pool_cache_destroy: 1621 * 1622 * Destroy a pool cache. 1623 */ 1624 void 1625 pool_cache_destroy(struct pool_cache *pc) 1626 { 1627 struct pool *pp = pc->pc_pool; 1628 1629 /* First, invalidate the entire cache. */ 1630 pool_cache_invalidate(pc); 1631 1632 /* ...and remove it from the pool's cache list. */ 1633 simple_lock(&pp->pr_slock); 1634 TAILQ_REMOVE(&pp->pr_cachelist, pc, pc_poollist); 1635 simple_unlock(&pp->pr_slock); 1636 } 1637 1638 static __inline void * 1639 pcg_get(struct pool_cache_group *pcg) 1640 { 1641 void *object; 1642 u_int idx; 1643 1644 KASSERT(pcg->pcg_avail <= PCG_NOBJECTS); 1645 KASSERT(pcg->pcg_avail != 0); 1646 idx = --pcg->pcg_avail; 1647 1648 KASSERT(pcg->pcg_objects[idx] != NULL); 1649 object = pcg->pcg_objects[idx]; 1650 pcg->pcg_objects[idx] = NULL; 1651 1652 return (object); 1653 } 1654 1655 static __inline void 1656 pcg_put(struct pool_cache_group *pcg, void *object) 1657 { 1658 u_int idx; 1659 1660 KASSERT(pcg->pcg_avail < PCG_NOBJECTS); 1661 idx = pcg->pcg_avail++; 1662 1663 KASSERT(pcg->pcg_objects[idx] == NULL); 1664 pcg->pcg_objects[idx] = object; 1665 } 1666 1667 /* 1668 * pool_cache_get: 1669 * 1670 * Get an object from a pool cache. 1671 */ 1672 void * 1673 pool_cache_get(struct pool_cache *pc, int flags) 1674 { 1675 struct pool_cache_group *pcg; 1676 void *object; 1677 1678 #ifdef LOCKDEBUG 1679 if (flags & PR_WAITOK) 1680 simple_lock_only_held(NULL, "pool_cache_get(PR_WAITOK)"); 1681 #endif 1682 1683 simple_lock(&pc->pc_slock); 1684 1685 if ((pcg = pc->pc_allocfrom) == NULL) { 1686 TAILQ_FOREACH(pcg, &pc->pc_grouplist, pcg_list) { 1687 if (pcg->pcg_avail != 0) { 1688 pc->pc_allocfrom = pcg; 1689 goto have_group; 1690 } 1691 } 1692 1693 /* 1694 * No groups with any available objects. Allocate 1695 * a new object, construct it, and return it to 1696 * the caller. We will allocate a group, if necessary, 1697 * when the object is freed back to the cache. 
1698 */ 1699 pc->pc_misses++; 1700 simple_unlock(&pc->pc_slock); 1701 object = pool_get(pc->pc_pool, flags); 1702 if (object != NULL && pc->pc_ctor != NULL) { 1703 if ((*pc->pc_ctor)(pc->pc_arg, object, flags) != 0) { 1704 pool_put(pc->pc_pool, object); 1705 return (NULL); 1706 } 1707 } 1708 return (object); 1709 } 1710 1711 have_group: 1712 pc->pc_hits++; 1713 pc->pc_nitems--; 1714 object = pcg_get(pcg); 1715 1716 if (pcg->pcg_avail == 0) 1717 pc->pc_allocfrom = NULL; 1718 1719 simple_unlock(&pc->pc_slock); 1720 1721 return (object); 1722 } 1723 1724 /* 1725 * pool_cache_put: 1726 * 1727 * Put an object back to the pool cache. 1728 */ 1729 void 1730 pool_cache_put(struct pool_cache *pc, void *object) 1731 { 1732 struct pool_cache_group *pcg; 1733 int s; 1734 1735 simple_lock(&pc->pc_slock); 1736 1737 if ((pcg = pc->pc_freeto) == NULL) { 1738 TAILQ_FOREACH(pcg, &pc->pc_grouplist, pcg_list) { 1739 if (pcg->pcg_avail != PCG_NOBJECTS) { 1740 pc->pc_freeto = pcg; 1741 goto have_group; 1742 } 1743 } 1744 1745 /* 1746 * No empty groups to free the object to. Attempt to 1747 * allocate one. 1748 */ 1749 simple_unlock(&pc->pc_slock); 1750 s = splvm(); 1751 pcg = pool_get(&pcgpool, PR_NOWAIT); 1752 splx(s); 1753 if (pcg != NULL) { 1754 memset(pcg, 0, sizeof(*pcg)); 1755 simple_lock(&pc->pc_slock); 1756 pc->pc_ngroups++; 1757 TAILQ_INSERT_TAIL(&pc->pc_grouplist, pcg, pcg_list); 1758 if (pc->pc_freeto == NULL) 1759 pc->pc_freeto = pcg; 1760 goto have_group; 1761 } 1762 1763 /* 1764 * Unable to allocate a cache group; destruct the object 1765 * and free it back to the pool. 1766 */ 1767 pool_cache_destruct_object(pc, object); 1768 return; 1769 } 1770 1771 have_group: 1772 pc->pc_nitems++; 1773 pcg_put(pcg, object); 1774 1775 if (pcg->pcg_avail == PCG_NOBJECTS) 1776 pc->pc_freeto = NULL; 1777 1778 simple_unlock(&pc->pc_slock); 1779 } 1780 1781 /* 1782 * pool_cache_destruct_object: 1783 * 1784 * Force destruction of an object and its release back into 1785 * the pool. 1786 */ 1787 void 1788 pool_cache_destruct_object(struct pool_cache *pc, void *object) 1789 { 1790 1791 if (pc->pc_dtor != NULL) 1792 (*pc->pc_dtor)(pc->pc_arg, object); 1793 pool_put(pc->pc_pool, object); 1794 } 1795 1796 /* 1797 * pool_cache_do_invalidate: 1798 * 1799 * This internal function implements pool_cache_invalidate() and 1800 * pool_cache_reclaim(). 1801 */ 1802 void 1803 pool_cache_do_invalidate(struct pool_cache *pc, int free_groups, 1804 void (*putit)(struct pool *, void *)) 1805 { 1806 struct pool_cache_group *pcg, *npcg; 1807 void *object; 1808 int s; 1809 1810 for (pcg = TAILQ_FIRST(&pc->pc_grouplist); pcg != NULL; 1811 pcg = npcg) { 1812 npcg = TAILQ_NEXT(pcg, pcg_list); 1813 while (pcg->pcg_avail != 0) { 1814 pc->pc_nitems--; 1815 object = pcg_get(pcg); 1816 if (pcg->pcg_avail == 0 && pc->pc_allocfrom == pcg) 1817 pc->pc_allocfrom = NULL; 1818 if (pc->pc_dtor != NULL) 1819 (*pc->pc_dtor)(pc->pc_arg, object); 1820 (*putit)(pc->pc_pool, object); 1821 } 1822 if (free_groups) { 1823 pc->pc_ngroups--; 1824 TAILQ_REMOVE(&pc->pc_grouplist, pcg, pcg_list); 1825 if (pc->pc_freeto == pcg) 1826 pc->pc_freeto = NULL; 1827 s = splvm(); 1828 pool_put(&pcgpool, pcg); 1829 splx(s); 1830 } 1831 } 1832 } 1833 1834 /* 1835 * pool_cache_invalidate: 1836 * 1837 * Invalidate a pool cache (destruct and release all of the 1838 * cached objects). 
 */
void
pool_cache_invalidate(struct pool_cache *pc)
{

	simple_lock(&pc->pc_slock);
	pool_cache_do_invalidate(pc, 0, pool_put);
	simple_unlock(&pc->pc_slock);
}

/*
 * pool_cache_reclaim:
 *
 *	Reclaim a pool cache for pool_reclaim().
 */
void
pool_cache_reclaim(struct pool_cache *pc)
{

	simple_lock(&pc->pc_slock);
	pool_cache_do_invalidate(pc, 1, pool_do_put);
	simple_unlock(&pc->pc_slock);
}

/*
 * We have three different sysctls.
 * kern.pool.npools - the number of pools.
 * kern.pool.pool.<pool#> - the pool struct for the pool#.
 * kern.pool.name.<pool#> - the name for pool#.
 */
int
sysctl_dopool(int *name, u_int namelen, char *where, size_t *sizep)
{
	struct pool *pp, *foundpool = NULL;
	size_t buflen = where != NULL ? *sizep : 0;
	int npools = 0, s;
	unsigned int lookfor;
	size_t len;

	switch (*name) {
	case KERN_POOL_NPOOLS:
		if (namelen != 1 || buflen != sizeof(int))
			return (EINVAL);
		lookfor = 0;
		break;
	case KERN_POOL_NAME:
		if (namelen != 2 || buflen < 1)
			return (EINVAL);
		lookfor = name[1];
		break;
	case KERN_POOL_POOL:
		if (namelen != 2 || buflen != sizeof(struct pool))
			return (EINVAL);
		lookfor = name[1];
		break;
	default:
		return (EINVAL);
	}

	s = splvm();
	simple_lock(&pool_head_slock);

	TAILQ_FOREACH(pp, &pool_head, pr_poollist) {
		npools++;
		if (lookfor == pp->pr_serial) {
			foundpool = pp;
			break;
		}
	}

	simple_unlock(&pool_head_slock);
	splx(s);

	if (lookfor != 0 && foundpool == NULL)
		return (ENOENT);

	switch (*name) {
	case KERN_POOL_NPOOLS:
		return copyout(&npools, where, buflen);
	case KERN_POOL_NAME:
		len = strlen(foundpool->pr_wchan) + 1;
		if (*sizep < len)
			return (ENOMEM);
		*sizep = len;
		return copyout(foundpool->pr_wchan, where, len);
	case KERN_POOL_POOL:
		return copyout(foundpool, where, buflen);
	}
	/* NOTREACHED */
	return (0); /* XXX - Stupid gcc */
}
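/*
 * Example (illustrative sketch only, not compiled here): a userland program
 * could query these sysctls roughly as follows.  The second-level MIB
 * identifier is assumed to be KERN_POOL (it is not defined in this file),
 * and error handling is omitted.
 *
 *	int mib[3], npools;
 *	size_t len = sizeof(npools);
 *
 *	mib[0] = CTL_KERN;
 *	mib[1] = KERN_POOL;
 *	mib[2] = KERN_POOL_NPOOLS;
 *	sysctl(mib, 3, &npools, &len, NULL, 0);
 *
 * For kern.pool.name.<pool#> and kern.pool.pool.<pool#>, a fourth MIB
 * element carries the pool's serial number (pr_serial), and the old-value
 * buffer receives the name string or the struct pool respectively.
 */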
/*
 * Pool backend allocators.
 *
 * Each pool has a backend allocator that handles allocation, deallocation
 * and any additional draining that might be needed.
 *
 * We provide two standard allocators:
 *	pool_allocator_kmem - the default used when no allocator is
 *	    specified.
 *	pool_allocator_nointr - used for pools that will not be accessed
 *	    in interrupt context.
 */
void	*pool_page_alloc(struct pool *, int);
void	pool_page_free(struct pool *, void *);
void	*pool_page_alloc_nointr(struct pool *, int);
void	pool_page_free_nointr(struct pool *, void *);
int	pool_allocator_drain(struct pool_allocator *, struct pool *, int);

struct pool_allocator pool_allocator_kmem = {
	pool_page_alloc, pool_page_free, 0,
};
struct pool_allocator pool_allocator_nointr = {
	pool_page_alloc_nointr, pool_page_free_nointr, 0,
};

/*
 * XXX - we have at least three different resources for the same allocation
 * and each resource can be depleted. First we have the ready elements in
 * the pool. Then we have the resource (typically a vm_map) for this
 * allocator, then we have physical memory. Waiting for any of these can
 * be unnecessary when any other is freed, but the kernel doesn't support
 * sleeping on multiple addresses, so we have to fake. The caller sleeps on
 * the pool (so that we can be awakened when an item is returned to the pool),
 * but we set PA_WANT on the allocator. When a page is returned to
 * the allocator and PA_WANT is set pool_allocator_free will wakeup all
 * sleeping pools belonging to this allocator. (XXX - thundering herd).
 * We also wake up the allocator in case someone without a pool (malloc)
 * is sleeping waiting for this allocator.
 */

void *
pool_allocator_alloc(struct pool *org, int flags)
{
	struct pool_allocator *pa = org->pr_alloc;
	int freed;
	void *res;
	int s;

	do {
		if ((res = (*pa->pa_alloc)(org, flags)) != NULL)
			return (res);
		if ((flags & PR_WAITOK) == 0) {
			/*
			 * We only run the drain hook here if PR_NOWAIT.
			 * In other cases the hook will be run in
			 * pool_reclaim.
			 */
			if (org->pr_drain_hook != NULL) {
				(*org->pr_drain_hook)(org->pr_drain_hook_arg,
				    flags);
				if ((res = (*pa->pa_alloc)(org, flags)) != NULL)
					return (res);
			}
			break;
		}
		s = splvm();
		simple_lock(&pa->pa_slock);
		freed = pool_allocator_drain(pa, org, 1);
		simple_unlock(&pa->pa_slock);
		splx(s);
	} while (freed);
	return (NULL);
}

void
pool_allocator_free(struct pool *pp, void *v)
{
	struct pool_allocator *pa = pp->pr_alloc;
	int s;

	(*pa->pa_free)(pp, v);

	s = splvm();
	simple_lock(&pa->pa_slock);
	if ((pa->pa_flags & PA_WANT) == 0) {
		simple_unlock(&pa->pa_slock);
		splx(s);
		return;
	}

	TAILQ_FOREACH(pp, &pa->pa_list, pr_alloc_list) {
		simple_lock(&pp->pr_slock);
		if ((pp->pr_flags & PR_WANTED) != 0) {
			pp->pr_flags &= ~PR_WANTED;
			wakeup(pp);
		}
		simple_unlock(&pp->pr_slock);
	}
	pa->pa_flags &= ~PA_WANT;
	simple_unlock(&pa->pa_slock);
	splx(s);
}
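/*
 * Example (illustrative sketch only): a pool that is never used from
 * interrupt context can be placed on top of pool_allocator_nointr instead
 * of the default kmem-backed allocator.  `bar_pool' and `struct bar' are
 * placeholder names.
 *
 *	struct pool bar_pool;
 *
 *	pool_init(&bar_pool, sizeof(struct bar), 0, 0, 0, "barpl",
 *	    &pool_allocator_nointr);
 *
 * Passing NULL as the last argument to pool_init() selects
 * pool_allocator_kmem; pool_allocator_nointr allocates from kernel_map and
 * asserts IPL_NONE, as the pool_page_alloc_nointr()/pool_page_free_nointr()
 * routines below show.
 */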
/*
 * Drain all pools, except 'org', that use this allocator.
 *
 * Must be called at appropriate spl level and with the allocator locked.
 *
 * We do this to reclaim va space. pa_alloc is responsible
 * for waiting for physical memory.
 * XXX - we risk looping forever if someone calls pool_destroy on 'start'.
 *	 But there is no other way to have potentially sleeping pool_reclaim,
 *	 non-sleeping locks on pool_allocator and some stirring of drained
 *	 pools in the allocator.
 * XXX - maybe we should use pool_head_slock for locking the allocators?
 */
int
pool_allocator_drain(struct pool_allocator *pa, struct pool *org, int need)
{
	struct pool *pp, *start;
	int freed;

	freed = 0;

	pp = start = TAILQ_FIRST(&pa->pa_list);
	do {
		TAILQ_REMOVE(&pa->pa_list, pp, pr_alloc_list);
		TAILQ_INSERT_TAIL(&pa->pa_list, pp, pr_alloc_list);
		if (pp == org)
			continue;
		simple_unlock(&pa->pa_slock);
		freed = pool_reclaim(pp);
		simple_lock(&pa->pa_slock);
	} while ((pp = TAILQ_FIRST(&pa->pa_list)) != start && (freed < need));

	if (!freed) {
		/*
		 * We set PA_WANT here, the caller will most likely
		 * sleep waiting for pages (if not, this won't hurt
		 * that much) and there is no way to set this in the
		 * caller without violating locking order.
		 */
		pa->pa_flags |= PA_WANT;
	}

	return (freed);
}

void *
pool_page_alloc(struct pool *pp, int flags)
{
	boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;

	return ((void *)uvm_km_alloc_poolpage1(kmem_map, uvmexp.kmem_object,
	    waitok));
}

void
pool_page_free(struct pool *pp, void *v)
{

	uvm_km_free_poolpage1(kmem_map, (vaddr_t)v);
}

void *
pool_page_alloc_nointr(struct pool *pp, int flags)
{
	boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;

	splassert(IPL_NONE);

	return ((void *)uvm_km_alloc_poolpage1(kernel_map, uvm.kernel_object,
	    waitok));
}

void
pool_page_free_nointr(struct pool *pp, void *v)
{
	splassert(IPL_NONE);

	uvm_km_free_poolpage1(kernel_map, (vaddr_t)v);
}
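/*
 * Example (illustrative sketch only): the tuning interfaces defined above
 * are typically applied right after pool_init().  `foo_pool', `struct foo'
 * and `foo_drain' are placeholder names.
 *
 *	pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0, "foopl", NULL);
 *	pool_setlowat(&foo_pool, 32);
 *	pool_sethiwat(&foo_pool, 1024);
 *	pool_sethardlimit(&foo_pool, 2048, "foo_pool limit reached", 60);
 *	pool_set_drain_hook(&foo_pool, foo_drain, NULL);
 *
 * The low water mark keeps at least that many items primed, the high water
 * mark bounds how many idle pages are retained, and the hard limit caps the
 * number of outstanding items (the warning here is rate-limited to once per
 * 60 seconds).  The drain hook is called when the pool needs items freed
 * back to it; pool_set_drain_hook() must be used just after pool_init(),
 * before the pool sees any other use.
 */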