/*	$NetBSD: subr_pool.c,v 1.141 2007/12/13 02:45:10 yamt Exp $	*/

/*-
 * Copyright (c) 1997, 1999, 2000, 2002, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Paul Kranenburg; by Jason R. Thorpe of the Numerical Aerospace
 * Simulation Facility, NASA Ames Research Center, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.141 2007/12/13 02:45:10 yamt Exp $");

#include "opt_ddb.h"
#include "opt_pool.h"
#include "opt_poollog.h"
#include "opt_lockdebug.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bitops.h>
#include <sys/proc.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/pool.h>
#include <sys/syslog.h>
#include <sys/debug.h>
#include <sys/lockdebug.h>
#include <sys/xcall.h>
#include <sys/cpu.h>

#include <uvm/uvm.h>

/*
 * Pool resource management utility.
 *
 * Memory is allocated in pages which are split into pieces according to
 * the pool item size.  Each page is kept on one of three lists in the
 * pool structure: `pr_emptypages', `pr_fullpages' and `pr_partpages',
 * for empty, full and partially-full pages respectively.  The individual
 * pool items are on a linked list headed by `ph_itemlist' in each page
 * header.  The memory for building the page list is either taken from
 * the allocated pages themselves (for small pool items) or taken from
 * an internal pool of page headers (`phpool').
 */
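/*
 * Illustrative sketch (not part of the original file): a typical client
 * of this interface declares a pool for fixed-size objects, then gets
 * and puts items.  The names `example_item', `example_pool' and
 * `example_pool_usage' are hypothetical.
 */
#if 0
struct example_item {
	int	ei_value;
};

static struct pool example_pool;

static void
example_pool_usage(void)
{
	struct example_item *ei;

	/* One-time setup; a NULL allocator selects the default. */
	pool_init(&example_pool, sizeof(struct example_item), 0, 0, 0,
	    "exmplpl", NULL, IPL_NONE);

	/* PR_WAITOK: sleep until an item can be provided. */
	ei = pool_get(&example_pool, PR_WAITOK);
	ei->ei_value = 0;

	/* Return the item to the pool when done with it. */
	pool_put(&example_pool, ei);

	pool_destroy(&example_pool);
}
#endif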

/* List of all pools */
LIST_HEAD(,pool) pool_head = LIST_HEAD_INITIALIZER(pool_head);

/* List of all caches. */
LIST_HEAD(,pool_cache) pool_cache_head =
    LIST_HEAD_INITIALIZER(pool_cache_head);

/* Private pool for page header structures */
#define	PHPOOL_MAX	8
static struct pool phpool[PHPOOL_MAX];
#define	PHPOOL_FREELIST_NELEM(idx)	\
	(((idx) == 0) ? 0 : BITMAP_SIZE * (1 << (idx)))

#ifdef POOL_SUBPAGE
/* Pool of subpages for use by normal pools. */
static struct pool psppool;
#endif

static SLIST_HEAD(, pool_allocator) pa_deferinitq =
    SLIST_HEAD_INITIALIZER(pa_deferinitq);

static void *pool_page_alloc_meta(struct pool *, int);
static void pool_page_free_meta(struct pool *, void *);

/* allocator for pool metadata */
struct pool_allocator pool_allocator_meta = {
	pool_page_alloc_meta, pool_page_free_meta,
	.pa_backingmapptr = &kmem_map,
};

/* # of seconds to retain page after last use */
int pool_inactive_time = 10;

/* Next candidate for drainage (see pool_drain()) */
static struct pool	*drainpp;

/* This lock protects both pool_head and drainpp. */
static kmutex_t pool_head_lock;
static kcondvar_t pool_busy;

typedef uint32_t pool_item_bitmap_t;
#define	BITMAP_SIZE	(CHAR_BIT * sizeof(pool_item_bitmap_t))
#define	BITMAP_MASK	(BITMAP_SIZE - 1)

struct pool_item_header {
	/* Page headers */
	LIST_ENTRY(pool_item_header)
				ph_pagelist;	/* pool page list */
	SPLAY_ENTRY(pool_item_header)
				ph_node;	/* Off-page page headers */
	void *			ph_page;	/* this page's address */
	struct timeval		ph_time;	/* last referenced */
	uint16_t		ph_nmissing;	/* # of chunks in use */
	uint16_t		ph_off;		/* start offset in page */
	union {
		/* !PR_NOTOUCH */
		struct {
			LIST_HEAD(, pool_item)
				phu_itemlist;	/* chunk list for this page */
		} phu_normal;
		/* PR_NOTOUCH */
		struct {
			pool_item_bitmap_t phu_bitmap[1];
		} phu_notouch;
	} ph_u;
};
#define	ph_itemlist	ph_u.phu_normal.phu_itemlist
#define	ph_bitmap	ph_u.phu_notouch.phu_bitmap

struct pool_item {
#ifdef DIAGNOSTIC
	u_int pi_magic;
#endif
#define	PI_MAGIC 0xdeaddeadU
	/* Other entries use only this list entry */
	LIST_ENTRY(pool_item)	pi_list;
};

#define	POOL_NEEDS_CATCHUP(pp)						\
	((pp)->pr_nitems < (pp)->pr_minitems)

/*
 * Pool cache management.
 *
 * Pool caches provide a way for constructed objects to be cached by the
 * pool subsystem.  This can lead to performance improvements by avoiding
 * needless object construction/destruction; it is deferred until absolutely
 * necessary.
 *
 * Caches are grouped into cache groups.  Each cache group references up
 * to PCG_NUMOBJECTS constructed objects.  When a cache allocates an
 * object from the pool, it calls the object's constructor and places it
 * into a cache group.  When a cache group frees an object back to the
 * pool, it first calls the object's destructor.  This allows the object
 * to persist in constructed form while freed to the cache.
 *
 * The pool references each cache, so that when a pool is drained by the
 * pagedaemon, it can drain each individual cache as well.  Each time a
 * cache is drained, the most idle cache group is freed to the pool in
 * its entirety.
 *
 * Pool caches are laid on top of pools.  By layering them, we can avoid
 * the complexity of cache management for pools which would not benefit
 * from it.
 */
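/*
 * Illustrative sketch (not part of the original file): a client keeps
 * constructed objects cached by pairing a constructor and destructor
 * with a pool cache.  `example_obj', `example_ctor', `example_dtor'
 * and `example_cache_usage' are hypothetical names.
 */
#if 0
struct example_obj {
	kmutex_t eo_lock;
};

static int
example_ctor(void *arg, void *obj, int flags)
{
	struct example_obj *eo = obj;

	/* Runs only when the cache must fall back to the pool. */
	mutex_init(&eo->eo_lock, MUTEX_DEFAULT, IPL_NONE);
	return 0;
}

static void
example_dtor(void *arg, void *obj)
{
	struct example_obj *eo = obj;

	/* Runs only when a cached object is finally released. */
	mutex_destroy(&eo->eo_lock);
}

static void
example_cache_usage(void)
{
	pool_cache_t pc;
	struct example_obj *eo;

	pc = pool_cache_init(sizeof(struct example_obj), 0, 0, 0,
	    "exmplobj", NULL, IPL_NONE, example_ctor, example_dtor, NULL);

	eo = pool_cache_get(pc, PR_WAITOK);	/* constructed object */
	pool_cache_put(pc, eo);			/* stays constructed */

	pool_cache_destroy(pc);
}
#endif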

static struct pool pcgpool;
static struct pool cache_pool;
static struct pool cache_cpu_pool;

static pool_cache_cpu_t *pool_cache_put_slow(pool_cache_cpu_t *, int *,
					     void *, paddr_t);
static pool_cache_cpu_t *pool_cache_get_slow(pool_cache_cpu_t *, int *,
					     void **, paddr_t *, int);
static void	pool_cache_cpu_init1(struct cpu_info *, pool_cache_t);
static void	pool_cache_invalidate_groups(pool_cache_t, pcg_t *);
static void	pool_cache_xcall(pool_cache_t);

static int	pool_catchup(struct pool *);
static void	pool_prime_page(struct pool *, void *,
		    struct pool_item_header *);
static void	pool_update_curpage(struct pool *);

static int	pool_grow(struct pool *, int);
static void	*pool_allocator_alloc(struct pool *, int);
static void	pool_allocator_free(struct pool *, void *);

static void pool_print_pagelist(struct pool *, struct pool_pagelist *,
	void (*)(const char *, ...));
static void pool_print1(struct pool *, const char *,
	void (*)(const char *, ...));

static int pool_chk_page(struct pool *, const char *,
			 struct pool_item_header *);

/*
 * Pool log entry.  An array of these is allocated in pool_init().
 */
struct pool_log {
	const char	*pl_file;
	long		pl_line;
	int		pl_action;
#define	PRLOG_GET	1
#define	PRLOG_PUT	2
	void		*pl_addr;
};

#ifdef POOL_DIAGNOSTIC
/* Number of entries in pool log buffers */
#ifndef POOL_LOGSIZE
#define	POOL_LOGSIZE	10
#endif

int pool_logsize = POOL_LOGSIZE;

static inline void
pr_log(struct pool *pp, void *v, int action, const char *file, long line)
{
	int n = pp->pr_curlogentry;
	struct pool_log *pl;

	if ((pp->pr_roflags & PR_LOGGING) == 0)
		return;

	/*
	 * Fill in the current entry.  Wrap around and overwrite
	 * the oldest entry if necessary.
	 */
	pl = &pp->pr_log[n];
	pl->pl_file = file;
	pl->pl_line = line;
	pl->pl_action = action;
	pl->pl_addr = v;
	if (++n >= pp->pr_logsize)
		n = 0;
	pp->pr_curlogentry = n;
}

static void
pr_printlog(struct pool *pp, struct pool_item *pi,
    void (*pr)(const char *, ...))
{
	int i = pp->pr_logsize;
	int n = pp->pr_curlogentry;

	if ((pp->pr_roflags & PR_LOGGING) == 0)
		return;

	/*
	 * Print all entries in this pool's log.
	 */
	while (i-- > 0) {
		struct pool_log *pl = &pp->pr_log[n];
		if (pl->pl_action != 0) {
			if (pi == NULL || pi == pl->pl_addr) {
				(*pr)("\tlog entry %d:\n", i);
				(*pr)("\t\taction = %s, addr = %p\n",
"get" : "put", 276 pl->pl_addr); 277 (*pr)("\t\tfile: %s at line %lu\n", 278 pl->pl_file, pl->pl_line); 279 } 280 } 281 if (++n >= pp->pr_logsize) 282 n = 0; 283 } 284 } 285 286 static inline void 287 pr_enter(struct pool *pp, const char *file, long line) 288 { 289 290 if (__predict_false(pp->pr_entered_file != NULL)) { 291 printf("pool %s: reentrancy at file %s line %ld\n", 292 pp->pr_wchan, file, line); 293 printf(" previous entry at file %s line %ld\n", 294 pp->pr_entered_file, pp->pr_entered_line); 295 panic("pr_enter"); 296 } 297 298 pp->pr_entered_file = file; 299 pp->pr_entered_line = line; 300 } 301 302 static inline void 303 pr_leave(struct pool *pp) 304 { 305 306 if (__predict_false(pp->pr_entered_file == NULL)) { 307 printf("pool %s not entered?\n", pp->pr_wchan); 308 panic("pr_leave"); 309 } 310 311 pp->pr_entered_file = NULL; 312 pp->pr_entered_line = 0; 313 } 314 315 static inline void 316 pr_enter_check(struct pool *pp, void (*pr)(const char *, ...)) 317 { 318 319 if (pp->pr_entered_file != NULL) 320 (*pr)("\n\tcurrently entered from file %s line %ld\n", 321 pp->pr_entered_file, pp->pr_entered_line); 322 } 323 #else 324 #define pr_log(pp, v, action, file, line) 325 #define pr_printlog(pp, pi, pr) 326 #define pr_enter(pp, file, line) 327 #define pr_leave(pp) 328 #define pr_enter_check(pp, pr) 329 #endif /* POOL_DIAGNOSTIC */ 330 331 static inline unsigned int 332 pr_item_notouch_index(const struct pool *pp, const struct pool_item_header *ph, 333 const void *v) 334 { 335 const char *cp = v; 336 unsigned int idx; 337 338 KASSERT(pp->pr_roflags & PR_NOTOUCH); 339 idx = (cp - (char *)ph->ph_page - ph->ph_off) / pp->pr_size; 340 KASSERT(idx < pp->pr_itemsperpage); 341 return idx; 342 } 343 344 static inline void 345 pr_item_notouch_put(const struct pool *pp, struct pool_item_header *ph, 346 void *obj) 347 { 348 unsigned int idx = pr_item_notouch_index(pp, ph, obj); 349 pool_item_bitmap_t *bitmap = ph->ph_bitmap + (idx / BITMAP_SIZE); 350 pool_item_bitmap_t mask = 1 << (idx & BITMAP_MASK); 351 352 KASSERT((*bitmap & mask) == 0); 353 *bitmap |= mask; 354 } 355 356 static inline void * 357 pr_item_notouch_get(const struct pool *pp, struct pool_item_header *ph) 358 { 359 pool_item_bitmap_t *bitmap = ph->ph_bitmap; 360 unsigned int idx; 361 int i; 362 363 for (i = 0; ; i++) { 364 int bit; 365 366 KASSERT((i * BITMAP_SIZE) < pp->pr_itemsperpage); 367 bit = ffs32(bitmap[i]); 368 if (bit) { 369 pool_item_bitmap_t mask; 370 371 bit--; 372 idx = (i * BITMAP_SIZE) + bit; 373 mask = 1 << bit; 374 KASSERT((bitmap[i] & mask) != 0); 375 bitmap[i] &= ~mask; 376 break; 377 } 378 } 379 KASSERT(idx < pp->pr_itemsperpage); 380 return (char *)ph->ph_page + ph->ph_off + idx * pp->pr_size; 381 } 382 383 static inline void 384 pr_item_notouch_init(const struct pool *pp, struct pool_item_header *ph) 385 { 386 pool_item_bitmap_t *bitmap = ph->ph_bitmap; 387 const int n = howmany(pp->pr_itemsperpage, BITMAP_SIZE); 388 int i; 389 390 for (i = 0; i < n; i++) { 391 bitmap[i] = (pool_item_bitmap_t)-1; 392 } 393 } 394 395 static inline int 396 phtree_compare(struct pool_item_header *a, struct pool_item_header *b) 397 { 398 399 /* 400 * we consider pool_item_header with smaller ph_page bigger. 401 * (this unnatural ordering is for the benefit of pr_find_pagehead.) 
static inline int
phtree_compare(struct pool_item_header *a, struct pool_item_header *b)
{

	/*
	 * we consider pool_item_header with smaller ph_page bigger.
	 * (this unnatural ordering is for the benefit of pr_find_pagehead.)
	 */

	if (a->ph_page < b->ph_page)
		return (1);
	else if (a->ph_page > b->ph_page)
		return (-1);
	else
		return (0);
}

SPLAY_PROTOTYPE(phtree, pool_item_header, ph_node, phtree_compare);
SPLAY_GENERATE(phtree, pool_item_header, ph_node, phtree_compare);

static inline struct pool_item_header *
pr_find_pagehead_noalign(struct pool *pp, void *v)
{
	struct pool_item_header *ph, tmp;

	tmp.ph_page = (void *)(uintptr_t)v;
	ph = SPLAY_FIND(phtree, &pp->pr_phtree, &tmp);
	if (ph == NULL) {
		ph = SPLAY_ROOT(&pp->pr_phtree);
		if (ph != NULL && phtree_compare(&tmp, ph) >= 0) {
			ph = SPLAY_NEXT(phtree, &pp->pr_phtree, ph);
		}
		KASSERT(ph == NULL || phtree_compare(&tmp, ph) < 0);
	}

	return ph;
}

/*
 * Return the pool page header based on item address.
 */
static inline struct pool_item_header *
pr_find_pagehead(struct pool *pp, void *v)
{
	struct pool_item_header *ph, tmp;

	if ((pp->pr_roflags & PR_NOALIGN) != 0) {
		ph = pr_find_pagehead_noalign(pp, v);
	} else {
		void *page =
		    (void *)((uintptr_t)v & pp->pr_alloc->pa_pagemask);

		if ((pp->pr_roflags & PR_PHINPAGE) != 0) {
			ph = (struct pool_item_header *)((char *)page +
			    pp->pr_phoffset);
		} else {
			tmp.ph_page = page;
			ph = SPLAY_FIND(phtree, &pp->pr_phtree, &tmp);
		}
	}

	KASSERT(ph == NULL || ((pp->pr_roflags & PR_PHINPAGE) != 0) ||
	    ((char *)ph->ph_page <= (char *)v &&
	    (char *)v < (char *)ph->ph_page + pp->pr_alloc->pa_pagesz));
	return ph;
}
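/*
 * Illustrative note (not part of the original file): for an aligned
 * allocator with pa_pagesz == 4096, pa_pagemask == ~0xfff, so an item
 * at 0xdead1234 masks down to the page address 0xdead1000.  With
 * PR_PHINPAGE the header then sits at that address plus pr_phoffset;
 * otherwise it is looked up in the pr_phtree splay tree above.
 */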
static void
pr_pagelist_free(struct pool *pp, struct pool_pagelist *pq)
{
	struct pool_item_header *ph;

	while ((ph = LIST_FIRST(pq)) != NULL) {
		LIST_REMOVE(ph, ph_pagelist);
		pool_allocator_free(pp, ph->ph_page);
		if ((pp->pr_roflags & PR_PHINPAGE) == 0)
			pool_put(pp->pr_phpool, ph);
	}
}

/*
 * Remove a page from the pool.
 */
static inline void
pr_rmpage(struct pool *pp, struct pool_item_header *ph,
     struct pool_pagelist *pq)
{

	KASSERT(mutex_owned(&pp->pr_lock));

	/*
	 * If the page was idle, decrement the idle page count.
	 */
	if (ph->ph_nmissing == 0) {
#ifdef DIAGNOSTIC
		if (pp->pr_nidle == 0)
			panic("pr_rmpage: nidle inconsistent");
		if (pp->pr_nitems < pp->pr_itemsperpage)
			panic("pr_rmpage: nitems inconsistent");
#endif
		pp->pr_nidle--;
	}

	pp->pr_nitems -= pp->pr_itemsperpage;

	/*
	 * Unlink the page from the pool and queue it for release.
	 */
	LIST_REMOVE(ph, ph_pagelist);
	if ((pp->pr_roflags & PR_PHINPAGE) == 0)
		SPLAY_REMOVE(phtree, &pp->pr_phtree, ph);
	LIST_INSERT_HEAD(pq, ph, ph_pagelist);

	pp->pr_npages--;
	pp->pr_npagefree++;

	pool_update_curpage(pp);
}

static bool
pa_starved_p(struct pool_allocator *pa)
{

	if (pa->pa_backingmap != NULL) {
		return vm_map_starved_p(pa->pa_backingmap);
	}
	return false;
}

static int
pool_reclaim_callback(struct callback_entry *ce, void *obj, void *arg)
{
	struct pool *pp = obj;
	struct pool_allocator *pa = pp->pr_alloc;

	KASSERT(&pp->pr_reclaimerentry == ce);
	pool_reclaim(pp);
	if (!pa_starved_p(pa)) {
		return CALLBACK_CHAIN_ABORT;
	}
	return CALLBACK_CHAIN_CONTINUE;
}

static void
pool_reclaim_register(struct pool *pp)
{
	struct vm_map *map = pp->pr_alloc->pa_backingmap;
	int s;

	if (map == NULL) {
		return;
	}

	s = splvm(); /* not necessary for INTRSAFE maps, but don't care. */
	callback_register(&vm_map_to_kernel(map)->vmk_reclaim_callback,
	    &pp->pr_reclaimerentry, pp, pool_reclaim_callback);
	splx(s);
}

static void
pool_reclaim_unregister(struct pool *pp)
{
	struct vm_map *map = pp->pr_alloc->pa_backingmap;
	int s;

	if (map == NULL) {
		return;
	}

	s = splvm(); /* not necessary for INTRSAFE maps, but don't care. */
	callback_unregister(&vm_map_to_kernel(map)->vmk_reclaim_callback,
	    &pp->pr_reclaimerentry);
	splx(s);
}

static void
pa_reclaim_register(struct pool_allocator *pa)
{
	struct vm_map *map = *pa->pa_backingmapptr;
	struct pool *pp;

	KASSERT(pa->pa_backingmap == NULL);
	if (map == NULL) {
		SLIST_INSERT_HEAD(&pa_deferinitq, pa, pa_q);
		return;
	}
	pa->pa_backingmap = map;
	TAILQ_FOREACH(pp, &pa->pa_list, pr_alloc_list) {
		pool_reclaim_register(pp);
	}
}

/*
 * Initialize all the pools listed in the "pools" link set.
 */
void
pool_subsystem_init(void)
{
	struct pool_allocator *pa;
	__link_set_decl(pools, struct link_pool_init);
	struct link_pool_init * const *pi;

	mutex_init(&pool_head_lock, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&pool_busy, "poolbusy");

	__link_set_foreach(pi, pools)
		pool_init((*pi)->pp, (*pi)->size, (*pi)->align,
		    (*pi)->align_offset, (*pi)->flags, (*pi)->wchan,
		    (*pi)->palloc, (*pi)->ipl);

	while ((pa = SLIST_FIRST(&pa_deferinitq)) != NULL) {
		KASSERT(pa->pa_backingmapptr != NULL);
		KASSERT(*pa->pa_backingmapptr != NULL);
		SLIST_REMOVE_HEAD(&pa_deferinitq, pa_q);
		pa_reclaim_register(pa);
	}

	pool_init(&cache_pool, sizeof(struct pool_cache), CACHE_LINE_SIZE,
	    0, 0, "pcache", &pool_allocator_nointr, IPL_NONE);

	pool_init(&cache_cpu_pool, sizeof(pool_cache_cpu_t), CACHE_LINE_SIZE,
	    0, 0, "pcachecpu", &pool_allocator_nointr, IPL_NONE);
}

/*
 * Initialize the given pool resource structure.
 *
 * We export this routine to allow other kernel parts to declare
 * static pools that must be initialized before malloc() is available.
 */
void
pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags,
    const char *wchan, struct pool_allocator *palloc, int ipl)
{
#ifdef DEBUG
	struct pool *pp1;
#endif
	size_t trysize, phsize;
	int off, slack;

#ifdef DEBUG
	/*
	 * Check that the pool hasn't already been initialised and
	 * added to the list of all pools.
	 */
	LIST_FOREACH(pp1, &pool_head, pr_poollist) {
		if (pp == pp1)
			panic("pool_init: pool %s already initialised",
			    wchan);
	}
#endif

#ifdef POOL_DIAGNOSTIC
	/*
	 * Always log if POOL_DIAGNOSTIC is defined.
	 */
	if (pool_logsize != 0)
		flags |= PR_LOGGING;
#endif

	if (palloc == NULL)
		palloc = &pool_allocator_kmem;
#ifdef POOL_SUBPAGE
	if (size > palloc->pa_pagesz) {
		if (palloc == &pool_allocator_kmem)
			palloc = &pool_allocator_kmem_fullpage;
		else if (palloc == &pool_allocator_nointr)
			palloc = &pool_allocator_nointr_fullpage;
	}
#endif /* POOL_SUBPAGE */
	if ((palloc->pa_flags & PA_INITIALIZED) == 0) {
		if (palloc->pa_pagesz == 0)
			palloc->pa_pagesz = PAGE_SIZE;

		TAILQ_INIT(&palloc->pa_list);

		mutex_init(&palloc->pa_lock, MUTEX_DEFAULT, IPL_VM);
		palloc->pa_pagemask = ~(palloc->pa_pagesz - 1);
		palloc->pa_pageshift = ffs(palloc->pa_pagesz) - 1;

		if (palloc->pa_backingmapptr != NULL) {
			pa_reclaim_register(palloc);
		}
		palloc->pa_flags |= PA_INITIALIZED;
	}

	if (align == 0)
		align = ALIGN(1);

	if ((flags & PR_NOTOUCH) == 0 && size < sizeof(struct pool_item))
		size = sizeof(struct pool_item);

	size = roundup(size, align);
#ifdef DIAGNOSTIC
	if (size > palloc->pa_pagesz)
		panic("pool_init: pool item size (%zu) too large", size);
#endif

	/*
	 * Initialize the pool structure.
	 */
	LIST_INIT(&pp->pr_emptypages);
	LIST_INIT(&pp->pr_fullpages);
	LIST_INIT(&pp->pr_partpages);
	pp->pr_cache = NULL;
	pp->pr_curpage = NULL;
	pp->pr_npages = 0;
	pp->pr_minitems = 0;
	pp->pr_minpages = 0;
	pp->pr_maxpages = UINT_MAX;
	pp->pr_roflags = flags;
	pp->pr_flags = 0;
	pp->pr_size = size;
	pp->pr_align = align;
	pp->pr_wchan = wchan;
	pp->pr_alloc = palloc;
	pp->pr_nitems = 0;
	pp->pr_nout = 0;
	pp->pr_hardlimit = UINT_MAX;
	pp->pr_hardlimit_warning = NULL;
	pp->pr_hardlimit_ratecap.tv_sec = 0;
	pp->pr_hardlimit_ratecap.tv_usec = 0;
	pp->pr_hardlimit_warning_last.tv_sec = 0;
	pp->pr_hardlimit_warning_last.tv_usec = 0;
	pp->pr_drain_hook = NULL;
	pp->pr_drain_hook_arg = NULL;
	pp->pr_freecheck = NULL;

	/*
	 * Decide whether to put the page header off-page, to avoid wasting
	 * too large a part of the page or because the item is too big.
	 * Off-page page headers go in a splay tree, so we can match a
	 * returned item with its header based on the page address.  We use
	 * 1/16 of the page size and about 8 times the item size as the
	 * threshold (XXX: tune).
	 *
	 * However, we'll put the header into the page if we can put
	 * it there without wasting any items.
	 *
	 * Silently enforce `0 <= ioff < align'.
	 */
	pp->pr_itemoffset = ioff %= align;
	/* See the comment below about reserved bytes. */
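	/*
	 * Illustrative note (not part of the original file), assuming
	 * 4096-byte pages and a 48-byte aligned header: a 64-byte item
	 * passes the size test (64 < MIN(4096/16, 48*8) == 256), so the
	 * header lives in the page; a 1024-byte item fails it, and since
	 * 4096/1024 == 4 items but (4096-48)/1024 == 3, an in-page header
	 * would also cost an item, so the header comes from `phpool'.
	 */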
	trysize = palloc->pa_pagesz - ((align - ioff) % align);
	phsize = ALIGN(sizeof(struct pool_item_header));
	if ((pp->pr_roflags & (PR_NOTOUCH | PR_NOALIGN)) == 0 &&
	    (pp->pr_size < MIN(palloc->pa_pagesz / 16, phsize << 3) ||
	    trysize / pp->pr_size == (trysize - phsize) / pp->pr_size)) {
		/* Use the end of the page for the page header */
		pp->pr_roflags |= PR_PHINPAGE;
		pp->pr_phoffset = off = palloc->pa_pagesz - phsize;
	} else {
		/* The page header will be taken from our page header pool */
		pp->pr_phoffset = 0;
		off = palloc->pa_pagesz;
		SPLAY_INIT(&pp->pr_phtree);
	}

	/*
	 * Alignment is to take place at `ioff' within the item.  This means
	 * we must reserve up to `align - 1' bytes on the page to allow
	 * appropriate positioning of each item.
	 */
	pp->pr_itemsperpage = (off - ((align - ioff) % align)) / pp->pr_size;
	KASSERT(pp->pr_itemsperpage != 0);
	if ((pp->pr_roflags & PR_NOTOUCH)) {
		int idx;

		for (idx = 0; pp->pr_itemsperpage > PHPOOL_FREELIST_NELEM(idx);
		    idx++) {
			/* nothing */
		}
		if (idx >= PHPOOL_MAX) {
			/*
			 * If you see this panic, consider tweaking
			 * PHPOOL_MAX and PHPOOL_FREELIST_NELEM.
			 */
			panic("%s: too large itemsperpage(%d) for PR_NOTOUCH",
			    pp->pr_wchan, pp->pr_itemsperpage);
		}
		pp->pr_phpool = &phpool[idx];
	} else if ((pp->pr_roflags & PR_PHINPAGE) == 0) {
		pp->pr_phpool = &phpool[0];
	}
#if defined(DIAGNOSTIC)
	else {
		pp->pr_phpool = NULL;
	}
#endif

	/*
	 * Use the slack between the chunks and the page header
	 * for "cache coloring".
	 */
	slack = off - pp->pr_itemsperpage * pp->pr_size;
	pp->pr_maxcolor = (slack / align) * align;
	pp->pr_curcolor = 0;

	pp->pr_nget = 0;
	pp->pr_nfail = 0;
	pp->pr_nput = 0;
	pp->pr_npagealloc = 0;
	pp->pr_npagefree = 0;
	pp->pr_hiwat = 0;
	pp->pr_nidle = 0;
	pp->pr_refcnt = 0;

#ifdef POOL_DIAGNOSTIC
	if (flags & PR_LOGGING) {
		if (kmem_map == NULL ||
		    (pp->pr_log = malloc(pool_logsize * sizeof(struct pool_log),
		     M_TEMP, M_NOWAIT)) == NULL)
			pp->pr_roflags &= ~PR_LOGGING;
		pp->pr_curlogentry = 0;
		pp->pr_logsize = pool_logsize;
	}
#endif

	pp->pr_entered_file = NULL;
	pp->pr_entered_line = 0;

	/*
	 * XXXAD hack to prevent IP input processing from blocking.
	 */
	if (ipl == IPL_SOFTNET) {
		mutex_init(&pp->pr_lock, MUTEX_DEFAULT, IPL_VM);
	} else {
		mutex_init(&pp->pr_lock, MUTEX_DEFAULT, ipl);
	}
	cv_init(&pp->pr_cv, wchan);
	pp->pr_ipl = ipl;

	/*
	 * Initialize private page header pool and cache magazine pool if we
	 * haven't done so yet.
	 * XXX LOCKING.
	 */
	if (phpool[0].pr_size == 0) {
		int idx;
		for (idx = 0; idx < PHPOOL_MAX; idx++) {
			static char phpool_names[PHPOOL_MAX][6+1+6+1];
			int nelem;
			size_t sz;

			nelem = PHPOOL_FREELIST_NELEM(idx);
			snprintf(phpool_names[idx], sizeof(phpool_names[idx]),
			    "phpool-%d", nelem);
			sz = sizeof(struct pool_item_header);
			if (nelem) {
				sz = offsetof(struct pool_item_header,
				    ph_bitmap[howmany(nelem, BITMAP_SIZE)]);
			}
			pool_init(&phpool[idx], sz, 0, 0, 0,
			    phpool_names[idx], &pool_allocator_meta, IPL_VM);
		}
#ifdef POOL_SUBPAGE
		pool_init(&psppool, POOL_SUBPAGE, POOL_SUBPAGE, 0,
		    PR_RECURSIVE, "psppool", &pool_allocator_meta, IPL_VM);
#endif
		pool_init(&pcgpool, sizeof(pcg_t), CACHE_LINE_SIZE, 0, 0,
		    "cachegrp", &pool_allocator_meta, IPL_VM);
	}

	if (__predict_true(!cold)) {
		/* Insert into the list of all pools. */
		mutex_enter(&pool_head_lock);
		LIST_INSERT_HEAD(&pool_head, pp, pr_poollist);
		mutex_exit(&pool_head_lock);

		/* Insert this into the list of pools using this allocator. */
		mutex_enter(&palloc->pa_lock);
		TAILQ_INSERT_TAIL(&palloc->pa_list, pp, pr_alloc_list);
		mutex_exit(&palloc->pa_lock);
	} else {
		LIST_INSERT_HEAD(&pool_head, pp, pr_poollist);
		TAILQ_INSERT_TAIL(&palloc->pa_list, pp, pr_alloc_list);
	}

	pool_reclaim_register(pp);
}

/*
 * De-commission a pool resource.
 */
void
pool_destroy(struct pool *pp)
{
	struct pool_pagelist pq;
	struct pool_item_header *ph;

	/* Remove from global pool list */
	mutex_enter(&pool_head_lock);
	while (pp->pr_refcnt != 0)
		cv_wait(&pool_busy, &pool_head_lock);
	LIST_REMOVE(pp, pr_poollist);
	if (drainpp == pp)
		drainpp = NULL;
	mutex_exit(&pool_head_lock);

	/* Remove this pool from its allocator's list of pools. */
	pool_reclaim_unregister(pp);
	mutex_enter(&pp->pr_alloc->pa_lock);
	TAILQ_REMOVE(&pp->pr_alloc->pa_list, pp, pr_alloc_list);
	mutex_exit(&pp->pr_alloc->pa_lock);

	mutex_enter(&pp->pr_lock);

	KASSERT(pp->pr_cache == NULL);

#ifdef DIAGNOSTIC
	if (pp->pr_nout != 0) {
		pr_printlog(pp, NULL, printf);
		panic("pool_destroy: pool busy: still out: %u",
		    pp->pr_nout);
	}
#endif

	KASSERT(LIST_EMPTY(&pp->pr_fullpages));
	KASSERT(LIST_EMPTY(&pp->pr_partpages));

	/* Remove all pages */
	LIST_INIT(&pq);
	while ((ph = LIST_FIRST(&pp->pr_emptypages)) != NULL)
		pr_rmpage(pp, ph, &pq);

	mutex_exit(&pp->pr_lock);

	pr_pagelist_free(pp, &pq);

#ifdef POOL_DIAGNOSTIC
	if ((pp->pr_roflags & PR_LOGGING) != 0)
		free(pp->pr_log, M_TEMP);
#endif

	cv_destroy(&pp->pr_cv);
	mutex_destroy(&pp->pr_lock);
}

void
pool_set_drain_hook(struct pool *pp, void (*fn)(void *, int), void *arg)
{

	/* XXX no locking -- must be used just after pool_init() */
#ifdef DIAGNOSTIC
	if (pp->pr_drain_hook != NULL)
		panic("pool_set_drain_hook(%s): already set", pp->pr_wchan);
#endif
	pp->pr_drain_hook = fn;
	pp->pr_drain_hook_arg = arg;
}

static struct pool_item_header *
pool_alloc_item_header(struct pool *pp, void *storage, int flags)
{
	struct pool_item_header *ph;

	if ((pp->pr_roflags & PR_PHINPAGE) != 0)
		ph = (struct pool_item_header *) ((char *)storage +
		    pp->pr_phoffset);
	else
		ph = pool_get(pp->pr_phpool, flags);

	return (ph);
}

/*
 * Grab an item from the pool.
 */
void *
#ifdef POOL_DIAGNOSTIC
_pool_get(struct pool *pp, int flags, const char *file, long line)
#else
pool_get(struct pool *pp, int flags)
#endif
{
	struct pool_item *pi;
	struct pool_item_header *ph;
	void *v;

#ifdef DIAGNOSTIC
	if (__predict_false(pp->pr_itemsperpage == 0))
		panic("pool_get: pool %p: pr_itemsperpage is zero, "
		    "pool not initialized?", pp);
	if (__predict_false(curlwp == NULL && doing_shutdown == 0 &&
	    (flags & PR_WAITOK) != 0))
		panic("pool_get: %s: must have NOWAIT", pp->pr_wchan);

#endif /* DIAGNOSTIC */
#ifdef LOCKDEBUG
	if (flags & PR_WAITOK)
		ASSERT_SLEEPABLE(NULL, "pool_get(PR_WAITOK)");
#endif

	mutex_enter(&pp->pr_lock);
	pr_enter(pp, file, line);

 startover:
	/*
	 * Check to see if we've reached the hard limit.  If we have,
	 * and we can wait, then wait until an item has been returned to
	 * the pool.
	 */
#ifdef DIAGNOSTIC
	if (__predict_false(pp->pr_nout > pp->pr_hardlimit)) {
		pr_leave(pp);
		mutex_exit(&pp->pr_lock);
		panic("pool_get: %s: crossed hard limit", pp->pr_wchan);
	}
#endif
	if (__predict_false(pp->pr_nout == pp->pr_hardlimit)) {
		if (pp->pr_drain_hook != NULL) {
			/*
			 * Since the drain hook is going to free things
			 * back to the pool, unlock, call the hook, re-lock,
			 * and check the hardlimit condition again.
			 */
			pr_leave(pp);
			mutex_exit(&pp->pr_lock);
			(*pp->pr_drain_hook)(pp->pr_drain_hook_arg, flags);
			mutex_enter(&pp->pr_lock);
			pr_enter(pp, file, line);
			if (pp->pr_nout < pp->pr_hardlimit)
				goto startover;
		}

		if ((flags & PR_WAITOK) && !(flags & PR_LIMITFAIL)) {
			/*
			 * XXX: A warning isn't logged in this case.  Should
			 * it be?
			 */
			pp->pr_flags |= PR_WANTED;
			pr_leave(pp);
			cv_wait(&pp->pr_cv, &pp->pr_lock);
			pr_enter(pp, file, line);
			goto startover;
		}

		/*
		 * Log a message that the hard limit has been hit.
		 */
		if (pp->pr_hardlimit_warning != NULL &&
		    ratecheck(&pp->pr_hardlimit_warning_last,
		    &pp->pr_hardlimit_ratecap))
			log(LOG_ERR, "%s\n", pp->pr_hardlimit_warning);

		pp->pr_nfail++;

		pr_leave(pp);
		mutex_exit(&pp->pr_lock);
		return (NULL);
	}

	/*
	 * The convention we use is that if `curpage' is not NULL, then
	 * it points at a non-empty bucket.  In particular, `curpage'
	 * never points at a page header which has PR_PHINPAGE set and
	 * has no items in its bucket.
	 */
	if ((ph = pp->pr_curpage) == NULL) {
		int error;

#ifdef DIAGNOSTIC
		if (pp->pr_nitems != 0) {
			mutex_exit(&pp->pr_lock);
			printf("pool_get: %s: curpage NULL, nitems %u\n",
			    pp->pr_wchan, pp->pr_nitems);
			panic("pool_get: nitems inconsistent");
		}
#endif

		/*
		 * Call the back-end page allocator for more memory.
		 * Release the pool lock, as the back-end page allocator
		 * may block.
		 */
		pr_leave(pp);
		error = pool_grow(pp, flags);
		pr_enter(pp, file, line);
		if (error != 0) {
			/*
			 * We were unable to allocate a page or item
			 * header, but we released the lock during
			 * allocation, so perhaps items were freed
			 * back to the pool.  Check for this case.
			 */
			if (pp->pr_curpage != NULL)
				goto startover;

			pp->pr_nfail++;
			pr_leave(pp);
			mutex_exit(&pp->pr_lock);
			return (NULL);
		}

		/* Start the allocation process over. */
		goto startover;
	}
	if (pp->pr_roflags & PR_NOTOUCH) {
#ifdef DIAGNOSTIC
		if (__predict_false(ph->ph_nmissing == pp->pr_itemsperpage)) {
			pr_leave(pp);
			mutex_exit(&pp->pr_lock);
			panic("pool_get: %s: page empty", pp->pr_wchan);
		}
#endif
		v = pr_item_notouch_get(pp, ph);
#ifdef POOL_DIAGNOSTIC
		pr_log(pp, v, PRLOG_GET, file, line);
#endif
	} else {
		v = pi = LIST_FIRST(&ph->ph_itemlist);
		if (__predict_false(v == NULL)) {
			pr_leave(pp);
			mutex_exit(&pp->pr_lock);
			panic("pool_get: %s: page empty", pp->pr_wchan);
		}
#ifdef DIAGNOSTIC
		if (__predict_false(pp->pr_nitems == 0)) {
			pr_leave(pp);
			mutex_exit(&pp->pr_lock);
			printf("pool_get: %s: items on itemlist, nitems %u\n",
			    pp->pr_wchan, pp->pr_nitems);
			panic("pool_get: nitems inconsistent");
		}
#endif

#ifdef POOL_DIAGNOSTIC
		pr_log(pp, v, PRLOG_GET, file, line);
#endif

#ifdef DIAGNOSTIC
		if (__predict_false(pi->pi_magic != PI_MAGIC)) {
			pr_printlog(pp, pi, printf);
			panic("pool_get(%s): free list modified: "
			    "magic=%x; page %p; item addr %p\n",
			    pp->pr_wchan, pi->pi_magic, ph->ph_page, pi);
		}
#endif

		/*
		 * Remove from item list.
		 */
		LIST_REMOVE(pi, pi_list);
	}
	pp->pr_nitems--;
	pp->pr_nout++;
	if (ph->ph_nmissing == 0) {
#ifdef DIAGNOSTIC
		if (__predict_false(pp->pr_nidle == 0))
			panic("pool_get: nidle inconsistent");
#endif
		pp->pr_nidle--;

		/*
		 * This page was previously empty.  Move it to the list of
		 * partially-full pages.  This page is already curpage.
		 */
		LIST_REMOVE(ph, ph_pagelist);
		LIST_INSERT_HEAD(&pp->pr_partpages, ph, ph_pagelist);
	}
	ph->ph_nmissing++;
	if (ph->ph_nmissing == pp->pr_itemsperpage) {
#ifdef DIAGNOSTIC
		if (__predict_false((pp->pr_roflags & PR_NOTOUCH) == 0 &&
		    !LIST_EMPTY(&ph->ph_itemlist))) {
			pr_leave(pp);
			mutex_exit(&pp->pr_lock);
			panic("pool_get: %s: nmissing inconsistent",
			    pp->pr_wchan);
		}
#endif
		/*
		 * This page is now full.  Move it to the full list
		 * and select a new current page.
		 */
		LIST_REMOVE(ph, ph_pagelist);
		LIST_INSERT_HEAD(&pp->pr_fullpages, ph, ph_pagelist);
		pool_update_curpage(pp);
	}

	pp->pr_nget++;
	pr_leave(pp);

	/*
	 * If we have a low water mark and we are now below that low
	 * water mark, add more items to the pool.
	 */
	if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
		/*
		 * XXX: Should we log a warning?  Should we set up a timeout
		 * to try again in a second or so?  The latter could break
		 * a caller's assumptions about interrupt protection, etc.
		 */
	}

	mutex_exit(&pp->pr_lock);
	KASSERT((((vaddr_t)v + pp->pr_itemoffset) & (pp->pr_align - 1)) == 0);
	FREECHECK_OUT(&pp->pr_freecheck, v);
	return (v);
}

/*
 * Internal version of pool_put().  Pool is already locked/entered.
 */
static void
pool_do_put(struct pool *pp, void *v, struct pool_pagelist *pq)
{
	struct pool_item *pi = v;
	struct pool_item_header *ph;

	KASSERT(mutex_owned(&pp->pr_lock));
	FREECHECK_IN(&pp->pr_freecheck, v);
	LOCKDEBUG_MEM_CHECK(v, pp->pr_size);

#ifdef DIAGNOSTIC
	if (__predict_false(pp->pr_nout == 0)) {
		printf("pool %s: putting with none out\n",
		    pp->pr_wchan);
		panic("pool_put");
	}
#endif

	if (__predict_false((ph = pr_find_pagehead(pp, v)) == NULL)) {
		pr_printlog(pp, NULL, printf);
		panic("pool_put: %s: page header missing", pp->pr_wchan);
	}

	/*
	 * Return to item list.
	 */
	if (pp->pr_roflags & PR_NOTOUCH) {
		pr_item_notouch_put(pp, ph, v);
	} else {
#ifdef DIAGNOSTIC
		pi->pi_magic = PI_MAGIC;
#endif
#ifdef DEBUG
		{
			int i, *ip = v;

			for (i = 0; i < pp->pr_size / sizeof(int); i++) {
				*ip++ = PI_MAGIC;
			}
		}
#endif

		LIST_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
	}
	KDASSERT(ph->ph_nmissing != 0);
	ph->ph_nmissing--;
	pp->pr_nput++;
	pp->pr_nitems++;
	pp->pr_nout--;

	/* Cancel "pool empty" condition if it exists */
	if (pp->pr_curpage == NULL)
		pp->pr_curpage = ph;

	if (pp->pr_flags & PR_WANTED) {
		pp->pr_flags &= ~PR_WANTED;
		if (ph->ph_nmissing == 0)
			pp->pr_nidle++;
		cv_broadcast(&pp->pr_cv);
		return;
	}

	/*
	 * If this page is now empty, do one of two things:
	 *
	 *	(1) If we have more pages than the page high water mark,
	 *	    free the page back to the system.  ONLY CONSIDER
	 *	    FREEING BACK A PAGE IF WE HAVE MORE THAN OUR MINIMUM PAGE
	 *	    CLAIM.
	 *
	 *	(2) Otherwise, move the page to the empty page list.
	 *
	 * Either way, select a new current page (so we use a partially-full
	 * page if one is available).
	 */
	if (ph->ph_nmissing == 0) {
		pp->pr_nidle++;
		if (pp->pr_npages > pp->pr_minpages &&
		    (pp->pr_npages > pp->pr_maxpages ||
		     pa_starved_p(pp->pr_alloc))) {
			pr_rmpage(pp, ph, pq);
		} else {
			LIST_REMOVE(ph, ph_pagelist);
			LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist);

			/*
			 * Update the timestamp on the page.  A page must
			 * be idle for some period of time before it can
			 * be reclaimed by the pagedaemon.  This minimizes
			 * ping-pong'ing for memory.
			 */
			getmicrotime(&ph->ph_time);
		}
		pool_update_curpage(pp);
	}

	/*
	 * If the page was previously completely full, move it to the
	 * partially-full list and make it the current page.  The next
	 * allocation will get the item from this page, instead of
	 * further fragmenting the pool.
	 */
	else if (ph->ph_nmissing == (pp->pr_itemsperpage - 1)) {
		LIST_REMOVE(ph, ph_pagelist);
		LIST_INSERT_HEAD(&pp->pr_partpages, ph, ph_pagelist);
		pp->pr_curpage = ph;
	}
}

/*
 * Return resource to the pool.
 */
#ifdef POOL_DIAGNOSTIC
void
_pool_put(struct pool *pp, void *v, const char *file, long line)
{
	struct pool_pagelist pq;

	LIST_INIT(&pq);

	mutex_enter(&pp->pr_lock);
	pr_enter(pp, file, line);

	pr_log(pp, v, PRLOG_PUT, file, line);

	pool_do_put(pp, v, &pq);

	pr_leave(pp);
	mutex_exit(&pp->pr_lock);

	pr_pagelist_free(pp, &pq);
}
#undef pool_put
#endif /* POOL_DIAGNOSTIC */

void
pool_put(struct pool *pp, void *v)
{
	struct pool_pagelist pq;

	LIST_INIT(&pq);

	mutex_enter(&pp->pr_lock);
	pool_do_put(pp, v, &pq);
	mutex_exit(&pp->pr_lock);

	pr_pagelist_free(pp, &pq);
}

#ifdef POOL_DIAGNOSTIC
#define		pool_put(h, v)	_pool_put((h), (v), __FILE__, __LINE__)
#endif

/*
 * pool_grow: grow a pool by a page.
 *
 * => called with pool locked.
 * => unlock and relock the pool.
 * => return with pool locked.
 */

static int
pool_grow(struct pool *pp, int flags)
{
	struct pool_item_header *ph = NULL;
	char *cp;

	mutex_exit(&pp->pr_lock);
	cp = pool_allocator_alloc(pp, flags);
	if (__predict_true(cp != NULL)) {
		ph = pool_alloc_item_header(pp, cp, flags);
	}
	if (__predict_false(cp == NULL || ph == NULL)) {
		if (cp != NULL) {
			pool_allocator_free(pp, cp);
		}
		mutex_enter(&pp->pr_lock);
		return ENOMEM;
	}

	mutex_enter(&pp->pr_lock);
	pool_prime_page(pp, cp, ph);
	pp->pr_npagealloc++;
	return 0;
}

/*
 * Add N items to the pool.
 */
int
pool_prime(struct pool *pp, int n)
{
	int newpages;
	int error = 0;

	mutex_enter(&pp->pr_lock);

	newpages = roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;

	while (newpages-- > 0) {
		error = pool_grow(pp, PR_NOWAIT);
		if (error) {
			break;
		}
		pp->pr_minpages++;
	}

	if (pp->pr_minpages >= pp->pr_maxpages)
		pp->pr_maxpages = pp->pr_minpages + 1;	/* XXX */

	mutex_exit(&pp->pr_lock);
	return error;
}
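/*
 * Illustrative sketch (not part of the original file): a subsystem that
 * cannot tolerate allocation failure later on might preallocate a
 * page-rounded number of items at attach time.  `example_pool' and
 * `example_attach' are hypothetical names.
 */
#if 0
static struct pool example_pool;

static void
example_attach(void)
{

	pool_init(&example_pool, 128, 0, 0, 0, "exmplpl", NULL, IPL_VM);

	/* Round up to whole pages and keep them as the minimum claim. */
	if (pool_prime(&example_pool, 64) != 0)
		printf("example: could not preallocate items\n");
}
#endif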

/*
 * Add a page worth of items to the pool.
 *
 * Note, we must be called with the pool descriptor LOCKED.
 */
static void
pool_prime_page(struct pool *pp, void *storage, struct pool_item_header *ph)
{
	struct pool_item *pi;
	void *cp = storage;
	const unsigned int align = pp->pr_align;
	const unsigned int ioff = pp->pr_itemoffset;
	int n;

	KASSERT(mutex_owned(&pp->pr_lock));

#ifdef DIAGNOSTIC
	if ((pp->pr_roflags & PR_NOALIGN) == 0 &&
	    ((uintptr_t)cp & (pp->pr_alloc->pa_pagesz - 1)) != 0)
		panic("pool_prime_page: %s: unaligned page", pp->pr_wchan);
#endif

	/*
	 * Insert page header.
	 */
	LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist);
	LIST_INIT(&ph->ph_itemlist);
	ph->ph_page = storage;
	ph->ph_nmissing = 0;
	getmicrotime(&ph->ph_time);
	if ((pp->pr_roflags & PR_PHINPAGE) == 0)
		SPLAY_INSERT(phtree, &pp->pr_phtree, ph);

	pp->pr_nidle++;

	/*
	 * Color this page.
	 */
	ph->ph_off = pp->pr_curcolor;
	cp = (char *)cp + ph->ph_off;
	if ((pp->pr_curcolor += align) > pp->pr_maxcolor)
		pp->pr_curcolor = 0;

	/*
	 * Adjust storage to apply alignment to `pr_itemoffset' in each item.
	 */
	if (ioff != 0)
		cp = (char *)cp + align - ioff;

	KASSERT((((vaddr_t)cp + ioff) & (align - 1)) == 0);

	/*
	 * Insert remaining chunks on the bucket list.
	 */
	n = pp->pr_itemsperpage;
	pp->pr_nitems += n;

	if (pp->pr_roflags & PR_NOTOUCH) {
		pr_item_notouch_init(pp, ph);
	} else {
		while (n--) {
			pi = (struct pool_item *)cp;

			KASSERT(((((vaddr_t)pi) + ioff) & (align - 1)) == 0);

			/* Insert on page list */
			LIST_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
#ifdef DIAGNOSTIC
			pi->pi_magic = PI_MAGIC;
#endif
			cp = (char *)cp + pp->pr_size;

			KASSERT((((vaddr_t)cp + ioff) & (align - 1)) == 0);
		}
	}

	/*
	 * If the pool was depleted, point at the new page.
	 */
	if (pp->pr_curpage == NULL)
		pp->pr_curpage = ph;

	if (++pp->pr_npages > pp->pr_hiwat)
		pp->pr_hiwat = pp->pr_npages;
}

/*
 * Used by pool_get() when nitems drops below the low water mark.  This
 * is used to catch up pr_nitems with the low water mark.
 *
 * Note 1, we never wait for memory here, we let the caller decide what to do.
 *
 * Note 2, we must be called with the pool already locked, and we return
 * with it locked.
 */
static int
pool_catchup(struct pool *pp)
{
	int error = 0;

	while (POOL_NEEDS_CATCHUP(pp)) {
		error = pool_grow(pp, PR_NOWAIT);
		if (error) {
			break;
		}
	}
	return error;
}

static void
pool_update_curpage(struct pool *pp)
{

	pp->pr_curpage = LIST_FIRST(&pp->pr_partpages);
	if (pp->pr_curpage == NULL) {
		pp->pr_curpage = LIST_FIRST(&pp->pr_emptypages);
	}
}
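/*
 * Illustrative sketch (not part of the original file): the water-mark
 * setters defined below are typically tuned once after pool_init();
 * `example_tune' is a hypothetical name.
 */
#if 0
static void
example_tune(struct pool *pp)
{

	pool_setlowat(pp, 16);		/* keep at least 16 items ready */
	pool_sethiwat(pp, 1024);	/* release idle pages above this */
	pool_sethardlimit(pp, 4096,	/* fail allocations beyond this */
	    "example: hard limit reached", 60);
}
#endif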
1547 */ 1548 } 1549 1550 mutex_exit(&pp->pr_lock); 1551 } 1552 1553 void 1554 pool_sethiwat(struct pool *pp, int n) 1555 { 1556 1557 mutex_enter(&pp->pr_lock); 1558 1559 pp->pr_maxpages = (n == 0) 1560 ? 0 1561 : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage; 1562 1563 mutex_exit(&pp->pr_lock); 1564 } 1565 1566 void 1567 pool_sethardlimit(struct pool *pp, int n, const char *warnmess, int ratecap) 1568 { 1569 1570 mutex_enter(&pp->pr_lock); 1571 1572 pp->pr_hardlimit = n; 1573 pp->pr_hardlimit_warning = warnmess; 1574 pp->pr_hardlimit_ratecap.tv_sec = ratecap; 1575 pp->pr_hardlimit_warning_last.tv_sec = 0; 1576 pp->pr_hardlimit_warning_last.tv_usec = 0; 1577 1578 /* 1579 * In-line version of pool_sethiwat(), because we don't want to 1580 * release the lock. 1581 */ 1582 pp->pr_maxpages = (n == 0) 1583 ? 0 1584 : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage; 1585 1586 mutex_exit(&pp->pr_lock); 1587 } 1588 1589 /* 1590 * Release all complete pages that have not been used recently. 1591 */ 1592 int 1593 #ifdef POOL_DIAGNOSTIC 1594 _pool_reclaim(struct pool *pp, const char *file, long line) 1595 #else 1596 pool_reclaim(struct pool *pp) 1597 #endif 1598 { 1599 struct pool_item_header *ph, *phnext; 1600 struct pool_pagelist pq; 1601 struct timeval curtime, diff; 1602 bool klock; 1603 int rv; 1604 1605 if (pp->pr_drain_hook != NULL) { 1606 /* 1607 * The drain hook must be called with the pool unlocked. 1608 */ 1609 (*pp->pr_drain_hook)(pp->pr_drain_hook_arg, PR_NOWAIT); 1610 } 1611 1612 /* 1613 * XXXSMP Because mutexes at IPL_SOFTXXX are still spinlocks, 1614 * and we are called from the pagedaemon without kernel_lock. 1615 * Does not apply to IPL_SOFTBIO. 1616 */ 1617 if (pp->pr_ipl == IPL_SOFTNET || pp->pr_ipl == IPL_SOFTCLOCK || 1618 pp->pr_ipl == IPL_SOFTSERIAL) { 1619 KERNEL_LOCK(1, NULL); 1620 klock = true; 1621 } else 1622 klock = false; 1623 1624 /* Reclaim items from the pool's cache (if any). */ 1625 if (pp->pr_cache != NULL) 1626 pool_cache_invalidate(pp->pr_cache); 1627 1628 if (mutex_tryenter(&pp->pr_lock) == 0) { 1629 if (klock) { 1630 KERNEL_UNLOCK_ONE(NULL); 1631 } 1632 return (0); 1633 } 1634 pr_enter(pp, file, line); 1635 1636 LIST_INIT(&pq); 1637 1638 getmicrotime(&curtime); 1639 1640 for (ph = LIST_FIRST(&pp->pr_emptypages); ph != NULL; ph = phnext) { 1641 phnext = LIST_NEXT(ph, ph_pagelist); 1642 1643 /* Check our minimum page claim */ 1644 if (pp->pr_npages <= pp->pr_minpages) 1645 break; 1646 1647 KASSERT(ph->ph_nmissing == 0); 1648 timersub(&curtime, &ph->ph_time, &diff); 1649 if (diff.tv_sec < pool_inactive_time 1650 && !pa_starved_p(pp->pr_alloc)) 1651 continue; 1652 1653 /* 1654 * If freeing this page would put us below 1655 * the low water mark, stop now. 1656 */ 1657 if ((pp->pr_nitems - pp->pr_itemsperpage) < 1658 pp->pr_minitems) 1659 break; 1660 1661 pr_rmpage(pp, ph, &pq); 1662 } 1663 1664 pr_leave(pp); 1665 mutex_exit(&pp->pr_lock); 1666 1667 if (LIST_EMPTY(&pq)) 1668 rv = 0; 1669 else { 1670 pr_pagelist_free(pp, &pq); 1671 rv = 1; 1672 } 1673 1674 if (klock) { 1675 KERNEL_UNLOCK_ONE(NULL); 1676 } 1677 1678 return (rv); 1679 } 1680 1681 /* 1682 * Drain pools, one at a time. This is a two stage process; 1683 * drain_start kicks off a cross call to drain CPU-level caches 1684 * if the pool has an associated pool_cache. drain_end waits 1685 * for those cross calls to finish, and then drains the cache 1686 * (if any) and pool. 1687 * 1688 * Note, must never be called from interrupt context. 

/*
 * Drain pools, one at a time.  This is a two stage process;
 * drain_start kicks off a cross call to drain CPU-level caches
 * if the pool has an associated pool_cache.  drain_end waits
 * for those cross calls to finish, and then drains the cache
 * (if any) and pool.
 *
 * Note, must never be called from interrupt context.
 */
void
pool_drain_start(struct pool **ppp, uint64_t *wp)
{
	struct pool *pp;

	KASSERT(!LIST_EMPTY(&pool_head));

	pp = NULL;

	/* Find next pool to drain, and add a reference. */
	mutex_enter(&pool_head_lock);
	do {
		if (drainpp == NULL) {
			drainpp = LIST_FIRST(&pool_head);
		}
		if (drainpp != NULL) {
			pp = drainpp;
			drainpp = LIST_NEXT(pp, pr_poollist);
		}
		/*
		 * Skip completely idle pools.  We depend on at least
		 * one pool in the system being active.
		 */
	} while (pp == NULL || pp->pr_npages == 0);
	pp->pr_refcnt++;
	mutex_exit(&pool_head_lock);

	/* If there is a pool_cache, drain CPU level caches. */
	*ppp = pp;
	if (pp->pr_cache != NULL) {
		*wp = xc_broadcast(0, (xcfunc_t)pool_cache_xcall,
		    pp->pr_cache, NULL);
	}
}

void
pool_drain_end(struct pool *pp, uint64_t where)
{

	if (pp == NULL)
		return;

	KASSERT(pp->pr_refcnt > 0);

	/* Wait for remote draining to complete. */
	if (pp->pr_cache != NULL)
		xc_wait(where);

	/* Drain the cache (if any) and pool. */
	pool_reclaim(pp);

	/* Finally, unlock the pool. */
	mutex_enter(&pool_head_lock);
	pp->pr_refcnt--;
	cv_broadcast(&pool_busy);
	mutex_exit(&pool_head_lock);
}
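/*
 * Illustrative sketch (not part of the original file): the intended
 * caller is the pagedaemon, which splits the two stages so the cross
 * call can run while it does other work.  `example_drain_one_pool'
 * is a hypothetical name.
 */
#if 0
static void
example_drain_one_pool(void)
{
	struct pool *pp;
	uint64_t where;

	pool_drain_start(&pp, &where);
	/* ... other reclamation work can proceed here ... */
	pool_drain_end(pp, where);
}
#endif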

/*
 * Diagnostic helpers.
 */
void
pool_print(struct pool *pp, const char *modif)
{

	pool_print1(pp, modif, printf);
}

void
pool_printall(const char *modif, void (*pr)(const char *, ...))
{
	struct pool *pp;

	LIST_FOREACH(pp, &pool_head, pr_poollist) {
		pool_printit(pp, modif, pr);
	}
}

void
pool_printit(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
{

	if (pp == NULL) {
		(*pr)("Must specify a pool to print.\n");
		return;
	}

	pool_print1(pp, modif, pr);
}

static void
pool_print_pagelist(struct pool *pp, struct pool_pagelist *pl,
    void (*pr)(const char *, ...))
{
	struct pool_item_header *ph;
#ifdef DIAGNOSTIC
	struct pool_item *pi;
#endif

	LIST_FOREACH(ph, pl, ph_pagelist) {
		(*pr)("\t\tpage %p, nmissing %d, time %lu,%lu\n",
		    ph->ph_page, ph->ph_nmissing,
		    (u_long)ph->ph_time.tv_sec,
		    (u_long)ph->ph_time.tv_usec);
#ifdef DIAGNOSTIC
		if (!(pp->pr_roflags & PR_NOTOUCH)) {
			LIST_FOREACH(pi, &ph->ph_itemlist, pi_list) {
				if (pi->pi_magic != PI_MAGIC) {
					(*pr)("\t\t\titem %p, magic 0x%x\n",
					    pi, pi->pi_magic);
				}
			}
		}
#endif
	}
}

static void
pool_print1(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
{
	struct pool_item_header *ph;
	pool_cache_t pc;
	pcg_t *pcg;
	pool_cache_cpu_t *cc;
	uint64_t cpuhit, cpumiss;
	int i, print_log = 0, print_pagelist = 0, print_cache = 0;
	char c;

	while ((c = *modif++) != '\0') {
		if (c == 'l')
			print_log = 1;
		if (c == 'p')
			print_pagelist = 1;
		if (c == 'c')
			print_cache = 1;
	}

	if ((pc = pp->pr_cache) != NULL) {
		(*pr)("POOL CACHE");
	} else {
		(*pr)("POOL");
	}

	(*pr)(" %s: size %u, align %u, ioff %u, roflags 0x%08x\n",
	    pp->pr_wchan, pp->pr_size, pp->pr_align, pp->pr_itemoffset,
	    pp->pr_roflags);
	(*pr)("\talloc %p\n", pp->pr_alloc);
	(*pr)("\tminitems %u, minpages %u, maxpages %u, npages %u\n",
	    pp->pr_minitems, pp->pr_minpages, pp->pr_maxpages, pp->pr_npages);
	(*pr)("\titemsperpage %u, nitems %u, nout %u, hardlimit %u\n",
	    pp->pr_itemsperpage, pp->pr_nitems, pp->pr_nout, pp->pr_hardlimit);

	(*pr)("\tnget %lu, nfail %lu, nput %lu\n",
	    pp->pr_nget, pp->pr_nfail, pp->pr_nput);
	(*pr)("\tnpagealloc %lu, npagefree %lu, hiwat %u, nidle %lu\n",
	    pp->pr_npagealloc, pp->pr_npagefree, pp->pr_hiwat, pp->pr_nidle);

	if (print_pagelist == 0)
		goto skip_pagelist;

	if ((ph = LIST_FIRST(&pp->pr_emptypages)) != NULL)
		(*pr)("\n\tempty page list:\n");
	pool_print_pagelist(pp, &pp->pr_emptypages, pr);
	if ((ph = LIST_FIRST(&pp->pr_fullpages)) != NULL)
		(*pr)("\n\tfull page list:\n");
	pool_print_pagelist(pp, &pp->pr_fullpages, pr);
	if ((ph = LIST_FIRST(&pp->pr_partpages)) != NULL)
		(*pr)("\n\tpartial-page list:\n");
	pool_print_pagelist(pp, &pp->pr_partpages, pr);

	if (pp->pr_curpage == NULL)
		(*pr)("\tno current page\n");
	else
		(*pr)("\tcurpage %p\n", pp->pr_curpage->ph_page);

 skip_pagelist:
	if (print_log == 0)
		goto skip_log;

	(*pr)("\n");
	if ((pp->pr_roflags & PR_LOGGING) == 0)
		(*pr)("\tno log\n");
	else {
		pr_printlog(pp, NULL, pr);
	}

 skip_log:

#define PR_GROUPLIST(pcg)						\
	(*pr)("\t\tgroup %p: avail %d\n", pcg, pcg->pcg_avail);		\
	for (i = 0; i < PCG_NOBJECTS; i++) {				\
		if (pcg->pcg_objects[i].pcgo_pa !=			\
		    POOL_PADDR_INVALID) {				\
			(*pr)("\t\t\t%p, 0x%llx\n",			\
			    pcg->pcg_objects[i].pcgo_va,		\
			    (unsigned long long)			\
			    pcg->pcg_objects[i].pcgo_pa);		\
		} else {						\
			(*pr)("\t\t\t%p\n",				\
			    pcg->pcg_objects[i].pcgo_va);		\
		}							\
	}

	if (pc != NULL) {
		cpuhit = 0;
		cpumiss = 0;
		for (i = 0; i < MAXCPUS; i++) {
			if ((cc = pc->pc_cpus[i]) == NULL)
				continue;
			cpuhit += cc->cc_hits;
			cpumiss += cc->cc_misses;
		}
		(*pr)("\tcpu layer hits %llu misses %llu\n", cpuhit, cpumiss);
		(*pr)("\tcache layer hits %llu misses %llu\n",
		    pc->pc_hits, pc->pc_misses);
		(*pr)("\tcache layer entry uncontended %llu contended %llu\n",
		    pc->pc_hits + pc->pc_misses - pc->pc_contended,
		    pc->pc_contended);
		(*pr)("\tcache layer empty groups %u full groups %u\n",
		    pc->pc_nempty, pc->pc_nfull);
		if (print_cache) {
			(*pr)("\tfull cache groups:\n");
			for (pcg = pc->pc_fullgroups; pcg != NULL;
			    pcg = pcg->pcg_next) {
				PR_GROUPLIST(pcg);
			}
			(*pr)("\tempty cache groups:\n");
			for (pcg = pc->pc_emptygroups; pcg != NULL;
			    pcg = pcg->pcg_next) {
				PR_GROUPLIST(pcg);
			}
		}
	}
#undef PR_GROUPLIST

	pr_enter_check(pp, pr);
}

static int
pool_chk_page(struct pool *pp, const char *label, struct pool_item_header *ph)
{
	struct pool_item *pi;
	void *page;
	int n;

	if ((pp->pr_roflags & PR_NOALIGN) == 0) {
		page = (void *)((uintptr_t)ph & pp->pr_alloc->pa_pagemask);
		if (page != ph->ph_page &&
		    (pp->pr_roflags & PR_PHINPAGE) != 0) {
			if (label != NULL)
				printf("%s: ", label);
			printf("pool(%p:%s): page inconsistency: page %p;"
			    " at page head addr %p (p %p)\n", pp,
			    pp->pr_wchan, ph->ph_page,
			    ph, page);
			return 1;
		}
	}

	if ((pp->pr_roflags & PR_NOTOUCH) != 0)
		return 0;

	for (pi = LIST_FIRST(&ph->ph_itemlist), n = 0;
	     pi != NULL;
	     pi = LIST_NEXT(pi,pi_list), n++) {

#ifdef DIAGNOSTIC
		if (pi->pi_magic != PI_MAGIC) {
			if (label != NULL)
				printf("%s: ", label);
			printf("pool(%s): free list modified: magic=%x;"
			    " page %p; item ordinal %d; addr %p\n",
			    pp->pr_wchan, pi->pi_magic, ph->ph_page,
			    n, pi);
			panic("pool");
		}
#endif
		if ((pp->pr_roflags & PR_NOALIGN) != 0) {
			continue;
		}
		page = (void *)((uintptr_t)pi & pp->pr_alloc->pa_pagemask);
		if (page == ph->ph_page)
			continue;

		if (label != NULL)
			printf("%s: ", label);
		printf("pool(%p:%s): page inconsistency: page %p;"
		    " item ordinal %d; addr %p (p %p)\n", pp,
		    pp->pr_wchan, ph->ph_page,
		    n, pi, page);
		return 1;
	}
	return 0;
}


int
pool_chk(struct pool *pp, const char *label)
{
	struct pool_item_header *ph;
	int r = 0;

	mutex_enter(&pp->pr_lock);
	LIST_FOREACH(ph, &pp->pr_emptypages, ph_pagelist) {
		r = pool_chk_page(pp, label, ph);
		if (r) {
			goto out;
		}
	}
	LIST_FOREACH(ph, &pp->pr_fullpages, ph_pagelist) {
		r = pool_chk_page(pp, label, ph);
		if (r) {
			goto out;
		}
	}
	LIST_FOREACH(ph, &pp->pr_partpages, ph_pagelist) {
		r = pool_chk_page(pp, label, ph);
		if (r) {
			goto out;
		}
	}

out:
	mutex_exit(&pp->pr_lock);
	return (r);
}

/*
 * pool_cache_init:
 *
 *	Initialize a pool cache.
 */
pool_cache_t
pool_cache_init(size_t size, u_int align, u_int align_offset, u_int flags,
    const char *wchan, struct pool_allocator *palloc, int ipl,
    int (*ctor)(void *, void *, int), void (*dtor)(void *, void *), void *arg)
{
	pool_cache_t pc;

	pc = pool_get(&cache_pool, PR_WAITOK);
	if (pc == NULL)
		return NULL;

	pool_cache_bootstrap(pc, size, align, align_offset, flags, wchan,
	   palloc, ipl, ctor, dtor, arg);

	return pc;
}

/*
 * pool_cache_bootstrap:
 *
 *	Kernel-private version of pool_cache_init().  The caller
 *	provides initial storage.
 */
void
pool_cache_bootstrap(pool_cache_t pc, size_t size, u_int align,
    u_int align_offset, u_int flags, const char *wchan,
    struct pool_allocator *palloc, int ipl,
    int (*ctor)(void *, void *, int), void (*dtor)(void *, void *),
    void *arg)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	struct pool *pp;

	pp = &pc->pc_pool;
	if (palloc == NULL && ipl == IPL_NONE)
		palloc = &pool_allocator_nointr;
	pool_init(pp, size, align, align_offset, flags, wchan, palloc, ipl);

	/*
	 * XXXAD hack to prevent IP input processing from blocking.
	 */
	if (ipl == IPL_SOFTNET) {
		mutex_init(&pc->pc_lock, MUTEX_DEFAULT, IPL_VM);
	} else {
		mutex_init(&pc->pc_lock, MUTEX_DEFAULT, ipl);
	}

	if (ctor == NULL) {
		ctor = (int (*)(void *, void *, int))nullop;
	}
	if (dtor == NULL) {
		dtor = (void (*)(void *, void *))nullop;
	}

	pc->pc_emptygroups = NULL;
	pc->pc_fullgroups = NULL;
	pc->pc_partgroups = NULL;
	pc->pc_ctor = ctor;
	pc->pc_dtor = dtor;
	pc->pc_arg = arg;
	pc->pc_hits = 0;
	pc->pc_misses = 0;
	pc->pc_nempty = 0;
	pc->pc_npart = 0;
	pc->pc_nfull = 0;
	pc->pc_contended = 0;
	pc->pc_refcnt = 0;
	pc->pc_freecheck = NULL;

	/* Allocate per-CPU caches. */

/*
 * pool_cache_bootstrap:
 *
 *	Kernel-private version of pool_cache_init().  The caller
 *	provides initial storage.
 */
void
pool_cache_bootstrap(pool_cache_t pc, size_t size, u_int align,
    u_int align_offset, u_int flags, const char *wchan,
    struct pool_allocator *palloc, int ipl,
    int (*ctor)(void *, void *, int), void (*dtor)(void *, void *),
    void *arg)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	struct pool *pp;

	pp = &pc->pc_pool;
	if (palloc == NULL && ipl == IPL_NONE)
		palloc = &pool_allocator_nointr;
	pool_init(pp, size, align, align_offset, flags, wchan, palloc, ipl);

	/*
	 * XXXAD hack to prevent IP input processing from blocking.
	 */
	if (ipl == IPL_SOFTNET) {
		mutex_init(&pc->pc_lock, MUTEX_DEFAULT, IPL_VM);
	} else {
		mutex_init(&pc->pc_lock, MUTEX_DEFAULT, ipl);
	}

	if (ctor == NULL) {
		ctor = (int (*)(void *, void *, int))nullop;
	}
	if (dtor == NULL) {
		dtor = (void (*)(void *, void *))nullop;
	}

	pc->pc_emptygroups = NULL;
	pc->pc_fullgroups = NULL;
	pc->pc_partgroups = NULL;
	pc->pc_ctor = ctor;
	pc->pc_dtor = dtor;
	pc->pc_arg = arg;
	pc->pc_hits = 0;
	pc->pc_misses = 0;
	pc->pc_nempty = 0;
	pc->pc_npart = 0;
	pc->pc_nfull = 0;
	pc->pc_contended = 0;
	pc->pc_refcnt = 0;
	pc->pc_freecheck = NULL;

	/* Allocate per-CPU caches. */
	memset(pc->pc_cpus, 0, sizeof(pc->pc_cpus));
	pc->pc_ncpu = 0;
	if (ncpu < 2) {
		/* XXX For sparc: boot CPU is not attached yet. */
		pool_cache_cpu_init1(curcpu(), pc);
	} else {
		for (CPU_INFO_FOREACH(cii, ci)) {
			pool_cache_cpu_init1(ci, pc);
		}
	}

	if (__predict_true(!cold)) {
		mutex_enter(&pp->pr_lock);
		pp->pr_cache = pc;
		mutex_exit(&pp->pr_lock);
		mutex_enter(&pool_head_lock);
		LIST_INSERT_HEAD(&pool_cache_head, pc, pc_cachelist);
		mutex_exit(&pool_head_lock);
	} else {
		pp->pr_cache = pc;
		LIST_INSERT_HEAD(&pool_cache_head, pc, pc_cachelist);
	}
}
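
/*
 * Illustrative sketch (not part of this file): an early-boot consumer
 * that cannot yet allocate the pool_cache itself provides static
 * storage and calls pool_cache_bootstrap() directly; passing NULL for
 * the constructor and destructor selects the nullop defaults installed
 * above.  The "early" names are hypothetical.
 */
#if 0
struct early_obj {
	int	eo_state;
};

static struct pool_cache early_cache_store;

void
early_subsystem_bootstrap(void)
{

	pool_cache_bootstrap(&early_cache_store, sizeof(struct early_obj),
	    0, 0, 0, "earlypl", NULL, IPL_NONE, NULL, NULL, NULL);
}
#endif	/* 0 */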

/*
 * pool_cache_destroy:
 *
 *	Destroy a pool cache.
 */
void
pool_cache_destroy(pool_cache_t pc)
{
	struct pool *pp = &pc->pc_pool;
	pool_cache_cpu_t *cc;
	pcg_t *pcg;
	int i;

	/* Remove it from the global list. */
	mutex_enter(&pool_head_lock);
	while (pc->pc_refcnt != 0)
		cv_wait(&pool_busy, &pool_head_lock);
	LIST_REMOVE(pc, pc_cachelist);
	mutex_exit(&pool_head_lock);

	/* First, invalidate the entire cache. */
	pool_cache_invalidate(pc);

	/* Disassociate it from the pool. */
	mutex_enter(&pp->pr_lock);
	pp->pr_cache = NULL;
	mutex_exit(&pp->pr_lock);

	/* Destroy per-CPU data */
	for (i = 0; i < MAXCPUS; i++) {
		if ((cc = pc->pc_cpus[i]) == NULL)
			continue;
		if ((pcg = cc->cc_current) != NULL) {
			pcg->pcg_next = NULL;
			pool_cache_invalidate_groups(pc, pcg);
		}
		if ((pcg = cc->cc_previous) != NULL) {
			pcg->pcg_next = NULL;
			pool_cache_invalidate_groups(pc, pcg);
		}
		if (cc != &pc->pc_cpu0)
			pool_put(&cache_cpu_pool, cc);
	}

	/* Finally, destroy it. */
	mutex_destroy(&pc->pc_lock);
	pool_destroy(pp);
	pool_put(&cache_pool, pc);
}

/*
 * pool_cache_cpu_init1:
 *
 *	Called for each pool_cache whenever a new CPU is attached.
 */
static void
pool_cache_cpu_init1(struct cpu_info *ci, pool_cache_t pc)
{
	pool_cache_cpu_t *cc;
	int index;

	index = ci->ci_index;

	KASSERT(index < MAXCPUS);
	KASSERT(((uintptr_t)pc->pc_cpus & (CACHE_LINE_SIZE - 1)) == 0);

	if ((cc = pc->pc_cpus[index]) != NULL) {
		KASSERT(cc->cc_cpuindex == index);
		return;
	}

	/*
	 * The first CPU is 'free'.  This needs to be the case for
	 * bootstrap - we may not be able to allocate yet.
	 */
	if (pc->pc_ncpu == 0) {
		cc = &pc->pc_cpu0;
		pc->pc_ncpu = 1;
	} else {
		mutex_enter(&pc->pc_lock);
		pc->pc_ncpu++;
		mutex_exit(&pc->pc_lock);
		cc = pool_get(&cache_cpu_pool, PR_WAITOK);
	}

	cc->cc_ipl = pc->pc_pool.pr_ipl;
	cc->cc_iplcookie = makeiplcookie(cc->cc_ipl);
	cc->cc_cache = pc;
	cc->cc_cpuindex = index;
	cc->cc_hits = 0;
	cc->cc_misses = 0;
	cc->cc_current = NULL;
	cc->cc_previous = NULL;

	pc->pc_cpus[index] = cc;
}

/*
 * pool_cache_cpu_init:
 *
 *	Called whenever a new CPU is attached.
 */
void
pool_cache_cpu_init(struct cpu_info *ci)
{
	pool_cache_t pc;

	mutex_enter(&pool_head_lock);
	LIST_FOREACH(pc, &pool_cache_head, pc_cachelist) {
		pc->pc_refcnt++;
		mutex_exit(&pool_head_lock);

		pool_cache_cpu_init1(ci, pc);

		mutex_enter(&pool_head_lock);
		pc->pc_refcnt--;
		cv_broadcast(&pool_busy);
	}
	mutex_exit(&pool_head_lock);
}

/*
 * pool_cache_reclaim:
 *
 *	Reclaim memory from a pool cache.
 */
bool
pool_cache_reclaim(pool_cache_t pc)
{

	return pool_reclaim(&pc->pc_pool);
}

static void
pool_cache_destruct_object1(pool_cache_t pc, void *object)
{

	(*pc->pc_dtor)(pc->pc_arg, object);
	pool_put(&pc->pc_pool, object);
}

/*
 * pool_cache_destruct_object:
 *
 *	Force destruction of an object and its release back into
 *	the pool.
 */
void
pool_cache_destruct_object(pool_cache_t pc, void *object)
{

	FREECHECK_IN(&pc->pc_freecheck, object);

	pool_cache_destruct_object1(pc, object);
}

/*
 * pool_cache_invalidate_groups:
 *
 *	Invalidate a chain of groups and destruct all objects.
 */
static void
pool_cache_invalidate_groups(pool_cache_t pc, pcg_t *pcg)
{
	void *object;
	pcg_t *next;
	int i;

	for (; pcg != NULL; pcg = next) {
		next = pcg->pcg_next;

		for (i = 0; i < pcg->pcg_avail; i++) {
			object = pcg->pcg_objects[i].pcgo_va;
			pool_cache_destruct_object1(pc, object);
		}

		pool_put(&pcgpool, pcg);
	}
}

/*
 * pool_cache_invalidate:
 *
 *	Invalidate a pool cache (destruct and release all of the
 *	cached objects).  Does not reclaim objects from the pool.
 */
void
pool_cache_invalidate(pool_cache_t pc)
{
	pcg_t *full, *empty, *part;

	mutex_enter(&pc->pc_lock);
	full = pc->pc_fullgroups;
	empty = pc->pc_emptygroups;
	part = pc->pc_partgroups;
	pc->pc_fullgroups = NULL;
	pc->pc_emptygroups = NULL;
	pc->pc_partgroups = NULL;
	pc->pc_nfull = 0;
	pc->pc_nempty = 0;
	pc->pc_npart = 0;
	mutex_exit(&pc->pc_lock);

	pool_cache_invalidate_groups(pc, full);
	pool_cache_invalidate_groups(pc, empty);
	pool_cache_invalidate_groups(pc, part);
}

void
pool_cache_set_drain_hook(pool_cache_t pc, void (*fn)(void *, int), void *arg)
{

	pool_set_drain_hook(&pc->pc_pool, fn, arg);
}

void
pool_cache_setlowat(pool_cache_t pc, int n)
{

	pool_setlowat(&pc->pc_pool, n);
}

void
pool_cache_sethiwat(pool_cache_t pc, int n)
{

	pool_sethiwat(&pc->pc_pool, n);
}

void
pool_cache_sethardlimit(pool_cache_t pc, int n, const char *warnmess, int ratecap)
{

	pool_sethardlimit(&pc->pc_pool, n, warnmess, ratecap);
}

static inline pool_cache_cpu_t *
pool_cache_cpu_enter(pool_cache_t pc, int *s)
{
	pool_cache_cpu_t *cc;

	/*
	 * Prevent other users of the cache from accessing our
	 * CPU-local data.  To avoid touching shared state, we
	 * pull the necessary information from CPU local data.
	 */
	crit_enter();
	cc = pc->pc_cpus[curcpu()->ci_index];
	KASSERT(cc->cc_cache == pc);
	if (cc->cc_ipl != IPL_NONE) {
		*s = splraiseipl(cc->cc_iplcookie);
	}
	KASSERT(((uintptr_t)cc & (CACHE_LINE_SIZE - 1)) == 0);

	return cc;
}

static inline void
pool_cache_cpu_exit(pool_cache_cpu_t *cc, int *s)
{

	/* No longer need exclusive access to the per-CPU data. */
	if (cc->cc_ipl != IPL_NONE) {
		splx(*s);
	}
	crit_exit();
}

#if __GNUC_PREREQ__(3, 0)
__attribute ((noinline))
#endif
pool_cache_cpu_t *
pool_cache_get_slow(pool_cache_cpu_t *cc, int *s, void **objectp,
		    paddr_t *pap, int flags)
{
	pcg_t *pcg, *cur;
	uint64_t ncsw;
	pool_cache_t pc;
	void *object;

	pc = cc->cc_cache;
	cc->cc_misses++;

	/*
	 * Nothing was available locally.  Try and grab a group
	 * from the cache.
	 */
	if (!mutex_tryenter(&pc->pc_lock)) {
		ncsw = curlwp->l_ncsw;
		mutex_enter(&pc->pc_lock);
		pc->pc_contended++;

		/*
		 * If we context switched while locking, then
		 * our view of the per-CPU data is invalid:
		 * retry.
		 */
		if (curlwp->l_ncsw != ncsw) {
			mutex_exit(&pc->pc_lock);
			pool_cache_cpu_exit(cc, s);
			return pool_cache_cpu_enter(pc, s);
		}
	}

	if ((pcg = pc->pc_fullgroups) != NULL) {
		/*
		 * If there's a full group, release our empty
		 * group back to the cache.  Install the full
		 * group as cc_current and return.
		 */
		if ((cur = cc->cc_current) != NULL) {
			KASSERT(cur->pcg_avail == 0);
			cur->pcg_next = pc->pc_emptygroups;
			pc->pc_emptygroups = cur;
			pc->pc_nempty++;
		}
		KASSERT(pcg->pcg_avail == PCG_NOBJECTS);
		cc->cc_current = pcg;
		pc->pc_fullgroups = pcg->pcg_next;
		pc->pc_hits++;
		pc->pc_nfull--;
		mutex_exit(&pc->pc_lock);
		return cc;
	}

	/*
	 * Nothing available locally or in cache.  Take the slow
	 * path: fetch a new object from the pool and construct
	 * it.
	 */
	pc->pc_misses++;
	mutex_exit(&pc->pc_lock);
	pool_cache_cpu_exit(cc, s);

	object = pool_get(&pc->pc_pool, flags);
	*objectp = object;
	if (object == NULL)
		return NULL;

	if ((*pc->pc_ctor)(pc->pc_arg, object, flags) != 0) {
		pool_put(&pc->pc_pool, object);
		*objectp = NULL;
		return NULL;
	}

	KASSERT((((vaddr_t)object + pc->pc_pool.pr_itemoffset) &
	    (pc->pc_pool.pr_align - 1)) == 0);

	if (pap != NULL) {
#ifdef POOL_VTOPHYS
		*pap = POOL_VTOPHYS(object);
#else
		*pap = POOL_PADDR_INVALID;
#endif
	}

	FREECHECK_OUT(&pc->pc_freecheck, object);
	return NULL;
}

/*
 * pool_cache_get{,_paddr}:
 *
 *	Get an object from a pool cache (optionally returning
 *	the physical address of the object).
 */
void *
pool_cache_get_paddr(pool_cache_t pc, int flags, paddr_t *pap)
{
	pool_cache_cpu_t *cc;
	pcg_t *pcg;
	void *object;
	int s;

#ifdef LOCKDEBUG
	if (flags & PR_WAITOK)
		ASSERT_SLEEPABLE(NULL, "pool_cache_get(PR_WAITOK)");
#endif

	cc = pool_cache_cpu_enter(pc, &s);
	do {
		/* Try and allocate an object from the current group. */
		pcg = cc->cc_current;
		if (pcg != NULL && pcg->pcg_avail > 0) {
			object = pcg->pcg_objects[--pcg->pcg_avail].pcgo_va;
			if (pap != NULL)
				*pap = pcg->pcg_objects[pcg->pcg_avail].pcgo_pa;
			pcg->pcg_objects[pcg->pcg_avail].pcgo_va = NULL;
			KASSERT(pcg->pcg_avail <= PCG_NOBJECTS);
			KASSERT(object != NULL);
			cc->cc_hits++;
			pool_cache_cpu_exit(cc, &s);
			FREECHECK_OUT(&pc->pc_freecheck, object);
			return object;
		}

		/*
		 * That failed.  If the previous group isn't empty, swap
		 * it with the current group and allocate from there.
		 */
		pcg = cc->cc_previous;
		if (pcg != NULL && pcg->pcg_avail > 0) {
			cc->cc_previous = cc->cc_current;
			cc->cc_current = pcg;
			continue;
		}

		/*
		 * Can't allocate from either group: try the slow path.
		 * If get_slow() allocated an object for us, or if
		 * no more objects are available, it will return NULL.
		 * Otherwise, we need to retry.
		 */
		cc = pool_cache_get_slow(cc, &s, &object, pap, flags);
	} while (cc != NULL);

	return object;
}

#if __GNUC_PREREQ__(3, 0)
__attribute ((noinline))
#endif
pool_cache_cpu_t *
pool_cache_put_slow(pool_cache_cpu_t *cc, int *s, void *object, paddr_t pa)
{
	pcg_t *pcg, *cur;
	uint64_t ncsw;
	pool_cache_t pc;

	pc = cc->cc_cache;
	cc->cc_misses++;

	/*
	 * No free slots locally.  Try to grab an empty, unused
	 * group from the cache.
	 */
	if (!mutex_tryenter(&pc->pc_lock)) {
		ncsw = curlwp->l_ncsw;
		mutex_enter(&pc->pc_lock);
		pc->pc_contended++;

		/*
		 * If we context switched while locking, then
		 * our view of the per-CPU data is invalid:
		 * retry.
		 */
		if (curlwp->l_ncsw != ncsw) {
			mutex_exit(&pc->pc_lock);
			pool_cache_cpu_exit(cc, s);
			return pool_cache_cpu_enter(pc, s);
		}
	}

	if ((pcg = pc->pc_emptygroups) != NULL) {
		/*
		 * If there's an empty group, release our full
		 * group back to the cache.  Install the empty
		 * group as cc_current and return.
		 */
		if ((cur = cc->cc_current) != NULL) {
			KASSERT(cur->pcg_avail == PCG_NOBJECTS);
			cur->pcg_next = pc->pc_fullgroups;
			pc->pc_fullgroups = cur;
			pc->pc_nfull++;
		}
		KASSERT(pcg->pcg_avail == 0);
		cc->cc_current = pcg;
		pc->pc_emptygroups = pcg->pcg_next;
		pc->pc_hits++;
		pc->pc_nempty--;
		mutex_exit(&pc->pc_lock);
		return cc;
	}

	/*
	 * Nothing available locally or in cache.  Take the
	 * slow path and try to allocate a new group that we
	 * can release to.
	 */
	pc->pc_misses++;
	mutex_exit(&pc->pc_lock);
	pool_cache_cpu_exit(cc, s);

	/*
	 * If we can't allocate a new group, just throw the
	 * object away.
	 */
	pcg = pool_get(&pcgpool, PR_NOWAIT);
	if (pcg == NULL) {
		pool_cache_destruct_object(pc, object);
		return NULL;
	}
#ifdef DIAGNOSTIC
	memset(pcg, 0, sizeof(*pcg));
#else
	pcg->pcg_avail = 0;
#endif

	/*
	 * Add the empty group to the cache and try again.
	 */
	mutex_enter(&pc->pc_lock);
	pcg->pcg_next = pc->pc_emptygroups;
	pc->pc_emptygroups = pcg;
	pc->pc_nempty++;
	mutex_exit(&pc->pc_lock);

	return pool_cache_cpu_enter(pc, s);
}

/*
 * pool_cache_put{,_paddr}:
 *
 *	Put an object back to the pool cache (optionally caching the
 *	physical address of the object).
 */
void
pool_cache_put_paddr(pool_cache_t pc, void *object, paddr_t pa)
{
	pool_cache_cpu_t *cc;
	pcg_t *pcg;
	int s;

	FREECHECK_IN(&pc->pc_freecheck, object);

	cc = pool_cache_cpu_enter(pc, &s);
	do {
		/* If the current group isn't full, release it there. */
		pcg = cc->cc_current;
		if (pcg != NULL && pcg->pcg_avail < PCG_NOBJECTS) {
			KASSERT(pcg->pcg_objects[pcg->pcg_avail].pcgo_va
			    == NULL);
			pcg->pcg_objects[pcg->pcg_avail].pcgo_va = object;
			pcg->pcg_objects[pcg->pcg_avail].pcgo_pa = pa;
			pcg->pcg_avail++;
			cc->cc_hits++;
			pool_cache_cpu_exit(cc, &s);
			return;
		}

		/*
		 * That failed.  If the previous group is empty, swap
		 * it with the current group and try again.
		 */
		pcg = cc->cc_previous;
		if (pcg != NULL && pcg->pcg_avail == 0) {
			cc->cc_previous = cc->cc_current;
			cc->cc_current = pcg;
			continue;
		}

		/*
		 * Can't free to either group: try the slow path.
		 * If put_slow() releases the object for us, it
		 * will return NULL.  Otherwise we need to retry.
		 */
		cc = pool_cache_put_slow(cc, &s, object, pa);
	} while (cc != NULL);
}

/*
 * pool_cache_xcall:
 *
 *	Transfer objects from the per-CPU cache to the global cache.
 *	Run within a cross-call thread.
 */
static void
pool_cache_xcall(pool_cache_t pc)
{
	pool_cache_cpu_t *cc;
	pcg_t *prev, *cur, **list;
	int s = 0; /* XXXgcc */

	cc = pool_cache_cpu_enter(pc, &s);
	cur = cc->cc_current;
	cc->cc_current = NULL;
	prev = cc->cc_previous;
	cc->cc_previous = NULL;
	pool_cache_cpu_exit(cc, &s);

	/*
	 * XXXSMP Go to splvm to prevent kernel_lock from being taken,
	 * because locks at IPL_SOFTXXX are still spinlocks.  Does not
	 * apply to IPL_SOFTBIO.  Cross-call threads do not take the
	 * kernel_lock.
	 */
	s = splvm();
	mutex_enter(&pc->pc_lock);
	if (cur != NULL) {
		if (cur->pcg_avail == PCG_NOBJECTS) {
			list = &pc->pc_fullgroups;
			pc->pc_nfull++;
		} else if (cur->pcg_avail == 0) {
			list = &pc->pc_emptygroups;
			pc->pc_nempty++;
		} else {
			list = &pc->pc_partgroups;
			pc->pc_npart++;
		}
		cur->pcg_next = *list;
		*list = cur;
	}
	if (prev != NULL) {
		if (prev->pcg_avail == PCG_NOBJECTS) {
			list = &pc->pc_fullgroups;
			pc->pc_nfull++;
		} else if (prev->pcg_avail == 0) {
			list = &pc->pc_emptygroups;
			pc->pc_nempty++;
		} else {
			list = &pc->pc_partgroups;
			pc->pc_npart++;
		}
		prev->pcg_next = *list;
		*list = prev;
	}
	mutex_exit(&pc->pc_lock);
	splx(s);
}

/*
 * Pool backend allocators.
 *
 * Each pool has a backend allocator that handles allocation, deallocation,
 * and any additional draining that might be needed.
2742 * 2743 * We provide two standard allocators: 2744 * 2745 * pool_allocator_kmem - the default when no allocator is specified 2746 * 2747 * pool_allocator_nointr - used for pools that will not be accessed 2748 * in interrupt context. 2749 */ 2750 void *pool_page_alloc(struct pool *, int); 2751 void pool_page_free(struct pool *, void *); 2752 2753 #ifdef POOL_SUBPAGE 2754 struct pool_allocator pool_allocator_kmem_fullpage = { 2755 pool_page_alloc, pool_page_free, 0, 2756 .pa_backingmapptr = &kmem_map, 2757 }; 2758 #else 2759 struct pool_allocator pool_allocator_kmem = { 2760 pool_page_alloc, pool_page_free, 0, 2761 .pa_backingmapptr = &kmem_map, 2762 }; 2763 #endif 2764 2765 void *pool_page_alloc_nointr(struct pool *, int); 2766 void pool_page_free_nointr(struct pool *, void *); 2767 2768 #ifdef POOL_SUBPAGE 2769 struct pool_allocator pool_allocator_nointr_fullpage = { 2770 pool_page_alloc_nointr, pool_page_free_nointr, 0, 2771 .pa_backingmapptr = &kernel_map, 2772 }; 2773 #else 2774 struct pool_allocator pool_allocator_nointr = { 2775 pool_page_alloc_nointr, pool_page_free_nointr, 0, 2776 .pa_backingmapptr = &kernel_map, 2777 }; 2778 #endif 2779 2780 #ifdef POOL_SUBPAGE 2781 void *pool_subpage_alloc(struct pool *, int); 2782 void pool_subpage_free(struct pool *, void *); 2783 2784 struct pool_allocator pool_allocator_kmem = { 2785 pool_subpage_alloc, pool_subpage_free, POOL_SUBPAGE, 2786 .pa_backingmapptr = &kmem_map, 2787 }; 2788 2789 void *pool_subpage_alloc_nointr(struct pool *, int); 2790 void pool_subpage_free_nointr(struct pool *, void *); 2791 2792 struct pool_allocator pool_allocator_nointr = { 2793 pool_subpage_alloc, pool_subpage_free, POOL_SUBPAGE, 2794 .pa_backingmapptr = &kmem_map, 2795 }; 2796 #endif /* POOL_SUBPAGE */ 2797 2798 static void * 2799 pool_allocator_alloc(struct pool *pp, int flags) 2800 { 2801 struct pool_allocator *pa = pp->pr_alloc; 2802 void *res; 2803 2804 res = (*pa->pa_alloc)(pp, flags); 2805 if (res == NULL && (flags & PR_WAITOK) == 0) { 2806 /* 2807 * We only run the drain hook here if PR_NOWAIT. 2808 * In other cases, the hook will be run in 2809 * pool_reclaim(). 2810 */ 2811 if (pp->pr_drain_hook != NULL) { 2812 (*pp->pr_drain_hook)(pp->pr_drain_hook_arg, flags); 2813 res = (*pa->pa_alloc)(pp, flags); 2814 } 2815 } 2816 return res; 2817 } 2818 2819 static void 2820 pool_allocator_free(struct pool *pp, void *v) 2821 { 2822 struct pool_allocator *pa = pp->pr_alloc; 2823 2824 (*pa->pa_free)(pp, v); 2825 } 2826 2827 void * 2828 pool_page_alloc(struct pool *pp, int flags) 2829 { 2830 bool waitok = (flags & PR_WAITOK) ? true : false; 2831 2832 return ((void *) uvm_km_alloc_poolpage_cache(kmem_map, waitok)); 2833 } 2834 2835 void 2836 pool_page_free(struct pool *pp, void *v) 2837 { 2838 2839 uvm_km_free_poolpage_cache(kmem_map, (vaddr_t) v); 2840 } 2841 2842 static void * 2843 pool_page_alloc_meta(struct pool *pp, int flags) 2844 { 2845 bool waitok = (flags & PR_WAITOK) ? true : false; 2846 2847 return ((void *) uvm_km_alloc_poolpage(kmem_map, waitok)); 2848 } 2849 2850 static void 2851 pool_page_free_meta(struct pool *pp, void *v) 2852 { 2853 2854 uvm_km_free_poolpage(kmem_map, (vaddr_t) v); 2855 } 2856 2857 #ifdef POOL_SUBPAGE 2858 /* Sub-page allocator, for machines with large hardware pages. 

#ifdef POOL_SUBPAGE
/* Sub-page allocator, for machines with large hardware pages. */
void *
pool_subpage_alloc(struct pool *pp, int flags)
{
	return pool_get(&psppool, flags);
}

void
pool_subpage_free(struct pool *pp, void *v)
{
	pool_put(&psppool, v);
}

/* We don't provide a real nointr allocator.  Maybe later. */
void *
pool_subpage_alloc_nointr(struct pool *pp, int flags)
{

	return (pool_subpage_alloc(pp, flags));
}

void
pool_subpage_free_nointr(struct pool *pp, void *v)
{

	pool_subpage_free(pp, v);
}
#endif /* POOL_SUBPAGE */

void *
pool_page_alloc_nointr(struct pool *pp, int flags)
{
	bool waitok = (flags & PR_WAITOK) ? true : false;

	return ((void *) uvm_km_alloc_poolpage_cache(kernel_map, waitok));
}

void
pool_page_free_nointr(struct pool *pp, void *v)
{

	uvm_km_free_poolpage_cache(kernel_map, (vaddr_t) v);
}

#if defined(DDB)
static bool
pool_in_page(struct pool *pp, struct pool_item_header *ph, uintptr_t addr)
{

	return (uintptr_t)ph->ph_page <= addr &&
	    addr < (uintptr_t)ph->ph_page + pp->pr_alloc->pa_pagesz;
}

void
pool_whatis(uintptr_t addr, void (*pr)(const char *, ...))
{
	struct pool *pp;

	LIST_FOREACH(pp, &pool_head, pr_poollist) {
		struct pool_item_header *ph;
		uintptr_t item;

		if ((pp->pr_roflags & PR_PHINPAGE) != 0) {
			LIST_FOREACH(ph, &pp->pr_fullpages, ph_pagelist) {
				if (pool_in_page(pp, ph, addr)) {
					goto found;
				}
			}
			LIST_FOREACH(ph, &pp->pr_partpages, ph_pagelist) {
				if (pool_in_page(pp, ph, addr)) {
					goto found;
				}
			}
			continue;
		} else {
			ph = pr_find_pagehead_noalign(pp, (void *)addr);
			if (ph == NULL || !pool_in_page(pp, ph, addr)) {
				continue;
			}
		}
found:
		item = (uintptr_t)ph->ph_page + ph->ph_off;
		item = item + rounddown(addr - item, pp->pr_size);
		(*pr)("%p is %p+%zu from POOL '%s'\n",
		    (void *)addr, (void *)item, (size_t)(addr - item),
		    pp->pr_wchan);
	}
}
#endif /* defined(DDB) */
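
/*
 * Illustrative note (not part of this file): pool_whatis() above is
 * intended to back a ddb(4)-style "whatis" query; for an address that
 * falls inside a pool page it produces output of the form
 *
 *	0xcbb9d2c4 is 0xcbb9d280+68 from POOL 'buf'
 *
 * (values invented for illustration).  The item base is derived with
 * the same rounddown arithmetic used in pool_whatis(); restated below
 * with hypothetical names.
 */
#if 0
static uintptr_t
example_item_base(uintptr_t addr, uintptr_t page, uint16_t off, size_t size)
{
	uintptr_t first = page + off;	/* address of the page's first item */

	/* Round down to the start of the item containing 'addr'. */
	return first + rounddown(addr - first, size);
}
#endif	/* 0 */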