/*	$NetBSD: subr_pool.c,v 1.228 2018/12/02 21:00:13 maxv Exp $	*/

/*-
 * Copyright (c) 1997, 1999, 2000, 2002, 2007, 2008, 2010, 2014, 2015
 *     The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Paul Kranenburg; by Jason R. Thorpe of the Numerical Aerospace
 * Simulation Facility, NASA Ames Research Center; by Andrew Doran, and by
 * Maxime Villard.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.228 2018/12/02 21:00:13 maxv Exp $");

#ifdef _KERNEL_OPT
#include "opt_ddb.h"
#include "opt_lockdebug.h"
#include "opt_kleak.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/bitops.h>
#include <sys/proc.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/vmem.h>
#include <sys/pool.h>
#include <sys/syslog.h>
#include <sys/debug.h>
#include <sys/lockdebug.h>
#include <sys/xcall.h>
#include <sys/cpu.h>
#include <sys/atomic.h>
#include <sys/asan.h>

#include <uvm/uvm_extern.h>

/*
 * Pool resource management utility.
 *
 * Memory is allocated in pages which are split into pieces according to
 * the pool item size. Each page is kept on one of three lists in the
 * pool structure: `pr_emptypages', `pr_fullpages' and `pr_partpages',
 * for empty, full and partially-full pages respectively. The individual
 * pool items are on a linked list headed by `ph_itemlist' in each page
 * header. The memory for building the page list is either taken from
 * the allocated pages themselves (for small pool items) or taken from
 * an internal pool of page headers (`phpool').
 */

/* List of all pools. Non static as needed by 'vmstat -m' */
TAILQ_HEAD(, pool) pool_head = TAILQ_HEAD_INITIALIZER(pool_head);

/* Private pool for page header structures */
#define	PHPOOL_MAX	8
static struct pool phpool[PHPOOL_MAX];
#define	PHPOOL_FREELIST_NELEM(idx) \
	(((idx) == 0) ? 0 : BITMAP_SIZE * (1 << (idx)))

#ifdef POOL_SUBPAGE
/* Pool of subpages for use by normal pools.
*/ 87 static struct pool psppool; 88 #endif 89 90 #if defined(KASAN) 91 #define POOL_REDZONE 92 #endif 93 94 #ifdef POOL_REDZONE 95 # ifdef KASAN 96 # define POOL_REDZONE_SIZE 8 97 # else 98 # define POOL_REDZONE_SIZE 2 99 # endif 100 static void pool_redzone_init(struct pool *, size_t); 101 static void pool_redzone_fill(struct pool *, void *); 102 static void pool_redzone_check(struct pool *, void *); 103 #else 104 # define pool_redzone_init(pp, sz) /* NOTHING */ 105 # define pool_redzone_fill(pp, ptr) /* NOTHING */ 106 # define pool_redzone_check(pp, ptr) /* NOTHING */ 107 #endif 108 109 #ifdef KLEAK 110 static void pool_kleak_fill(struct pool *, void *); 111 static void pool_cache_kleak_fill(pool_cache_t, void *); 112 #else 113 #define pool_kleak_fill(pp, ptr) __nothing 114 #define pool_cache_kleak_fill(pc, ptr) __nothing 115 #endif 116 117 static void *pool_page_alloc_meta(struct pool *, int); 118 static void pool_page_free_meta(struct pool *, void *); 119 120 /* allocator for pool metadata */ 121 struct pool_allocator pool_allocator_meta = { 122 .pa_alloc = pool_page_alloc_meta, 123 .pa_free = pool_page_free_meta, 124 .pa_pagesz = 0 125 }; 126 127 #define POOL_ALLOCATOR_BIG_BASE 13 128 extern struct pool_allocator pool_allocator_big[]; 129 static int pool_bigidx(size_t); 130 131 /* # of seconds to retain page after last use */ 132 int pool_inactive_time = 10; 133 134 /* Next candidate for drainage (see pool_drain()) */ 135 static struct pool *drainpp; 136 137 /* This lock protects both pool_head and drainpp. */ 138 static kmutex_t pool_head_lock; 139 static kcondvar_t pool_busy; 140 141 /* This lock protects initialization of a potentially shared pool allocator */ 142 static kmutex_t pool_allocator_lock; 143 144 typedef uint32_t pool_item_bitmap_t; 145 #define BITMAP_SIZE (CHAR_BIT * sizeof(pool_item_bitmap_t)) 146 #define BITMAP_MASK (BITMAP_SIZE - 1) 147 148 struct pool_item_header { 149 /* Page headers */ 150 LIST_ENTRY(pool_item_header) 151 ph_pagelist; /* pool page list */ 152 SPLAY_ENTRY(pool_item_header) 153 ph_node; /* Off-page page headers */ 154 void * ph_page; /* this page's address */ 155 uint32_t ph_time; /* last referenced */ 156 uint16_t ph_nmissing; /* # of chunks in use */ 157 uint16_t ph_off; /* start offset in page */ 158 union { 159 /* !PR_NOTOUCH */ 160 struct { 161 LIST_HEAD(, pool_item) 162 phu_itemlist; /* chunk list for this page */ 163 } phu_normal; 164 /* PR_NOTOUCH */ 165 struct { 166 pool_item_bitmap_t phu_bitmap[1]; 167 } phu_notouch; 168 } ph_u; 169 }; 170 #define ph_itemlist ph_u.phu_normal.phu_itemlist 171 #define ph_bitmap ph_u.phu_notouch.phu_bitmap 172 173 struct pool_item { 174 #ifdef DIAGNOSTIC 175 u_int pi_magic; 176 #endif 177 #define PI_MAGIC 0xdeaddeadU 178 /* Other entries use only this list entry */ 179 LIST_ENTRY(pool_item) pi_list; 180 }; 181 182 #define POOL_NEEDS_CATCHUP(pp) \ 183 ((pp)->pr_nitems < (pp)->pr_minitems) 184 185 /* 186 * Pool cache management. 187 * 188 * Pool caches provide a way for constructed objects to be cached by the 189 * pool subsystem. This can lead to performance improvements by avoiding 190 * needless object construction/destruction; it is deferred until absolutely 191 * necessary. 192 * 193 * Caches are grouped into cache groups. Each cache group references up 194 * to PCG_NUMOBJECTS constructed objects. When a cache allocates an 195 * object from the pool, it calls the object's constructor and places it 196 * into a cache group. 
When a cache group frees an object back to the
 * pool, it first calls the object's destructor. This allows the object
 * to persist in constructed form while freed to the cache.
 *
 * The pool references each cache, so that when a pool is drained by the
 * pagedaemon, it can drain each individual cache as well. Each time a
 * cache is drained, the most idle cache group is freed to the pool in
 * its entirety.
 *
 * Pool caches are laid on top of pools. By layering them, we can avoid
 * the complexity of cache management for pools which would not benefit
 * from it.
 */

static struct pool pcg_normal_pool;
static struct pool pcg_large_pool;
static struct pool cache_pool;
static struct pool cache_cpu_pool;

pool_cache_t pnbuf_cache;	/* pathname buffer cache */

/* List of all caches. */
TAILQ_HEAD(,pool_cache) pool_cache_head =
    TAILQ_HEAD_INITIALIZER(pool_cache_head);

int pool_cache_disable;		/* global disable for caching */
static const pcg_t pcg_dummy;	/* zero sized: always empty, yet always full */

static bool	pool_cache_put_slow(pool_cache_cpu_t *, int,
		    void *);
static bool	pool_cache_get_slow(pool_cache_cpu_t *, int,
		    void **, paddr_t *, int);
static void	pool_cache_cpu_init1(struct cpu_info *, pool_cache_t);
static void	pool_cache_invalidate_groups(pool_cache_t, pcg_t *);
static void	pool_cache_invalidate_cpu(pool_cache_t, u_int);
static void	pool_cache_transfer(pool_cache_t);

static int	pool_catchup(struct pool *);
static void	pool_prime_page(struct pool *, void *,
		    struct pool_item_header *);
static void	pool_update_curpage(struct pool *);

static int	pool_grow(struct pool *, int);
static void	*pool_allocator_alloc(struct pool *, int);
static void	pool_allocator_free(struct pool *, void *);

static void pool_print_pagelist(struct pool *, struct pool_pagelist *,
	void (*)(const char *, ...) __printflike(1, 2));
static void pool_print1(struct pool *, const char *,
	void (*)(const char *, ...)
__printflike(1, 2)); 246 247 static int pool_chk_page(struct pool *, const char *, 248 struct pool_item_header *); 249 250 static inline unsigned int 251 pr_item_notouch_index(const struct pool *pp, const struct pool_item_header *ph, 252 const void *v) 253 { 254 const char *cp = v; 255 unsigned int idx; 256 257 KASSERT(pp->pr_roflags & PR_NOTOUCH); 258 idx = (cp - (char *)ph->ph_page - ph->ph_off) / pp->pr_size; 259 KASSERT(idx < pp->pr_itemsperpage); 260 return idx; 261 } 262 263 static inline void 264 pr_item_notouch_put(const struct pool *pp, struct pool_item_header *ph, 265 void *obj) 266 { 267 unsigned int idx = pr_item_notouch_index(pp, ph, obj); 268 pool_item_bitmap_t *bitmap = ph->ph_bitmap + (idx / BITMAP_SIZE); 269 pool_item_bitmap_t mask = 1U << (idx & BITMAP_MASK); 270 271 KASSERT((*bitmap & mask) == 0); 272 *bitmap |= mask; 273 } 274 275 static inline void * 276 pr_item_notouch_get(const struct pool *pp, struct pool_item_header *ph) 277 { 278 pool_item_bitmap_t *bitmap = ph->ph_bitmap; 279 unsigned int idx; 280 int i; 281 282 for (i = 0; ; i++) { 283 int bit; 284 285 KASSERT((i * BITMAP_SIZE) < pp->pr_itemsperpage); 286 bit = ffs32(bitmap[i]); 287 if (bit) { 288 pool_item_bitmap_t mask; 289 290 bit--; 291 idx = (i * BITMAP_SIZE) + bit; 292 mask = 1U << bit; 293 KASSERT((bitmap[i] & mask) != 0); 294 bitmap[i] &= ~mask; 295 break; 296 } 297 } 298 KASSERT(idx < pp->pr_itemsperpage); 299 return (char *)ph->ph_page + ph->ph_off + idx * pp->pr_size; 300 } 301 302 static inline void 303 pr_item_notouch_init(const struct pool *pp, struct pool_item_header *ph) 304 { 305 pool_item_bitmap_t *bitmap = ph->ph_bitmap; 306 const int n = howmany(pp->pr_itemsperpage, BITMAP_SIZE); 307 int i; 308 309 for (i = 0; i < n; i++) { 310 bitmap[i] = (pool_item_bitmap_t)-1; 311 } 312 } 313 314 static inline int 315 phtree_compare(struct pool_item_header *a, struct pool_item_header *b) 316 { 317 318 /* 319 * we consider pool_item_header with smaller ph_page bigger. 320 * (this unnatural ordering is for the benefit of pr_find_pagehead.) 321 */ 322 323 if (a->ph_page < b->ph_page) 324 return (1); 325 else if (a->ph_page > b->ph_page) 326 return (-1); 327 else 328 return (0); 329 } 330 331 SPLAY_PROTOTYPE(phtree, pool_item_header, ph_node, phtree_compare); 332 SPLAY_GENERATE(phtree, pool_item_header, ph_node, phtree_compare); 333 334 static inline struct pool_item_header * 335 pr_find_pagehead_noalign(struct pool *pp, void *v) 336 { 337 struct pool_item_header *ph, tmp; 338 339 tmp.ph_page = (void *)(uintptr_t)v; 340 ph = SPLAY_FIND(phtree, &pp->pr_phtree, &tmp); 341 if (ph == NULL) { 342 ph = SPLAY_ROOT(&pp->pr_phtree); 343 if (ph != NULL && phtree_compare(&tmp, ph) >= 0) { 344 ph = SPLAY_NEXT(phtree, &pp->pr_phtree, ph); 345 } 346 KASSERT(ph == NULL || phtree_compare(&tmp, ph) < 0); 347 } 348 349 return ph; 350 } 351 352 /* 353 * Return the pool page header based on item address. 
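 *
 * For PR_PHINPAGE pools the header lives inside the page itself, at
 * pr_phoffset from the start of the page, so it is found by masking the
 * item address with pa_pagemask.  Otherwise the header is kept off page
 * and is looked up in the pr_phtree splay tree, keyed on the page address;
 * PR_NOALIGN pools go through pr_find_pagehead_noalign(), which returns the
 * nearest header whose page address does not exceed the item address,
 * since their pages need not be aligned to pa_pagesz.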
354 */ 355 static inline struct pool_item_header * 356 pr_find_pagehead(struct pool *pp, void *v) 357 { 358 struct pool_item_header *ph, tmp; 359 360 if ((pp->pr_roflags & PR_NOALIGN) != 0) { 361 ph = pr_find_pagehead_noalign(pp, v); 362 } else { 363 void *page = 364 (void *)((uintptr_t)v & pp->pr_alloc->pa_pagemask); 365 366 if ((pp->pr_roflags & PR_PHINPAGE) != 0) { 367 ph = (struct pool_item_header *)((char *)page + pp->pr_phoffset); 368 } else { 369 tmp.ph_page = page; 370 ph = SPLAY_FIND(phtree, &pp->pr_phtree, &tmp); 371 } 372 } 373 374 KASSERT(ph == NULL || ((pp->pr_roflags & PR_PHINPAGE) != 0) || 375 ((char *)ph->ph_page <= (char *)v && 376 (char *)v < (char *)ph->ph_page + pp->pr_alloc->pa_pagesz)); 377 return ph; 378 } 379 380 static void 381 pr_pagelist_free(struct pool *pp, struct pool_pagelist *pq) 382 { 383 struct pool_item_header *ph; 384 385 while ((ph = LIST_FIRST(pq)) != NULL) { 386 LIST_REMOVE(ph, ph_pagelist); 387 pool_allocator_free(pp, ph->ph_page); 388 if ((pp->pr_roflags & PR_PHINPAGE) == 0) 389 pool_put(pp->pr_phpool, ph); 390 } 391 } 392 393 /* 394 * Remove a page from the pool. 395 */ 396 static inline void 397 pr_rmpage(struct pool *pp, struct pool_item_header *ph, 398 struct pool_pagelist *pq) 399 { 400 401 KASSERT(mutex_owned(&pp->pr_lock)); 402 403 /* 404 * If the page was idle, decrement the idle page count. 405 */ 406 if (ph->ph_nmissing == 0) { 407 KASSERT(pp->pr_nidle != 0); 408 KASSERTMSG((pp->pr_nitems >= pp->pr_itemsperpage), 409 "nitems=%u < itemsperpage=%u", 410 pp->pr_nitems, pp->pr_itemsperpage); 411 pp->pr_nidle--; 412 } 413 414 pp->pr_nitems -= pp->pr_itemsperpage; 415 416 /* 417 * Unlink the page from the pool and queue it for release. 418 */ 419 LIST_REMOVE(ph, ph_pagelist); 420 if ((pp->pr_roflags & PR_PHINPAGE) == 0) 421 SPLAY_REMOVE(phtree, &pp->pr_phtree, ph); 422 LIST_INSERT_HEAD(pq, ph, ph_pagelist); 423 424 pp->pr_npages--; 425 pp->pr_npagefree++; 426 427 pool_update_curpage(pp); 428 } 429 430 /* 431 * Initialize all the pools listed in the "pools" link set. 432 */ 433 void 434 pool_subsystem_init(void) 435 { 436 size_t size; 437 int idx; 438 439 mutex_init(&pool_head_lock, MUTEX_DEFAULT, IPL_NONE); 440 mutex_init(&pool_allocator_lock, MUTEX_DEFAULT, IPL_NONE); 441 cv_init(&pool_busy, "poolbusy"); 442 443 /* 444 * Initialize private page header pool and cache magazine pool if we 445 * haven't done so yet. 
446 */ 447 for (idx = 0; idx < PHPOOL_MAX; idx++) { 448 static char phpool_names[PHPOOL_MAX][6+1+6+1]; 449 int nelem; 450 size_t sz; 451 452 nelem = PHPOOL_FREELIST_NELEM(idx); 453 snprintf(phpool_names[idx], sizeof(phpool_names[idx]), 454 "phpool-%d", nelem); 455 sz = sizeof(struct pool_item_header); 456 if (nelem) { 457 sz = offsetof(struct pool_item_header, 458 ph_bitmap[howmany(nelem, BITMAP_SIZE)]); 459 } 460 pool_init(&phpool[idx], sz, 0, 0, 0, 461 phpool_names[idx], &pool_allocator_meta, IPL_VM); 462 } 463 #ifdef POOL_SUBPAGE 464 pool_init(&psppool, POOL_SUBPAGE, POOL_SUBPAGE, 0, 465 PR_RECURSIVE, "psppool", &pool_allocator_meta, IPL_VM); 466 #endif 467 468 size = sizeof(pcg_t) + 469 (PCG_NOBJECTS_NORMAL - 1) * sizeof(pcgpair_t); 470 pool_init(&pcg_normal_pool, size, coherency_unit, 0, 0, 471 "pcgnormal", &pool_allocator_meta, IPL_VM); 472 473 size = sizeof(pcg_t) + 474 (PCG_NOBJECTS_LARGE - 1) * sizeof(pcgpair_t); 475 pool_init(&pcg_large_pool, size, coherency_unit, 0, 0, 476 "pcglarge", &pool_allocator_meta, IPL_VM); 477 478 pool_init(&cache_pool, sizeof(struct pool_cache), coherency_unit, 479 0, 0, "pcache", &pool_allocator_meta, IPL_NONE); 480 481 pool_init(&cache_cpu_pool, sizeof(pool_cache_cpu_t), coherency_unit, 482 0, 0, "pcachecpu", &pool_allocator_meta, IPL_NONE); 483 } 484 485 /* 486 * Initialize the given pool resource structure. 487 * 488 * We export this routine to allow other kernel parts to declare 489 * static pools that must be initialized before kmem(9) is available. 490 */ 491 void 492 pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags, 493 const char *wchan, struct pool_allocator *palloc, int ipl) 494 { 495 struct pool *pp1; 496 size_t trysize, phsize, prsize; 497 int off, slack; 498 499 #ifdef DEBUG 500 if (__predict_true(!cold)) 501 mutex_enter(&pool_head_lock); 502 /* 503 * Check that the pool hasn't already been initialised and 504 * added to the list of all pools. 505 */ 506 TAILQ_FOREACH(pp1, &pool_head, pr_poollist) { 507 if (pp == pp1) 508 panic("%s: [%s] already initialised", __func__, 509 wchan); 510 } 511 if (__predict_true(!cold)) 512 mutex_exit(&pool_head_lock); 513 #endif 514 515 if (palloc == NULL) 516 palloc = &pool_allocator_kmem; 517 #ifdef POOL_SUBPAGE 518 if (size > palloc->pa_pagesz) { 519 if (palloc == &pool_allocator_kmem) 520 palloc = &pool_allocator_kmem_fullpage; 521 else if (palloc == &pool_allocator_nointr) 522 palloc = &pool_allocator_nointr_fullpage; 523 } 524 #endif /* POOL_SUBPAGE */ 525 if (!cold) 526 mutex_enter(&pool_allocator_lock); 527 if (palloc->pa_refcnt++ == 0) { 528 if (palloc->pa_pagesz == 0) 529 palloc->pa_pagesz = PAGE_SIZE; 530 531 TAILQ_INIT(&palloc->pa_list); 532 533 mutex_init(&palloc->pa_lock, MUTEX_DEFAULT, IPL_VM); 534 palloc->pa_pagemask = ~(palloc->pa_pagesz - 1); 535 palloc->pa_pageshift = ffs(palloc->pa_pagesz) - 1; 536 } 537 if (!cold) 538 mutex_exit(&pool_allocator_lock); 539 540 if (align == 0) 541 align = ALIGN(1); 542 543 prsize = size; 544 if ((flags & PR_NOTOUCH) == 0 && prsize < sizeof(struct pool_item)) 545 prsize = sizeof(struct pool_item); 546 547 prsize = roundup(prsize, align); 548 KASSERTMSG((prsize <= palloc->pa_pagesz), 549 "%s: [%s] pool item size (%zu) larger than page size (%u)", 550 __func__, wchan, prsize, palloc->pa_pagesz); 551 552 /* 553 * Initialize the pool structure. 
554 */ 555 LIST_INIT(&pp->pr_emptypages); 556 LIST_INIT(&pp->pr_fullpages); 557 LIST_INIT(&pp->pr_partpages); 558 pp->pr_cache = NULL; 559 pp->pr_curpage = NULL; 560 pp->pr_npages = 0; 561 pp->pr_minitems = 0; 562 pp->pr_minpages = 0; 563 pp->pr_maxpages = UINT_MAX; 564 pp->pr_roflags = flags; 565 pp->pr_flags = 0; 566 pp->pr_size = prsize; 567 pp->pr_align = align; 568 pp->pr_wchan = wchan; 569 pp->pr_alloc = palloc; 570 pp->pr_nitems = 0; 571 pp->pr_nout = 0; 572 pp->pr_hardlimit = UINT_MAX; 573 pp->pr_hardlimit_warning = NULL; 574 pp->pr_hardlimit_ratecap.tv_sec = 0; 575 pp->pr_hardlimit_ratecap.tv_usec = 0; 576 pp->pr_hardlimit_warning_last.tv_sec = 0; 577 pp->pr_hardlimit_warning_last.tv_usec = 0; 578 pp->pr_drain_hook = NULL; 579 pp->pr_drain_hook_arg = NULL; 580 pp->pr_freecheck = NULL; 581 pool_redzone_init(pp, size); 582 583 /* 584 * Decide whether to put the page header off page to avoid 585 * wasting too large a part of the page or too big item. 586 * Off-page page headers go on a hash table, so we can match 587 * a returned item with its header based on the page address. 588 * We use 1/16 of the page size and about 8 times of the item 589 * size as the threshold (XXX: tune) 590 * 591 * However, we'll put the header into the page if we can put 592 * it without wasting any items. 593 * 594 * Silently enforce `0 <= ioff < align'. 595 */ 596 pp->pr_itemoffset = ioff %= align; 597 /* See the comment below about reserved bytes. */ 598 trysize = palloc->pa_pagesz - ((align - ioff) % align); 599 phsize = ALIGN(sizeof(struct pool_item_header)); 600 if (pp->pr_roflags & PR_PHINPAGE || 601 ((pp->pr_roflags & (PR_NOTOUCH | PR_NOALIGN)) == 0 && 602 (pp->pr_size < MIN(palloc->pa_pagesz / 16, phsize << 3) || 603 trysize / pp->pr_size == (trysize - phsize) / pp->pr_size))) { 604 /* Use the end of the page for the page header */ 605 pp->pr_roflags |= PR_PHINPAGE; 606 pp->pr_phoffset = off = palloc->pa_pagesz - phsize; 607 } else { 608 /* The page header will be taken from our page header pool */ 609 pp->pr_phoffset = 0; 610 off = palloc->pa_pagesz; 611 SPLAY_INIT(&pp->pr_phtree); 612 } 613 614 /* 615 * Alignment is to take place at `ioff' within the item. This means 616 * we must reserve up to `align - 1' bytes on the page to allow 617 * appropriate positioning of each item. 618 */ 619 pp->pr_itemsperpage = (off - ((align - ioff) % align)) / pp->pr_size; 620 KASSERT(pp->pr_itemsperpage != 0); 621 if ((pp->pr_roflags & PR_NOTOUCH)) { 622 int idx; 623 624 for (idx = 0; pp->pr_itemsperpage > PHPOOL_FREELIST_NELEM(idx); 625 idx++) { 626 /* nothing */ 627 } 628 if (idx >= PHPOOL_MAX) { 629 /* 630 * if you see this panic, consider to tweak 631 * PHPOOL_MAX and PHPOOL_FREELIST_NELEM. 632 */ 633 panic("%s: [%s] too large itemsperpage(%d) for " 634 "PR_NOTOUCH", __func__, 635 pp->pr_wchan, pp->pr_itemsperpage); 636 } 637 pp->pr_phpool = &phpool[idx]; 638 } else if ((pp->pr_roflags & PR_PHINPAGE) == 0) { 639 pp->pr_phpool = &phpool[0]; 640 } 641 #if defined(DIAGNOSTIC) 642 else { 643 pp->pr_phpool = NULL; 644 } 645 #endif 646 647 /* 648 * Use the slack between the chunks and the page header 649 * for "cache coloring". 
650 */ 651 slack = off - pp->pr_itemsperpage * pp->pr_size; 652 pp->pr_maxcolor = (slack / align) * align; 653 pp->pr_curcolor = 0; 654 655 pp->pr_nget = 0; 656 pp->pr_nfail = 0; 657 pp->pr_nput = 0; 658 pp->pr_npagealloc = 0; 659 pp->pr_npagefree = 0; 660 pp->pr_hiwat = 0; 661 pp->pr_nidle = 0; 662 pp->pr_refcnt = 0; 663 664 mutex_init(&pp->pr_lock, MUTEX_DEFAULT, ipl); 665 cv_init(&pp->pr_cv, wchan); 666 pp->pr_ipl = ipl; 667 668 /* Insert into the list of all pools. */ 669 if (!cold) 670 mutex_enter(&pool_head_lock); 671 TAILQ_FOREACH(pp1, &pool_head, pr_poollist) { 672 if (strcmp(pp1->pr_wchan, pp->pr_wchan) > 0) 673 break; 674 } 675 if (pp1 == NULL) 676 TAILQ_INSERT_TAIL(&pool_head, pp, pr_poollist); 677 else 678 TAILQ_INSERT_BEFORE(pp1, pp, pr_poollist); 679 if (!cold) 680 mutex_exit(&pool_head_lock); 681 682 /* Insert this into the list of pools using this allocator. */ 683 if (!cold) 684 mutex_enter(&palloc->pa_lock); 685 TAILQ_INSERT_TAIL(&palloc->pa_list, pp, pr_alloc_list); 686 if (!cold) 687 mutex_exit(&palloc->pa_lock); 688 } 689 690 /* 691 * De-commision a pool resource. 692 */ 693 void 694 pool_destroy(struct pool *pp) 695 { 696 struct pool_pagelist pq; 697 struct pool_item_header *ph; 698 699 /* Remove from global pool list */ 700 mutex_enter(&pool_head_lock); 701 while (pp->pr_refcnt != 0) 702 cv_wait(&pool_busy, &pool_head_lock); 703 TAILQ_REMOVE(&pool_head, pp, pr_poollist); 704 if (drainpp == pp) 705 drainpp = NULL; 706 mutex_exit(&pool_head_lock); 707 708 /* Remove this pool from its allocator's list of pools. */ 709 mutex_enter(&pp->pr_alloc->pa_lock); 710 TAILQ_REMOVE(&pp->pr_alloc->pa_list, pp, pr_alloc_list); 711 mutex_exit(&pp->pr_alloc->pa_lock); 712 713 mutex_enter(&pool_allocator_lock); 714 if (--pp->pr_alloc->pa_refcnt == 0) 715 mutex_destroy(&pp->pr_alloc->pa_lock); 716 mutex_exit(&pool_allocator_lock); 717 718 mutex_enter(&pp->pr_lock); 719 720 KASSERT(pp->pr_cache == NULL); 721 KASSERTMSG((pp->pr_nout == 0), 722 "%s: pool busy: still out: %u", __func__, pp->pr_nout); 723 KASSERT(LIST_EMPTY(&pp->pr_fullpages)); 724 KASSERT(LIST_EMPTY(&pp->pr_partpages)); 725 726 /* Remove all pages */ 727 LIST_INIT(&pq); 728 while ((ph = LIST_FIRST(&pp->pr_emptypages)) != NULL) 729 pr_rmpage(pp, ph, &pq); 730 731 mutex_exit(&pp->pr_lock); 732 733 pr_pagelist_free(pp, &pq); 734 cv_destroy(&pp->pr_cv); 735 mutex_destroy(&pp->pr_lock); 736 } 737 738 void 739 pool_set_drain_hook(struct pool *pp, void (*fn)(void *, int), void *arg) 740 { 741 742 /* XXX no locking -- must be used just after pool_init() */ 743 KASSERTMSG((pp->pr_drain_hook == NULL), 744 "%s: [%s] already set", __func__, pp->pr_wchan); 745 pp->pr_drain_hook = fn; 746 pp->pr_drain_hook_arg = arg; 747 } 748 749 static struct pool_item_header * 750 pool_alloc_item_header(struct pool *pp, void *storage, int flags) 751 { 752 struct pool_item_header *ph; 753 754 if ((pp->pr_roflags & PR_PHINPAGE) != 0) 755 ph = (void *)((char *)storage + pp->pr_phoffset); 756 else 757 ph = pool_get(pp->pr_phpool, flags); 758 759 return (ph); 760 } 761 762 /* 763 * Grab an item from the pool. 
764 */ 765 void * 766 pool_get(struct pool *pp, int flags) 767 { 768 struct pool_item *pi; 769 struct pool_item_header *ph; 770 void *v; 771 772 KASSERT(!(flags & PR_NOWAIT) != !(flags & PR_WAITOK)); 773 KASSERTMSG((pp->pr_itemsperpage != 0), 774 "%s: [%s] pr_itemsperpage is zero, " 775 "pool not initialized?", __func__, pp->pr_wchan); 776 KASSERTMSG((!(cpu_intr_p() || cpu_softintr_p()) 777 || pp->pr_ipl != IPL_NONE || cold || panicstr != NULL), 778 "%s: [%s] is IPL_NONE, but called from interrupt context", 779 __func__, pp->pr_wchan); 780 if (flags & PR_WAITOK) { 781 ASSERT_SLEEPABLE(); 782 } 783 784 mutex_enter(&pp->pr_lock); 785 startover: 786 /* 787 * Check to see if we've reached the hard limit. If we have, 788 * and we can wait, then wait until an item has been returned to 789 * the pool. 790 */ 791 KASSERTMSG((pp->pr_nout <= pp->pr_hardlimit), 792 "%s: %s: crossed hard limit", __func__, pp->pr_wchan); 793 if (__predict_false(pp->pr_nout == pp->pr_hardlimit)) { 794 if (pp->pr_drain_hook != NULL) { 795 /* 796 * Since the drain hook is going to free things 797 * back to the pool, unlock, call the hook, re-lock, 798 * and check the hardlimit condition again. 799 */ 800 mutex_exit(&pp->pr_lock); 801 (*pp->pr_drain_hook)(pp->pr_drain_hook_arg, flags); 802 mutex_enter(&pp->pr_lock); 803 if (pp->pr_nout < pp->pr_hardlimit) 804 goto startover; 805 } 806 807 if ((flags & PR_WAITOK) && !(flags & PR_LIMITFAIL)) { 808 /* 809 * XXX: A warning isn't logged in this case. Should 810 * it be? 811 */ 812 pp->pr_flags |= PR_WANTED; 813 do { 814 cv_wait(&pp->pr_cv, &pp->pr_lock); 815 } while (pp->pr_flags & PR_WANTED); 816 goto startover; 817 } 818 819 /* 820 * Log a message that the hard limit has been hit. 821 */ 822 if (pp->pr_hardlimit_warning != NULL && 823 ratecheck(&pp->pr_hardlimit_warning_last, 824 &pp->pr_hardlimit_ratecap)) 825 log(LOG_ERR, "%s\n", pp->pr_hardlimit_warning); 826 827 pp->pr_nfail++; 828 829 mutex_exit(&pp->pr_lock); 830 KASSERT((flags & (PR_NOWAIT|PR_LIMITFAIL)) != 0); 831 return (NULL); 832 } 833 834 /* 835 * The convention we use is that if `curpage' is not NULL, then 836 * it points at a non-empty bucket. In particular, `curpage' 837 * never points at a page header which has PR_PHINPAGE set and 838 * has no items in its bucket. 839 */ 840 if ((ph = pp->pr_curpage) == NULL) { 841 int error; 842 843 KASSERTMSG((pp->pr_nitems == 0), 844 "%s: [%s] curpage NULL, inconsistent nitems %u", 845 __func__, pp->pr_wchan, pp->pr_nitems); 846 847 /* 848 * Call the back-end page allocator for more memory. 849 * Release the pool lock, as the back-end page allocator 850 * may block. 851 */ 852 error = pool_grow(pp, flags); 853 if (error != 0) { 854 /* 855 * pool_grow aborts when another thread 856 * is allocating a new page. Retry if it 857 * waited for it. 858 */ 859 if (error == ERESTART) 860 goto startover; 861 862 /* 863 * We were unable to allocate a page or item 864 * header, but we released the lock during 865 * allocation, so perhaps items were freed 866 * back to the pool. Check for this case. 867 */ 868 if (pp->pr_curpage != NULL) 869 goto startover; 870 871 pp->pr_nfail++; 872 mutex_exit(&pp->pr_lock); 873 KASSERT((flags & (PR_WAITOK|PR_NOWAIT)) == PR_NOWAIT); 874 return (NULL); 875 } 876 877 /* Start the allocation process over. 
*/ 878 goto startover; 879 } 880 if (pp->pr_roflags & PR_NOTOUCH) { 881 KASSERTMSG((ph->ph_nmissing < pp->pr_itemsperpage), 882 "%s: %s: page empty", __func__, pp->pr_wchan); 883 v = pr_item_notouch_get(pp, ph); 884 } else { 885 v = pi = LIST_FIRST(&ph->ph_itemlist); 886 if (__predict_false(v == NULL)) { 887 mutex_exit(&pp->pr_lock); 888 panic("%s: [%s] page empty", __func__, pp->pr_wchan); 889 } 890 KASSERTMSG((pp->pr_nitems > 0), 891 "%s: [%s] nitems %u inconsistent on itemlist", 892 __func__, pp->pr_wchan, pp->pr_nitems); 893 KASSERTMSG((pi->pi_magic == PI_MAGIC), 894 "%s: [%s] free list modified: " 895 "magic=%x; page %p; item addr %p", __func__, 896 pp->pr_wchan, pi->pi_magic, ph->ph_page, pi); 897 898 /* 899 * Remove from item list. 900 */ 901 LIST_REMOVE(pi, pi_list); 902 } 903 pp->pr_nitems--; 904 pp->pr_nout++; 905 if (ph->ph_nmissing == 0) { 906 KASSERT(pp->pr_nidle > 0); 907 pp->pr_nidle--; 908 909 /* 910 * This page was previously empty. Move it to the list of 911 * partially-full pages. This page is already curpage. 912 */ 913 LIST_REMOVE(ph, ph_pagelist); 914 LIST_INSERT_HEAD(&pp->pr_partpages, ph, ph_pagelist); 915 } 916 ph->ph_nmissing++; 917 if (ph->ph_nmissing == pp->pr_itemsperpage) { 918 KASSERTMSG(((pp->pr_roflags & PR_NOTOUCH) || 919 LIST_EMPTY(&ph->ph_itemlist)), 920 "%s: [%s] nmissing (%u) inconsistent", __func__, 921 pp->pr_wchan, ph->ph_nmissing); 922 /* 923 * This page is now full. Move it to the full list 924 * and select a new current page. 925 */ 926 LIST_REMOVE(ph, ph_pagelist); 927 LIST_INSERT_HEAD(&pp->pr_fullpages, ph, ph_pagelist); 928 pool_update_curpage(pp); 929 } 930 931 pp->pr_nget++; 932 933 /* 934 * If we have a low water mark and we are now below that low 935 * water mark, add more items to the pool. 936 */ 937 if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) { 938 /* 939 * XXX: Should we log a warning? Should we set up a timeout 940 * to try again in a second or so? The latter could break 941 * a caller's assumptions about interrupt protection, etc. 942 */ 943 } 944 945 mutex_exit(&pp->pr_lock); 946 KASSERT((((vaddr_t)v + pp->pr_itemoffset) & (pp->pr_align - 1)) == 0); 947 FREECHECK_OUT(&pp->pr_freecheck, v); 948 pool_redzone_fill(pp, v); 949 pool_kleak_fill(pp, v); 950 return (v); 951 } 952 953 /* 954 * Internal version of pool_put(). Pool is already locked/entered. 955 */ 956 static void 957 pool_do_put(struct pool *pp, void *v, struct pool_pagelist *pq) 958 { 959 struct pool_item *pi = v; 960 struct pool_item_header *ph; 961 962 KASSERT(mutex_owned(&pp->pr_lock)); 963 pool_redzone_check(pp, v); 964 FREECHECK_IN(&pp->pr_freecheck, v); 965 LOCKDEBUG_MEM_CHECK(v, pp->pr_size); 966 967 KASSERTMSG((pp->pr_nout > 0), 968 "%s: [%s] putting with none out", __func__, pp->pr_wchan); 969 970 if (__predict_false((ph = pr_find_pagehead(pp, v)) == NULL)) { 971 panic("%s: [%s] page header missing", __func__, pp->pr_wchan); 972 } 973 974 /* 975 * Return to item list. 
976 */ 977 if (pp->pr_roflags & PR_NOTOUCH) { 978 pr_item_notouch_put(pp, ph, v); 979 } else { 980 #ifdef DIAGNOSTIC 981 pi->pi_magic = PI_MAGIC; 982 #endif 983 #ifdef DEBUG 984 { 985 int i, *ip = v; 986 987 for (i = 0; i < pp->pr_size / sizeof(int); i++) { 988 *ip++ = PI_MAGIC; 989 } 990 } 991 #endif 992 993 LIST_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list); 994 } 995 KDASSERT(ph->ph_nmissing != 0); 996 ph->ph_nmissing--; 997 pp->pr_nput++; 998 pp->pr_nitems++; 999 pp->pr_nout--; 1000 1001 /* Cancel "pool empty" condition if it exists */ 1002 if (pp->pr_curpage == NULL) 1003 pp->pr_curpage = ph; 1004 1005 if (pp->pr_flags & PR_WANTED) { 1006 pp->pr_flags &= ~PR_WANTED; 1007 cv_broadcast(&pp->pr_cv); 1008 } 1009 1010 /* 1011 * If this page is now empty, do one of two things: 1012 * 1013 * (1) If we have more pages than the page high water mark, 1014 * free the page back to the system. ONLY CONSIDER 1015 * FREEING BACK A PAGE IF WE HAVE MORE THAN OUR MINIMUM PAGE 1016 * CLAIM. 1017 * 1018 * (2) Otherwise, move the page to the empty page list. 1019 * 1020 * Either way, select a new current page (so we use a partially-full 1021 * page if one is available). 1022 */ 1023 if (ph->ph_nmissing == 0) { 1024 pp->pr_nidle++; 1025 if (pp->pr_npages > pp->pr_minpages && 1026 pp->pr_npages > pp->pr_maxpages) { 1027 pr_rmpage(pp, ph, pq); 1028 } else { 1029 LIST_REMOVE(ph, ph_pagelist); 1030 LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist); 1031 1032 /* 1033 * Update the timestamp on the page. A page must 1034 * be idle for some period of time before it can 1035 * be reclaimed by the pagedaemon. This minimizes 1036 * ping-pong'ing for memory. 1037 * 1038 * note for 64-bit time_t: truncating to 32-bit is not 1039 * a problem for our usage. 1040 */ 1041 ph->ph_time = time_uptime; 1042 } 1043 pool_update_curpage(pp); 1044 } 1045 1046 /* 1047 * If the page was previously completely full, move it to the 1048 * partially-full list and make it the current page. The next 1049 * allocation will get the item from this page, instead of 1050 * further fragmenting the pool. 1051 */ 1052 else if (ph->ph_nmissing == (pp->pr_itemsperpage - 1)) { 1053 LIST_REMOVE(ph, ph_pagelist); 1054 LIST_INSERT_HEAD(&pp->pr_partpages, ph, ph_pagelist); 1055 pp->pr_curpage = ph; 1056 } 1057 } 1058 1059 void 1060 pool_put(struct pool *pp, void *v) 1061 { 1062 struct pool_pagelist pq; 1063 1064 LIST_INIT(&pq); 1065 1066 mutex_enter(&pp->pr_lock); 1067 pool_do_put(pp, v, &pq); 1068 mutex_exit(&pp->pr_lock); 1069 1070 pr_pagelist_free(pp, &pq); 1071 } 1072 1073 /* 1074 * pool_grow: grow a pool by a page. 1075 * 1076 * => called with pool locked. 1077 * => unlock and relock the pool. 1078 * => return with pool locked. 1079 */ 1080 1081 static int 1082 pool_grow(struct pool *pp, int flags) 1083 { 1084 /* 1085 * If there's a pool_grow in progress, wait for it to complete 1086 * and try again from the top. 1087 */ 1088 if (pp->pr_flags & PR_GROWING) { 1089 if (flags & PR_WAITOK) { 1090 do { 1091 cv_wait(&pp->pr_cv, &pp->pr_lock); 1092 } while (pp->pr_flags & PR_GROWING); 1093 return ERESTART; 1094 } else { 1095 if (pp->pr_flags & PR_GROWINGNOWAIT) { 1096 /* 1097 * This needs an unlock/relock dance so 1098 * that the other caller has a chance to 1099 * run and actually do the thing. Note 1100 * that this is effectively a busy-wait. 
1101 */ 1102 mutex_exit(&pp->pr_lock); 1103 mutex_enter(&pp->pr_lock); 1104 return ERESTART; 1105 } 1106 return EWOULDBLOCK; 1107 } 1108 } 1109 pp->pr_flags |= PR_GROWING; 1110 if (flags & PR_WAITOK) 1111 mutex_exit(&pp->pr_lock); 1112 else 1113 pp->pr_flags |= PR_GROWINGNOWAIT; 1114 1115 char *cp = pool_allocator_alloc(pp, flags); 1116 if (__predict_false(cp == NULL)) 1117 goto out; 1118 1119 struct pool_item_header *ph = pool_alloc_item_header(pp, cp, flags); 1120 if (__predict_false(ph == NULL)) { 1121 pool_allocator_free(pp, cp); 1122 goto out; 1123 } 1124 1125 if (flags & PR_WAITOK) 1126 mutex_enter(&pp->pr_lock); 1127 pool_prime_page(pp, cp, ph); 1128 pp->pr_npagealloc++; 1129 KASSERT(pp->pr_flags & PR_GROWING); 1130 pp->pr_flags &= ~(PR_GROWING|PR_GROWINGNOWAIT); 1131 /* 1132 * If anyone was waiting for pool_grow, notify them that we 1133 * may have just done it. 1134 */ 1135 cv_broadcast(&pp->pr_cv); 1136 return 0; 1137 out: 1138 if (flags & PR_WAITOK) 1139 mutex_enter(&pp->pr_lock); 1140 KASSERT(pp->pr_flags & PR_GROWING); 1141 pp->pr_flags &= ~(PR_GROWING|PR_GROWINGNOWAIT); 1142 return ENOMEM; 1143 } 1144 1145 /* 1146 * Add N items to the pool. 1147 */ 1148 int 1149 pool_prime(struct pool *pp, int n) 1150 { 1151 int newpages; 1152 int error = 0; 1153 1154 mutex_enter(&pp->pr_lock); 1155 1156 newpages = roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage; 1157 1158 while (newpages > 0) { 1159 error = pool_grow(pp, PR_NOWAIT); 1160 if (error) { 1161 if (error == ERESTART) 1162 continue; 1163 break; 1164 } 1165 pp->pr_minpages++; 1166 newpages--; 1167 } 1168 1169 if (pp->pr_minpages >= pp->pr_maxpages) 1170 pp->pr_maxpages = pp->pr_minpages + 1; /* XXX */ 1171 1172 mutex_exit(&pp->pr_lock); 1173 return error; 1174 } 1175 1176 /* 1177 * Add a page worth of items to the pool. 1178 * 1179 * Note, we must be called with the pool descriptor LOCKED. 1180 */ 1181 static void 1182 pool_prime_page(struct pool *pp, void *storage, struct pool_item_header *ph) 1183 { 1184 struct pool_item *pi; 1185 void *cp = storage; 1186 const unsigned int align = pp->pr_align; 1187 const unsigned int ioff = pp->pr_itemoffset; 1188 int n; 1189 1190 KASSERT(mutex_owned(&pp->pr_lock)); 1191 KASSERTMSG(((pp->pr_roflags & PR_NOALIGN) || 1192 (((uintptr_t)cp & (pp->pr_alloc->pa_pagesz - 1)) == 0)), 1193 "%s: [%s] unaligned page: %p", __func__, pp->pr_wchan, cp); 1194 1195 /* 1196 * Insert page header. 1197 */ 1198 LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist); 1199 LIST_INIT(&ph->ph_itemlist); 1200 ph->ph_page = storage; 1201 ph->ph_nmissing = 0; 1202 ph->ph_time = time_uptime; 1203 if ((pp->pr_roflags & PR_PHINPAGE) == 0) 1204 SPLAY_INSERT(phtree, &pp->pr_phtree, ph); 1205 1206 pp->pr_nidle++; 1207 1208 /* 1209 * Color this page. 1210 */ 1211 ph->ph_off = pp->pr_curcolor; 1212 cp = (char *)cp + ph->ph_off; 1213 if ((pp->pr_curcolor += align) > pp->pr_maxcolor) 1214 pp->pr_curcolor = 0; 1215 1216 /* 1217 * Adjust storage to apply aligment to `pr_itemoffset' in each item. 1218 */ 1219 if (ioff != 0) 1220 cp = (char *)cp + align - ioff; 1221 1222 KASSERT((((vaddr_t)cp + ioff) & (align - 1)) == 0); 1223 1224 /* 1225 * Insert remaining chunks on the bucket list. 
1226 */ 1227 n = pp->pr_itemsperpage; 1228 pp->pr_nitems += n; 1229 1230 if (pp->pr_roflags & PR_NOTOUCH) { 1231 pr_item_notouch_init(pp, ph); 1232 } else { 1233 while (n--) { 1234 pi = (struct pool_item *)cp; 1235 1236 KASSERT(((((vaddr_t)pi) + ioff) & (align - 1)) == 0); 1237 1238 /* Insert on page list */ 1239 LIST_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list); 1240 #ifdef DIAGNOSTIC 1241 pi->pi_magic = PI_MAGIC; 1242 #endif 1243 cp = (char *)cp + pp->pr_size; 1244 1245 KASSERT((((vaddr_t)cp + ioff) & (align - 1)) == 0); 1246 } 1247 } 1248 1249 /* 1250 * If the pool was depleted, point at the new page. 1251 */ 1252 if (pp->pr_curpage == NULL) 1253 pp->pr_curpage = ph; 1254 1255 if (++pp->pr_npages > pp->pr_hiwat) 1256 pp->pr_hiwat = pp->pr_npages; 1257 } 1258 1259 /* 1260 * Used by pool_get() when nitems drops below the low water mark. This 1261 * is used to catch up pr_nitems with the low water mark. 1262 * 1263 * Note 1, we never wait for memory here, we let the caller decide what to do. 1264 * 1265 * Note 2, we must be called with the pool already locked, and we return 1266 * with it locked. 1267 */ 1268 static int 1269 pool_catchup(struct pool *pp) 1270 { 1271 int error = 0; 1272 1273 while (POOL_NEEDS_CATCHUP(pp)) { 1274 error = pool_grow(pp, PR_NOWAIT); 1275 if (error) { 1276 if (error == ERESTART) 1277 continue; 1278 break; 1279 } 1280 } 1281 return error; 1282 } 1283 1284 static void 1285 pool_update_curpage(struct pool *pp) 1286 { 1287 1288 pp->pr_curpage = LIST_FIRST(&pp->pr_partpages); 1289 if (pp->pr_curpage == NULL) { 1290 pp->pr_curpage = LIST_FIRST(&pp->pr_emptypages); 1291 } 1292 KASSERT((pp->pr_curpage == NULL && pp->pr_nitems == 0) || 1293 (pp->pr_curpage != NULL && pp->pr_nitems > 0)); 1294 } 1295 1296 void 1297 pool_setlowat(struct pool *pp, int n) 1298 { 1299 1300 mutex_enter(&pp->pr_lock); 1301 1302 pp->pr_minitems = n; 1303 pp->pr_minpages = (n == 0) 1304 ? 0 1305 : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage; 1306 1307 /* Make sure we're caught up with the newly-set low water mark. */ 1308 if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) { 1309 /* 1310 * XXX: Should we log a warning? Should we set up a timeout 1311 * to try again in a second or so? The latter could break 1312 * a caller's assumptions about interrupt protection, etc. 1313 */ 1314 } 1315 1316 mutex_exit(&pp->pr_lock); 1317 } 1318 1319 void 1320 pool_sethiwat(struct pool *pp, int n) 1321 { 1322 1323 mutex_enter(&pp->pr_lock); 1324 1325 pp->pr_maxpages = (n == 0) 1326 ? 0 1327 : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage; 1328 1329 mutex_exit(&pp->pr_lock); 1330 } 1331 1332 void 1333 pool_sethardlimit(struct pool *pp, int n, const char *warnmess, int ratecap) 1334 { 1335 1336 mutex_enter(&pp->pr_lock); 1337 1338 pp->pr_hardlimit = n; 1339 pp->pr_hardlimit_warning = warnmess; 1340 pp->pr_hardlimit_ratecap.tv_sec = ratecap; 1341 pp->pr_hardlimit_warning_last.tv_sec = 0; 1342 pp->pr_hardlimit_warning_last.tv_usec = 0; 1343 1344 /* 1345 * In-line version of pool_sethiwat(), because we don't want to 1346 * release the lock. 1347 */ 1348 pp->pr_maxpages = (n == 0) 1349 ? 0 1350 : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage; 1351 1352 mutex_exit(&pp->pr_lock); 1353 } 1354 1355 /* 1356 * Release all complete pages that have not been used recently. 1357 * 1358 * Must not be called from interrupt context. 
1359 */ 1360 int 1361 pool_reclaim(struct pool *pp) 1362 { 1363 struct pool_item_header *ph, *phnext; 1364 struct pool_pagelist pq; 1365 uint32_t curtime; 1366 bool klock; 1367 int rv; 1368 1369 KASSERT(!cpu_intr_p() && !cpu_softintr_p()); 1370 1371 if (pp->pr_drain_hook != NULL) { 1372 /* 1373 * The drain hook must be called with the pool unlocked. 1374 */ 1375 (*pp->pr_drain_hook)(pp->pr_drain_hook_arg, PR_NOWAIT); 1376 } 1377 1378 /* 1379 * XXXSMP Because we do not want to cause non-MPSAFE code 1380 * to block. 1381 */ 1382 if (pp->pr_ipl == IPL_SOFTNET || pp->pr_ipl == IPL_SOFTCLOCK || 1383 pp->pr_ipl == IPL_SOFTSERIAL) { 1384 KERNEL_LOCK(1, NULL); 1385 klock = true; 1386 } else 1387 klock = false; 1388 1389 /* Reclaim items from the pool's cache (if any). */ 1390 if (pp->pr_cache != NULL) 1391 pool_cache_invalidate(pp->pr_cache); 1392 1393 if (mutex_tryenter(&pp->pr_lock) == 0) { 1394 if (klock) { 1395 KERNEL_UNLOCK_ONE(NULL); 1396 } 1397 return (0); 1398 } 1399 1400 LIST_INIT(&pq); 1401 1402 curtime = time_uptime; 1403 1404 for (ph = LIST_FIRST(&pp->pr_emptypages); ph != NULL; ph = phnext) { 1405 phnext = LIST_NEXT(ph, ph_pagelist); 1406 1407 /* Check our minimum page claim */ 1408 if (pp->pr_npages <= pp->pr_minpages) 1409 break; 1410 1411 KASSERT(ph->ph_nmissing == 0); 1412 if (curtime - ph->ph_time < pool_inactive_time) 1413 continue; 1414 1415 /* 1416 * If freeing this page would put us below 1417 * the low water mark, stop now. 1418 */ 1419 if ((pp->pr_nitems - pp->pr_itemsperpage) < 1420 pp->pr_minitems) 1421 break; 1422 1423 pr_rmpage(pp, ph, &pq); 1424 } 1425 1426 mutex_exit(&pp->pr_lock); 1427 1428 if (LIST_EMPTY(&pq)) 1429 rv = 0; 1430 else { 1431 pr_pagelist_free(pp, &pq); 1432 rv = 1; 1433 } 1434 1435 if (klock) { 1436 KERNEL_UNLOCK_ONE(NULL); 1437 } 1438 1439 return (rv); 1440 } 1441 1442 /* 1443 * Drain pools, one at a time. The drained pool is returned within ppp. 1444 * 1445 * Note, must never be called from interrupt context. 1446 */ 1447 bool 1448 pool_drain(struct pool **ppp) 1449 { 1450 bool reclaimed; 1451 struct pool *pp; 1452 1453 KASSERT(!TAILQ_EMPTY(&pool_head)); 1454 1455 pp = NULL; 1456 1457 /* Find next pool to drain, and add a reference. */ 1458 mutex_enter(&pool_head_lock); 1459 do { 1460 if (drainpp == NULL) { 1461 drainpp = TAILQ_FIRST(&pool_head); 1462 } 1463 if (drainpp != NULL) { 1464 pp = drainpp; 1465 drainpp = TAILQ_NEXT(pp, pr_poollist); 1466 } 1467 /* 1468 * Skip completely idle pools. We depend on at least 1469 * one pool in the system being active. 1470 */ 1471 } while (pp == NULL || pp->pr_npages == 0); 1472 pp->pr_refcnt++; 1473 mutex_exit(&pool_head_lock); 1474 1475 /* Drain the cache (if any) and pool.. */ 1476 reclaimed = pool_reclaim(pp); 1477 1478 /* Finally, unlock the pool. */ 1479 mutex_enter(&pool_head_lock); 1480 pp->pr_refcnt--; 1481 cv_broadcast(&pool_busy); 1482 mutex_exit(&pool_head_lock); 1483 1484 if (ppp != NULL) 1485 *ppp = pp; 1486 1487 return reclaimed; 1488 } 1489 1490 /* 1491 * Calculate the total number of pages consumed by pools. 1492 */ 1493 int 1494 pool_totalpages(void) 1495 { 1496 struct pool *pp; 1497 uint64_t total = 0; 1498 1499 mutex_enter(&pool_head_lock); 1500 TAILQ_FOREACH(pp, &pool_head, pr_poollist) { 1501 uint64_t bytes = pp->pr_npages * pp->pr_alloc->pa_pagesz; 1502 1503 if ((pp->pr_roflags & PR_RECURSIVE) != 0) 1504 bytes -= (pp->pr_nout * pp->pr_size); 1505 total += bytes; 1506 } 1507 mutex_exit(&pool_head_lock); 1508 1509 return atop(total); 1510 } 1511 1512 /* 1513 * Diagnostic helpers. 
1514 */ 1515 1516 void 1517 pool_printall(const char *modif, void (*pr)(const char *, ...)) 1518 { 1519 struct pool *pp; 1520 1521 TAILQ_FOREACH(pp, &pool_head, pr_poollist) { 1522 pool_printit(pp, modif, pr); 1523 } 1524 } 1525 1526 void 1527 pool_printit(struct pool *pp, const char *modif, void (*pr)(const char *, ...)) 1528 { 1529 1530 if (pp == NULL) { 1531 (*pr)("Must specify a pool to print.\n"); 1532 return; 1533 } 1534 1535 pool_print1(pp, modif, pr); 1536 } 1537 1538 static void 1539 pool_print_pagelist(struct pool *pp, struct pool_pagelist *pl, 1540 void (*pr)(const char *, ...)) 1541 { 1542 struct pool_item_header *ph; 1543 struct pool_item *pi __diagused; 1544 1545 LIST_FOREACH(ph, pl, ph_pagelist) { 1546 (*pr)("\t\tpage %p, nmissing %d, time %" PRIu32 "\n", 1547 ph->ph_page, ph->ph_nmissing, ph->ph_time); 1548 #ifdef DIAGNOSTIC 1549 if (!(pp->pr_roflags & PR_NOTOUCH)) { 1550 LIST_FOREACH(pi, &ph->ph_itemlist, pi_list) { 1551 if (pi->pi_magic != PI_MAGIC) { 1552 (*pr)("\t\t\titem %p, magic 0x%x\n", 1553 pi, pi->pi_magic); 1554 } 1555 } 1556 } 1557 #endif 1558 } 1559 } 1560 1561 static void 1562 pool_print1(struct pool *pp, const char *modif, void (*pr)(const char *, ...)) 1563 { 1564 struct pool_item_header *ph; 1565 pool_cache_t pc; 1566 pcg_t *pcg; 1567 pool_cache_cpu_t *cc; 1568 uint64_t cpuhit, cpumiss; 1569 int i, print_log = 0, print_pagelist = 0, print_cache = 0; 1570 char c; 1571 1572 while ((c = *modif++) != '\0') { 1573 if (c == 'l') 1574 print_log = 1; 1575 if (c == 'p') 1576 print_pagelist = 1; 1577 if (c == 'c') 1578 print_cache = 1; 1579 } 1580 1581 if ((pc = pp->pr_cache) != NULL) { 1582 (*pr)("POOL CACHE"); 1583 } else { 1584 (*pr)("POOL"); 1585 } 1586 1587 (*pr)(" %s: size %u, align %u, ioff %u, roflags 0x%08x\n", 1588 pp->pr_wchan, pp->pr_size, pp->pr_align, pp->pr_itemoffset, 1589 pp->pr_roflags); 1590 (*pr)("\talloc %p\n", pp->pr_alloc); 1591 (*pr)("\tminitems %u, minpages %u, maxpages %u, npages %u\n", 1592 pp->pr_minitems, pp->pr_minpages, pp->pr_maxpages, pp->pr_npages); 1593 (*pr)("\titemsperpage %u, nitems %u, nout %u, hardlimit %u\n", 1594 pp->pr_itemsperpage, pp->pr_nitems, pp->pr_nout, pp->pr_hardlimit); 1595 1596 (*pr)("\tnget %lu, nfail %lu, nput %lu\n", 1597 pp->pr_nget, pp->pr_nfail, pp->pr_nput); 1598 (*pr)("\tnpagealloc %lu, npagefree %lu, hiwat %u, nidle %lu\n", 1599 pp->pr_npagealloc, pp->pr_npagefree, pp->pr_hiwat, pp->pr_nidle); 1600 1601 if (print_pagelist == 0) 1602 goto skip_pagelist; 1603 1604 if ((ph = LIST_FIRST(&pp->pr_emptypages)) != NULL) 1605 (*pr)("\n\tempty page list:\n"); 1606 pool_print_pagelist(pp, &pp->pr_emptypages, pr); 1607 if ((ph = LIST_FIRST(&pp->pr_fullpages)) != NULL) 1608 (*pr)("\n\tfull page list:\n"); 1609 pool_print_pagelist(pp, &pp->pr_fullpages, pr); 1610 if ((ph = LIST_FIRST(&pp->pr_partpages)) != NULL) 1611 (*pr)("\n\tpartial-page list:\n"); 1612 pool_print_pagelist(pp, &pp->pr_partpages, pr); 1613 1614 if (pp->pr_curpage == NULL) 1615 (*pr)("\tno current page\n"); 1616 else 1617 (*pr)("\tcurpage %p\n", pp->pr_curpage->ph_page); 1618 1619 skip_pagelist: 1620 if (print_log == 0) 1621 goto skip_log; 1622 1623 (*pr)("\n"); 1624 1625 skip_log: 1626 1627 #define PR_GROUPLIST(pcg) \ 1628 (*pr)("\t\tgroup %p: avail %d\n", pcg, pcg->pcg_avail); \ 1629 for (i = 0; i < pcg->pcg_size; i++) { \ 1630 if (pcg->pcg_objects[i].pcgo_pa != \ 1631 POOL_PADDR_INVALID) { \ 1632 (*pr)("\t\t\t%p, 0x%llx\n", \ 1633 pcg->pcg_objects[i].pcgo_va, \ 1634 (unsigned long long) \ 1635 pcg->pcg_objects[i].pcgo_pa); \ 1636 } else { \ 1637 
(*pr)("\t\t\t%p\n", \ 1638 pcg->pcg_objects[i].pcgo_va); \ 1639 } \ 1640 } 1641 1642 if (pc != NULL) { 1643 cpuhit = 0; 1644 cpumiss = 0; 1645 for (i = 0; i < __arraycount(pc->pc_cpus); i++) { 1646 if ((cc = pc->pc_cpus[i]) == NULL) 1647 continue; 1648 cpuhit += cc->cc_hits; 1649 cpumiss += cc->cc_misses; 1650 } 1651 (*pr)("\tcpu layer hits %llu misses %llu\n", cpuhit, cpumiss); 1652 (*pr)("\tcache layer hits %llu misses %llu\n", 1653 pc->pc_hits, pc->pc_misses); 1654 (*pr)("\tcache layer entry uncontended %llu contended %llu\n", 1655 pc->pc_hits + pc->pc_misses - pc->pc_contended, 1656 pc->pc_contended); 1657 (*pr)("\tcache layer empty groups %u full groups %u\n", 1658 pc->pc_nempty, pc->pc_nfull); 1659 if (print_cache) { 1660 (*pr)("\tfull cache groups:\n"); 1661 for (pcg = pc->pc_fullgroups; pcg != NULL; 1662 pcg = pcg->pcg_next) { 1663 PR_GROUPLIST(pcg); 1664 } 1665 (*pr)("\tempty cache groups:\n"); 1666 for (pcg = pc->pc_emptygroups; pcg != NULL; 1667 pcg = pcg->pcg_next) { 1668 PR_GROUPLIST(pcg); 1669 } 1670 } 1671 } 1672 #undef PR_GROUPLIST 1673 } 1674 1675 static int 1676 pool_chk_page(struct pool *pp, const char *label, struct pool_item_header *ph) 1677 { 1678 struct pool_item *pi; 1679 void *page; 1680 int n; 1681 1682 if ((pp->pr_roflags & PR_NOALIGN) == 0) { 1683 page = (void *)((uintptr_t)ph & pp->pr_alloc->pa_pagemask); 1684 if (page != ph->ph_page && 1685 (pp->pr_roflags & PR_PHINPAGE) != 0) { 1686 if (label != NULL) 1687 printf("%s: ", label); 1688 printf("pool(%p:%s): page inconsistency: page %p;" 1689 " at page head addr %p (p %p)\n", pp, 1690 pp->pr_wchan, ph->ph_page, 1691 ph, page); 1692 return 1; 1693 } 1694 } 1695 1696 if ((pp->pr_roflags & PR_NOTOUCH) != 0) 1697 return 0; 1698 1699 for (pi = LIST_FIRST(&ph->ph_itemlist), n = 0; 1700 pi != NULL; 1701 pi = LIST_NEXT(pi,pi_list), n++) { 1702 1703 #ifdef DIAGNOSTIC 1704 if (pi->pi_magic != PI_MAGIC) { 1705 if (label != NULL) 1706 printf("%s: ", label); 1707 printf("pool(%s): free list modified: magic=%x;" 1708 " page %p; item ordinal %d; addr %p\n", 1709 pp->pr_wchan, pi->pi_magic, ph->ph_page, 1710 n, pi); 1711 panic("pool"); 1712 } 1713 #endif 1714 if ((pp->pr_roflags & PR_NOALIGN) != 0) { 1715 continue; 1716 } 1717 page = (void *)((uintptr_t)pi & pp->pr_alloc->pa_pagemask); 1718 if (page == ph->ph_page) 1719 continue; 1720 1721 if (label != NULL) 1722 printf("%s: ", label); 1723 printf("pool(%p:%s): page inconsistency: page %p;" 1724 " item ordinal %d; addr %p (p %p)\n", pp, 1725 pp->pr_wchan, ph->ph_page, 1726 n, pi, page); 1727 return 1; 1728 } 1729 return 0; 1730 } 1731 1732 1733 int 1734 pool_chk(struct pool *pp, const char *label) 1735 { 1736 struct pool_item_header *ph; 1737 int r = 0; 1738 1739 mutex_enter(&pp->pr_lock); 1740 LIST_FOREACH(ph, &pp->pr_emptypages, ph_pagelist) { 1741 r = pool_chk_page(pp, label, ph); 1742 if (r) { 1743 goto out; 1744 } 1745 } 1746 LIST_FOREACH(ph, &pp->pr_fullpages, ph_pagelist) { 1747 r = pool_chk_page(pp, label, ph); 1748 if (r) { 1749 goto out; 1750 } 1751 } 1752 LIST_FOREACH(ph, &pp->pr_partpages, ph_pagelist) { 1753 r = pool_chk_page(pp, label, ph); 1754 if (r) { 1755 goto out; 1756 } 1757 } 1758 1759 out: 1760 mutex_exit(&pp->pr_lock); 1761 return (r); 1762 } 1763 1764 /* 1765 * pool_cache_init: 1766 * 1767 * Initialize a pool cache. 
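 *
 *	Illustrative sketch ("foo_cache", struct foo, foo_ctor and foo_dtor
 *	are made-up names):
 *
 *		pool_cache_t foo_cache;
 *		struct foo *f;
 *
 *		foo_cache = pool_cache_init(sizeof(struct foo),
 *		    coherency_unit, 0, 0, "foocache", NULL, IPL_NONE,
 *		    foo_ctor, foo_dtor, NULL);
 *		f = pool_cache_get(foo_cache, PR_WAITOK);
 *		...
 *		pool_cache_put(foo_cache, f);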
1768 */ 1769 pool_cache_t 1770 pool_cache_init(size_t size, u_int align, u_int align_offset, u_int flags, 1771 const char *wchan, struct pool_allocator *palloc, int ipl, 1772 int (*ctor)(void *, void *, int), void (*dtor)(void *, void *), void *arg) 1773 { 1774 pool_cache_t pc; 1775 1776 pc = pool_get(&cache_pool, PR_WAITOK); 1777 if (pc == NULL) 1778 return NULL; 1779 1780 pool_cache_bootstrap(pc, size, align, align_offset, flags, wchan, 1781 palloc, ipl, ctor, dtor, arg); 1782 1783 return pc; 1784 } 1785 1786 /* 1787 * pool_cache_bootstrap: 1788 * 1789 * Kernel-private version of pool_cache_init(). The caller 1790 * provides initial storage. 1791 */ 1792 void 1793 pool_cache_bootstrap(pool_cache_t pc, size_t size, u_int align, 1794 u_int align_offset, u_int flags, const char *wchan, 1795 struct pool_allocator *palloc, int ipl, 1796 int (*ctor)(void *, void *, int), void (*dtor)(void *, void *), 1797 void *arg) 1798 { 1799 CPU_INFO_ITERATOR cii; 1800 pool_cache_t pc1; 1801 struct cpu_info *ci; 1802 struct pool *pp; 1803 1804 pp = &pc->pc_pool; 1805 if (palloc == NULL && ipl == IPL_NONE) { 1806 if (size > PAGE_SIZE) { 1807 int bigidx = pool_bigidx(size); 1808 1809 palloc = &pool_allocator_big[bigidx]; 1810 } else 1811 palloc = &pool_allocator_nointr; 1812 } 1813 pool_init(pp, size, align, align_offset, flags, wchan, palloc, ipl); 1814 mutex_init(&pc->pc_lock, MUTEX_DEFAULT, ipl); 1815 1816 if (ctor == NULL) { 1817 ctor = (int (*)(void *, void *, int))nullop; 1818 } 1819 if (dtor == NULL) { 1820 dtor = (void (*)(void *, void *))nullop; 1821 } 1822 1823 pc->pc_emptygroups = NULL; 1824 pc->pc_fullgroups = NULL; 1825 pc->pc_partgroups = NULL; 1826 pc->pc_ctor = ctor; 1827 pc->pc_dtor = dtor; 1828 pc->pc_arg = arg; 1829 pc->pc_hits = 0; 1830 pc->pc_misses = 0; 1831 pc->pc_nempty = 0; 1832 pc->pc_npart = 0; 1833 pc->pc_nfull = 0; 1834 pc->pc_contended = 0; 1835 pc->pc_refcnt = 0; 1836 pc->pc_freecheck = NULL; 1837 1838 if ((flags & PR_LARGECACHE) != 0) { 1839 pc->pc_pcgsize = PCG_NOBJECTS_LARGE; 1840 pc->pc_pcgpool = &pcg_large_pool; 1841 } else { 1842 pc->pc_pcgsize = PCG_NOBJECTS_NORMAL; 1843 pc->pc_pcgpool = &pcg_normal_pool; 1844 } 1845 1846 /* Allocate per-CPU caches. */ 1847 memset(pc->pc_cpus, 0, sizeof(pc->pc_cpus)); 1848 pc->pc_ncpu = 0; 1849 if (ncpu < 2) { 1850 /* XXX For sparc: boot CPU is not attached yet. */ 1851 pool_cache_cpu_init1(curcpu(), pc); 1852 } else { 1853 for (CPU_INFO_FOREACH(cii, ci)) { 1854 pool_cache_cpu_init1(ci, pc); 1855 } 1856 } 1857 1858 /* Add to list of all pools. */ 1859 if (__predict_true(!cold)) 1860 mutex_enter(&pool_head_lock); 1861 TAILQ_FOREACH(pc1, &pool_cache_head, pc_cachelist) { 1862 if (strcmp(pc1->pc_pool.pr_wchan, pc->pc_pool.pr_wchan) > 0) 1863 break; 1864 } 1865 if (pc1 == NULL) 1866 TAILQ_INSERT_TAIL(&pool_cache_head, pc, pc_cachelist); 1867 else 1868 TAILQ_INSERT_BEFORE(pc1, pc, pc_cachelist); 1869 if (__predict_true(!cold)) 1870 mutex_exit(&pool_head_lock); 1871 1872 membar_sync(); 1873 pp->pr_cache = pc; 1874 } 1875 1876 /* 1877 * pool_cache_destroy: 1878 * 1879 * Destroy a pool cache. 1880 */ 1881 void 1882 pool_cache_destroy(pool_cache_t pc) 1883 { 1884 1885 pool_cache_bootstrap_destroy(pc); 1886 pool_put(&cache_pool, pc); 1887 } 1888 1889 /* 1890 * pool_cache_bootstrap_destroy: 1891 * 1892 * Destroy a pool cache. 1893 */ 1894 void 1895 pool_cache_bootstrap_destroy(pool_cache_t pc) 1896 { 1897 struct pool *pp = &pc->pc_pool; 1898 u_int i; 1899 1900 /* Remove it from the global list. 
*/ 1901 mutex_enter(&pool_head_lock); 1902 while (pc->pc_refcnt != 0) 1903 cv_wait(&pool_busy, &pool_head_lock); 1904 TAILQ_REMOVE(&pool_cache_head, pc, pc_cachelist); 1905 mutex_exit(&pool_head_lock); 1906 1907 /* First, invalidate the entire cache. */ 1908 pool_cache_invalidate(pc); 1909 1910 /* Disassociate it from the pool. */ 1911 mutex_enter(&pp->pr_lock); 1912 pp->pr_cache = NULL; 1913 mutex_exit(&pp->pr_lock); 1914 1915 /* Destroy per-CPU data */ 1916 for (i = 0; i < __arraycount(pc->pc_cpus); i++) 1917 pool_cache_invalidate_cpu(pc, i); 1918 1919 /* Finally, destroy it. */ 1920 mutex_destroy(&pc->pc_lock); 1921 pool_destroy(pp); 1922 } 1923 1924 /* 1925 * pool_cache_cpu_init1: 1926 * 1927 * Called for each pool_cache whenever a new CPU is attached. 1928 */ 1929 static void 1930 pool_cache_cpu_init1(struct cpu_info *ci, pool_cache_t pc) 1931 { 1932 pool_cache_cpu_t *cc; 1933 int index; 1934 1935 index = ci->ci_index; 1936 1937 KASSERT(index < __arraycount(pc->pc_cpus)); 1938 1939 if ((cc = pc->pc_cpus[index]) != NULL) { 1940 KASSERT(cc->cc_cpuindex == index); 1941 return; 1942 } 1943 1944 /* 1945 * The first CPU is 'free'. This needs to be the case for 1946 * bootstrap - we may not be able to allocate yet. 1947 */ 1948 if (pc->pc_ncpu == 0) { 1949 cc = &pc->pc_cpu0; 1950 pc->pc_ncpu = 1; 1951 } else { 1952 mutex_enter(&pc->pc_lock); 1953 pc->pc_ncpu++; 1954 mutex_exit(&pc->pc_lock); 1955 cc = pool_get(&cache_cpu_pool, PR_WAITOK); 1956 } 1957 1958 cc->cc_ipl = pc->pc_pool.pr_ipl; 1959 cc->cc_iplcookie = makeiplcookie(cc->cc_ipl); 1960 cc->cc_cache = pc; 1961 cc->cc_cpuindex = index; 1962 cc->cc_hits = 0; 1963 cc->cc_misses = 0; 1964 cc->cc_current = __UNCONST(&pcg_dummy); 1965 cc->cc_previous = __UNCONST(&pcg_dummy); 1966 1967 pc->pc_cpus[index] = cc; 1968 } 1969 1970 /* 1971 * pool_cache_cpu_init: 1972 * 1973 * Called whenever a new CPU is attached. 1974 */ 1975 void 1976 pool_cache_cpu_init(struct cpu_info *ci) 1977 { 1978 pool_cache_t pc; 1979 1980 mutex_enter(&pool_head_lock); 1981 TAILQ_FOREACH(pc, &pool_cache_head, pc_cachelist) { 1982 pc->pc_refcnt++; 1983 mutex_exit(&pool_head_lock); 1984 1985 pool_cache_cpu_init1(ci, pc); 1986 1987 mutex_enter(&pool_head_lock); 1988 pc->pc_refcnt--; 1989 cv_broadcast(&pool_busy); 1990 } 1991 mutex_exit(&pool_head_lock); 1992 } 1993 1994 /* 1995 * pool_cache_reclaim: 1996 * 1997 * Reclaim memory from a pool cache. 1998 */ 1999 bool 2000 pool_cache_reclaim(pool_cache_t pc) 2001 { 2002 2003 return pool_reclaim(&pc->pc_pool); 2004 } 2005 2006 static void 2007 pool_cache_destruct_object1(pool_cache_t pc, void *object) 2008 { 2009 2010 (*pc->pc_dtor)(pc->pc_arg, object); 2011 pool_put(&pc->pc_pool, object); 2012 } 2013 2014 /* 2015 * pool_cache_destruct_object: 2016 * 2017 * Force destruction of an object and its release back into 2018 * the pool. 2019 */ 2020 void 2021 pool_cache_destruct_object(pool_cache_t pc, void *object) 2022 { 2023 2024 FREECHECK_IN(&pc->pc_freecheck, object); 2025 2026 pool_cache_destruct_object1(pc, object); 2027 } 2028 2029 /* 2030 * pool_cache_invalidate_groups: 2031 * 2032 * Invalidate a chain of groups and destruct all objects. 
2033 */ 2034 static void 2035 pool_cache_invalidate_groups(pool_cache_t pc, pcg_t *pcg) 2036 { 2037 void *object; 2038 pcg_t *next; 2039 int i; 2040 2041 for (; pcg != NULL; pcg = next) { 2042 next = pcg->pcg_next; 2043 2044 for (i = 0; i < pcg->pcg_avail; i++) { 2045 object = pcg->pcg_objects[i].pcgo_va; 2046 pool_cache_destruct_object1(pc, object); 2047 } 2048 2049 if (pcg->pcg_size == PCG_NOBJECTS_LARGE) { 2050 pool_put(&pcg_large_pool, pcg); 2051 } else { 2052 KASSERT(pcg->pcg_size == PCG_NOBJECTS_NORMAL); 2053 pool_put(&pcg_normal_pool, pcg); 2054 } 2055 } 2056 } 2057 2058 /* 2059 * pool_cache_invalidate: 2060 * 2061 * Invalidate a pool cache (destruct and release all of the 2062 * cached objects). Does not reclaim objects from the pool. 2063 * 2064 * Note: For pool caches that provide constructed objects, there 2065 * is an assumption that another level of synchronization is occurring 2066 * between the input to the constructor and the cache invalidation. 2067 * 2068 * Invalidation is a costly process and should not be called from 2069 * interrupt context. 2070 */ 2071 void 2072 pool_cache_invalidate(pool_cache_t pc) 2073 { 2074 uint64_t where; 2075 pcg_t *full, *empty, *part; 2076 2077 KASSERT(!cpu_intr_p() && !cpu_softintr_p()); 2078 2079 if (ncpu < 2 || !mp_online) { 2080 /* 2081 * We might be called early enough in the boot process 2082 * for the CPU data structures to not be fully initialized. 2083 * In this case, transfer the content of the local CPU's 2084 * cache back into global cache as only this CPU is currently 2085 * running. 2086 */ 2087 pool_cache_transfer(pc); 2088 } else { 2089 /* 2090 * Signal all CPUs that they must transfer their local 2091 * cache back to the global pool then wait for the xcall to 2092 * complete. 2093 */ 2094 where = xc_broadcast(0, (xcfunc_t)pool_cache_transfer, 2095 pc, NULL); 2096 xc_wait(where); 2097 } 2098 2099 /* Empty pool caches, then invalidate objects */ 2100 mutex_enter(&pc->pc_lock); 2101 full = pc->pc_fullgroups; 2102 empty = pc->pc_emptygroups; 2103 part = pc->pc_partgroups; 2104 pc->pc_fullgroups = NULL; 2105 pc->pc_emptygroups = NULL; 2106 pc->pc_partgroups = NULL; 2107 pc->pc_nfull = 0; 2108 pc->pc_nempty = 0; 2109 pc->pc_npart = 0; 2110 mutex_exit(&pc->pc_lock); 2111 2112 pool_cache_invalidate_groups(pc, full); 2113 pool_cache_invalidate_groups(pc, empty); 2114 pool_cache_invalidate_groups(pc, part); 2115 } 2116 2117 /* 2118 * pool_cache_invalidate_cpu: 2119 * 2120 * Invalidate all CPU-bound cached objects in pool cache, the CPU being 2121 * identified by its associated index. 2122 * It is caller's responsibility to ensure that no operation is 2123 * taking place on this pool cache while doing this invalidation. 2124 * WARNING: as no inter-CPU locking is enforced, trying to invalidate 2125 * pool cached objects from a CPU different from the one currently running 2126 * may result in an undefined behaviour. 
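 *
 * Within this file it is only called from pool_cache_bootstrap_destroy(),
 * which walks every possible CPU index after the cache has been
 * invalidated and unlinked, so the constraint is met trivially there.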
2127 */ 2128 static void 2129 pool_cache_invalidate_cpu(pool_cache_t pc, u_int index) 2130 { 2131 pool_cache_cpu_t *cc; 2132 pcg_t *pcg; 2133 2134 if ((cc = pc->pc_cpus[index]) == NULL) 2135 return; 2136 2137 if ((pcg = cc->cc_current) != &pcg_dummy) { 2138 pcg->pcg_next = NULL; 2139 pool_cache_invalidate_groups(pc, pcg); 2140 } 2141 if ((pcg = cc->cc_previous) != &pcg_dummy) { 2142 pcg->pcg_next = NULL; 2143 pool_cache_invalidate_groups(pc, pcg); 2144 } 2145 if (cc != &pc->pc_cpu0) 2146 pool_put(&cache_cpu_pool, cc); 2147 2148 } 2149 2150 void 2151 pool_cache_set_drain_hook(pool_cache_t pc, void (*fn)(void *, int), void *arg) 2152 { 2153 2154 pool_set_drain_hook(&pc->pc_pool, fn, arg); 2155 } 2156 2157 void 2158 pool_cache_setlowat(pool_cache_t pc, int n) 2159 { 2160 2161 pool_setlowat(&pc->pc_pool, n); 2162 } 2163 2164 void 2165 pool_cache_sethiwat(pool_cache_t pc, int n) 2166 { 2167 2168 pool_sethiwat(&pc->pc_pool, n); 2169 } 2170 2171 void 2172 pool_cache_sethardlimit(pool_cache_t pc, int n, const char *warnmess, int ratecap) 2173 { 2174 2175 pool_sethardlimit(&pc->pc_pool, n, warnmess, ratecap); 2176 } 2177 2178 static bool __noinline 2179 pool_cache_get_slow(pool_cache_cpu_t *cc, int s, void **objectp, 2180 paddr_t *pap, int flags) 2181 { 2182 pcg_t *pcg, *cur; 2183 uint64_t ncsw; 2184 pool_cache_t pc; 2185 void *object; 2186 2187 KASSERT(cc->cc_current->pcg_avail == 0); 2188 KASSERT(cc->cc_previous->pcg_avail == 0); 2189 2190 pc = cc->cc_cache; 2191 cc->cc_misses++; 2192 2193 /* 2194 * Nothing was available locally. Try and grab a group 2195 * from the cache. 2196 */ 2197 if (__predict_false(!mutex_tryenter(&pc->pc_lock))) { 2198 ncsw = curlwp->l_ncsw; 2199 mutex_enter(&pc->pc_lock); 2200 pc->pc_contended++; 2201 2202 /* 2203 * If we context switched while locking, then 2204 * our view of the per-CPU data is invalid: 2205 * retry. 2206 */ 2207 if (curlwp->l_ncsw != ncsw) { 2208 mutex_exit(&pc->pc_lock); 2209 return true; 2210 } 2211 } 2212 2213 if (__predict_true((pcg = pc->pc_fullgroups) != NULL)) { 2214 /* 2215 * If there's a full group, release our empty 2216 * group back to the cache. Install the full 2217 * group as cc_current and return. 2218 */ 2219 if (__predict_true((cur = cc->cc_current) != &pcg_dummy)) { 2220 KASSERT(cur->pcg_avail == 0); 2221 cur->pcg_next = pc->pc_emptygroups; 2222 pc->pc_emptygroups = cur; 2223 pc->pc_nempty++; 2224 } 2225 KASSERT(pcg->pcg_avail == pcg->pcg_size); 2226 cc->cc_current = pcg; 2227 pc->pc_fullgroups = pcg->pcg_next; 2228 pc->pc_hits++; 2229 pc->pc_nfull--; 2230 mutex_exit(&pc->pc_lock); 2231 return true; 2232 } 2233 2234 /* 2235 * Nothing available locally or in cache. Take the slow 2236 * path: fetch a new object from the pool and construct 2237 * it. 
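 *
 * Note that pc_lock is dropped and the saved spl restored first:
 * both pool_get() and the constructor may sleep if the caller
 * passed PR_WAITOK.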
2238 */ 2239 pc->pc_misses++; 2240 mutex_exit(&pc->pc_lock); 2241 splx(s); 2242 2243 object = pool_get(&pc->pc_pool, flags); 2244 *objectp = object; 2245 if (__predict_false(object == NULL)) { 2246 KASSERT((flags & (PR_WAITOK|PR_NOWAIT)) == PR_NOWAIT); 2247 return false; 2248 } 2249 2250 if (__predict_false((*pc->pc_ctor)(pc->pc_arg, object, flags) != 0)) { 2251 pool_put(&pc->pc_pool, object); 2252 *objectp = NULL; 2253 return false; 2254 } 2255 2256 KASSERT((((vaddr_t)object + pc->pc_pool.pr_itemoffset) & 2257 (pc->pc_pool.pr_align - 1)) == 0); 2258 2259 if (pap != NULL) { 2260 #ifdef POOL_VTOPHYS 2261 *pap = POOL_VTOPHYS(object); 2262 #else 2263 *pap = POOL_PADDR_INVALID; 2264 #endif 2265 } 2266 2267 FREECHECK_OUT(&pc->pc_freecheck, object); 2268 pool_redzone_fill(&pc->pc_pool, object); 2269 pool_cache_kleak_fill(pc, object); 2270 return false; 2271 } 2272 2273 /* 2274 * pool_cache_get{,_paddr}: 2275 * 2276 * Get an object from a pool cache (optionally returning 2277 * the physical address of the object). 2278 */ 2279 void * 2280 pool_cache_get_paddr(pool_cache_t pc, int flags, paddr_t *pap) 2281 { 2282 pool_cache_cpu_t *cc; 2283 pcg_t *pcg; 2284 void *object; 2285 int s; 2286 2287 KASSERT(!(flags & PR_NOWAIT) != !(flags & PR_WAITOK)); 2288 KASSERTMSG((!cpu_intr_p() && !cpu_softintr_p()) || 2289 (pc->pc_pool.pr_ipl != IPL_NONE || cold || panicstr != NULL), 2290 "%s: [%s] is IPL_NONE, but called from interrupt context", 2291 __func__, pc->pc_pool.pr_wchan); 2292 2293 if (flags & PR_WAITOK) { 2294 ASSERT_SLEEPABLE(); 2295 } 2296 2297 /* Lock out interrupts and disable preemption. */ 2298 s = splvm(); 2299 while (/* CONSTCOND */ true) { 2300 /* Try and allocate an object from the current group. */ 2301 cc = pc->pc_cpus[curcpu()->ci_index]; 2302 KASSERT(cc->cc_cache == pc); 2303 pcg = cc->cc_current; 2304 if (__predict_true(pcg->pcg_avail > 0)) { 2305 object = pcg->pcg_objects[--pcg->pcg_avail].pcgo_va; 2306 if (__predict_false(pap != NULL)) 2307 *pap = pcg->pcg_objects[pcg->pcg_avail].pcgo_pa; 2308 #if defined(DIAGNOSTIC) 2309 pcg->pcg_objects[pcg->pcg_avail].pcgo_va = NULL; 2310 KASSERT(pcg->pcg_avail < pcg->pcg_size); 2311 KASSERT(object != NULL); 2312 #endif 2313 cc->cc_hits++; 2314 splx(s); 2315 FREECHECK_OUT(&pc->pc_freecheck, object); 2316 pool_redzone_fill(&pc->pc_pool, object); 2317 pool_cache_kleak_fill(pc, object); 2318 return object; 2319 } 2320 2321 /* 2322 * That failed. If the previous group isn't empty, swap 2323 * it with the current group and allocate from there. 2324 */ 2325 pcg = cc->cc_previous; 2326 if (__predict_true(pcg->pcg_avail > 0)) { 2327 cc->cc_previous = cc->cc_current; 2328 cc->cc_current = pcg; 2329 continue; 2330 } 2331 2332 /* 2333 * Can't allocate from either group: try the slow path. 2334 * If get_slow() allocated an object for us, or if 2335 * no more objects are available, it will return false. 2336 * Otherwise, we need to retry. 2337 */ 2338 if (!pool_cache_get_slow(cc, s, &object, pap, flags)) 2339 break; 2340 } 2341 2342 /* 2343 * We would like to KASSERT(object || (flags & PR_NOWAIT)), but 2344 * pool_cache_get can fail even in the PR_WAITOK case, if the 2345 * constructor fails. 
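 *
 * Callers therefore have to be prepared for a NULL return even with
 * PR_WAITOK.  A minimal caller sketch (hypothetical cache and object
 * names):
 *
 *	struct mything *m;
 *
 *	m = pool_cache_get(mything_cache, PR_WAITOK);
 *	if (m == NULL)
 *		return ENOMEM;
 *	...
 *	pool_cache_put(mything_cache, m);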
2346 */ 2347 return object; 2348 } 2349 2350 static bool __noinline 2351 pool_cache_put_slow(pool_cache_cpu_t *cc, int s, void *object) 2352 { 2353 struct lwp *l = curlwp; 2354 pcg_t *pcg, *cur; 2355 uint64_t ncsw; 2356 pool_cache_t pc; 2357 2358 KASSERT(cc->cc_current->pcg_avail == cc->cc_current->pcg_size); 2359 KASSERT(cc->cc_previous->pcg_avail == cc->cc_previous->pcg_size); 2360 2361 pc = cc->cc_cache; 2362 pcg = NULL; 2363 cc->cc_misses++; 2364 ncsw = l->l_ncsw; 2365 2366 /* 2367 * If there are no empty groups in the cache then allocate one 2368 * while still unlocked. 2369 */ 2370 if (__predict_false(pc->pc_emptygroups == NULL)) { 2371 if (__predict_true(!pool_cache_disable)) { 2372 pcg = pool_get(pc->pc_pcgpool, PR_NOWAIT); 2373 } 2374 /* 2375 * If pool_get() blocked, then our view of 2376 * the per-CPU data is invalid: retry. 2377 */ 2378 if (__predict_false(l->l_ncsw != ncsw)) { 2379 if (pcg != NULL) { 2380 pool_put(pc->pc_pcgpool, pcg); 2381 } 2382 return true; 2383 } 2384 if (__predict_true(pcg != NULL)) { 2385 pcg->pcg_avail = 0; 2386 pcg->pcg_size = pc->pc_pcgsize; 2387 } 2388 } 2389 2390 /* Lock the cache. */ 2391 if (__predict_false(!mutex_tryenter(&pc->pc_lock))) { 2392 mutex_enter(&pc->pc_lock); 2393 pc->pc_contended++; 2394 2395 /* 2396 * If we context switched while locking, then our view of 2397 * the per-CPU data is invalid: retry. 2398 */ 2399 if (__predict_false(l->l_ncsw != ncsw)) { 2400 mutex_exit(&pc->pc_lock); 2401 if (pcg != NULL) { 2402 pool_put(pc->pc_pcgpool, pcg); 2403 } 2404 return true; 2405 } 2406 } 2407 2408 /* If there are no empty groups in the cache then allocate one. */ 2409 if (pcg == NULL && pc->pc_emptygroups != NULL) { 2410 pcg = pc->pc_emptygroups; 2411 pc->pc_emptygroups = pcg->pcg_next; 2412 pc->pc_nempty--; 2413 } 2414 2415 /* 2416 * If there's a empty group, release our full group back 2417 * to the cache. Install the empty group to the local CPU 2418 * and return. 2419 */ 2420 if (pcg != NULL) { 2421 KASSERT(pcg->pcg_avail == 0); 2422 if (__predict_false(cc->cc_previous == &pcg_dummy)) { 2423 cc->cc_previous = pcg; 2424 } else { 2425 cur = cc->cc_current; 2426 if (__predict_true(cur != &pcg_dummy)) { 2427 KASSERT(cur->pcg_avail == cur->pcg_size); 2428 cur->pcg_next = pc->pc_fullgroups; 2429 pc->pc_fullgroups = cur; 2430 pc->pc_nfull++; 2431 } 2432 cc->cc_current = pcg; 2433 } 2434 pc->pc_hits++; 2435 mutex_exit(&pc->pc_lock); 2436 return true; 2437 } 2438 2439 /* 2440 * Nothing available locally or in cache, and we didn't 2441 * allocate an empty group. Take the slow path and destroy 2442 * the object here and now. 2443 */ 2444 pc->pc_misses++; 2445 mutex_exit(&pc->pc_lock); 2446 splx(s); 2447 pool_cache_destruct_object(pc, object); 2448 2449 return false; 2450 } 2451 2452 /* 2453 * pool_cache_put{,_paddr}: 2454 * 2455 * Put an object back to the pool cache (optionally caching the 2456 * physical address of the object). 2457 */ 2458 void 2459 pool_cache_put_paddr(pool_cache_t pc, void *object, paddr_t pa) 2460 { 2461 pool_cache_cpu_t *cc; 2462 pcg_t *pcg; 2463 int s; 2464 2465 KASSERT(object != NULL); 2466 pool_redzone_check(&pc->pc_pool, object); 2467 FREECHECK_IN(&pc->pc_freecheck, object); 2468 2469 /* Lock out interrupts and disable preemption. */ 2470 s = splvm(); 2471 while (/* CONSTCOND */ true) { 2472 /* If the current group isn't full, release it there. 
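 *
 * Both the virtual and the physical address are stored in the group,
 * so a later pool_cache_get_paddr() can hand the paddr back without
 * looking it up again.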
*/ 2473 cc = pc->pc_cpus[curcpu()->ci_index]; 2474 KASSERT(cc->cc_cache == pc); 2475 pcg = cc->cc_current; 2476 if (__predict_true(pcg->pcg_avail < pcg->pcg_size)) { 2477 pcg->pcg_objects[pcg->pcg_avail].pcgo_va = object; 2478 pcg->pcg_objects[pcg->pcg_avail].pcgo_pa = pa; 2479 pcg->pcg_avail++; 2480 cc->cc_hits++; 2481 splx(s); 2482 return; 2483 } 2484 2485 /* 2486 * That failed. If the previous group isn't full, swap 2487 * it with the current group and try again. 2488 */ 2489 pcg = cc->cc_previous; 2490 if (__predict_true(pcg->pcg_avail < pcg->pcg_size)) { 2491 cc->cc_previous = cc->cc_current; 2492 cc->cc_current = pcg; 2493 continue; 2494 } 2495 2496 /* 2497 * Can't free to either group: try the slow path. 2498 * If put_slow() releases the object for us, it 2499 * will return false. Otherwise we need to retry. 2500 */ 2501 if (!pool_cache_put_slow(cc, s, object)) 2502 break; 2503 } 2504 } 2505 2506 /* 2507 * pool_cache_transfer: 2508 * 2509 * Transfer objects from the per-CPU cache to the global cache. 2510 * Run within a cross-call thread. 2511 */ 2512 static void 2513 pool_cache_transfer(pool_cache_t pc) 2514 { 2515 pool_cache_cpu_t *cc; 2516 pcg_t *prev, *cur, **list; 2517 int s; 2518 2519 s = splvm(); 2520 mutex_enter(&pc->pc_lock); 2521 cc = pc->pc_cpus[curcpu()->ci_index]; 2522 cur = cc->cc_current; 2523 cc->cc_current = __UNCONST(&pcg_dummy); 2524 prev = cc->cc_previous; 2525 cc->cc_previous = __UNCONST(&pcg_dummy); 2526 if (cur != &pcg_dummy) { 2527 if (cur->pcg_avail == cur->pcg_size) { 2528 list = &pc->pc_fullgroups; 2529 pc->pc_nfull++; 2530 } else if (cur->pcg_avail == 0) { 2531 list = &pc->pc_emptygroups; 2532 pc->pc_nempty++; 2533 } else { 2534 list = &pc->pc_partgroups; 2535 pc->pc_npart++; 2536 } 2537 cur->pcg_next = *list; 2538 *list = cur; 2539 } 2540 if (prev != &pcg_dummy) { 2541 if (prev->pcg_avail == prev->pcg_size) { 2542 list = &pc->pc_fullgroups; 2543 pc->pc_nfull++; 2544 } else if (prev->pcg_avail == 0) { 2545 list = &pc->pc_emptygroups; 2546 pc->pc_nempty++; 2547 } else { 2548 list = &pc->pc_partgroups; 2549 pc->pc_npart++; 2550 } 2551 prev->pcg_next = *list; 2552 *list = prev; 2553 } 2554 mutex_exit(&pc->pc_lock); 2555 splx(s); 2556 } 2557 2558 /* 2559 * Pool backend allocators. 2560 * 2561 * Each pool has a backend allocator that handles allocation, deallocation, 2562 * and any additional draining that might be needed. 2563 * 2564 * We provide two standard allocators: 2565 * 2566 * pool_allocator_kmem - the default when no allocator is specified 2567 * 2568 * pool_allocator_nointr - used for pools that will not be accessed 2569 * in interrupt context. 
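 *
 * A pool with special backing-store requirements may supply its own
 * allocator instead.  A minimal sketch (hypothetical names; a
 * pa_pagesz of 0 lets the pool code choose its default page size):
 *
 *	static void *my_alloc(struct pool *, int);
 *	static void my_free(struct pool *, void *);
 *
 *	static struct pool_allocator my_allocator = {
 *		.pa_alloc = my_alloc,
 *		.pa_free = my_free,
 *		.pa_pagesz = 0,
 *	};
 *
 * The address of such a structure is then passed as the palloc
 * argument of pool_init() or pool_cache_init().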
2570 */ 2571 void *pool_page_alloc(struct pool *, int); 2572 void pool_page_free(struct pool *, void *); 2573 2574 #ifdef POOL_SUBPAGE 2575 struct pool_allocator pool_allocator_kmem_fullpage = { 2576 .pa_alloc = pool_page_alloc, 2577 .pa_free = pool_page_free, 2578 .pa_pagesz = 0 2579 }; 2580 #else 2581 struct pool_allocator pool_allocator_kmem = { 2582 .pa_alloc = pool_page_alloc, 2583 .pa_free = pool_page_free, 2584 .pa_pagesz = 0 2585 }; 2586 #endif 2587 2588 #ifdef POOL_SUBPAGE 2589 struct pool_allocator pool_allocator_nointr_fullpage = { 2590 .pa_alloc = pool_page_alloc, 2591 .pa_free = pool_page_free, 2592 .pa_pagesz = 0 2593 }; 2594 #else 2595 struct pool_allocator pool_allocator_nointr = { 2596 .pa_alloc = pool_page_alloc, 2597 .pa_free = pool_page_free, 2598 .pa_pagesz = 0 2599 }; 2600 #endif 2601 2602 #ifdef POOL_SUBPAGE 2603 void *pool_subpage_alloc(struct pool *, int); 2604 void pool_subpage_free(struct pool *, void *); 2605 2606 struct pool_allocator pool_allocator_kmem = { 2607 .pa_alloc = pool_subpage_alloc, 2608 .pa_free = pool_subpage_free, 2609 .pa_pagesz = POOL_SUBPAGE 2610 }; 2611 2612 struct pool_allocator pool_allocator_nointr = { 2613 .pa_alloc = pool_subpage_alloc, 2614 .pa_free = pool_subpage_free, 2615 .pa_pagesz = POOL_SUBPAGE 2616 }; 2617 #endif /* POOL_SUBPAGE */ 2618 2619 struct pool_allocator pool_allocator_big[] = { 2620 { 2621 .pa_alloc = pool_page_alloc, 2622 .pa_free = pool_page_free, 2623 .pa_pagesz = 1 << (POOL_ALLOCATOR_BIG_BASE + 0), 2624 }, 2625 { 2626 .pa_alloc = pool_page_alloc, 2627 .pa_free = pool_page_free, 2628 .pa_pagesz = 1 << (POOL_ALLOCATOR_BIG_BASE + 1), 2629 }, 2630 { 2631 .pa_alloc = pool_page_alloc, 2632 .pa_free = pool_page_free, 2633 .pa_pagesz = 1 << (POOL_ALLOCATOR_BIG_BASE + 2), 2634 }, 2635 { 2636 .pa_alloc = pool_page_alloc, 2637 .pa_free = pool_page_free, 2638 .pa_pagesz = 1 << (POOL_ALLOCATOR_BIG_BASE + 3), 2639 }, 2640 { 2641 .pa_alloc = pool_page_alloc, 2642 .pa_free = pool_page_free, 2643 .pa_pagesz = 1 << (POOL_ALLOCATOR_BIG_BASE + 4), 2644 }, 2645 { 2646 .pa_alloc = pool_page_alloc, 2647 .pa_free = pool_page_free, 2648 .pa_pagesz = 1 << (POOL_ALLOCATOR_BIG_BASE + 5), 2649 }, 2650 { 2651 .pa_alloc = pool_page_alloc, 2652 .pa_free = pool_page_free, 2653 .pa_pagesz = 1 << (POOL_ALLOCATOR_BIG_BASE + 6), 2654 }, 2655 { 2656 .pa_alloc = pool_page_alloc, 2657 .pa_free = pool_page_free, 2658 .pa_pagesz = 1 << (POOL_ALLOCATOR_BIG_BASE + 7), 2659 } 2660 }; 2661 2662 static int 2663 pool_bigidx(size_t size) 2664 { 2665 int i; 2666 2667 for (i = 0; i < __arraycount(pool_allocator_big); i++) { 2668 if (1 << (i + POOL_ALLOCATOR_BIG_BASE) >= size) 2669 return i; 2670 } 2671 panic("pool item size %zu too large, use a custom allocator", size); 2672 } 2673 2674 static void * 2675 pool_allocator_alloc(struct pool *pp, int flags) 2676 { 2677 struct pool_allocator *pa = pp->pr_alloc; 2678 void *res; 2679 2680 res = (*pa->pa_alloc)(pp, flags); 2681 if (res == NULL && (flags & PR_WAITOK) == 0) { 2682 /* 2683 * We only run the drain hook here if PR_NOWAIT. 2684 * In other cases, the hook will be run in 2685 * pool_reclaim(). 
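 *
 * After the hook has released whatever memory it can, the allocation
 * is retried exactly once; a second failure is reported back to the
 * caller as NULL.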
2686 */ 2687 if (pp->pr_drain_hook != NULL) { 2688 (*pp->pr_drain_hook)(pp->pr_drain_hook_arg, flags); 2689 res = (*pa->pa_alloc)(pp, flags); 2690 } 2691 } 2692 return res; 2693 } 2694 2695 static void 2696 pool_allocator_free(struct pool *pp, void *v) 2697 { 2698 struct pool_allocator *pa = pp->pr_alloc; 2699 2700 (*pa->pa_free)(pp, v); 2701 } 2702 2703 void * 2704 pool_page_alloc(struct pool *pp, int flags) 2705 { 2706 const vm_flag_t vflags = (flags & PR_WAITOK) ? VM_SLEEP: VM_NOSLEEP; 2707 vmem_addr_t va; 2708 int ret; 2709 2710 ret = uvm_km_kmem_alloc(kmem_va_arena, pp->pr_alloc->pa_pagesz, 2711 vflags | VM_INSTANTFIT, &va); 2712 2713 return ret ? NULL : (void *)va; 2714 } 2715 2716 void 2717 pool_page_free(struct pool *pp, void *v) 2718 { 2719 2720 uvm_km_kmem_free(kmem_va_arena, (vaddr_t)v, pp->pr_alloc->pa_pagesz); 2721 } 2722 2723 static void * 2724 pool_page_alloc_meta(struct pool *pp, int flags) 2725 { 2726 const vm_flag_t vflags = (flags & PR_WAITOK) ? VM_SLEEP: VM_NOSLEEP; 2727 vmem_addr_t va; 2728 int ret; 2729 2730 ret = vmem_alloc(kmem_meta_arena, pp->pr_alloc->pa_pagesz, 2731 vflags | VM_INSTANTFIT, &va); 2732 2733 return ret ? NULL : (void *)va; 2734 } 2735 2736 static void 2737 pool_page_free_meta(struct pool *pp, void *v) 2738 { 2739 2740 vmem_free(kmem_meta_arena, (vmem_addr_t)v, pp->pr_alloc->pa_pagesz); 2741 } 2742 2743 #ifdef KLEAK 2744 static void 2745 pool_kleak_fill(struct pool *pp, void *p) 2746 { 2747 if (__predict_false(pp->pr_roflags & PR_NOTOUCH)) { 2748 return; 2749 } 2750 kleak_fill_area(p, pp->pr_size); 2751 } 2752 2753 static void 2754 pool_cache_kleak_fill(pool_cache_t pc, void *p) 2755 { 2756 if (__predict_false(pc->pc_ctor != NULL || pc->pc_dtor != NULL)) { 2757 return; 2758 } 2759 pool_kleak_fill(&pc->pc_pool, p); 2760 } 2761 #endif 2762 2763 #ifdef POOL_REDZONE 2764 #if defined(_LP64) 2765 # define PRIME 0x9e37fffffffc0000UL 2766 #else /* defined(_LP64) */ 2767 # define PRIME 0x9e3779b1 2768 #endif /* defined(_LP64) */ 2769 #define STATIC_BYTE 0xFE 2770 CTASSERT(POOL_REDZONE_SIZE > 1); 2771 2772 #ifndef KASAN 2773 static inline uint8_t 2774 pool_pattern_generate(const void *p) 2775 { 2776 return (uint8_t)(((uintptr_t)p) * PRIME 2777 >> ((sizeof(uintptr_t) - sizeof(uint8_t))) * CHAR_BIT); 2778 } 2779 #endif 2780 2781 static void 2782 pool_redzone_init(struct pool *pp, size_t requested_size) 2783 { 2784 size_t redzsz; 2785 size_t nsz; 2786 2787 #ifdef KASAN 2788 redzsz = requested_size; 2789 kasan_add_redzone(&redzsz); 2790 redzsz -= requested_size; 2791 #else 2792 redzsz = POOL_REDZONE_SIZE; 2793 #endif 2794 2795 if (pp->pr_roflags & PR_NOTOUCH) { 2796 pp->pr_reqsize = 0; 2797 pp->pr_redzone = false; 2798 return; 2799 } 2800 2801 /* 2802 * We may have extended the requested size earlier; check if 2803 * there's naturally space in the padding for a red zone. 2804 */ 2805 if (pp->pr_size - requested_size >= redzsz) { 2806 pp->pr_reqsize = requested_size; 2807 pp->pr_redzone = true; 2808 return; 2809 } 2810 2811 /* 2812 * No space in the natural padding; check if we can extend a 2813 * bit the size of the pool. 2814 */ 2815 nsz = roundup(pp->pr_size + redzsz, pp->pr_align); 2816 if (nsz <= pp->pr_alloc->pa_pagesz) { 2817 /* Ok, we can */ 2818 pp->pr_size = nsz; 2819 pp->pr_reqsize = requested_size; 2820 pp->pr_redzone = true; 2821 } else { 2822 /* No space for a red zone... 
snif :'( */ 2823 pp->pr_reqsize = 0; 2824 pp->pr_redzone = false; 2825 printf("pool redzone disabled for '%s'\n", pp->pr_wchan); 2826 } 2827 } 2828 2829 static void 2830 pool_redzone_fill(struct pool *pp, void *p) 2831 { 2832 if (!pp->pr_redzone) 2833 return; 2834 #ifdef KASAN 2835 size_t size_with_redzone = pp->pr_reqsize; 2836 kasan_add_redzone(&size_with_redzone); 2837 kasan_alloc(p, pp->pr_reqsize, size_with_redzone); 2838 #else 2839 uint8_t *cp, pat; 2840 const uint8_t *ep; 2841 2842 cp = (uint8_t *)p + pp->pr_reqsize; 2843 ep = cp + POOL_REDZONE_SIZE; 2844 2845 /* 2846 * We really don't want the first byte of the red zone to be '\0'; 2847 * an off-by-one in a string may not be properly detected. 2848 */ 2849 pat = pool_pattern_generate(cp); 2850 *cp = (pat == '\0') ? STATIC_BYTE: pat; 2851 cp++; 2852 2853 while (cp < ep) { 2854 *cp = pool_pattern_generate(cp); 2855 cp++; 2856 } 2857 #endif 2858 } 2859 2860 static void 2861 pool_redzone_check(struct pool *pp, void *p) 2862 { 2863 if (!pp->pr_redzone) 2864 return; 2865 #ifdef KASAN 2866 size_t size_with_redzone = pp->pr_reqsize; 2867 kasan_add_redzone(&size_with_redzone); 2868 kasan_free(p, size_with_redzone); 2869 #else 2870 uint8_t *cp, pat, expected; 2871 const uint8_t *ep; 2872 2873 cp = (uint8_t *)p + pp->pr_reqsize; 2874 ep = cp + POOL_REDZONE_SIZE; 2875 2876 pat = pool_pattern_generate(cp); 2877 expected = (pat == '\0') ? STATIC_BYTE: pat; 2878 if (__predict_false(expected != *cp)) { 2879 printf("%s: %p: 0x%02x != 0x%02x\n", 2880 __func__, cp, *cp, expected); 2881 } 2882 cp++; 2883 2884 while (cp < ep) { 2885 expected = pool_pattern_generate(cp); 2886 if (__predict_false(*cp != expected)) { 2887 printf("%s: %p: 0x%02x != 0x%02x\n", 2888 __func__, cp, *cp, expected); 2889 } 2890 cp++; 2891 } 2892 #endif 2893 } 2894 2895 #endif /* POOL_REDZONE */ 2896 2897 2898 #ifdef POOL_SUBPAGE 2899 /* Sub-page allocator, for machines with large hardware pages. 
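 *
 * Sub-pages are handed out by the internal psppool pool (see
 * pool_subpage_alloc() below).  In this configuration the standard
 * pool_allocator_kmem and pool_allocator_nointr allocators use
 * POOL_SUBPAGE as their pa_pagesz, so ordinary pools allocate in
 * sub-page rather than hardware-page units.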
*/ 2900 void * 2901 pool_subpage_alloc(struct pool *pp, int flags) 2902 { 2903 return pool_get(&psppool, flags); 2904 } 2905 2906 void 2907 pool_subpage_free(struct pool *pp, void *v) 2908 { 2909 pool_put(&psppool, v); 2910 } 2911 2912 #endif /* POOL_SUBPAGE */ 2913 2914 #if defined(DDB) 2915 static bool 2916 pool_in_page(struct pool *pp, struct pool_item_header *ph, uintptr_t addr) 2917 { 2918 2919 return (uintptr_t)ph->ph_page <= addr && 2920 addr < (uintptr_t)ph->ph_page + pp->pr_alloc->pa_pagesz; 2921 } 2922 2923 static bool 2924 pool_in_item(struct pool *pp, void *item, uintptr_t addr) 2925 { 2926 2927 return (uintptr_t)item <= addr && addr < (uintptr_t)item + pp->pr_size; 2928 } 2929 2930 static bool 2931 pool_in_cg(struct pool *pp, struct pool_cache_group *pcg, uintptr_t addr) 2932 { 2933 int i; 2934 2935 if (pcg == NULL) { 2936 return false; 2937 } 2938 for (i = 0; i < pcg->pcg_avail; i++) { 2939 if (pool_in_item(pp, pcg->pcg_objects[i].pcgo_va, addr)) { 2940 return true; 2941 } 2942 } 2943 return false; 2944 } 2945 2946 static bool 2947 pool_allocated(struct pool *pp, struct pool_item_header *ph, uintptr_t addr) 2948 { 2949 2950 if ((pp->pr_roflags & PR_NOTOUCH) != 0) { 2951 unsigned int idx = pr_item_notouch_index(pp, ph, (void *)addr); 2952 pool_item_bitmap_t *bitmap = 2953 ph->ph_bitmap + (idx / BITMAP_SIZE); 2954 pool_item_bitmap_t mask = 1 << (idx & BITMAP_MASK); 2955 2956 return (*bitmap & mask) == 0; 2957 } else { 2958 struct pool_item *pi; 2959 2960 LIST_FOREACH(pi, &ph->ph_itemlist, pi_list) { 2961 if (pool_in_item(pp, pi, addr)) { 2962 return false; 2963 } 2964 } 2965 return true; 2966 } 2967 } 2968 2969 void 2970 pool_whatis(uintptr_t addr, void (*pr)(const char *, ...)) 2971 { 2972 struct pool *pp; 2973 2974 TAILQ_FOREACH(pp, &pool_head, pr_poollist) { 2975 struct pool_item_header *ph; 2976 uintptr_t item; 2977 bool allocated = true; 2978 bool incache = false; 2979 bool incpucache = false; 2980 char cpucachestr[32]; 2981 2982 if ((pp->pr_roflags & PR_PHINPAGE) != 0) { 2983 LIST_FOREACH(ph, &pp->pr_fullpages, ph_pagelist) { 2984 if (pool_in_page(pp, ph, addr)) { 2985 goto found; 2986 } 2987 } 2988 LIST_FOREACH(ph, &pp->pr_partpages, ph_pagelist) { 2989 if (pool_in_page(pp, ph, addr)) { 2990 allocated = 2991 pool_allocated(pp, ph, addr); 2992 goto found; 2993 } 2994 } 2995 LIST_FOREACH(ph, &pp->pr_emptypages, ph_pagelist) { 2996 if (pool_in_page(pp, ph, addr)) { 2997 allocated = false; 2998 goto found; 2999 } 3000 } 3001 continue; 3002 } else { 3003 ph = pr_find_pagehead_noalign(pp, (void *)addr); 3004 if (ph == NULL || !pool_in_page(pp, ph, addr)) { 3005 continue; 3006 } 3007 allocated = pool_allocated(pp, ph, addr); 3008 } 3009 found: 3010 if (allocated && pp->pr_cache) { 3011 pool_cache_t pc = pp->pr_cache; 3012 struct pool_cache_group *pcg; 3013 int i; 3014 3015 for (pcg = pc->pc_fullgroups; pcg != NULL; 3016 pcg = pcg->pcg_next) { 3017 if (pool_in_cg(pp, pcg, addr)) { 3018 incache = true; 3019 goto print; 3020 } 3021 } 3022 for (i = 0; i < __arraycount(pc->pc_cpus); i++) { 3023 pool_cache_cpu_t *cc; 3024 3025 if ((cc = pc->pc_cpus[i]) == NULL) { 3026 continue; 3027 } 3028 if (pool_in_cg(pp, cc->cc_current, addr) || 3029 pool_in_cg(pp, cc->cc_previous, addr)) { 3030 struct cpu_info *ci = 3031 cpu_lookup(i); 3032 3033 incpucache = true; 3034 snprintf(cpucachestr, 3035 sizeof(cpucachestr), 3036 "cached by CPU %u", 3037 ci->ci_index); 3038 goto print; 3039 } 3040 } 3041 } 3042 print: 3043 item = (uintptr_t)ph->ph_page + ph->ph_off; 3044 item = item + rounddown(addr - 
item, pp->pr_size); 3045 (*pr)("%p is %p+%zu in POOL '%s' (%s)\n", 3046 (void *)addr, item, (size_t)(addr - item), 3047 pp->pr_wchan, 3048 incpucache ? cpucachestr : 3049 incache ? "cached" : allocated ? "allocated" : "free"); 3050 } 3051 } 3052 #endif /* defined(DDB) */ 3053 3054 static int 3055 pool_sysctl(SYSCTLFN_ARGS) 3056 { 3057 struct pool_sysctl data; 3058 struct pool *pp; 3059 struct pool_cache *pc; 3060 pool_cache_cpu_t *cc; 3061 int error; 3062 size_t i, written; 3063 3064 if (oldp == NULL) { 3065 *oldlenp = 0; 3066 TAILQ_FOREACH(pp, &pool_head, pr_poollist) 3067 *oldlenp += sizeof(data); 3068 return 0; 3069 } 3070 3071 memset(&data, 0, sizeof(data)); 3072 error = 0; 3073 written = 0; 3074 TAILQ_FOREACH(pp, &pool_head, pr_poollist) { 3075 if (written + sizeof(data) > *oldlenp) 3076 break; 3077 strlcpy(data.pr_wchan, pp->pr_wchan, sizeof(data.pr_wchan)); 3078 data.pr_pagesize = pp->pr_alloc->pa_pagesz; 3079 data.pr_flags = pp->pr_roflags | pp->pr_flags; 3080 #define COPY(field) data.field = pp->field 3081 COPY(pr_size); 3082 3083 COPY(pr_itemsperpage); 3084 COPY(pr_nitems); 3085 COPY(pr_nout); 3086 COPY(pr_hardlimit); 3087 COPY(pr_npages); 3088 COPY(pr_minpages); 3089 COPY(pr_maxpages); 3090 3091 COPY(pr_nget); 3092 COPY(pr_nfail); 3093 COPY(pr_nput); 3094 COPY(pr_npagealloc); 3095 COPY(pr_npagefree); 3096 COPY(pr_hiwat); 3097 COPY(pr_nidle); 3098 #undef COPY 3099 3100 data.pr_cache_nmiss_pcpu = 0; 3101 data.pr_cache_nhit_pcpu = 0; 3102 if (pp->pr_cache) { 3103 pc = pp->pr_cache; 3104 data.pr_cache_meta_size = pc->pc_pcgsize; 3105 data.pr_cache_nfull = pc->pc_nfull; 3106 data.pr_cache_npartial = pc->pc_npart; 3107 data.pr_cache_nempty = pc->pc_nempty; 3108 data.pr_cache_ncontended = pc->pc_contended; 3109 data.pr_cache_nmiss_global = pc->pc_misses; 3110 data.pr_cache_nhit_global = pc->pc_hits; 3111 for (i = 0; i < pc->pc_ncpu; ++i) { 3112 cc = pc->pc_cpus[i]; 3113 if (cc == NULL) 3114 continue; 3115 data.pr_cache_nmiss_pcpu += cc->cc_misses; 3116 data.pr_cache_nhit_pcpu += cc->cc_hits; 3117 } 3118 } else { 3119 data.pr_cache_meta_size = 0; 3120 data.pr_cache_nfull = 0; 3121 data.pr_cache_npartial = 0; 3122 data.pr_cache_nempty = 0; 3123 data.pr_cache_ncontended = 0; 3124 data.pr_cache_nmiss_global = 0; 3125 data.pr_cache_nhit_global = 0; 3126 } 3127 3128 error = sysctl_copyout(l, &data, oldp, sizeof(data)); 3129 if (error) 3130 break; 3131 written += sizeof(data); 3132 oldp = (char *)oldp + sizeof(data); 3133 } 3134 3135 *oldlenp = written; 3136 return error; 3137 } 3138 3139 SYSCTL_SETUP(sysctl_pool_setup, "sysctl kern.pool setup") 3140 { 3141 const struct sysctlnode *rnode = NULL; 3142 3143 sysctl_createv(clog, 0, NULL, &rnode, 3144 CTLFLAG_PERMANENT, 3145 CTLTYPE_STRUCT, "pool", 3146 SYSCTL_DESCR("Get pool statistics"), 3147 pool_sysctl, 0, NULL, 0, 3148 CTL_KERN, CTL_CREATE, CTL_EOL); 3149 } 3150
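
/*
 * The kern.pool node created above exports one struct pool_sysctl per
 * pool.  A userland consumer (in the style of vmstat -m) is expected
 * to call sysctl() twice: once with a NULL buffer to learn the total
 * size, then again with a buffer of that size.  A rough sketch,
 * assuming the MIB has already been resolved with sysctlnametomib()
 * and with includes and error handling omitted:
 *
 *	size_t len;
 *	struct pool_sysctl *buf;
 *
 *	sysctl(mib, miblen, NULL, &len, NULL, 0);
 *	buf = malloc(len);
 *	sysctl(mib, miblen, buf, &len, NULL, 0);
 *	for (size_t i = 0; i < len / sizeof(*buf); i++)
 *		printf("%s: %llu gets\n", buf[i].pr_wchan,
 *		    (unsigned long long)buf[i].pr_nget);
 */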