1 /* $NetBSD: subr_pool.c,v 1.292 2024/12/07 23:23:25 chs Exp $ */ 2 3 /* 4 * Copyright (c) 1997, 1999, 2000, 2002, 2007, 2008, 2010, 2014, 2015, 2018, 5 * 2020, 2021 The NetBSD Foundation, Inc. 6 * All rights reserved. 7 * 8 * This code is derived from software contributed to The NetBSD Foundation 9 * by Paul Kranenburg; by Jason R. Thorpe of the Numerical Aerospace 10 * Simulation Facility, NASA Ames Research Center; by Andrew Doran, and by 11 * Maxime Villard. 12 * 13 * Redistribution and use in source and binary forms, with or without 14 * modification, are permitted provided that the following conditions 15 * are met: 16 * 1. Redistributions of source code must retain the above copyright 17 * notice, this list of conditions and the following disclaimer. 18 * 2. Redistributions in binary form must reproduce the above copyright 19 * notice, this list of conditions and the following disclaimer in the 20 * documentation and/or other materials provided with the distribution. 21 * 22 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 23 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 24 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 25 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 26 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 32 * POSSIBILITY OF SUCH DAMAGE. 33 */ 34 35 #include <sys/cdefs.h> 36 __KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.292 2024/12/07 23:23:25 chs Exp $"); 37 38 #ifdef _KERNEL_OPT 39 #include "opt_ddb.h" 40 #include "opt_lockdebug.h" 41 #include "opt_pool.h" 42 #endif 43 44 #include <sys/param.h> 45 #include <sys/systm.h> 46 #include <sys/sysctl.h> 47 #include <sys/bitops.h> 48 #include <sys/proc.h> 49 #include <sys/errno.h> 50 #include <sys/kernel.h> 51 #include <sys/vmem.h> 52 #include <sys/pool.h> 53 #include <sys/syslog.h> 54 #include <sys/debug.h> 55 #include <sys/lock.h> 56 #include <sys/lockdebug.h> 57 #include <sys/xcall.h> 58 #include <sys/cpu.h> 59 #include <sys/atomic.h> 60 #include <sys/asan.h> 61 #include <sys/msan.h> 62 #include <sys/fault.h> 63 64 #include <uvm/uvm_extern.h> 65 66 /* 67 * Pool resource management utility. 68 * 69 * Memory is allocated in pages which are split into pieces according to 70 * the pool item size. Each page is kept on one of three lists in the 71 * pool structure: `pr_emptypages', `pr_fullpages' and `pr_partpages', 72 * for empty, full and partially-full pages respectively. The individual 73 * pool items are on a linked list headed by `ph_itemlist' in each page 74 * header. The memory for building the page list is either taken from 75 * the allocated pages themselves (for small pool items) or taken from 76 * an internal pool of page headers (`phpool'). 77 */ 78 79 /* List of all pools. Non static as needed by 'vmstat -m' */ 80 TAILQ_HEAD(, pool) pool_head = TAILQ_HEAD_INITIALIZER(pool_head); 81 82 /* Private pool for page header structures */ 83 #define PHPOOL_MAX 8 84 static struct pool phpool[PHPOOL_MAX]; 85 #define PHPOOL_FREELIST_NELEM(idx) \ 86 (((idx) == 0) ? 
BITMAP_MIN_SIZE : BITMAP_SIZE * (1 << (idx))) 87 88 #if !defined(KMSAN) && (defined(DIAGNOSTIC) || defined(KASAN)) 89 #define POOL_REDZONE 90 #endif 91 92 #if defined(POOL_QUARANTINE) 93 #define POOL_NOCACHE 94 #endif 95 96 #ifdef POOL_REDZONE 97 # ifdef KASAN 98 # define POOL_REDZONE_SIZE 8 99 # else 100 # define POOL_REDZONE_SIZE 2 101 # endif 102 static void pool_redzone_init(struct pool *, size_t); 103 static void pool_redzone_fill(struct pool *, void *); 104 static void pool_redzone_check(struct pool *, void *); 105 static void pool_cache_redzone_check(pool_cache_t, void *); 106 #else 107 # define pool_redzone_init(pp, sz) __nothing 108 # define pool_redzone_fill(pp, ptr) __nothing 109 # define pool_redzone_check(pp, ptr) __nothing 110 # define pool_cache_redzone_check(pc, ptr) __nothing 111 #endif 112 113 #ifdef KMSAN 114 static inline void pool_get_kmsan(struct pool *, void *); 115 static inline void pool_put_kmsan(struct pool *, void *); 116 static inline void pool_cache_get_kmsan(pool_cache_t, void *); 117 static inline void pool_cache_put_kmsan(pool_cache_t, void *); 118 #else 119 #define pool_get_kmsan(pp, ptr) __nothing 120 #define pool_put_kmsan(pp, ptr) __nothing 121 #define pool_cache_get_kmsan(pc, ptr) __nothing 122 #define pool_cache_put_kmsan(pc, ptr) __nothing 123 #endif 124 125 #ifdef POOL_QUARANTINE 126 static void pool_quarantine_init(struct pool *); 127 static void pool_quarantine_flush(struct pool *); 128 static bool pool_put_quarantine(struct pool *, void *, 129 struct pool_pagelist *); 130 #else 131 #define pool_quarantine_init(a) __nothing 132 #define pool_quarantine_flush(a) __nothing 133 #define pool_put_quarantine(a, b, c) false 134 #endif 135 136 #ifdef POOL_NOCACHE 137 static bool pool_cache_put_nocache(pool_cache_t, void *); 138 #else 139 #define pool_cache_put_nocache(a, b) false 140 #endif 141 142 #define NO_CTOR __FPTRCAST(int (*)(void *, void *, int), nullop) 143 #define NO_DTOR __FPTRCAST(void (*)(void *, void *), nullop) 144 145 #define pc_has_pser(pc) (((pc)->pc_roflags & PR_PSERIALIZE) != 0) 146 #define pc_has_ctor(pc) ((pc)->pc_ctor != NO_CTOR) 147 #define pc_has_dtor(pc) ((pc)->pc_dtor != NO_DTOR) 148 149 #define pp_has_pser(pp) (((pp)->pr_roflags & PR_PSERIALIZE) != 0) 150 151 #define pool_barrier() xc_barrier(0) 152 153 /* 154 * Pool backend allocators. 155 * 156 * Each pool has a backend allocator that handles allocation, deallocation, 157 * and any additional draining that might be needed. 158 * 159 * We provide two standard allocators: 160 * 161 * pool_allocator_kmem - the default when no allocator is specified 162 * 163 * pool_allocator_nointr - used for pools that will not be accessed 164 * in interrupt context. 
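 *
 * Most callers pass a NULL allocator to pool_init() and let the pool
 * choose: for items that fit in a page, IPL_NONE selects
 * pool_allocator_nointr and any other IPL selects pool_allocator_kmem,
 * while items larger than PAGE_SIZE go to one of the pool_allocator_big
 * allocators.  A minimal usage sketch (struct foo, foo_pool and the
 * "foopl" wait channel are invented purely for illustration):
 *
 *	static struct pool foo_pool;
 *
 *	pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0, "foopl",
 *	    NULL, IPL_NONE);
 *	...
 *	struct foo *f = pool_get(&foo_pool, PR_WAITOK);
 *	...
 *	pool_put(&foo_pool, f);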
165 */ 166 void *pool_page_alloc(struct pool *, int); 167 void pool_page_free(struct pool *, void *); 168 169 static void *pool_page_alloc_meta(struct pool *, int); 170 static void pool_page_free_meta(struct pool *, void *); 171 172 struct pool_allocator pool_allocator_kmem = { 173 .pa_alloc = pool_page_alloc, 174 .pa_free = pool_page_free, 175 .pa_pagesz = 0 176 }; 177 178 struct pool_allocator pool_allocator_nointr = { 179 .pa_alloc = pool_page_alloc, 180 .pa_free = pool_page_free, 181 .pa_pagesz = 0 182 }; 183 184 struct pool_allocator pool_allocator_meta = { 185 .pa_alloc = pool_page_alloc_meta, 186 .pa_free = pool_page_free_meta, 187 .pa_pagesz = 0 188 }; 189 190 #define POOL_ALLOCATOR_BIG_BASE 13 191 static struct pool_allocator pool_allocator_big[] = { 192 { 193 .pa_alloc = pool_page_alloc, 194 .pa_free = pool_page_free, 195 .pa_pagesz = 1 << (POOL_ALLOCATOR_BIG_BASE + 0), 196 }, 197 { 198 .pa_alloc = pool_page_alloc, 199 .pa_free = pool_page_free, 200 .pa_pagesz = 1 << (POOL_ALLOCATOR_BIG_BASE + 1), 201 }, 202 { 203 .pa_alloc = pool_page_alloc, 204 .pa_free = pool_page_free, 205 .pa_pagesz = 1 << (POOL_ALLOCATOR_BIG_BASE + 2), 206 }, 207 { 208 .pa_alloc = pool_page_alloc, 209 .pa_free = pool_page_free, 210 .pa_pagesz = 1 << (POOL_ALLOCATOR_BIG_BASE + 3), 211 }, 212 { 213 .pa_alloc = pool_page_alloc, 214 .pa_free = pool_page_free, 215 .pa_pagesz = 1 << (POOL_ALLOCATOR_BIG_BASE + 4), 216 }, 217 { 218 .pa_alloc = pool_page_alloc, 219 .pa_free = pool_page_free, 220 .pa_pagesz = 1 << (POOL_ALLOCATOR_BIG_BASE + 5), 221 }, 222 { 223 .pa_alloc = pool_page_alloc, 224 .pa_free = pool_page_free, 225 .pa_pagesz = 1 << (POOL_ALLOCATOR_BIG_BASE + 6), 226 }, 227 { 228 .pa_alloc = pool_page_alloc, 229 .pa_free = pool_page_free, 230 .pa_pagesz = 1 << (POOL_ALLOCATOR_BIG_BASE + 7), 231 }, 232 { 233 .pa_alloc = pool_page_alloc, 234 .pa_free = pool_page_free, 235 .pa_pagesz = 1 << (POOL_ALLOCATOR_BIG_BASE + 8), 236 }, 237 { 238 .pa_alloc = pool_page_alloc, 239 .pa_free = pool_page_free, 240 .pa_pagesz = 1 << (POOL_ALLOCATOR_BIG_BASE + 9), 241 }, 242 { 243 .pa_alloc = pool_page_alloc, 244 .pa_free = pool_page_free, 245 .pa_pagesz = 1 << (POOL_ALLOCATOR_BIG_BASE + 10), 246 }, 247 { 248 .pa_alloc = pool_page_alloc, 249 .pa_free = pool_page_free, 250 .pa_pagesz = 1 << (POOL_ALLOCATOR_BIG_BASE + 11), 251 } 252 }; 253 254 static int pool_bigidx(size_t); 255 256 /* # of seconds to retain page after last use */ 257 int pool_inactive_time = 10; 258 259 /* Next candidate for drainage (see pool_drain()) */ 260 static struct pool *drainpp; 261 262 /* This lock protects both pool_head and drainpp. 
*/ 263 static kmutex_t pool_head_lock; 264 static kcondvar_t pool_busy; 265 266 /* This lock protects initialization of a potentially shared pool allocator */ 267 static kmutex_t pool_allocator_lock; 268 269 static unsigned int poolid_counter = 0; 270 271 typedef uint32_t pool_item_bitmap_t; 272 #define BITMAP_SIZE (CHAR_BIT * sizeof(pool_item_bitmap_t)) 273 #define BITMAP_MASK (BITMAP_SIZE - 1) 274 #define BITMAP_MIN_SIZE (CHAR_BIT * sizeof(((struct pool_item_header *)NULL)->ph_u2)) 275 276 struct pool_item_header { 277 /* Page headers */ 278 LIST_ENTRY(pool_item_header) 279 ph_pagelist; /* pool page list */ 280 union { 281 /* !PR_PHINPAGE */ 282 struct { 283 SPLAY_ENTRY(pool_item_header) 284 phu_node; /* off-page page headers */ 285 } phu_offpage; 286 /* PR_PHINPAGE */ 287 struct { 288 unsigned int phu_poolid; 289 } phu_onpage; 290 } ph_u1; 291 void * ph_page; /* this page's address */ 292 uint32_t ph_time; /* last referenced */ 293 uint16_t ph_nmissing; /* # of chunks in use */ 294 uint16_t ph_off; /* start offset in page */ 295 union { 296 /* !PR_USEBMAP */ 297 struct { 298 LIST_HEAD(, pool_item) 299 phu_itemlist; /* chunk list for this page */ 300 } phu_normal; 301 /* PR_USEBMAP */ 302 struct { 303 pool_item_bitmap_t phu_bitmap[1]; 304 } phu_notouch; 305 } ph_u2; 306 }; 307 #define ph_node ph_u1.phu_offpage.phu_node 308 #define ph_poolid ph_u1.phu_onpage.phu_poolid 309 #define ph_itemlist ph_u2.phu_normal.phu_itemlist 310 #define ph_bitmap ph_u2.phu_notouch.phu_bitmap 311 312 #define PHSIZE ALIGN(sizeof(struct pool_item_header)) 313 314 CTASSERT(offsetof(struct pool_item_header, ph_u2) + 315 BITMAP_MIN_SIZE / CHAR_BIT == sizeof(struct pool_item_header)); 316 317 #if defined(DIAGNOSTIC) && !defined(KASAN) 318 #define POOL_CHECK_MAGIC 319 #endif 320 321 struct pool_item { 322 #ifdef POOL_CHECK_MAGIC 323 u_int pi_magic; 324 #endif 325 #define PI_MAGIC 0xdeaddeadU 326 /* Other entries use only this list entry */ 327 LIST_ENTRY(pool_item) pi_list; 328 }; 329 330 #define POOL_NEEDS_CATCHUP(pp) \ 331 ((pp)->pr_nitems < (pp)->pr_minitems || \ 332 (pp)->pr_npages < (pp)->pr_minpages) 333 #define POOL_OBJ_TO_PAGE(pp, v) \ 334 (void *)((uintptr_t)v & pp->pr_alloc->pa_pagemask) 335 336 /* 337 * Pool cache management. 338 * 339 * Pool caches provide a way for constructed objects to be cached by the 340 * pool subsystem. This can lead to performance improvements by avoiding 341 * needless object construction/destruction; it is deferred until absolutely 342 * necessary. 343 * 344 * Caches are grouped into cache groups. Each cache group references up 345 * to PCG_NUMOBJECTS constructed objects. When a cache allocates an 346 * object from the pool, it calls the object's constructor and places it 347 * into a cache group. When a cache group frees an object back to the 348 * pool, it first calls the object's destructor. This allows the object 349 * to persist in constructed form while freed to the cache. 350 * 351 * The pool references each cache, so that when a pool is drained by the 352 * pagedaemon, it can drain each individual cache as well. Each time a 353 * cache is drained, the most idle cache group is freed to the pool in 354 * its entirety. 355 * 356 * Pool caches are laid on top of pools. By layering them, we can avoid 357 * the complexity of cache management for pools which would not benefit 358 * from it. 
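 *
 * An illustrative consumer sketch (struct foo, foo_cache, foo_ctor and
 * foo_dtor are invented names, not part of this file):
 *
 *	static pool_cache_t foo_cache;
 *
 *	foo_cache = pool_cache_init(sizeof(struct foo), coherency_unit,
 *	    0, 0, "foocache", NULL, IPL_NONE, foo_ctor, foo_dtor, NULL);
 *	...
 *	struct foo *f = pool_cache_get(foo_cache, PR_WAITOK);
 *	...
 *	pool_cache_put(foo_cache, f);
 *
 * A NULL constructor and destructor are allowed; the cache then simply
 * adds per-CPU caching on top of the underlying pool.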
359 */ 360 361 static struct pool pcg_normal_pool; 362 static struct pool pcg_large_pool; 363 static struct pool cache_pool; 364 static struct pool cache_cpu_pool; 365 366 static pcg_t *volatile pcg_large_cache __cacheline_aligned; 367 static pcg_t *volatile pcg_normal_cache __cacheline_aligned; 368 369 /* List of all caches. */ 370 TAILQ_HEAD(,pool_cache) pool_cache_head = 371 TAILQ_HEAD_INITIALIZER(pool_cache_head); 372 373 int pool_cache_disable; /* global disable for caching */ 374 static const pcg_t pcg_dummy; /* zero sized: always empty, yet always full */ 375 376 static bool pool_cache_put_slow(pool_cache_t, pool_cache_cpu_t *, int, 377 void *); 378 static bool pool_cache_get_slow(pool_cache_t, pool_cache_cpu_t *, int, 379 void **, paddr_t *, int); 380 static void pool_cache_cpu_init1(struct cpu_info *, pool_cache_t); 381 static int pool_cache_invalidate_groups(pool_cache_t, pcg_t *); 382 static void pool_cache_invalidate_cpu(pool_cache_t, u_int); 383 static void pool_cache_transfer(pool_cache_t); 384 static int pool_pcg_get(pcg_t *volatile *, pcg_t **); 385 static int pool_pcg_put(pcg_t *volatile *, pcg_t *); 386 static pcg_t * pool_pcg_trunc(pcg_t *volatile *); 387 388 static int pool_catchup(struct pool *); 389 static void pool_prime_page(struct pool *, void *, 390 struct pool_item_header *); 391 static void pool_update_curpage(struct pool *); 392 393 static int pool_grow(struct pool *, int); 394 static void *pool_allocator_alloc(struct pool *, int); 395 static void pool_allocator_free(struct pool *, void *); 396 397 static void pool_print_pagelist(struct pool *, struct pool_pagelist *, 398 void (*)(const char *, ...) __printflike(1, 2)); 399 static void pool_print1(struct pool *, const char *, 400 void (*)(const char *, ...) __printflike(1, 2)); 401 402 static int pool_chk_page(struct pool *, const char *, 403 struct pool_item_header *); 404 405 /* -------------------------------------------------------------------------- */ 406 407 static inline unsigned int 408 pr_item_bitmap_index(const struct pool *pp, const struct pool_item_header *ph, 409 const void *v) 410 { 411 const char *cp = v; 412 unsigned int idx; 413 414 KASSERT(pp->pr_roflags & PR_USEBMAP); 415 idx = (cp - (char *)ph->ph_page - ph->ph_off) / pp->pr_size; 416 417 if (__predict_false(idx >= pp->pr_itemsperpage)) { 418 panic("%s: [%s] %u >= %u", __func__, pp->pr_wchan, idx, 419 pp->pr_itemsperpage); 420 } 421 422 return idx; 423 } 424 425 static inline void 426 pr_item_bitmap_put(const struct pool *pp, struct pool_item_header *ph, 427 void *obj) 428 { 429 unsigned int idx = pr_item_bitmap_index(pp, ph, obj); 430 pool_item_bitmap_t *bitmap = ph->ph_bitmap + (idx / BITMAP_SIZE); 431 pool_item_bitmap_t mask = 1U << (idx & BITMAP_MASK); 432 433 if (__predict_false((*bitmap & mask) != 0)) { 434 panic("%s: [%s] %p already freed", __func__, pp->pr_wchan, obj); 435 } 436 437 *bitmap |= mask; 438 } 439 440 static inline void * 441 pr_item_bitmap_get(const struct pool *pp, struct pool_item_header *ph) 442 { 443 pool_item_bitmap_t *bitmap = ph->ph_bitmap; 444 unsigned int idx; 445 int i; 446 447 for (i = 0; ; i++) { 448 int bit; 449 450 KASSERT((i * BITMAP_SIZE) < pp->pr_itemsperpage); 451 bit = ffs32(bitmap[i]); 452 if (bit) { 453 pool_item_bitmap_t mask; 454 455 bit--; 456 idx = (i * BITMAP_SIZE) + bit; 457 mask = 1U << bit; 458 KASSERT((bitmap[i] & mask) != 0); 459 bitmap[i] &= ~mask; 460 break; 461 } 462 } 463 KASSERT(idx < pp->pr_itemsperpage); 464 return (char *)ph->ph_page + ph->ph_off + idx * pp->pr_size; 465 } 466 467 
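/*
 * The pr_item_bitmap_* routines above and below track free items with one
 * bit per item in ph_bitmap[]: a set bit means the item is free.  Item
 * number idx lives in word idx / BITMAP_SIZE under mask
 * 1U << (idx & BITMAP_MASK).  Worked example (sizes assumed purely for
 * illustration): with pr_size == 256, an object 1792 bytes into the item
 * area has idx == 7, i.e. word 0, mask 0x80.
 */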
static inline void 468 pr_item_bitmap_init(const struct pool *pp, struct pool_item_header *ph) 469 { 470 pool_item_bitmap_t *bitmap = ph->ph_bitmap; 471 const int n = howmany(pp->pr_itemsperpage, BITMAP_SIZE); 472 int i; 473 474 for (i = 0; i < n; i++) { 475 bitmap[i] = (pool_item_bitmap_t)-1; 476 } 477 } 478 479 /* -------------------------------------------------------------------------- */ 480 481 static inline void 482 pr_item_linkedlist_put(const struct pool *pp, struct pool_item_header *ph, 483 void *obj) 484 { 485 struct pool_item *pi = obj; 486 487 KASSERT(!pp_has_pser(pp)); 488 489 #ifdef POOL_CHECK_MAGIC 490 pi->pi_magic = PI_MAGIC; 491 #endif 492 493 if (pp->pr_redzone) { 494 /* 495 * Mark the pool_item as valid. The rest is already 496 * invalid. 497 */ 498 kasan_mark(pi, sizeof(*pi), sizeof(*pi), 0); 499 } 500 501 LIST_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list); 502 } 503 504 static inline void * 505 pr_item_linkedlist_get(struct pool *pp, struct pool_item_header *ph) 506 { 507 struct pool_item *pi; 508 void *v; 509 510 v = pi = LIST_FIRST(&ph->ph_itemlist); 511 if (__predict_false(v == NULL)) { 512 mutex_exit(&pp->pr_lock); 513 panic("%s: [%s] page empty", __func__, pp->pr_wchan); 514 } 515 KASSERTMSG((pp->pr_nitems > 0), 516 "%s: [%s] nitems %u inconsistent on itemlist", 517 __func__, pp->pr_wchan, pp->pr_nitems); 518 #ifdef POOL_CHECK_MAGIC 519 KASSERTMSG((pi->pi_magic == PI_MAGIC), 520 "%s: [%s] free list modified: " 521 "magic=%x; page %p; item addr %p", __func__, 522 pp->pr_wchan, pi->pi_magic, ph->ph_page, pi); 523 #endif 524 525 /* 526 * Remove from item list. 527 */ 528 LIST_REMOVE(pi, pi_list); 529 530 return v; 531 } 532 533 /* -------------------------------------------------------------------------- */ 534 535 static inline void 536 pr_phinpage_check(struct pool *pp, struct pool_item_header *ph, void *page, 537 void *object) 538 { 539 if (__predict_false((void *)ph->ph_page != page)) { 540 panic("%s: [%s] item %p not part of pool", __func__, 541 pp->pr_wchan, object); 542 } 543 if (__predict_false((char *)object < (char *)page + ph->ph_off)) { 544 panic("%s: [%s] item %p below item space", __func__, 545 pp->pr_wchan, object); 546 } 547 if (__predict_false(ph->ph_poolid != pp->pr_poolid)) { 548 panic("%s: [%s] item %p poolid %u != %u", __func__, 549 pp->pr_wchan, object, ph->ph_poolid, pp->pr_poolid); 550 } 551 } 552 553 static inline void 554 pc_phinpage_check(pool_cache_t pc, void *object) 555 { 556 struct pool_item_header *ph; 557 struct pool *pp; 558 void *page; 559 560 pp = &pc->pc_pool; 561 page = POOL_OBJ_TO_PAGE(pp, object); 562 ph = (struct pool_item_header *)page; 563 564 pr_phinpage_check(pp, ph, page, object); 565 } 566 567 /* -------------------------------------------------------------------------- */ 568 569 static inline int 570 phtree_compare(struct pool_item_header *a, struct pool_item_header *b) 571 { 572 573 /* 574 * We consider pool_item_header with smaller ph_page bigger. This 575 * unnatural ordering is for the benefit of pr_find_pagehead. 
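	 * The lookup in pr_find_pagehead_noalign() relies on it: SPLAY_FIND()
	 * plus the optional SPLAY_NEXT() correction end up returning the
	 * header with the largest ph_page that does not exceed the item
	 * address, i.e. the page the item belongs to.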
576 */ 577 if (a->ph_page < b->ph_page) 578 return 1; 579 else if (a->ph_page > b->ph_page) 580 return -1; 581 else 582 return 0; 583 } 584 585 SPLAY_PROTOTYPE(phtree, pool_item_header, ph_node, phtree_compare); 586 SPLAY_GENERATE(phtree, pool_item_header, ph_node, phtree_compare); 587 588 static inline struct pool_item_header * 589 pr_find_pagehead_noalign(struct pool *pp, void *v) 590 { 591 struct pool_item_header *ph, tmp; 592 593 tmp.ph_page = (void *)(uintptr_t)v; 594 ph = SPLAY_FIND(phtree, &pp->pr_phtree, &tmp); 595 if (ph == NULL) { 596 ph = SPLAY_ROOT(&pp->pr_phtree); 597 if (ph != NULL && phtree_compare(&tmp, ph) >= 0) { 598 ph = SPLAY_NEXT(phtree, &pp->pr_phtree, ph); 599 } 600 KASSERT(ph == NULL || phtree_compare(&tmp, ph) < 0); 601 } 602 603 return ph; 604 } 605 606 /* 607 * Return the pool page header based on item address. 608 */ 609 static inline struct pool_item_header * 610 pr_find_pagehead(struct pool *pp, void *v) 611 { 612 struct pool_item_header *ph, tmp; 613 614 if ((pp->pr_roflags & PR_NOALIGN) != 0) { 615 ph = pr_find_pagehead_noalign(pp, v); 616 } else { 617 void *page = POOL_OBJ_TO_PAGE(pp, v); 618 if ((pp->pr_roflags & PR_PHINPAGE) != 0) { 619 ph = (struct pool_item_header *)page; 620 pr_phinpage_check(pp, ph, page, v); 621 } else { 622 tmp.ph_page = page; 623 ph = SPLAY_FIND(phtree, &pp->pr_phtree, &tmp); 624 } 625 } 626 627 KASSERT(ph == NULL || ((pp->pr_roflags & PR_PHINPAGE) != 0) || 628 ((char *)ph->ph_page <= (char *)v && 629 (char *)v < (char *)ph->ph_page + pp->pr_alloc->pa_pagesz)); 630 return ph; 631 } 632 633 static void 634 pr_pagelist_free(struct pool *pp, struct pool_pagelist *pq) 635 { 636 struct pool_item_header *ph; 637 638 while ((ph = LIST_FIRST(pq)) != NULL) { 639 LIST_REMOVE(ph, ph_pagelist); 640 pool_allocator_free(pp, ph->ph_page); 641 if ((pp->pr_roflags & PR_PHINPAGE) == 0) 642 pool_put(pp->pr_phpool, ph); 643 } 644 } 645 646 /* 647 * Remove a page from the pool. 648 */ 649 static inline void 650 pr_rmpage(struct pool *pp, struct pool_item_header *ph, 651 struct pool_pagelist *pq) 652 { 653 654 KASSERT(mutex_owned(&pp->pr_lock)); 655 656 /* 657 * If the page was idle, decrement the idle page count. 658 */ 659 if (ph->ph_nmissing == 0) { 660 KASSERT(pp->pr_nidle != 0); 661 KASSERTMSG((pp->pr_nitems >= pp->pr_itemsperpage), 662 "%s: [%s] nitems=%u < itemsperpage=%u", __func__, 663 pp->pr_wchan, pp->pr_nitems, pp->pr_itemsperpage); 664 pp->pr_nidle--; 665 } 666 667 pp->pr_nitems -= pp->pr_itemsperpage; 668 669 /* 670 * Unlink the page from the pool and queue it for release. 671 */ 672 LIST_REMOVE(ph, ph_pagelist); 673 if (pp->pr_roflags & PR_PHINPAGE) { 674 if (__predict_false(ph->ph_poolid != pp->pr_poolid)) { 675 panic("%s: [%s] ph %p poolid %u != %u", 676 __func__, pp->pr_wchan, ph, ph->ph_poolid, 677 pp->pr_poolid); 678 } 679 } else { 680 SPLAY_REMOVE(phtree, &pp->pr_phtree, ph); 681 } 682 LIST_INSERT_HEAD(pq, ph, ph_pagelist); 683 684 pp->pr_npages--; 685 pp->pr_npagefree++; 686 687 pool_update_curpage(pp); 688 } 689 690 /* 691 * Initialize all the pools listed in the "pools" link set. 692 */ 693 void 694 pool_subsystem_init(void) 695 { 696 size_t size; 697 int idx; 698 699 mutex_init(&pool_head_lock, MUTEX_DEFAULT, IPL_NONE); 700 mutex_init(&pool_allocator_lock, MUTEX_DEFAULT, IPL_NONE); 701 cv_init(&pool_busy, "poolbusy"); 702 703 /* 704 * Initialize private page header pool and cache magazine pool if we 705 * haven't done so yet. 
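	 * Each phpool-<n> created here provides off-page page headers whose
	 * trailing ph_bitmap[] can track up to <n> items; pool_init() later
	 * picks the smallest such pool that covers pr_itemsperpage.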
706 */ 707 for (idx = 0; idx < PHPOOL_MAX; idx++) { 708 static char phpool_names[PHPOOL_MAX][6+1+6+1]; 709 int nelem; 710 size_t sz; 711 712 nelem = PHPOOL_FREELIST_NELEM(idx); 713 KASSERT(nelem != 0); 714 snprintf(phpool_names[idx], sizeof(phpool_names[idx]), 715 "phpool-%d", nelem); 716 sz = offsetof(struct pool_item_header, 717 ph_bitmap[howmany(nelem, BITMAP_SIZE)]); 718 pool_init(&phpool[idx], sz, 0, 0, 0, 719 phpool_names[idx], &pool_allocator_meta, IPL_VM); 720 } 721 722 size = sizeof(pcg_t) + 723 (PCG_NOBJECTS_NORMAL - 1) * sizeof(pcgpair_t); 724 pool_init(&pcg_normal_pool, size, coherency_unit, 0, 0, 725 "pcgnormal", &pool_allocator_meta, IPL_VM); 726 727 size = sizeof(pcg_t) + 728 (PCG_NOBJECTS_LARGE - 1) * sizeof(pcgpair_t); 729 pool_init(&pcg_large_pool, size, coherency_unit, 0, 0, 730 "pcglarge", &pool_allocator_meta, IPL_VM); 731 732 pool_init(&cache_pool, sizeof(struct pool_cache), coherency_unit, 733 0, 0, "pcache", &pool_allocator_meta, IPL_NONE); 734 735 pool_init(&cache_cpu_pool, sizeof(pool_cache_cpu_t), coherency_unit, 736 0, 0, "pcachecpu", &pool_allocator_meta, IPL_NONE); 737 } 738 739 static inline bool 740 pool_init_is_phinpage(const struct pool *pp) 741 { 742 size_t pagesize; 743 744 if (pp->pr_roflags & PR_PHINPAGE) { 745 return true; 746 } 747 if (pp->pr_roflags & (PR_NOTOUCH | PR_NOALIGN)) { 748 return false; 749 } 750 751 pagesize = pp->pr_alloc->pa_pagesz; 752 753 /* 754 * Threshold: the item size is below 1/16 of a page size, and below 755 * 8 times the page header size. The latter ensures we go off-page 756 * if the page header would make us waste a rather big item. 757 */ 758 if (pp->pr_size < MIN(pagesize / 16, PHSIZE * 8)) { 759 return true; 760 } 761 762 /* Put the header into the page if it doesn't waste any items. */ 763 if (pagesize / pp->pr_size == (pagesize - PHSIZE) / pp->pr_size) { 764 return true; 765 } 766 767 return false; 768 } 769 770 static inline bool 771 pool_init_is_usebmap(const struct pool *pp) 772 { 773 size_t bmapsize; 774 775 if (pp->pr_roflags & PR_NOTOUCH) { 776 return true; 777 } 778 779 /* 780 * If we're off-page, go with a bitmap. 781 */ 782 if (!(pp->pr_roflags & PR_PHINPAGE)) { 783 return true; 784 } 785 786 /* 787 * If we're on-page, and the page header can already contain a bitmap 788 * big enough to cover all the items of the page, go with a bitmap. 789 */ 790 bmapsize = roundup(PHSIZE, pp->pr_align) - 791 offsetof(struct pool_item_header, ph_bitmap[0]); 792 KASSERT(bmapsize % sizeof(pool_item_bitmap_t) == 0); 793 if (pp->pr_itemsperpage <= bmapsize * CHAR_BIT) { 794 return true; 795 } 796 797 return false; 798 } 799 800 /* 801 * Initialize the given pool resource structure. 802 * 803 * We export this routine to allow other kernel parts to declare 804 * static pools that must be initialized before kmem(9) is available. 805 */ 806 void 807 pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags, 808 const char *wchan, struct pool_allocator *palloc, int ipl) 809 { 810 struct pool *pp1; 811 size_t prsize; 812 int itemspace, slack; 813 814 /* XXX ioff will be removed. */ 815 KASSERT(ioff == 0); 816 817 #ifdef DEBUG 818 if (__predict_true(!cold)) 819 mutex_enter(&pool_head_lock); 820 /* 821 * Check that the pool hasn't already been initialised and 822 * added to the list of all pools. 
823 */ 824 TAILQ_FOREACH(pp1, &pool_head, pr_poollist) { 825 if (pp == pp1) 826 panic("%s: [%s] already initialised", __func__, 827 wchan); 828 } 829 if (__predict_true(!cold)) 830 mutex_exit(&pool_head_lock); 831 #endif 832 833 if (palloc == NULL) { 834 if (size > PAGE_SIZE) { 835 int bigidx = pool_bigidx(size); 836 837 palloc = &pool_allocator_big[bigidx]; 838 flags |= PR_NOALIGN; 839 } else if (ipl == IPL_NONE) { 840 palloc = &pool_allocator_nointr; 841 } else { 842 palloc = &pool_allocator_kmem; 843 } 844 } 845 846 if (!cold) 847 mutex_enter(&pool_allocator_lock); 848 if (palloc->pa_refcnt++ == 0) { 849 if (palloc->pa_pagesz == 0) 850 palloc->pa_pagesz = PAGE_SIZE; 851 852 TAILQ_INIT(&palloc->pa_list); 853 854 mutex_init(&palloc->pa_lock, MUTEX_DEFAULT, IPL_VM); 855 palloc->pa_pagemask = ~(palloc->pa_pagesz - 1); 856 palloc->pa_pageshift = ffs(palloc->pa_pagesz) - 1; 857 } 858 if (!cold) 859 mutex_exit(&pool_allocator_lock); 860 861 /* 862 * PR_PSERIALIZE implies PR_NOTOUCH; freed objects must remain 863 * valid until the the backing page is returned to the system. 864 */ 865 if (flags & PR_PSERIALIZE) { 866 flags |= PR_NOTOUCH; 867 } 868 869 if (align == 0) 870 align = ALIGN(1); 871 872 prsize = size; 873 if ((flags & PR_NOTOUCH) == 0 && prsize < sizeof(struct pool_item)) 874 prsize = sizeof(struct pool_item); 875 876 prsize = roundup(prsize, align); 877 KASSERTMSG((prsize <= palloc->pa_pagesz), 878 "%s: [%s] pool item size (%zu) larger than page size (%u)", 879 __func__, wchan, prsize, palloc->pa_pagesz); 880 881 /* 882 * Initialize the pool structure. 883 */ 884 LIST_INIT(&pp->pr_emptypages); 885 LIST_INIT(&pp->pr_fullpages); 886 LIST_INIT(&pp->pr_partpages); 887 pp->pr_cache = NULL; 888 pp->pr_curpage = NULL; 889 pp->pr_npages = 0; 890 pp->pr_minitems = 0; 891 pp->pr_minpages = 0; 892 pp->pr_maxitems = UINT_MAX; 893 pp->pr_maxpages = UINT_MAX; 894 pp->pr_roflags = flags; 895 pp->pr_flags = 0; 896 pp->pr_size = prsize; 897 pp->pr_reqsize = size; 898 pp->pr_align = align; 899 pp->pr_wchan = wchan; 900 pp->pr_alloc = palloc; 901 pp->pr_poolid = atomic_inc_uint_nv(&poolid_counter); 902 pp->pr_nitems = 0; 903 pp->pr_nout = 0; 904 pp->pr_hardlimit = UINT_MAX; 905 pp->pr_hardlimit_warning = NULL; 906 pp->pr_hardlimit_ratecap.tv_sec = 0; 907 pp->pr_hardlimit_ratecap.tv_usec = 0; 908 pp->pr_hardlimit_warning_last.tv_sec = 0; 909 pp->pr_hardlimit_warning_last.tv_usec = 0; 910 pp->pr_drain_hook = NULL; 911 pp->pr_drain_hook_arg = NULL; 912 pp->pr_freecheck = NULL; 913 pp->pr_redzone = false; 914 pool_redzone_init(pp, size); 915 pool_quarantine_init(pp); 916 917 /* 918 * Decide whether to put the page header off-page to avoid wasting too 919 * large a part of the page or too big an item. Off-page page headers 920 * go on a hash table, so we can match a returned item with its header 921 * based on the page address. 922 */ 923 if (pool_init_is_phinpage(pp)) { 924 /* Use the beginning of the page for the page header */ 925 itemspace = palloc->pa_pagesz - roundup(PHSIZE, align); 926 pp->pr_itemoffset = roundup(PHSIZE, align); 927 pp->pr_roflags |= PR_PHINPAGE; 928 } else { 929 /* The page header will be taken from our page header pool */ 930 itemspace = palloc->pa_pagesz; 931 pp->pr_itemoffset = 0; 932 SPLAY_INIT(&pp->pr_phtree); 933 } 934 935 pp->pr_itemsperpage = itemspace / pp->pr_size; 936 KASSERT(pp->pr_itemsperpage != 0); 937 938 /* 939 * Decide whether to use a bitmap or a linked list to manage freed 940 * items. 
941 */ 942 if (pool_init_is_usebmap(pp)) { 943 pp->pr_roflags |= PR_USEBMAP; 944 } 945 946 /* 947 * If we're off-page, then we're using a bitmap; choose the appropriate 948 * pool to allocate page headers, whose size varies depending on the 949 * bitmap. If we're on-page, nothing to do. 950 */ 951 if (!(pp->pr_roflags & PR_PHINPAGE)) { 952 int idx; 953 954 KASSERT(pp->pr_roflags & PR_USEBMAP); 955 956 for (idx = 0; pp->pr_itemsperpage > PHPOOL_FREELIST_NELEM(idx); 957 idx++) { 958 /* nothing */ 959 } 960 if (idx >= PHPOOL_MAX) { 961 /* 962 * if you see this panic, consider to tweak 963 * PHPOOL_MAX and PHPOOL_FREELIST_NELEM. 964 */ 965 panic("%s: [%s] too large itemsperpage(%d) for " 966 "PR_USEBMAP", __func__, 967 pp->pr_wchan, pp->pr_itemsperpage); 968 } 969 pp->pr_phpool = &phpool[idx]; 970 } else { 971 pp->pr_phpool = NULL; 972 } 973 974 /* 975 * Use the slack between the chunks and the page header 976 * for "cache coloring". 977 */ 978 slack = itemspace - pp->pr_itemsperpage * pp->pr_size; 979 pp->pr_maxcolor = rounddown(slack, align); 980 pp->pr_curcolor = 0; 981 982 pp->pr_nget = 0; 983 pp->pr_nfail = 0; 984 pp->pr_nput = 0; 985 pp->pr_npagealloc = 0; 986 pp->pr_npagefree = 0; 987 pp->pr_hiwat = 0; 988 pp->pr_nidle = 0; 989 pp->pr_refcnt = 0; 990 991 mutex_init(&pp->pr_lock, MUTEX_DEFAULT, ipl); 992 cv_init(&pp->pr_cv, wchan); 993 pp->pr_ipl = ipl; 994 995 /* Insert into the list of all pools. */ 996 if (!cold) 997 mutex_enter(&pool_head_lock); 998 TAILQ_FOREACH(pp1, &pool_head, pr_poollist) { 999 if (strcmp(pp1->pr_wchan, pp->pr_wchan) > 0) 1000 break; 1001 } 1002 if (pp1 == NULL) 1003 TAILQ_INSERT_TAIL(&pool_head, pp, pr_poollist); 1004 else 1005 TAILQ_INSERT_BEFORE(pp1, pp, pr_poollist); 1006 if (!cold) 1007 mutex_exit(&pool_head_lock); 1008 1009 /* Insert this into the list of pools using this allocator. */ 1010 if (!cold) 1011 mutex_enter(&palloc->pa_lock); 1012 TAILQ_INSERT_TAIL(&palloc->pa_list, pp, pr_alloc_list); 1013 if (!cold) 1014 mutex_exit(&palloc->pa_lock); 1015 } 1016 1017 /* 1018 * De-commission a pool resource. 1019 */ 1020 void 1021 pool_destroy(struct pool *pp) 1022 { 1023 struct pool_pagelist pq; 1024 struct pool_item_header *ph; 1025 1026 pool_quarantine_flush(pp); 1027 1028 /* Remove from global pool list */ 1029 mutex_enter(&pool_head_lock); 1030 while (pp->pr_refcnt != 0) 1031 cv_wait(&pool_busy, &pool_head_lock); 1032 TAILQ_REMOVE(&pool_head, pp, pr_poollist); 1033 if (drainpp == pp) 1034 drainpp = NULL; 1035 mutex_exit(&pool_head_lock); 1036 1037 /* Remove this pool from its allocator's list of pools. 
*/ 1038 mutex_enter(&pp->pr_alloc->pa_lock); 1039 TAILQ_REMOVE(&pp->pr_alloc->pa_list, pp, pr_alloc_list); 1040 mutex_exit(&pp->pr_alloc->pa_lock); 1041 1042 mutex_enter(&pool_allocator_lock); 1043 if (--pp->pr_alloc->pa_refcnt == 0) 1044 mutex_destroy(&pp->pr_alloc->pa_lock); 1045 mutex_exit(&pool_allocator_lock); 1046 1047 mutex_enter(&pp->pr_lock); 1048 1049 KASSERT(pp->pr_cache == NULL); 1050 KASSERTMSG((pp->pr_nout == 0), 1051 "%s: [%s] pool busy: still out: %u", __func__, pp->pr_wchan, 1052 pp->pr_nout); 1053 KASSERT(LIST_EMPTY(&pp->pr_fullpages)); 1054 KASSERT(LIST_EMPTY(&pp->pr_partpages)); 1055 1056 /* Remove all pages */ 1057 LIST_INIT(&pq); 1058 while ((ph = LIST_FIRST(&pp->pr_emptypages)) != NULL) 1059 pr_rmpage(pp, ph, &pq); 1060 1061 mutex_exit(&pp->pr_lock); 1062 1063 pr_pagelist_free(pp, &pq); 1064 cv_destroy(&pp->pr_cv); 1065 mutex_destroy(&pp->pr_lock); 1066 } 1067 1068 void 1069 pool_set_drain_hook(struct pool *pp, void (*fn)(void *, int), void *arg) 1070 { 1071 1072 /* XXX no locking -- must be used just after pool_init() */ 1073 KASSERTMSG((pp->pr_drain_hook == NULL), 1074 "%s: [%s] already set", __func__, pp->pr_wchan); 1075 pp->pr_drain_hook = fn; 1076 pp->pr_drain_hook_arg = arg; 1077 } 1078 1079 static struct pool_item_header * 1080 pool_alloc_item_header(struct pool *pp, void *storage, int flags) 1081 { 1082 struct pool_item_header *ph; 1083 1084 if ((pp->pr_roflags & PR_PHINPAGE) != 0) 1085 ph = storage; 1086 else 1087 ph = pool_get(pp->pr_phpool, flags); 1088 1089 return ph; 1090 } 1091 1092 /* 1093 * Grab an item from the pool. 1094 */ 1095 void * 1096 pool_get(struct pool *pp, int flags) 1097 { 1098 struct pool_item_header *ph; 1099 void *v; 1100 1101 KASSERT(!(flags & PR_NOWAIT) != !(flags & PR_WAITOK)); 1102 KASSERTMSG((pp->pr_itemsperpage != 0), 1103 "%s: [%s] pr_itemsperpage is zero, " 1104 "pool not initialized?", __func__, pp->pr_wchan); 1105 KASSERTMSG((!(cpu_intr_p() || cpu_softintr_p()) 1106 || pp->pr_ipl != IPL_NONE || cold || panicstr != NULL), 1107 "%s: [%s] is IPL_NONE, but called from interrupt context", 1108 __func__, pp->pr_wchan); 1109 if (flags & PR_WAITOK) { 1110 ASSERT_SLEEPABLE(); 1111 } 1112 1113 if (flags & PR_NOWAIT) { 1114 if (fault_inject()) 1115 return NULL; 1116 } 1117 1118 mutex_enter(&pp->pr_lock); 1119 startover: 1120 /* 1121 * Check to see if we've reached the hard limit. If we have, 1122 * and we can wait, then wait until an item has been returned to 1123 * the pool. 1124 */ 1125 KASSERTMSG((pp->pr_nout <= pp->pr_hardlimit), 1126 "%s: %s: crossed hard limit", __func__, pp->pr_wchan); 1127 if (__predict_false(pp->pr_nout == pp->pr_hardlimit)) { 1128 if (pp->pr_drain_hook != NULL) { 1129 /* 1130 * Since the drain hook is going to free things 1131 * back to the pool, unlock, call the hook, re-lock, 1132 * and check the hardlimit condition again. 1133 */ 1134 mutex_exit(&pp->pr_lock); 1135 (*pp->pr_drain_hook)(pp->pr_drain_hook_arg, flags); 1136 mutex_enter(&pp->pr_lock); 1137 if (pp->pr_nout < pp->pr_hardlimit) 1138 goto startover; 1139 } 1140 1141 if ((flags & PR_WAITOK) && !(flags & PR_LIMITFAIL)) { 1142 /* 1143 * XXX: A warning isn't logged in this case. Should 1144 * it be? 1145 */ 1146 pp->pr_flags |= PR_WANTED; 1147 do { 1148 cv_wait(&pp->pr_cv, &pp->pr_lock); 1149 } while (pp->pr_flags & PR_WANTED); 1150 goto startover; 1151 } 1152 1153 /* 1154 * Log a message that the hard limit has been hit. 
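	 * The message is rate-limited through pr_hardlimit_ratecap, as
	 * configured by pool_sethardlimit().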
1155 */ 1156 if (pp->pr_hardlimit_warning != NULL && 1157 ratecheck(&pp->pr_hardlimit_warning_last, 1158 &pp->pr_hardlimit_ratecap)) 1159 log(LOG_ERR, "%s\n", pp->pr_hardlimit_warning); 1160 1161 pp->pr_nfail++; 1162 1163 mutex_exit(&pp->pr_lock); 1164 KASSERT((flags & (PR_NOWAIT|PR_LIMITFAIL)) != 0); 1165 return NULL; 1166 } 1167 1168 /* 1169 * The convention we use is that if `curpage' is not NULL, then 1170 * it points at a non-empty bucket. In particular, `curpage' 1171 * never points at a page header which has PR_PHINPAGE set and 1172 * has no items in its bucket. 1173 */ 1174 if ((ph = pp->pr_curpage) == NULL) { 1175 int error; 1176 1177 KASSERTMSG((pp->pr_nitems == 0), 1178 "%s: [%s] curpage NULL, inconsistent nitems %u", 1179 __func__, pp->pr_wchan, pp->pr_nitems); 1180 1181 /* 1182 * Call the back-end page allocator for more memory. 1183 * Release the pool lock, as the back-end page allocator 1184 * may block. 1185 */ 1186 error = pool_grow(pp, flags); 1187 if (error != 0) { 1188 /* 1189 * pool_grow aborts when another thread 1190 * is allocating a new page. Retry if it 1191 * waited for it. 1192 */ 1193 if (error == ERESTART) 1194 goto startover; 1195 1196 /* 1197 * We were unable to allocate a page or item 1198 * header, but we released the lock during 1199 * allocation, so perhaps items were freed 1200 * back to the pool. Check for this case. 1201 */ 1202 if (pp->pr_curpage != NULL) 1203 goto startover; 1204 1205 pp->pr_nfail++; 1206 mutex_exit(&pp->pr_lock); 1207 KASSERT((flags & (PR_NOWAIT|PR_LIMITFAIL)) != 0); 1208 return NULL; 1209 } 1210 1211 /* Start the allocation process over. */ 1212 goto startover; 1213 } 1214 if (pp->pr_roflags & PR_USEBMAP) { 1215 KASSERTMSG((ph->ph_nmissing < pp->pr_itemsperpage), 1216 "%s: [%s] pool page empty", __func__, pp->pr_wchan); 1217 v = pr_item_bitmap_get(pp, ph); 1218 } else { 1219 v = pr_item_linkedlist_get(pp, ph); 1220 } 1221 pp->pr_nitems--; 1222 pp->pr_nout++; 1223 if (ph->ph_nmissing == 0) { 1224 KASSERT(pp->pr_nidle > 0); 1225 pp->pr_nidle--; 1226 1227 /* 1228 * This page was previously empty. Move it to the list of 1229 * partially-full pages. This page is already curpage. 1230 */ 1231 LIST_REMOVE(ph, ph_pagelist); 1232 LIST_INSERT_HEAD(&pp->pr_partpages, ph, ph_pagelist); 1233 } 1234 ph->ph_nmissing++; 1235 if (ph->ph_nmissing == pp->pr_itemsperpage) { 1236 KASSERTMSG(((pp->pr_roflags & PR_USEBMAP) || 1237 LIST_EMPTY(&ph->ph_itemlist)), 1238 "%s: [%s] nmissing (%u) inconsistent", __func__, 1239 pp->pr_wchan, ph->ph_nmissing); 1240 /* 1241 * This page is now full. Move it to the full list 1242 * and select a new current page. 1243 */ 1244 LIST_REMOVE(ph, ph_pagelist); 1245 LIST_INSERT_HEAD(&pp->pr_fullpages, ph, ph_pagelist); 1246 pool_update_curpage(pp); 1247 } 1248 1249 pp->pr_nget++; 1250 1251 /* 1252 * If we have a low water mark and we are now below that low 1253 * water mark, add more items to the pool. 1254 */ 1255 if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) { 1256 /* 1257 * XXX: Should we log a warning? Should we set up a timeout 1258 * to try again in a second or so? The latter could break 1259 * a caller's assumptions about interrupt protection, etc. 1260 */ 1261 } 1262 1263 mutex_exit(&pp->pr_lock); 1264 KASSERT((((vaddr_t)v) & (pp->pr_align - 1)) == 0); 1265 FREECHECK_OUT(&pp->pr_freecheck, v); 1266 pool_redzone_fill(pp, v); 1267 pool_get_kmsan(pp, v); 1268 if (flags & PR_ZERO) 1269 memset(v, 0, pp->pr_reqsize); 1270 return v; 1271 } 1272 1273 /* 1274 * Internal version of pool_put(). 
Pool is already locked/entered. 1275 */ 1276 static void 1277 pool_do_put(struct pool *pp, void *v, struct pool_pagelist *pq) 1278 { 1279 struct pool_item_header *ph; 1280 1281 KASSERT(mutex_owned(&pp->pr_lock)); 1282 pool_redzone_check(pp, v); 1283 pool_put_kmsan(pp, v); 1284 FREECHECK_IN(&pp->pr_freecheck, v); 1285 LOCKDEBUG_MEM_CHECK(v, pp->pr_size); 1286 1287 KASSERTMSG((pp->pr_nout > 0), 1288 "%s: [%s] putting with none out", __func__, pp->pr_wchan); 1289 1290 if (__predict_false((ph = pr_find_pagehead(pp, v)) == NULL)) { 1291 panic("%s: [%s] page header missing", __func__, pp->pr_wchan); 1292 } 1293 1294 /* 1295 * Return to item list. 1296 */ 1297 if (pp->pr_roflags & PR_USEBMAP) { 1298 pr_item_bitmap_put(pp, ph, v); 1299 } else { 1300 pr_item_linkedlist_put(pp, ph, v); 1301 } 1302 KDASSERT(ph->ph_nmissing != 0); 1303 ph->ph_nmissing--; 1304 pp->pr_nput++; 1305 pp->pr_nitems++; 1306 pp->pr_nout--; 1307 1308 /* Cancel "pool empty" condition if it exists */ 1309 if (pp->pr_curpage == NULL) 1310 pp->pr_curpage = ph; 1311 1312 if (pp->pr_flags & PR_WANTED) { 1313 pp->pr_flags &= ~PR_WANTED; 1314 cv_broadcast(&pp->pr_cv); 1315 } 1316 1317 /* 1318 * If this page is now empty, do one of two things: 1319 * 1320 * (1) If we have more pages than the page high water mark, 1321 * free the page back to the system. ONLY CONSIDER 1322 * FREEING BACK A PAGE IF WE HAVE MORE THAN OUR MINIMUM PAGE 1323 * CLAIM. 1324 * 1325 * (2) Otherwise, move the page to the empty page list. 1326 * 1327 * Either way, select a new current page (so we use a partially-full 1328 * page if one is available). 1329 */ 1330 if (ph->ph_nmissing == 0) { 1331 pp->pr_nidle++; 1332 if (pp->pr_nitems - pp->pr_itemsperpage >= pp->pr_minitems && 1333 pp->pr_npages > pp->pr_minpages && 1334 (pp->pr_npages > pp->pr_maxpages || 1335 pp->pr_nitems > pp->pr_maxitems)) { 1336 pr_rmpage(pp, ph, pq); 1337 } else { 1338 LIST_REMOVE(ph, ph_pagelist); 1339 LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist); 1340 1341 /* 1342 * Update the timestamp on the page. A page must 1343 * be idle for some period of time before it can 1344 * be reclaimed by the pagedaemon. This minimizes 1345 * ping-pong'ing for memory. 1346 * 1347 * note for 64-bit time_t: truncating to 32-bit is not 1348 * a problem for our usage. 1349 */ 1350 ph->ph_time = time_uptime; 1351 } 1352 pool_update_curpage(pp); 1353 } 1354 1355 /* 1356 * If the page was previously completely full, move it to the 1357 * partially-full list and make it the current page. The next 1358 * allocation will get the item from this page, instead of 1359 * further fragmenting the pool. 1360 */ 1361 else if (ph->ph_nmissing == (pp->pr_itemsperpage - 1)) { 1362 LIST_REMOVE(ph, ph_pagelist); 1363 LIST_INSERT_HEAD(&pp->pr_partpages, ph, ph_pagelist); 1364 pp->pr_curpage = ph; 1365 } 1366 } 1367 1368 void 1369 pool_put(struct pool *pp, void *v) 1370 { 1371 struct pool_pagelist pq; 1372 1373 LIST_INIT(&pq); 1374 1375 mutex_enter(&pp->pr_lock); 1376 if (!pool_put_quarantine(pp, v, &pq)) { 1377 pool_do_put(pp, v, &pq); 1378 } 1379 mutex_exit(&pp->pr_lock); 1380 1381 pr_pagelist_free(pp, &pq); 1382 } 1383 1384 /* 1385 * pool_grow: grow a pool by a page. 1386 * 1387 * => called with pool locked. 1388 * => unlock and relock the pool. 1389 * => return with pool locked. 
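 * => returns 0 on success; ERESTART if it slept or yielded and the
 *    caller should re-check the pool; EWOULDBLOCK if a PR_NOWAIT caller
 *    had to give up because another grow was in progress; ENOMEM if no
 *    page or page header could be allocated.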
1390 */ 1391 1392 static int 1393 pool_grow(struct pool *pp, int flags) 1394 { 1395 struct pool_item_header *ph; 1396 char *storage; 1397 1398 /* 1399 * If there's a pool_grow in progress, wait for it to complete 1400 * and try again from the top. 1401 */ 1402 if (pp->pr_flags & PR_GROWING) { 1403 if (flags & PR_WAITOK) { 1404 do { 1405 cv_wait(&pp->pr_cv, &pp->pr_lock); 1406 } while (pp->pr_flags & PR_GROWING); 1407 return ERESTART; 1408 } else { 1409 if (pp->pr_flags & PR_GROWINGNOWAIT) { 1410 /* 1411 * This needs an unlock/relock dance so 1412 * that the other caller has a chance to 1413 * run and actually do the thing. Note 1414 * that this is effectively a busy-wait. 1415 */ 1416 mutex_exit(&pp->pr_lock); 1417 mutex_enter(&pp->pr_lock); 1418 return ERESTART; 1419 } 1420 return EWOULDBLOCK; 1421 } 1422 } 1423 pp->pr_flags |= PR_GROWING; 1424 if (flags & PR_WAITOK) 1425 mutex_exit(&pp->pr_lock); 1426 else 1427 pp->pr_flags |= PR_GROWINGNOWAIT; 1428 1429 storage = pool_allocator_alloc(pp, flags); 1430 if (__predict_false(storage == NULL)) 1431 goto out; 1432 1433 ph = pool_alloc_item_header(pp, storage, flags); 1434 if (__predict_false(ph == NULL)) { 1435 pool_allocator_free(pp, storage); 1436 goto out; 1437 } 1438 1439 if (flags & PR_WAITOK) 1440 mutex_enter(&pp->pr_lock); 1441 pool_prime_page(pp, storage, ph); 1442 pp->pr_npagealloc++; 1443 KASSERT(pp->pr_flags & PR_GROWING); 1444 pp->pr_flags &= ~(PR_GROWING|PR_GROWINGNOWAIT); 1445 /* 1446 * If anyone was waiting for pool_grow, notify them that we 1447 * may have just done it. 1448 */ 1449 cv_broadcast(&pp->pr_cv); 1450 return 0; 1451 out: 1452 if (flags & PR_WAITOK) 1453 mutex_enter(&pp->pr_lock); 1454 KASSERT(pp->pr_flags & PR_GROWING); 1455 pp->pr_flags &= ~(PR_GROWING|PR_GROWINGNOWAIT); 1456 return ENOMEM; 1457 } 1458 1459 void 1460 pool_prime(struct pool *pp, int n) 1461 { 1462 1463 mutex_enter(&pp->pr_lock); 1464 pp->pr_minpages = roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage; 1465 if (pp->pr_maxpages <= pp->pr_minpages) 1466 pp->pr_maxpages = pp->pr_minpages + 1; /* XXX */ 1467 while (pp->pr_npages < pp->pr_minpages) 1468 (void) pool_grow(pp, PR_WAITOK); 1469 mutex_exit(&pp->pr_lock); 1470 } 1471 1472 /* 1473 * Add a page worth of items to the pool. 1474 * 1475 * Note, we must be called with the pool descriptor LOCKED. 1476 */ 1477 static void 1478 pool_prime_page(struct pool *pp, void *storage, struct pool_item_header *ph) 1479 { 1480 const unsigned int align = pp->pr_align; 1481 struct pool_item *pi; 1482 void *cp = storage; 1483 int n; 1484 1485 KASSERT(mutex_owned(&pp->pr_lock)); 1486 KASSERTMSG(((pp->pr_roflags & PR_NOALIGN) || 1487 (((uintptr_t)cp & (pp->pr_alloc->pa_pagesz - 1)) == 0)), 1488 "%s: [%s] unaligned page: %p", __func__, pp->pr_wchan, cp); 1489 1490 /* 1491 * Insert page header. 1492 */ 1493 LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist); 1494 LIST_INIT(&ph->ph_itemlist); 1495 ph->ph_page = storage; 1496 ph->ph_nmissing = 0; 1497 ph->ph_time = time_uptime; 1498 if (pp->pr_roflags & PR_PHINPAGE) 1499 ph->ph_poolid = pp->pr_poolid; 1500 else 1501 SPLAY_INSERT(phtree, &pp->pr_phtree, ph); 1502 1503 pp->pr_nidle++; 1504 1505 /* 1506 * The item space starts after the on-page header, if any. 1507 */ 1508 ph->ph_off = pp->pr_itemoffset; 1509 1510 /* 1511 * Color this page. 
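	 * Successive pages start their item area at offsets advancing by
	 * the alignment and wrapping at pr_maxcolor, so that the same item
	 * index in different pages does not always land on the same cache
	 * lines.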
1512 */ 1513 ph->ph_off += pp->pr_curcolor; 1514 cp = (char *)cp + ph->ph_off; 1515 if ((pp->pr_curcolor += align) > pp->pr_maxcolor) 1516 pp->pr_curcolor = 0; 1517 1518 KASSERT((((vaddr_t)cp) & (align - 1)) == 0); 1519 1520 /* 1521 * Insert remaining chunks on the bucket list. 1522 */ 1523 n = pp->pr_itemsperpage; 1524 pp->pr_nitems += n; 1525 1526 if (pp->pr_roflags & PR_USEBMAP) { 1527 pr_item_bitmap_init(pp, ph); 1528 } else { 1529 while (n--) { 1530 pi = (struct pool_item *)cp; 1531 1532 KASSERT((((vaddr_t)pi) & (align - 1)) == 0); 1533 1534 /* Insert on page list */ 1535 LIST_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list); 1536 #ifdef POOL_CHECK_MAGIC 1537 pi->pi_magic = PI_MAGIC; 1538 #endif 1539 cp = (char *)cp + pp->pr_size; 1540 1541 KASSERT((((vaddr_t)cp) & (align - 1)) == 0); 1542 } 1543 } 1544 1545 /* 1546 * If the pool was depleted, point at the new page. 1547 */ 1548 if (pp->pr_curpage == NULL) 1549 pp->pr_curpage = ph; 1550 1551 if (++pp->pr_npages > pp->pr_hiwat) 1552 pp->pr_hiwat = pp->pr_npages; 1553 } 1554 1555 /* 1556 * Used by pool_get() when nitems drops below the low water mark. This 1557 * is used to catch up pr_nitems with the low water mark. 1558 * 1559 * Note 1, we never wait for memory here, we let the caller decide what to do. 1560 * 1561 * Note 2, we must be called with the pool already locked, and we return 1562 * with it locked. 1563 */ 1564 static int 1565 pool_catchup(struct pool *pp) 1566 { 1567 int error = 0; 1568 1569 while (POOL_NEEDS_CATCHUP(pp)) { 1570 error = pool_grow(pp, PR_NOWAIT); 1571 if (error) { 1572 if (error == ERESTART) 1573 continue; 1574 break; 1575 } 1576 } 1577 return error; 1578 } 1579 1580 static void 1581 pool_update_curpage(struct pool *pp) 1582 { 1583 1584 pp->pr_curpage = LIST_FIRST(&pp->pr_partpages); 1585 if (pp->pr_curpage == NULL) { 1586 pp->pr_curpage = LIST_FIRST(&pp->pr_emptypages); 1587 } 1588 KASSERTMSG((pp->pr_curpage == NULL) == (pp->pr_nitems == 0), 1589 "pp=%p curpage=%p nitems=%u", pp, pp->pr_curpage, pp->pr_nitems); 1590 } 1591 1592 void 1593 pool_setlowat(struct pool *pp, int n) 1594 { 1595 1596 mutex_enter(&pp->pr_lock); 1597 pp->pr_minitems = n; 1598 1599 /* Make sure we're caught up with the newly-set low water mark. */ 1600 if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) { 1601 /* 1602 * XXX: Should we log a warning? Should we set up a timeout 1603 * to try again in a second or so? The latter could break 1604 * a caller's assumptions about interrupt protection, etc. 1605 */ 1606 } 1607 1608 mutex_exit(&pp->pr_lock); 1609 } 1610 1611 void 1612 pool_sethiwat(struct pool *pp, int n) 1613 { 1614 1615 mutex_enter(&pp->pr_lock); 1616 1617 pp->pr_maxitems = n; 1618 1619 mutex_exit(&pp->pr_lock); 1620 } 1621 1622 void 1623 pool_sethardlimit(struct pool *pp, int n, const char *warnmess, int ratecap) 1624 { 1625 1626 mutex_enter(&pp->pr_lock); 1627 1628 pp->pr_hardlimit = n; 1629 pp->pr_hardlimit_warning = warnmess; 1630 pp->pr_hardlimit_ratecap.tv_sec = ratecap; 1631 pp->pr_hardlimit_warning_last.tv_sec = 0; 1632 pp->pr_hardlimit_warning_last.tv_usec = 0; 1633 1634 pp->pr_maxpages = roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage; 1635 1636 mutex_exit(&pp->pr_lock); 1637 } 1638 1639 unsigned int 1640 pool_nget(struct pool *pp) 1641 { 1642 1643 return pp->pr_nget; 1644 } 1645 1646 unsigned int 1647 pool_nput(struct pool *pp) 1648 { 1649 1650 return pp->pr_nput; 1651 } 1652 1653 /* 1654 * Release all complete pages that have not been used recently. 1655 * 1656 * Must not be called from interrupt context. 
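 * Only empty pages that have been idle for at least pool_inactive_time
 * seconds are freed, and never so many that the pool drops below its
 * minimum page or item reserve.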
1657 */ 1658 int 1659 pool_reclaim(struct pool *pp) 1660 { 1661 struct pool_item_header *ph, *phnext; 1662 struct pool_pagelist pq; 1663 struct pool_cache *pc; 1664 uint32_t curtime; 1665 bool klock; 1666 int rv; 1667 1668 KASSERT(!cpu_intr_p()); 1669 KASSERT(!cpu_softintr_p()); 1670 1671 if (pp->pr_drain_hook != NULL) { 1672 /* 1673 * The drain hook must be called with the pool unlocked. 1674 */ 1675 (*pp->pr_drain_hook)(pp->pr_drain_hook_arg, PR_NOWAIT); 1676 } 1677 1678 /* 1679 * XXXSMP Because we do not want to cause non-MPSAFE code 1680 * to block. 1681 */ 1682 if (pp->pr_ipl == IPL_SOFTNET || pp->pr_ipl == IPL_SOFTCLOCK || 1683 pp->pr_ipl == IPL_SOFTSERIAL) { 1684 KERNEL_LOCK(1, NULL); 1685 klock = true; 1686 } else 1687 klock = false; 1688 1689 /* Reclaim items from the pool's cache (if any). */ 1690 if ((pc = atomic_load_consume(&pp->pr_cache)) != NULL) 1691 pool_cache_invalidate(pc); 1692 1693 if (mutex_tryenter(&pp->pr_lock) == 0) { 1694 if (klock) { 1695 KERNEL_UNLOCK_ONE(NULL); 1696 } 1697 return 0; 1698 } 1699 1700 LIST_INIT(&pq); 1701 1702 curtime = time_uptime; 1703 1704 for (ph = LIST_FIRST(&pp->pr_emptypages); ph != NULL; ph = phnext) { 1705 phnext = LIST_NEXT(ph, ph_pagelist); 1706 1707 /* Check our minimum page claim */ 1708 if (pp->pr_npages <= pp->pr_minpages) 1709 break; 1710 1711 KASSERT(ph->ph_nmissing == 0); 1712 if (curtime - ph->ph_time < pool_inactive_time) 1713 continue; 1714 1715 /* 1716 * If freeing this page would put us below the minimum free items 1717 * or the minimum pages, stop now. 1718 */ 1719 if (pp->pr_nitems - pp->pr_itemsperpage < pp->pr_minitems || 1720 pp->pr_npages - 1 < pp->pr_minpages) 1721 break; 1722 1723 pr_rmpage(pp, ph, &pq); 1724 } 1725 1726 mutex_exit(&pp->pr_lock); 1727 1728 if (LIST_EMPTY(&pq)) 1729 rv = 0; 1730 else { 1731 pr_pagelist_free(pp, &pq); 1732 rv = 1; 1733 } 1734 1735 if (klock) { 1736 KERNEL_UNLOCK_ONE(NULL); 1737 } 1738 1739 return rv; 1740 } 1741 1742 /* 1743 * Drain pools, one at a time. The drained pool is returned within ppp. 1744 * 1745 * Note, must never be called from interrupt context. 1746 */ 1747 bool 1748 pool_drain(struct pool **ppp) 1749 { 1750 bool reclaimed; 1751 struct pool *pp; 1752 1753 KASSERT(!TAILQ_EMPTY(&pool_head)); 1754 1755 pp = NULL; 1756 1757 /* Find next pool to drain, and add a reference. */ 1758 mutex_enter(&pool_head_lock); 1759 do { 1760 if (drainpp == NULL) { 1761 drainpp = TAILQ_FIRST(&pool_head); 1762 } 1763 if (drainpp != NULL) { 1764 pp = drainpp; 1765 drainpp = TAILQ_NEXT(pp, pr_poollist); 1766 } 1767 /* 1768 * Skip completely idle pools. We depend on at least 1769 * one pool in the system being active. 1770 */ 1771 } while (pp == NULL || pp->pr_npages == 0); 1772 pp->pr_refcnt++; 1773 mutex_exit(&pool_head_lock); 1774 1775 /* Drain the cache (if any) and pool.. */ 1776 reclaimed = pool_reclaim(pp); 1777 1778 /* Finally, unlock the pool. */ 1779 mutex_enter(&pool_head_lock); 1780 pp->pr_refcnt--; 1781 cv_broadcast(&pool_busy); 1782 mutex_exit(&pool_head_lock); 1783 1784 if (ppp != NULL) 1785 *ppp = pp; 1786 1787 return reclaimed; 1788 } 1789 1790 /* 1791 * Calculate the total number of pages consumed by pools. 
1792 */ 1793 int 1794 pool_totalpages(void) 1795 { 1796 1797 mutex_enter(&pool_head_lock); 1798 int pages = pool_totalpages_locked(); 1799 mutex_exit(&pool_head_lock); 1800 1801 return pages; 1802 } 1803 1804 int 1805 pool_totalpages_locked(void) 1806 { 1807 struct pool *pp; 1808 uint64_t total = 0; 1809 1810 TAILQ_FOREACH(pp, &pool_head, pr_poollist) { 1811 uint64_t bytes = 1812 (uint64_t)pp->pr_npages * pp->pr_alloc->pa_pagesz; 1813 1814 if ((pp->pr_roflags & PR_RECURSIVE) != 0) 1815 bytes -= ((uint64_t)pp->pr_nout * pp->pr_size); 1816 total += bytes; 1817 } 1818 1819 return atop(total); 1820 } 1821 1822 /* 1823 * Diagnostic helpers. 1824 */ 1825 1826 void 1827 pool_printall(const char *modif, void (*pr)(const char *, ...)) 1828 { 1829 struct pool *pp; 1830 1831 TAILQ_FOREACH(pp, &pool_head, pr_poollist) { 1832 pool_printit(pp, modif, pr); 1833 } 1834 } 1835 1836 void 1837 pool_printit(struct pool *pp, const char *modif, void (*pr)(const char *, ...)) 1838 { 1839 1840 if (pp == NULL) { 1841 (*pr)("Must specify a pool to print.\n"); 1842 return; 1843 } 1844 1845 pool_print1(pp, modif, pr); 1846 } 1847 1848 static void 1849 pool_print_pagelist(struct pool *pp, struct pool_pagelist *pl, 1850 void (*pr)(const char *, ...)) 1851 { 1852 struct pool_item_header *ph; 1853 1854 LIST_FOREACH(ph, pl, ph_pagelist) { 1855 (*pr)("\t\tpage %p, nmissing %d, time %" PRIu32 "\n", 1856 ph->ph_page, ph->ph_nmissing, ph->ph_time); 1857 #ifdef POOL_CHECK_MAGIC 1858 struct pool_item *pi; 1859 if (!(pp->pr_roflags & PR_USEBMAP)) { 1860 LIST_FOREACH(pi, &ph->ph_itemlist, pi_list) { 1861 if (pi->pi_magic != PI_MAGIC) { 1862 (*pr)("\t\t\titem %p, magic 0x%x\n", 1863 pi, pi->pi_magic); 1864 } 1865 } 1866 } 1867 #endif 1868 } 1869 } 1870 1871 static void 1872 pool_print1(struct pool *pp, const char *modif, void (*pr)(const char *, ...)) 1873 { 1874 struct pool_item_header *ph; 1875 pool_cache_t pc; 1876 pcg_t *pcg; 1877 pool_cache_cpu_t *cc; 1878 uint64_t cpuhit, cpumiss, pchit, pcmiss; 1879 uint32_t nfull; 1880 int i; 1881 bool print_log = false, print_pagelist = false, print_cache = false; 1882 bool print_short = false, skip_empty = false; 1883 char c; 1884 1885 while ((c = *modif++) != '\0') { 1886 if (c == 'l') 1887 print_log = true; 1888 if (c == 'p') 1889 print_pagelist = true; 1890 if (c == 'c') 1891 print_cache = true; 1892 if (c == 's') 1893 print_short = true; 1894 if (c == 'S') 1895 skip_empty = true; 1896 } 1897 1898 if (skip_empty && pp->pr_nget == 0) 1899 return; 1900 1901 if ((pc = atomic_load_consume(&pp->pr_cache)) != NULL) { 1902 (*pr)("POOLCACHE"); 1903 } else { 1904 (*pr)("POOL"); 1905 } 1906 1907 /* Single line output. 
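	 * Fields: wchan:pool:size:align:npages:nitems:nout:nget:nput:
	 * npagealloc:npagefree:nidle:npagealloc*pagesz.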
*/ 1908 if (print_short) { 1909 (*pr)(" %s:%p:%u:%u:%u:%u:%u:%u:%u:%u:%u:%u:%zu\n", 1910 pp->pr_wchan, pp, pp->pr_size, pp->pr_align, pp->pr_npages, 1911 pp->pr_nitems, pp->pr_nout, pp->pr_nget, pp->pr_nput, 1912 pp->pr_npagealloc, pp->pr_npagefree, pp->pr_nidle, 1913 (size_t)pp->pr_npagealloc * pp->pr_alloc->pa_pagesz); 1914 return; 1915 } 1916 1917 (*pr)(" %s: itemsize %u, totalmem %zu align %u, ioff %u, roflags 0x%08x\n", 1918 pp->pr_wchan, pp->pr_size, 1919 (size_t)pp->pr_npagealloc * pp->pr_alloc->pa_pagesz, 1920 pp->pr_align, pp->pr_itemoffset, pp->pr_roflags); 1921 (*pr)("\tpool %p, alloc %p\n", pp, pp->pr_alloc); 1922 (*pr)("\tminitems %u, minpages %u, maxpages %u, npages %u\n", 1923 pp->pr_minitems, pp->pr_minpages, pp->pr_maxpages, pp->pr_npages); 1924 (*pr)("\titemsperpage %u, nitems %u, nout %u, hardlimit %u\n", 1925 pp->pr_itemsperpage, pp->pr_nitems, pp->pr_nout, pp->pr_hardlimit); 1926 1927 (*pr)("\tnget %lu, nfail %lu, nput %lu\n", 1928 pp->pr_nget, pp->pr_nfail, pp->pr_nput); 1929 (*pr)("\tnpagealloc %lu, npagefree %lu, hiwat %u, nidle %lu\n", 1930 pp->pr_npagealloc, pp->pr_npagefree, pp->pr_hiwat, pp->pr_nidle); 1931 1932 if (!print_pagelist) 1933 goto skip_pagelist; 1934 1935 if ((ph = LIST_FIRST(&pp->pr_emptypages)) != NULL) 1936 (*pr)("\n\tempty page list:\n"); 1937 pool_print_pagelist(pp, &pp->pr_emptypages, pr); 1938 if ((ph = LIST_FIRST(&pp->pr_fullpages)) != NULL) 1939 (*pr)("\n\tfull page list:\n"); 1940 pool_print_pagelist(pp, &pp->pr_fullpages, pr); 1941 if ((ph = LIST_FIRST(&pp->pr_partpages)) != NULL) 1942 (*pr)("\n\tpartial-page list:\n"); 1943 pool_print_pagelist(pp, &pp->pr_partpages, pr); 1944 1945 if (pp->pr_curpage == NULL) 1946 (*pr)("\tno current page\n"); 1947 else 1948 (*pr)("\tcurpage %p\n", pp->pr_curpage->ph_page); 1949 1950 skip_pagelist: 1951 if (print_log) 1952 goto skip_log; 1953 1954 (*pr)("\n"); 1955 1956 skip_log: 1957 1958 #define PR_GROUPLIST(pcg) \ 1959 (*pr)("\t\tgroup %p: avail %d\n", pcg, pcg->pcg_avail); \ 1960 for (i = 0; i < pcg->pcg_size; i++) { \ 1961 if (pcg->pcg_objects[i].pcgo_pa != \ 1962 POOL_PADDR_INVALID) { \ 1963 (*pr)("\t\t\t%p, 0x%llx\n", \ 1964 pcg->pcg_objects[i].pcgo_va, \ 1965 (unsigned long long) \ 1966 pcg->pcg_objects[i].pcgo_pa); \ 1967 } else { \ 1968 (*pr)("\t\t\t%p\n", \ 1969 pcg->pcg_objects[i].pcgo_va); \ 1970 } \ 1971 } 1972 1973 if (pc != NULL) { 1974 cpuhit = 0; 1975 cpumiss = 0; 1976 pcmiss = 0; 1977 nfull = 0; 1978 for (i = 0; i < __arraycount(pc->pc_cpus); i++) { 1979 if ((cc = pc->pc_cpus[i]) == NULL) 1980 continue; 1981 cpuhit += cc->cc_hits; 1982 cpumiss += cc->cc_misses; 1983 pcmiss += cc->cc_pcmisses; 1984 nfull += cc->cc_nfull; 1985 } 1986 pchit = cpumiss - pcmiss; 1987 (*pr)("\tcpu layer hits %llu misses %llu\n", cpuhit, cpumiss); 1988 (*pr)("\tcache layer hits %llu misses %llu\n", pchit, pcmiss); 1989 (*pr)("\tcache layer full groups %u\n", nfull); 1990 if (print_cache) { 1991 (*pr)("\tfull cache groups:\n"); 1992 for (pcg = pc->pc_fullgroups; pcg != NULL; 1993 pcg = pcg->pcg_next) { 1994 PR_GROUPLIST(pcg); 1995 } 1996 } 1997 } 1998 #undef PR_GROUPLIST 1999 } 2000 2001 static int 2002 pool_chk_page(struct pool *pp, const char *label, struct pool_item_header *ph) 2003 { 2004 struct pool_item *pi; 2005 void *page; 2006 int n; 2007 2008 if ((pp->pr_roflags & PR_NOALIGN) == 0) { 2009 page = POOL_OBJ_TO_PAGE(pp, ph); 2010 if (page != ph->ph_page && 2011 (pp->pr_roflags & PR_PHINPAGE) != 0) { 2012 if (label != NULL) 2013 printf("%s: ", label); 2014 printf("pool(%p:%s): page inconsistency: page %p;" 
2015 " at page head addr %p (p %p)\n", pp, 2016 pp->pr_wchan, ph->ph_page, 2017 ph, page); 2018 return 1; 2019 } 2020 } 2021 2022 if ((pp->pr_roflags & PR_USEBMAP) != 0) 2023 return 0; 2024 2025 for (pi = LIST_FIRST(&ph->ph_itemlist), n = 0; 2026 pi != NULL; 2027 pi = LIST_NEXT(pi,pi_list), n++) { 2028 2029 #ifdef POOL_CHECK_MAGIC 2030 if (pi->pi_magic != PI_MAGIC) { 2031 if (label != NULL) 2032 printf("%s: ", label); 2033 printf("pool(%s): free list modified: magic=%x;" 2034 " page %p; item ordinal %d; addr %p\n", 2035 pp->pr_wchan, pi->pi_magic, ph->ph_page, 2036 n, pi); 2037 panic("pool"); 2038 } 2039 #endif 2040 if ((pp->pr_roflags & PR_NOALIGN) != 0) { 2041 continue; 2042 } 2043 page = POOL_OBJ_TO_PAGE(pp, pi); 2044 if (page == ph->ph_page) 2045 continue; 2046 2047 if (label != NULL) 2048 printf("%s: ", label); 2049 printf("pool(%p:%s): page inconsistency: page %p;" 2050 " item ordinal %d; addr %p (p %p)\n", pp, 2051 pp->pr_wchan, ph->ph_page, 2052 n, pi, page); 2053 return 1; 2054 } 2055 return 0; 2056 } 2057 2058 2059 int 2060 pool_chk(struct pool *pp, const char *label) 2061 { 2062 struct pool_item_header *ph; 2063 int r = 0; 2064 2065 mutex_enter(&pp->pr_lock); 2066 LIST_FOREACH(ph, &pp->pr_emptypages, ph_pagelist) { 2067 r = pool_chk_page(pp, label, ph); 2068 if (r) { 2069 goto out; 2070 } 2071 } 2072 LIST_FOREACH(ph, &pp->pr_fullpages, ph_pagelist) { 2073 r = pool_chk_page(pp, label, ph); 2074 if (r) { 2075 goto out; 2076 } 2077 } 2078 LIST_FOREACH(ph, &pp->pr_partpages, ph_pagelist) { 2079 r = pool_chk_page(pp, label, ph); 2080 if (r) { 2081 goto out; 2082 } 2083 } 2084 2085 out: 2086 mutex_exit(&pp->pr_lock); 2087 return r; 2088 } 2089 2090 /* 2091 * pool_cache_init: 2092 * 2093 * Initialize a pool cache. 2094 */ 2095 pool_cache_t 2096 pool_cache_init(size_t size, u_int align, u_int align_offset, u_int flags, 2097 const char *wchan, struct pool_allocator *palloc, int ipl, 2098 int (*ctor)(void *, void *, int), void (*dtor)(void *, void *), void *arg) 2099 { 2100 pool_cache_t pc; 2101 2102 pc = pool_get(&cache_pool, PR_WAITOK); 2103 if (pc == NULL) 2104 return NULL; 2105 2106 pool_cache_bootstrap(pc, size, align, align_offset, flags, wchan, 2107 palloc, ipl, ctor, dtor, arg); 2108 2109 return pc; 2110 } 2111 2112 /* 2113 * pool_cache_bootstrap: 2114 * 2115 * Kernel-private version of pool_cache_init(). The caller 2116 * provides initial storage. 2117 */ 2118 void 2119 pool_cache_bootstrap(pool_cache_t pc, size_t size, u_int align, 2120 u_int align_offset, u_int flags, const char *wchan, 2121 struct pool_allocator *palloc, int ipl, 2122 int (*ctor)(void *, void *, int), void (*dtor)(void *, void *), 2123 void *arg) 2124 { 2125 CPU_INFO_ITERATOR cii; 2126 pool_cache_t pc1; 2127 struct cpu_info *ci; 2128 struct pool *pp; 2129 unsigned int ppflags; 2130 2131 pp = &pc->pc_pool; 2132 ppflags = flags; 2133 if (ctor == NULL) { 2134 ctor = NO_CTOR; 2135 } 2136 if (dtor == NULL) { 2137 dtor = NO_DTOR; 2138 } else { 2139 /* 2140 * If we have a destructor, then the pool layer does not 2141 * need to worry about PR_PSERIALIZE. 
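		 * (The flag is still recorded in pc_roflags below, and
		 * pool_cache_pre_destruct() issues the barrier before any
		 * batch of objects is destructed, so the cache layer keeps
		 * the passive-serialization guarantee on the pool's behalf.)
		 *
		 * Purely illustrative sketch of a cache that relies on this;
		 * the "frob" names are hypothetical, not part of this file:
		 *
		 *	fc = pool_cache_init(sizeof(struct frob), 0, 0,
		 *	    PR_PSERIALIZE, "frobpl", NULL, IPL_NONE,
		 *	    frob_ctor, frob_dtor, NULL);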
2142 */ 2143 ppflags &= ~PR_PSERIALIZE; 2144 } 2145 2146 pool_init(pp, size, align, align_offset, ppflags, wchan, palloc, ipl); 2147 2148 pc->pc_fullgroups = NULL; 2149 pc->pc_partgroups = NULL; 2150 pc->pc_ctor = ctor; 2151 pc->pc_dtor = dtor; 2152 pc->pc_arg = arg; 2153 pc->pc_refcnt = 0; 2154 pc->pc_roflags = flags; 2155 pc->pc_freecheck = NULL; 2156 2157 if ((flags & PR_LARGECACHE) != 0) { 2158 pc->pc_pcgsize = PCG_NOBJECTS_LARGE; 2159 pc->pc_pcgpool = &pcg_large_pool; 2160 pc->pc_pcgcache = &pcg_large_cache; 2161 } else { 2162 pc->pc_pcgsize = PCG_NOBJECTS_NORMAL; 2163 pc->pc_pcgpool = &pcg_normal_pool; 2164 pc->pc_pcgcache = &pcg_normal_cache; 2165 } 2166 2167 /* Allocate per-CPU caches. */ 2168 memset(pc->pc_cpus, 0, sizeof(pc->pc_cpus)); 2169 pc->pc_ncpu = 0; 2170 if (ncpu < 2) { 2171 /* XXX For sparc: boot CPU is not attached yet. */ 2172 pool_cache_cpu_init1(curcpu(), pc); 2173 } else { 2174 for (CPU_INFO_FOREACH(cii, ci)) { 2175 pool_cache_cpu_init1(ci, pc); 2176 } 2177 } 2178 2179 /* Add to list of all pools. */ 2180 if (__predict_true(!cold)) 2181 mutex_enter(&pool_head_lock); 2182 TAILQ_FOREACH(pc1, &pool_cache_head, pc_cachelist) { 2183 if (strcmp(pc1->pc_pool.pr_wchan, pc->pc_pool.pr_wchan) > 0) 2184 break; 2185 } 2186 if (pc1 == NULL) 2187 TAILQ_INSERT_TAIL(&pool_cache_head, pc, pc_cachelist); 2188 else 2189 TAILQ_INSERT_BEFORE(pc1, pc, pc_cachelist); 2190 if (__predict_true(!cold)) 2191 mutex_exit(&pool_head_lock); 2192 2193 atomic_store_release(&pp->pr_cache, pc); 2194 } 2195 2196 /* 2197 * pool_cache_destroy: 2198 * 2199 * Destroy a pool cache. 2200 */ 2201 void 2202 pool_cache_destroy(pool_cache_t pc) 2203 { 2204 2205 pool_cache_bootstrap_destroy(pc); 2206 pool_put(&cache_pool, pc); 2207 } 2208 2209 /* 2210 * pool_cache_bootstrap_destroy: 2211 * 2212 * Destroy a pool cache. 2213 */ 2214 void 2215 pool_cache_bootstrap_destroy(pool_cache_t pc) 2216 { 2217 struct pool *pp = &pc->pc_pool; 2218 u_int i; 2219 2220 /* Remove it from the global list. */ 2221 mutex_enter(&pool_head_lock); 2222 while (pc->pc_refcnt != 0) 2223 cv_wait(&pool_busy, &pool_head_lock); 2224 TAILQ_REMOVE(&pool_cache_head, pc, pc_cachelist); 2225 mutex_exit(&pool_head_lock); 2226 2227 /* First, invalidate the entire cache. */ 2228 pool_cache_invalidate(pc); 2229 2230 /* Disassociate it from the pool. */ 2231 mutex_enter(&pp->pr_lock); 2232 atomic_store_relaxed(&pp->pr_cache, NULL); 2233 mutex_exit(&pp->pr_lock); 2234 2235 /* Destroy per-CPU data */ 2236 for (i = 0; i < __arraycount(pc->pc_cpus); i++) 2237 pool_cache_invalidate_cpu(pc, i); 2238 2239 /* Finally, destroy it. */ 2240 pool_destroy(pp); 2241 } 2242 2243 /* 2244 * pool_cache_cpu_init1: 2245 * 2246 * Called for each pool_cache whenever a new CPU is attached. 2247 */ 2248 static void 2249 pool_cache_cpu_init1(struct cpu_info *ci, pool_cache_t pc) 2250 { 2251 pool_cache_cpu_t *cc; 2252 int index; 2253 2254 index = ci->ci_index; 2255 2256 KASSERT(index < __arraycount(pc->pc_cpus)); 2257 2258 if ((cc = pc->pc_cpus[index]) != NULL) { 2259 return; 2260 } 2261 2262 /* 2263 * The first CPU is 'free'. This needs to be the case for 2264 * bootstrap - we may not be able to allocate yet. 
2265 */ 2266 if (pc->pc_ncpu == 0) { 2267 cc = &pc->pc_cpu0; 2268 pc->pc_ncpu = 1; 2269 } else { 2270 pc->pc_ncpu++; 2271 cc = pool_get(&cache_cpu_pool, PR_WAITOK); 2272 } 2273 2274 cc->cc_current = __UNCONST(&pcg_dummy); 2275 cc->cc_previous = __UNCONST(&pcg_dummy); 2276 cc->cc_pcgcache = pc->pc_pcgcache; 2277 cc->cc_hits = 0; 2278 cc->cc_misses = 0; 2279 cc->cc_pcmisses = 0; 2280 cc->cc_contended = 0; 2281 cc->cc_nfull = 0; 2282 cc->cc_npart = 0; 2283 2284 pc->pc_cpus[index] = cc; 2285 } 2286 2287 /* 2288 * pool_cache_cpu_init: 2289 * 2290 * Called whenever a new CPU is attached. 2291 */ 2292 void 2293 pool_cache_cpu_init(struct cpu_info *ci) 2294 { 2295 pool_cache_t pc; 2296 2297 mutex_enter(&pool_head_lock); 2298 TAILQ_FOREACH(pc, &pool_cache_head, pc_cachelist) { 2299 pc->pc_refcnt++; 2300 mutex_exit(&pool_head_lock); 2301 2302 pool_cache_cpu_init1(ci, pc); 2303 2304 mutex_enter(&pool_head_lock); 2305 pc->pc_refcnt--; 2306 cv_broadcast(&pool_busy); 2307 } 2308 mutex_exit(&pool_head_lock); 2309 } 2310 2311 /* 2312 * pool_cache_reclaim: 2313 * 2314 * Reclaim memory from a pool cache. 2315 */ 2316 bool 2317 pool_cache_reclaim(pool_cache_t pc) 2318 { 2319 2320 return pool_reclaim(&pc->pc_pool); 2321 } 2322 2323 static inline void 2324 pool_cache_pre_destruct(pool_cache_t pc) 2325 { 2326 /* 2327 * Perform a passive serialization barrier before destructing 2328 * a batch of one or more objects. 2329 */ 2330 if (__predict_false(pc_has_pser(pc))) { 2331 pool_barrier(); 2332 } 2333 } 2334 2335 static void 2336 pool_cache_destruct_object1(pool_cache_t pc, void *object) 2337 { 2338 (*pc->pc_dtor)(pc->pc_arg, object); 2339 pool_put(&pc->pc_pool, object); 2340 } 2341 2342 /* 2343 * pool_cache_destruct_object: 2344 * 2345 * Force destruction of an object and its release back into 2346 * the pool. 2347 */ 2348 void 2349 pool_cache_destruct_object(pool_cache_t pc, void *object) 2350 { 2351 2352 FREECHECK_IN(&pc->pc_freecheck, object); 2353 2354 pool_cache_pre_destruct(pc); 2355 pool_cache_destruct_object1(pc, object); 2356 } 2357 2358 /* 2359 * pool_cache_invalidate_groups: 2360 * 2361 * Invalidate a chain of groups and destruct all objects. Return the 2362 * number of groups that were invalidated. 2363 */ 2364 static int 2365 pool_cache_invalidate_groups(pool_cache_t pc, pcg_t *pcg) 2366 { 2367 void *object; 2368 pcg_t *next; 2369 int i, n; 2370 2371 if (pcg == NULL) { 2372 return 0; 2373 } 2374 2375 pool_cache_pre_destruct(pc); 2376 2377 for (n = 0; pcg != NULL; pcg = next, n++) { 2378 next = pcg->pcg_next; 2379 2380 for (i = 0; i < pcg->pcg_avail; i++) { 2381 object = pcg->pcg_objects[i].pcgo_va; 2382 pool_cache_destruct_object1(pc, object); 2383 } 2384 2385 if (pcg->pcg_size == PCG_NOBJECTS_LARGE) { 2386 pool_put(&pcg_large_pool, pcg); 2387 } else { 2388 KASSERT(pcg->pcg_size == PCG_NOBJECTS_NORMAL); 2389 pool_put(&pcg_normal_pool, pcg); 2390 } 2391 } 2392 return n; 2393 } 2394 2395 /* 2396 * pool_cache_invalidate: 2397 * 2398 * Invalidate a pool cache (destruct and release all of the 2399 * cached objects). Does not reclaim objects from the pool. 2400 * 2401 * Note: For pool caches that provide constructed objects, there 2402 * is an assumption that another level of synchronization is occurring 2403 * between the input to the constructor and the cache invalidation. 2404 * 2405 * Invalidation is a costly process and should not be called from 2406 * interrupt context. 
2407 */ 2408 void 2409 pool_cache_invalidate(pool_cache_t pc) 2410 { 2411 uint64_t where; 2412 pcg_t *pcg; 2413 int n, s; 2414 2415 KASSERT(!cpu_intr_p()); 2416 KASSERT(!cpu_softintr_p()); 2417 2418 if (ncpu < 2 || !mp_online) { 2419 /* 2420 * We might be called early enough in the boot process 2421 * for the CPU data structures to not be fully initialized. 2422 * In this case, transfer the content of the local CPU's 2423 * cache back into global cache as only this CPU is currently 2424 * running. 2425 */ 2426 pool_cache_transfer(pc); 2427 } else { 2428 /* 2429 * Signal all CPUs that they must transfer their local 2430 * cache back to the global pool then wait for the xcall to 2431 * complete. 2432 */ 2433 where = xc_broadcast(0, 2434 __FPTRCAST(xcfunc_t, pool_cache_transfer), pc, NULL); 2435 xc_wait(where); 2436 } 2437 2438 /* Now dequeue and invalidate everything. */ 2439 pcg = pool_pcg_trunc(&pcg_normal_cache); 2440 (void)pool_cache_invalidate_groups(pc, pcg); 2441 2442 pcg = pool_pcg_trunc(&pcg_large_cache); 2443 (void)pool_cache_invalidate_groups(pc, pcg); 2444 2445 pcg = pool_pcg_trunc(&pc->pc_fullgroups); 2446 n = pool_cache_invalidate_groups(pc, pcg); 2447 s = splvm(); 2448 ((pool_cache_cpu_t *)pc->pc_cpus[curcpu()->ci_index])->cc_nfull -= n; 2449 splx(s); 2450 2451 pcg = pool_pcg_trunc(&pc->pc_partgroups); 2452 n = pool_cache_invalidate_groups(pc, pcg); 2453 s = splvm(); 2454 ((pool_cache_cpu_t *)pc->pc_cpus[curcpu()->ci_index])->cc_npart -= n; 2455 splx(s); 2456 } 2457 2458 /* 2459 * pool_cache_invalidate_cpu: 2460 * 2461 * Invalidate all CPU-bound cached objects in pool cache, the CPU being 2462 * identified by its associated index. 2463 * It is caller's responsibility to ensure that no operation is 2464 * taking place on this pool cache while doing this invalidation. 2465 * WARNING: as no inter-CPU locking is enforced, trying to invalidate 2466 * pool cached objects from a CPU different from the one currently running 2467 * may result in an undefined behaviour. 
2468 */ 2469 static void 2470 pool_cache_invalidate_cpu(pool_cache_t pc, u_int index) 2471 { 2472 pool_cache_cpu_t *cc; 2473 pcg_t *pcg; 2474 2475 if ((cc = pc->pc_cpus[index]) == NULL) 2476 return; 2477 2478 if ((pcg = cc->cc_current) != &pcg_dummy) { 2479 pcg->pcg_next = NULL; 2480 pool_cache_invalidate_groups(pc, pcg); 2481 } 2482 if ((pcg = cc->cc_previous) != &pcg_dummy) { 2483 pcg->pcg_next = NULL; 2484 pool_cache_invalidate_groups(pc, pcg); 2485 } 2486 if (cc != &pc->pc_cpu0) 2487 pool_put(&cache_cpu_pool, cc); 2488 2489 } 2490 2491 void 2492 pool_cache_set_drain_hook(pool_cache_t pc, void (*fn)(void *, int), void *arg) 2493 { 2494 2495 pool_set_drain_hook(&pc->pc_pool, fn, arg); 2496 } 2497 2498 void 2499 pool_cache_setlowat(pool_cache_t pc, int n) 2500 { 2501 2502 pool_setlowat(&pc->pc_pool, n); 2503 } 2504 2505 void 2506 pool_cache_sethiwat(pool_cache_t pc, int n) 2507 { 2508 2509 pool_sethiwat(&pc->pc_pool, n); 2510 } 2511 2512 void 2513 pool_cache_sethardlimit(pool_cache_t pc, int n, const char *warnmess, int ratecap) 2514 { 2515 2516 pool_sethardlimit(&pc->pc_pool, n, warnmess, ratecap); 2517 } 2518 2519 void 2520 pool_cache_prime(pool_cache_t pc, int n) 2521 { 2522 2523 pool_prime(&pc->pc_pool, n); 2524 } 2525 2526 unsigned int 2527 pool_cache_nget(pool_cache_t pc) 2528 { 2529 2530 return pool_nget(&pc->pc_pool); 2531 } 2532 2533 unsigned int 2534 pool_cache_nput(pool_cache_t pc) 2535 { 2536 2537 return pool_nput(&pc->pc_pool); 2538 } 2539 2540 /* 2541 * pool_pcg_get: 2542 * 2543 * Get a cache group from the specified list. Return true if 2544 * contention was encountered. Must be called at IPL_VM because 2545 * of spin wait vs. kernel_lock. 2546 */ 2547 static int 2548 pool_pcg_get(pcg_t *volatile *head, pcg_t **pcgp) 2549 { 2550 int count = SPINLOCK_BACKOFF_MIN; 2551 pcg_t *o, *n; 2552 2553 for (o = atomic_load_relaxed(head);; o = n) { 2554 if (__predict_false(o == &pcg_dummy)) { 2555 /* Wait for concurrent get to complete. */ 2556 SPINLOCK_BACKOFF(count); 2557 n = atomic_load_relaxed(head); 2558 continue; 2559 } 2560 if (__predict_false(o == NULL)) { 2561 break; 2562 } 2563 /* Lock out concurrent get/put. */ 2564 n = atomic_cas_ptr(head, o, __UNCONST(&pcg_dummy)); 2565 if (o == n) { 2566 /* Fetch pointer to next item and then unlock. */ 2567 membar_datadep_consumer(); /* alpha */ 2568 n = atomic_load_relaxed(&o->pcg_next); 2569 atomic_store_release(head, n); 2570 break; 2571 } 2572 } 2573 *pcgp = o; 2574 return count != SPINLOCK_BACKOFF_MIN; 2575 } 2576 2577 /* 2578 * pool_pcg_trunc: 2579 * 2580 * Chop out entire list of pool cache groups. 2581 */ 2582 static pcg_t * 2583 pool_pcg_trunc(pcg_t *volatile *head) 2584 { 2585 int count = SPINLOCK_BACKOFF_MIN, s; 2586 pcg_t *o, *n; 2587 2588 s = splvm(); 2589 for (o = atomic_load_relaxed(head);; o = n) { 2590 if (__predict_false(o == &pcg_dummy)) { 2591 /* Wait for concurrent get to complete. */ 2592 SPINLOCK_BACKOFF(count); 2593 n = atomic_load_relaxed(head); 2594 continue; 2595 } 2596 n = atomic_cas_ptr(head, o, NULL); 2597 if (o == n) { 2598 splx(s); 2599 membar_datadep_consumer(); /* alpha */ 2600 return o; 2601 } 2602 } 2603 } 2604 2605 /* 2606 * pool_pcg_put: 2607 * 2608 * Put a pool cache group to the specified list. Return true if 2609 * contention was encountered. Must be called at IPL_VM because of 2610 * spin wait vs. kernel_lock. 
2611 */ 2612 static int 2613 pool_pcg_put(pcg_t *volatile *head, pcg_t *pcg) 2614 { 2615 int count = SPINLOCK_BACKOFF_MIN; 2616 pcg_t *o, *n; 2617 2618 for (o = atomic_load_relaxed(head);; o = n) { 2619 if (__predict_false(o == &pcg_dummy)) { 2620 /* Wait for concurrent get to complete. */ 2621 SPINLOCK_BACKOFF(count); 2622 n = atomic_load_relaxed(head); 2623 continue; 2624 } 2625 pcg->pcg_next = o; 2626 membar_release(); 2627 n = atomic_cas_ptr(head, o, pcg); 2628 if (o == n) { 2629 return count != SPINLOCK_BACKOFF_MIN; 2630 } 2631 } 2632 } 2633 2634 static bool __noinline 2635 pool_cache_get_slow(pool_cache_t pc, pool_cache_cpu_t *cc, int s, 2636 void **objectp, paddr_t *pap, int flags) 2637 { 2638 pcg_t *pcg, *cur; 2639 void *object; 2640 2641 KASSERT(cc->cc_current->pcg_avail == 0); 2642 KASSERT(cc->cc_previous->pcg_avail == 0); 2643 2644 cc->cc_misses++; 2645 2646 /* 2647 * If there's a full group, release our empty group back to the 2648 * cache. Install the full group as cc_current and return. 2649 */ 2650 cc->cc_contended += pool_pcg_get(&pc->pc_fullgroups, &pcg); 2651 if (__predict_true(pcg != NULL)) { 2652 KASSERT(pcg->pcg_avail == pcg->pcg_size); 2653 if (__predict_true((cur = cc->cc_current) != &pcg_dummy)) { 2654 KASSERT(cur->pcg_avail == 0); 2655 (void)pool_pcg_put(cc->cc_pcgcache, cur); 2656 } 2657 cc->cc_nfull--; 2658 cc->cc_current = pcg; 2659 return true; 2660 } 2661 2662 /* 2663 * Nothing available locally or in cache. Take the slow 2664 * path: fetch a new object from the pool and construct 2665 * it. 2666 */ 2667 cc->cc_pcmisses++; 2668 splx(s); 2669 2670 object = pool_get(&pc->pc_pool, flags); 2671 *objectp = object; 2672 if (__predict_false(object == NULL)) { 2673 KASSERT((flags & (PR_NOWAIT|PR_LIMITFAIL)) != 0); 2674 return false; 2675 } 2676 2677 if (__predict_false((*pc->pc_ctor)(pc->pc_arg, object, flags) != 0)) { 2678 pool_put(&pc->pc_pool, object); 2679 *objectp = NULL; 2680 return false; 2681 } 2682 2683 KASSERT((((vaddr_t)object) & (pc->pc_pool.pr_align - 1)) == 0); 2684 2685 if (pap != NULL) { 2686 #ifdef POOL_VTOPHYS 2687 *pap = POOL_VTOPHYS(object); 2688 #else 2689 *pap = POOL_PADDR_INVALID; 2690 #endif 2691 } 2692 2693 FREECHECK_OUT(&pc->pc_freecheck, object); 2694 return false; 2695 } 2696 2697 /* 2698 * pool_cache_get{,_paddr}: 2699 * 2700 * Get an object from a pool cache (optionally returning 2701 * the physical address of the object). 2702 */ 2703 void * 2704 pool_cache_get_paddr(pool_cache_t pc, int flags, paddr_t *pap) 2705 { 2706 pool_cache_cpu_t *cc; 2707 pcg_t *pcg; 2708 void *object; 2709 int s; 2710 2711 KASSERT(!(flags & PR_NOWAIT) != !(flags & PR_WAITOK)); 2712 if (pc->pc_pool.pr_ipl == IPL_NONE && 2713 __predict_true(!cold) && 2714 __predict_true(panicstr == NULL)) { 2715 KASSERTMSG(!cpu_intr_p(), 2716 "%s: [%s] is IPL_NONE, but called from interrupt context", 2717 __func__, pc->pc_pool.pr_wchan); 2718 KASSERTMSG(!cpu_softintr_p(), 2719 "%s: [%s] is IPL_NONE," 2720 " but called from soft interrupt context", 2721 __func__, pc->pc_pool.pr_wchan); 2722 } 2723 2724 if (flags & PR_WAITOK) { 2725 ASSERT_SLEEPABLE(); 2726 } 2727 2728 if (flags & PR_NOWAIT) { 2729 if (fault_inject()) 2730 return NULL; 2731 } 2732 2733 /* Lock out interrupts and disable preemption. */ 2734 s = splvm(); 2735 while (/* CONSTCOND */ true) { 2736 /* Try and allocate an object from the current group. 
 */
2737 		cc = pc->pc_cpus[curcpu()->ci_index];
2738 		pcg = cc->cc_current;
2739 		if (__predict_true(pcg->pcg_avail > 0)) {
2740 			object = pcg->pcg_objects[--pcg->pcg_avail].pcgo_va;
2741 			if (__predict_false(pap != NULL))
2742 				*pap = pcg->pcg_objects[pcg->pcg_avail].pcgo_pa;
2743 #if defined(DIAGNOSTIC)
2744 			pcg->pcg_objects[pcg->pcg_avail].pcgo_va = NULL;
2745 			KASSERT(pcg->pcg_avail < pcg->pcg_size);
2746 			KASSERT(object != NULL);
2747 #endif
2748 			cc->cc_hits++;
2749 			splx(s);
2750 			FREECHECK_OUT(&pc->pc_freecheck, object);
2751 			pool_redzone_fill(&pc->pc_pool, object);
2752 			pool_cache_get_kmsan(pc, object);
2753 			return object;
2754 		}
2755 
2756 		/*
2757 		 * That failed.  If the previous group isn't empty, swap
2758 		 * it with the current group and allocate from there.
2759 		 */
2760 		pcg = cc->cc_previous;
2761 		if (__predict_true(pcg->pcg_avail > 0)) {
2762 			cc->cc_previous = cc->cc_current;
2763 			cc->cc_current = pcg;
2764 			continue;
2765 		}
2766 
2767 		/*
2768 		 * Can't allocate from either group: try the slow path.
2769 		 * If get_slow() allocated an object for us, or if
2770 		 * no more objects are available, it will return false.
2771 		 * Otherwise, we need to retry.
2772 		 */
2773 		if (!pool_cache_get_slow(pc, cc, s, &object, pap, flags)) {
2774 			if (object != NULL) {
2775 				kmsan_orig(object, pc->pc_pool.pr_size,
2776 				    KMSAN_TYPE_POOL, __RET_ADDR);
2777 			}
2778 			break;
2779 		}
2780 	}
2781 
2782 	/*
2783 	 * We would like to KASSERT(object || (flags & PR_NOWAIT)), but
2784 	 * pool_cache_get can fail even in the PR_WAITOK case, if the
2785 	 * constructor fails.
2786 	 */
2787 	return object;
2788 }
2789 
2790 static bool __noinline
2791 pool_cache_put_slow(pool_cache_t pc, pool_cache_cpu_t *cc, int s, void *object)
2792 {
2793 	pcg_t *pcg, *cur;
2794 
2795 	KASSERT(cc->cc_current->pcg_avail == cc->cc_current->pcg_size);
2796 	KASSERT(cc->cc_previous->pcg_avail == cc->cc_previous->pcg_size);
2797 
2798 	cc->cc_misses++;
2799 
2800 	/*
2801 	 * Try to get an empty group from the cache.  If there are no empty
2802 	 * groups in the cache then allocate one.
2803 	 */
2804 	(void)pool_pcg_get(cc->cc_pcgcache, &pcg);
2805 	if (__predict_false(pcg == NULL)) {
2806 		if (__predict_true(!pool_cache_disable)) {
2807 			pcg = pool_get(pc->pc_pcgpool, PR_NOWAIT);
2808 		}
2809 		if (__predict_true(pcg != NULL)) {
2810 			pcg->pcg_avail = 0;
2811 			pcg->pcg_size = pc->pc_pcgsize;
2812 		}
2813 	}
2814 
2815 	/*
2816 	 * If there's an empty group, release our full group back to the
2817 	 * cache.  Install the empty group on the local CPU and return.
2818 	 */
2819 	if (pcg != NULL) {
2820 		KASSERT(pcg->pcg_avail == 0);
2821 		if (__predict_false(cc->cc_previous == &pcg_dummy)) {
2822 			cc->cc_previous = pcg;
2823 		} else {
2824 			cur = cc->cc_current;
2825 			if (__predict_true(cur != &pcg_dummy)) {
2826 				KASSERT(cur->pcg_avail == cur->pcg_size);
2827 				cc->cc_contended +=
2828 				    pool_pcg_put(&pc->pc_fullgroups, cur);
2829 				cc->cc_nfull++;
2830 			}
2831 			cc->cc_current = pcg;
2832 		}
2833 		return true;
2834 	}
2835 
2836 	/*
2837 	 * Nothing available locally or in cache, and we didn't
2838 	 * allocate an empty group.  Take the slow path and destroy
2839 	 * the object here and now.
2840 	 */
2841 	cc->cc_pcmisses++;
2842 	splx(s);
2843 	pool_cache_destruct_object(pc, object);
2844 
2845 	return false;
2846 }
2847 
2848 /*
2849  * pool_cache_put{,_paddr}:
2850  *
2851  *	Put an object back into the pool cache (optionally caching the
2852  *	physical address of the object).
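 *
 *	Illustrative get/put pairing; the "frob" names are hypothetical and
 *	the plain pool_cache_get()/pool_cache_put() wrappers simply omit
 *	the physical address:
 *
 *		obj = pool_cache_get(frob_cache, PR_WAITOK);
 *		if (obj == NULL)
 *			return ENOMEM;
 *		...use obj...
 *		pool_cache_put(frob_cache, obj);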
2853 */ 2854 void 2855 pool_cache_put_paddr(pool_cache_t pc, void *object, paddr_t pa) 2856 { 2857 pool_cache_cpu_t *cc; 2858 pcg_t *pcg; 2859 int s; 2860 2861 KASSERT(object != NULL); 2862 pool_cache_put_kmsan(pc, object); 2863 pool_cache_redzone_check(pc, object); 2864 FREECHECK_IN(&pc->pc_freecheck, object); 2865 2866 if (pc->pc_pool.pr_roflags & PR_PHINPAGE) { 2867 pc_phinpage_check(pc, object); 2868 } 2869 2870 if (pool_cache_put_nocache(pc, object)) { 2871 return; 2872 } 2873 2874 /* Lock out interrupts and disable preemption. */ 2875 s = splvm(); 2876 while (/* CONSTCOND */ true) { 2877 /* If the current group isn't full, release it there. */ 2878 cc = pc->pc_cpus[curcpu()->ci_index]; 2879 pcg = cc->cc_current; 2880 if (__predict_true(pcg->pcg_avail < pcg->pcg_size)) { 2881 pcg->pcg_objects[pcg->pcg_avail].pcgo_va = object; 2882 pcg->pcg_objects[pcg->pcg_avail].pcgo_pa = pa; 2883 pcg->pcg_avail++; 2884 cc->cc_hits++; 2885 splx(s); 2886 return; 2887 } 2888 2889 /* 2890 * That failed. If the previous group isn't full, swap 2891 * it with the current group and try again. 2892 */ 2893 pcg = cc->cc_previous; 2894 if (__predict_true(pcg->pcg_avail < pcg->pcg_size)) { 2895 cc->cc_previous = cc->cc_current; 2896 cc->cc_current = pcg; 2897 continue; 2898 } 2899 2900 /* 2901 * Can't free to either group: try the slow path. 2902 * If put_slow() releases the object for us, it 2903 * will return false. Otherwise we need to retry. 2904 */ 2905 if (!pool_cache_put_slow(pc, cc, s, object)) 2906 break; 2907 } 2908 } 2909 2910 /* 2911 * pool_cache_transfer: 2912 * 2913 * Transfer objects from the per-CPU cache to the global cache. 2914 * Run within a cross-call thread. 2915 */ 2916 static void 2917 pool_cache_transfer(pool_cache_t pc) 2918 { 2919 pool_cache_cpu_t *cc; 2920 pcg_t *prev, *cur; 2921 int s; 2922 2923 s = splvm(); 2924 cc = pc->pc_cpus[curcpu()->ci_index]; 2925 cur = cc->cc_current; 2926 cc->cc_current = __UNCONST(&pcg_dummy); 2927 prev = cc->cc_previous; 2928 cc->cc_previous = __UNCONST(&pcg_dummy); 2929 if (cur != &pcg_dummy) { 2930 if (cur->pcg_avail == cur->pcg_size) { 2931 (void)pool_pcg_put(&pc->pc_fullgroups, cur); 2932 cc->cc_nfull++; 2933 } else if (cur->pcg_avail == 0) { 2934 (void)pool_pcg_put(pc->pc_pcgcache, cur); 2935 } else { 2936 (void)pool_pcg_put(&pc->pc_partgroups, cur); 2937 cc->cc_npart++; 2938 } 2939 } 2940 if (prev != &pcg_dummy) { 2941 if (prev->pcg_avail == prev->pcg_size) { 2942 (void)pool_pcg_put(&pc->pc_fullgroups, prev); 2943 cc->cc_nfull++; 2944 } else if (prev->pcg_avail == 0) { 2945 (void)pool_pcg_put(pc->pc_pcgcache, prev); 2946 } else { 2947 (void)pool_pcg_put(&pc->pc_partgroups, prev); 2948 cc->cc_npart++; 2949 } 2950 } 2951 splx(s); 2952 } 2953 2954 static int 2955 pool_bigidx(size_t size) 2956 { 2957 int i; 2958 2959 for (i = 0; i < __arraycount(pool_allocator_big); i++) { 2960 if (1 << (i + POOL_ALLOCATOR_BIG_BASE) >= size) 2961 return i; 2962 } 2963 panic("pool item size %zu too large, use a custom allocator", size); 2964 } 2965 2966 static void * 2967 pool_allocator_alloc(struct pool *pp, int flags) 2968 { 2969 struct pool_allocator *pa = pp->pr_alloc; 2970 void *res; 2971 2972 res = (*pa->pa_alloc)(pp, flags); 2973 if (res == NULL && (flags & PR_WAITOK) == 0) { 2974 /* 2975 * We only run the drain hook here if PR_NOWAIT. 2976 * In other cases, the hook will be run in 2977 * pool_reclaim(). 
2978 */ 2979 if (pp->pr_drain_hook != NULL) { 2980 (*pp->pr_drain_hook)(pp->pr_drain_hook_arg, flags); 2981 res = (*pa->pa_alloc)(pp, flags); 2982 } 2983 } 2984 return res; 2985 } 2986 2987 static void 2988 pool_allocator_free(struct pool *pp, void *v) 2989 { 2990 struct pool_allocator *pa = pp->pr_alloc; 2991 2992 if (pp->pr_redzone) { 2993 KASSERT(!pp_has_pser(pp)); 2994 kasan_mark(v, pa->pa_pagesz, pa->pa_pagesz, 0); 2995 } else if (__predict_false(pp_has_pser(pp))) { 2996 /* 2997 * Perform a passive serialization barrier before freeing 2998 * the pool page back to the system. 2999 */ 3000 pool_barrier(); 3001 } 3002 (*pa->pa_free)(pp, v); 3003 } 3004 3005 void * 3006 pool_page_alloc(struct pool *pp, int flags) 3007 { 3008 const vm_flag_t vflags = (flags & PR_WAITOK) ? VM_SLEEP: VM_NOSLEEP; 3009 vmem_addr_t va; 3010 int ret; 3011 3012 ret = uvm_km_kmem_alloc(kmem_va_arena, pp->pr_alloc->pa_pagesz, 3013 vflags | VM_INSTANTFIT, &va); 3014 3015 return ret ? NULL : (void *)va; 3016 } 3017 3018 void 3019 pool_page_free(struct pool *pp, void *v) 3020 { 3021 3022 uvm_km_kmem_free(kmem_va_arena, (vaddr_t)v, pp->pr_alloc->pa_pagesz); 3023 } 3024 3025 static void * 3026 pool_page_alloc_meta(struct pool *pp, int flags) 3027 { 3028 const vm_flag_t vflags = (flags & PR_WAITOK) ? VM_SLEEP: VM_NOSLEEP; 3029 vmem_addr_t va; 3030 int ret; 3031 3032 ret = vmem_alloc(kmem_meta_arena, pp->pr_alloc->pa_pagesz, 3033 vflags | VM_INSTANTFIT, &va); 3034 3035 return ret ? NULL : (void *)va; 3036 } 3037 3038 static void 3039 pool_page_free_meta(struct pool *pp, void *v) 3040 { 3041 3042 vmem_free(kmem_meta_arena, (vmem_addr_t)v, pp->pr_alloc->pa_pagesz); 3043 } 3044 3045 #ifdef KMSAN 3046 static inline void 3047 pool_get_kmsan(struct pool *pp, void *p) 3048 { 3049 kmsan_orig(p, pp->pr_size, KMSAN_TYPE_POOL, __RET_ADDR); 3050 kmsan_mark(p, pp->pr_size, KMSAN_STATE_UNINIT); 3051 } 3052 3053 static inline void 3054 pool_put_kmsan(struct pool *pp, void *p) 3055 { 3056 kmsan_mark(p, pp->pr_size, KMSAN_STATE_INITED); 3057 } 3058 3059 static inline void 3060 pool_cache_get_kmsan(pool_cache_t pc, void *p) 3061 { 3062 if (__predict_false(pc_has_ctor(pc))) { 3063 return; 3064 } 3065 pool_get_kmsan(&pc->pc_pool, p); 3066 } 3067 3068 static inline void 3069 pool_cache_put_kmsan(pool_cache_t pc, void *p) 3070 { 3071 pool_put_kmsan(&pc->pc_pool, p); 3072 } 3073 #endif 3074 3075 #ifdef POOL_QUARANTINE 3076 static void 3077 pool_quarantine_init(struct pool *pp) 3078 { 3079 pp->pr_quar.rotor = 0; 3080 memset(&pp->pr_quar, 0, sizeof(pp->pr_quar)); 3081 } 3082 3083 static void 3084 pool_quarantine_flush(struct pool *pp) 3085 { 3086 pool_quar_t *quar = &pp->pr_quar; 3087 struct pool_pagelist pq; 3088 size_t i; 3089 3090 LIST_INIT(&pq); 3091 3092 mutex_enter(&pp->pr_lock); 3093 for (i = 0; i < POOL_QUARANTINE_DEPTH; i++) { 3094 if (quar->list[i] == 0) 3095 continue; 3096 pool_do_put(pp, (void *)quar->list[i], &pq); 3097 } 3098 mutex_exit(&pp->pr_lock); 3099 3100 pr_pagelist_free(pp, &pq); 3101 } 3102 3103 static bool 3104 pool_put_quarantine(struct pool *pp, void *v, struct pool_pagelist *pq) 3105 { 3106 pool_quar_t *quar = &pp->pr_quar; 3107 uintptr_t old; 3108 3109 if (pp->pr_roflags & PR_NOTOUCH) { 3110 return false; 3111 } 3112 3113 pool_redzone_check(pp, v); 3114 3115 old = quar->list[quar->rotor]; 3116 quar->list[quar->rotor] = (uintptr_t)v; 3117 quar->rotor = (quar->rotor + 1) % POOL_QUARANTINE_DEPTH; 3118 if (old != 0) { 3119 pool_do_put(pp, (void *)old, pq); 3120 } 3121 3122 return true; 3123 } 3124 #endif 3125 3126 #ifdef 
POOL_NOCACHE 3127 static bool 3128 pool_cache_put_nocache(pool_cache_t pc, void *p) 3129 { 3130 pool_cache_destruct_object(pc, p); 3131 return true; 3132 } 3133 #endif 3134 3135 #ifdef POOL_REDZONE 3136 #if defined(_LP64) 3137 # define PRIME 0x9e37fffffffc0000UL 3138 #else /* defined(_LP64) */ 3139 # define PRIME 0x9e3779b1 3140 #endif /* defined(_LP64) */ 3141 #define STATIC_BYTE 0xFE 3142 CTASSERT(POOL_REDZONE_SIZE > 1); 3143 3144 #ifndef KASAN 3145 static inline uint8_t 3146 pool_pattern_generate(const void *p) 3147 { 3148 return (uint8_t)(((uintptr_t)p) * PRIME 3149 >> ((sizeof(uintptr_t) - sizeof(uint8_t))) * CHAR_BIT); 3150 } 3151 #endif 3152 3153 static void 3154 pool_redzone_init(struct pool *pp, size_t requested_size) 3155 { 3156 size_t redzsz; 3157 size_t nsz; 3158 3159 #ifdef KASAN 3160 redzsz = requested_size; 3161 kasan_add_redzone(&redzsz); 3162 redzsz -= requested_size; 3163 #else 3164 redzsz = POOL_REDZONE_SIZE; 3165 #endif 3166 3167 if (pp->pr_roflags & PR_NOTOUCH) { 3168 pp->pr_redzone = false; 3169 return; 3170 } 3171 3172 /* 3173 * We may have extended the requested size earlier; check if 3174 * there's naturally space in the padding for a red zone. 3175 */ 3176 if (pp->pr_size - requested_size >= redzsz) { 3177 pp->pr_reqsize_with_redzone = requested_size + redzsz; 3178 pp->pr_redzone = true; 3179 return; 3180 } 3181 3182 /* 3183 * No space in the natural padding; check if we can extend a 3184 * bit the size of the pool. 3185 * 3186 * Avoid using redzone for allocations half of a page or larger. 3187 * For pagesize items, we'd waste a whole new page (could be 3188 * unmapped?), and for half pagesize items, approximately half 3189 * the space is lost (eg, 4K pages, you get one 2K allocation.) 3190 */ 3191 nsz = roundup(pp->pr_size + redzsz, pp->pr_align); 3192 if (nsz <= (pp->pr_alloc->pa_pagesz / 2)) { 3193 /* Ok, we can */ 3194 pp->pr_size = nsz; 3195 pp->pr_reqsize_with_redzone = requested_size + redzsz; 3196 pp->pr_redzone = true; 3197 } else { 3198 /* No space for a red zone... snif :'( */ 3199 pp->pr_redzone = false; 3200 aprint_debug("pool redzone disabled for '%s'\n", pp->pr_wchan); 3201 } 3202 } 3203 3204 static void 3205 pool_redzone_fill(struct pool *pp, void *p) 3206 { 3207 if (!pp->pr_redzone) 3208 return; 3209 KASSERT(!pp_has_pser(pp)); 3210 #ifdef KASAN 3211 kasan_mark(p, pp->pr_reqsize, pp->pr_reqsize_with_redzone, 3212 KASAN_POOL_REDZONE); 3213 #else 3214 uint8_t *cp, pat; 3215 const uint8_t *ep; 3216 3217 cp = (uint8_t *)p + pp->pr_reqsize; 3218 ep = cp + POOL_REDZONE_SIZE; 3219 3220 /* 3221 * We really don't want the first byte of the red zone to be '\0'; 3222 * an off-by-one in a string may not be properly detected. 3223 */ 3224 pat = pool_pattern_generate(cp); 3225 *cp = (pat == '\0') ? STATIC_BYTE: pat; 3226 cp++; 3227 3228 while (cp < ep) { 3229 *cp = pool_pattern_generate(cp); 3230 cp++; 3231 } 3232 #endif 3233 } 3234 3235 static void 3236 pool_redzone_check(struct pool *pp, void *p) 3237 { 3238 if (!pp->pr_redzone) 3239 return; 3240 KASSERT(!pp_has_pser(pp)); 3241 #ifdef KASAN 3242 kasan_mark(p, 0, pp->pr_reqsize_with_redzone, KASAN_POOL_FREED); 3243 #else 3244 uint8_t *cp, pat, expected; 3245 const uint8_t *ep; 3246 3247 cp = (uint8_t *)p + pp->pr_reqsize; 3248 ep = cp + POOL_REDZONE_SIZE; 3249 3250 pat = pool_pattern_generate(cp); 3251 expected = (pat == '\0') ? 
STATIC_BYTE: pat; 3252 if (__predict_false(*cp != expected)) { 3253 panic("%s: [%s] 0x%02x != 0x%02x", __func__, 3254 pp->pr_wchan, *cp, expected); 3255 } 3256 cp++; 3257 3258 while (cp < ep) { 3259 expected = pool_pattern_generate(cp); 3260 if (__predict_false(*cp != expected)) { 3261 panic("%s: [%s] 0x%02x != 0x%02x", __func__, 3262 pp->pr_wchan, *cp, expected); 3263 } 3264 cp++; 3265 } 3266 #endif 3267 } 3268 3269 static void 3270 pool_cache_redzone_check(pool_cache_t pc, void *p) 3271 { 3272 #ifdef KASAN 3273 /* 3274 * If there is a ctor/dtor, or if the cache objects use 3275 * passive serialization, leave the data as valid. 3276 */ 3277 if (__predict_false(pc_has_ctor(pc) || pc_has_dtor(pc) || 3278 pc_has_pser(pc))) { 3279 return; 3280 } 3281 #endif 3282 pool_redzone_check(&pc->pc_pool, p); 3283 } 3284 3285 #endif /* POOL_REDZONE */ 3286 3287 #if defined(DDB) 3288 static bool 3289 pool_in_page(struct pool *pp, struct pool_item_header *ph, uintptr_t addr) 3290 { 3291 3292 return (uintptr_t)ph->ph_page <= addr && 3293 addr < (uintptr_t)ph->ph_page + pp->pr_alloc->pa_pagesz; 3294 } 3295 3296 static bool 3297 pool_in_item(struct pool *pp, void *item, uintptr_t addr) 3298 { 3299 3300 return (uintptr_t)item <= addr && addr < (uintptr_t)item + pp->pr_size; 3301 } 3302 3303 static bool 3304 pool_in_cg(struct pool *pp, struct pool_cache_group *pcg, uintptr_t addr) 3305 { 3306 int i; 3307 3308 if (pcg == NULL) { 3309 return false; 3310 } 3311 for (i = 0; i < pcg->pcg_avail; i++) { 3312 if (pool_in_item(pp, pcg->pcg_objects[i].pcgo_va, addr)) { 3313 return true; 3314 } 3315 } 3316 return false; 3317 } 3318 3319 static bool 3320 pool_allocated(struct pool *pp, struct pool_item_header *ph, uintptr_t addr) 3321 { 3322 3323 if ((pp->pr_roflags & PR_USEBMAP) != 0) { 3324 unsigned int idx = pr_item_bitmap_index(pp, ph, (void *)addr); 3325 pool_item_bitmap_t *bitmap = 3326 ph->ph_bitmap + (idx / BITMAP_SIZE); 3327 pool_item_bitmap_t mask = 1U << (idx & BITMAP_MASK); 3328 3329 return (*bitmap & mask) == 0; 3330 } else { 3331 struct pool_item *pi; 3332 3333 LIST_FOREACH(pi, &ph->ph_itemlist, pi_list) { 3334 if (pool_in_item(pp, pi, addr)) { 3335 return false; 3336 } 3337 } 3338 return true; 3339 } 3340 } 3341 3342 void 3343 pool_whatis(uintptr_t addr, void (*pr)(const char *, ...)) 3344 { 3345 struct pool *pp; 3346 3347 TAILQ_FOREACH(pp, &pool_head, pr_poollist) { 3348 struct pool_item_header *ph; 3349 struct pool_cache *pc; 3350 uintptr_t item; 3351 bool allocated = true; 3352 bool incache = false; 3353 bool incpucache = false; 3354 char cpucachestr[32]; 3355 3356 if ((pp->pr_roflags & PR_PHINPAGE) != 0) { 3357 LIST_FOREACH(ph, &pp->pr_fullpages, ph_pagelist) { 3358 if (pool_in_page(pp, ph, addr)) { 3359 goto found; 3360 } 3361 } 3362 LIST_FOREACH(ph, &pp->pr_partpages, ph_pagelist) { 3363 if (pool_in_page(pp, ph, addr)) { 3364 allocated = 3365 pool_allocated(pp, ph, addr); 3366 goto found; 3367 } 3368 } 3369 LIST_FOREACH(ph, &pp->pr_emptypages, ph_pagelist) { 3370 if (pool_in_page(pp, ph, addr)) { 3371 allocated = false; 3372 goto found; 3373 } 3374 } 3375 continue; 3376 } else { 3377 ph = pr_find_pagehead_noalign(pp, (void *)addr); 3378 if (ph == NULL || !pool_in_page(pp, ph, addr)) { 3379 continue; 3380 } 3381 allocated = pool_allocated(pp, ph, addr); 3382 } 3383 found: 3384 if (allocated && 3385 (pc = atomic_load_consume(&pp->pr_cache)) != NULL) { 3386 struct pool_cache_group *pcg; 3387 int i; 3388 3389 for (pcg = pc->pc_fullgroups; pcg != NULL; 3390 pcg = pcg->pcg_next) { 3391 if (pool_in_cg(pp, 
pcg, addr)) { 3392 incache = true; 3393 goto print; 3394 } 3395 } 3396 for (i = 0; i < __arraycount(pc->pc_cpus); i++) { 3397 pool_cache_cpu_t *cc; 3398 3399 if ((cc = pc->pc_cpus[i]) == NULL) { 3400 continue; 3401 } 3402 if (pool_in_cg(pp, cc->cc_current, addr) || 3403 pool_in_cg(pp, cc->cc_previous, addr)) { 3404 struct cpu_info *ci = 3405 cpu_lookup(i); 3406 3407 incpucache = true; 3408 snprintf(cpucachestr, 3409 sizeof(cpucachestr), 3410 "cached by CPU %u", 3411 ci->ci_index); 3412 goto print; 3413 } 3414 } 3415 } 3416 print: 3417 item = (uintptr_t)ph->ph_page + ph->ph_off; 3418 item = item + rounddown(addr - item, pp->pr_size); 3419 (*pr)("%p is %p+%zu in POOL '%s' (%s)\n", 3420 (void *)addr, item, (size_t)(addr - item), 3421 pp->pr_wchan, 3422 incpucache ? cpucachestr : 3423 incache ? "cached" : allocated ? "allocated" : "free"); 3424 } 3425 } 3426 #endif /* defined(DDB) */ 3427 3428 static int 3429 pool_sysctl(SYSCTLFN_ARGS) 3430 { 3431 struct pool_sysctl data; 3432 struct pool *pp; 3433 struct pool_cache *pc; 3434 pool_cache_cpu_t *cc; 3435 int error; 3436 size_t i, written; 3437 3438 if (oldp == NULL) { 3439 *oldlenp = 0; 3440 TAILQ_FOREACH(pp, &pool_head, pr_poollist) 3441 *oldlenp += sizeof(data); 3442 return 0; 3443 } 3444 3445 memset(&data, 0, sizeof(data)); 3446 error = 0; 3447 written = 0; 3448 mutex_enter(&pool_head_lock); 3449 TAILQ_FOREACH(pp, &pool_head, pr_poollist) { 3450 if (written + sizeof(data) > *oldlenp) 3451 break; 3452 pp->pr_refcnt++; 3453 strlcpy(data.pr_wchan, pp->pr_wchan, sizeof(data.pr_wchan)); 3454 data.pr_pagesize = pp->pr_alloc->pa_pagesz; 3455 data.pr_flags = pp->pr_roflags | pp->pr_flags; 3456 #define COPY(field) data.field = pp->field 3457 COPY(pr_size); 3458 3459 COPY(pr_itemsperpage); 3460 COPY(pr_nitems); 3461 COPY(pr_nout); 3462 COPY(pr_hardlimit); 3463 COPY(pr_npages); 3464 COPY(pr_minpages); 3465 COPY(pr_maxpages); 3466 3467 COPY(pr_nget); 3468 COPY(pr_nfail); 3469 COPY(pr_nput); 3470 COPY(pr_npagealloc); 3471 COPY(pr_npagefree); 3472 COPY(pr_hiwat); 3473 COPY(pr_nidle); 3474 #undef COPY 3475 3476 data.pr_cache_nmiss_pcpu = 0; 3477 data.pr_cache_nhit_pcpu = 0; 3478 data.pr_cache_nmiss_global = 0; 3479 data.pr_cache_nempty = 0; 3480 data.pr_cache_ncontended = 0; 3481 data.pr_cache_npartial = 0; 3482 if ((pc = atomic_load_consume(&pp->pr_cache)) != NULL) { 3483 uint32_t nfull = 0; 3484 data.pr_cache_meta_size = pc->pc_pcgsize; 3485 for (i = 0; i < pc->pc_ncpu; ++i) { 3486 cc = pc->pc_cpus[i]; 3487 if (cc == NULL) 3488 continue; 3489 data.pr_cache_ncontended += cc->cc_contended; 3490 data.pr_cache_nmiss_pcpu += cc->cc_misses; 3491 data.pr_cache_nhit_pcpu += cc->cc_hits; 3492 data.pr_cache_nmiss_global += cc->cc_pcmisses; 3493 nfull += cc->cc_nfull; /* 32-bit rollover! */ 3494 data.pr_cache_npartial += cc->cc_npart; 3495 } 3496 data.pr_cache_nfull = nfull; 3497 } else { 3498 data.pr_cache_meta_size = 0; 3499 data.pr_cache_nfull = 0; 3500 } 3501 data.pr_cache_nhit_global = data.pr_cache_nmiss_pcpu - 3502 data.pr_cache_nmiss_global; 3503 3504 if (pp->pr_refcnt == UINT_MAX) /* XXX possible? 
*/ 3505 continue; 3506 mutex_exit(&pool_head_lock); 3507 error = sysctl_copyout(l, &data, oldp, sizeof(data)); 3508 mutex_enter(&pool_head_lock); 3509 if (--pp->pr_refcnt == 0) 3510 cv_broadcast(&pool_busy); 3511 if (error) 3512 break; 3513 written += sizeof(data); 3514 oldp = (char *)oldp + sizeof(data); 3515 } 3516 mutex_exit(&pool_head_lock); 3517 3518 *oldlenp = written; 3519 return error; 3520 } 3521 3522 SYSCTL_SETUP(sysctl_pool_setup, "sysctl kern.pool setup") 3523 { 3524 const struct sysctlnode *rnode = NULL; 3525 3526 sysctl_createv(clog, 0, NULL, &rnode, 3527 CTLFLAG_PERMANENT, 3528 CTLTYPE_STRUCT, "pool", 3529 SYSCTL_DESCR("Get pool statistics"), 3530 pool_sysctl, 0, NULL, 0, 3531 CTL_KERN, CTL_CREATE, CTL_EOL); 3532 } 3533
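
/*
 * The "kern.pool" node registered above returns an array of struct
 * pool_sysctl, one entry per pool; a query with a NULL buffer reports
 * the size required.  A minimal, purely illustrative userland consumer
 * (not part of this file) might look like:
 *
 *	size_t len;
 *	struct pool_sysctl *ps;
 *
 *	if (sysctlbyname("kern.pool", NULL, &len, NULL, 0) == -1)
 *		err(1, "sysctl kern.pool");
 *	if ((ps = malloc(len)) == NULL)
 *		err(1, "malloc");
 *	if (sysctlbyname("kern.pool", ps, &len, NULL, 0) == -1)
 *		err(1, "sysctl kern.pool");
 *	for (size_t i = 0; i < len / sizeof(*ps); i++)
 *		printf("%s\n", ps[i].pr_wchan);
 */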