/*	$OpenBSD: subr_pool.c,v 1.101 2011/04/04 11:13:55 deraadt Exp $	*/
/*	$NetBSD: subr_pool.c,v 1.61 2001/09/26 07:14:56 chs Exp $	*/

/*-
 * Copyright (c) 1997, 1999, 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Paul Kranenburg; by Jason R. Thorpe of the Numerical Aerospace
 * Simulation Facility, NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/syslog.h>
#include <sys/sysctl.h>

#include <uvm/uvm.h>
#include <dev/rndvar.h>

/*
 * Pool resource management utility.
 *
 * Memory is allocated in pages which are split into pieces according to
 * the pool item size. Each page is kept on one of three lists in the
 * pool structure: `pr_emptypages', `pr_fullpages' and `pr_partpages',
 * for empty, full and partially-full pages respectively. The individual
 * pool items are on a linked list headed by `ph_itemlist' in each page
 * header. The memory for building the page list is either taken from
 * the allocated pages themselves (for small pool items) or taken from
 * an internal pool of page headers (`phpool').
 */
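/*
 * Typical usage (illustrative sketch only; `foo_pool' and `struct foo'
 * are hypothetical): a subsystem declares a pool, initializes it once,
 * and then allocates and frees fixed-size items through it:
 *
 *	struct pool foo_pool;
 *
 *	pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0, "foopl", NULL);
 *	...
 *	struct foo *f = pool_get(&foo_pool, PR_WAITOK);
 *	...
 *	pool_put(&foo_pool, f);
 */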
/* List of all pools */
TAILQ_HEAD(,pool) pool_head = TAILQ_HEAD_INITIALIZER(pool_head);

/* Private pool for page header structures */
struct pool phpool;

struct pool_item_header {
	/* Page headers */
	LIST_ENTRY(pool_item_header)
				ph_pagelist;	/* pool page list */
	TAILQ_HEAD(,pool_item)	ph_itemlist;	/* chunk list for this page */
	RB_ENTRY(pool_item_header)
				ph_node;	/* Off-page page headers */
	int			ph_nmissing;	/* # of chunks in use */
	caddr_t			ph_page;	/* this page's address */
	caddr_t			ph_colored;	/* page's colored address */
	int			ph_pagesize;
	int			ph_magic;
};

struct pool_item {
#ifdef DIAGNOSTIC
	u_int32_t pi_magic;
#endif
	/* Other entries use only this list entry */
	TAILQ_ENTRY(pool_item)	pi_list;
};

#ifdef DEADBEEF1
#define	PI_MAGIC DEADBEEF1
#else
#define	PI_MAGIC 0xdeafbeef
#endif

#ifdef POOL_DEBUG
int	pool_debug = 1;
#else
int	pool_debug = 0;
#endif

#define	POOL_NEEDS_CATCHUP(pp)						\
	((pp)->pr_nitems < (pp)->pr_minitems)

/*
 * Every pool gets a unique serial number assigned to it. If this counter
 * wraps, we're screwed, but we shouldn't create so many pools anyway.
 */
unsigned int pool_serial;

int	 pool_catchup(struct pool *);
void	 pool_prime_page(struct pool *, caddr_t, struct pool_item_header *);
void	 pool_update_curpage(struct pool *);
void	*pool_do_get(struct pool *, int);
void	 pool_do_put(struct pool *, void *);
void	 pr_rmpage(struct pool *, struct pool_item_header *,
	    struct pool_pagelist *);
int	 pool_chk_page(struct pool *, const char *, struct pool_item_header *);
struct pool_item_header *pool_alloc_item_header(struct pool *, caddr_t, int);

void	*pool_allocator_alloc(struct pool *, int, int *);
void	 pool_allocator_free(struct pool *, void *);

/*
 * XXX - quick hack. For pools with large items we want to use a special
 *       allocator. For now, instead of having the allocator figure out
 *       the allocation size from the pool (which can be done trivially
 *       with round_page(pr_itemsperpage * pr_size)) which would require
 *       lots of changes everywhere, we just create allocators for each
 *       size. We limit those to 128 pages.
 */
#define POOL_LARGE_MAXPAGES 128
struct pool_allocator pool_allocator_large[POOL_LARGE_MAXPAGES];
struct pool_allocator pool_allocator_large_ni[POOL_LARGE_MAXPAGES];
void	*pool_large_alloc(struct pool *, int, int *);
void	 pool_large_free(struct pool *, void *);
void	*pool_large_alloc_ni(struct pool *, int, int *);
void	 pool_large_free_ni(struct pool *, void *);


#ifdef DDB
void	 pool_print_pagelist(struct pool_pagelist *,
	    int (*)(const char *, ...));
void	 pool_print1(struct pool *, const char *, int (*)(const char *, ...));
#endif

#define pool_sleep(pl) msleep(pl, &pl->pr_mtx, PSWP, pl->pr_wchan, 0)

static __inline int
phtree_compare(struct pool_item_header *a, struct pool_item_header *b)
{
	long diff = (vaddr_t)a->ph_page - (vaddr_t)b->ph_page;
	if (diff < 0)
		return -(-diff >= a->ph_pagesize);
	else if (diff > 0)
		return (diff >= b->ph_pagesize);
	else
		return (0);
}

RB_PROTOTYPE(phtree, pool_item_header, ph_node, phtree_compare);
RB_GENERATE(phtree, pool_item_header, ph_node, phtree_compare);
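/*
 * A worked example of the overlap comparison above (addresses are
 * illustrative): suppose the tree holds a header with ph_page == 0x1000
 * and ph_pagesize == 0x1000, and we search with a key where
 * ph_page == 0x1234 and ph_pagesize == 0.  diff is then 0x234 > 0 and
 * 0x234 >= 0x1000 is false, so the compare returns 0: the key "overlaps"
 * the header and RB_FIND() returns it.  Against a header at 0x2000 the
 * key compares less (diff < 0, and the key's own pagesize of 0 can never
 * overlap upward), so the search never matches the following page.
 */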
/*
 * Return the pool page header based on page address.
 */
static __inline struct pool_item_header *
pr_find_pagehead(struct pool *pp, void *v)
{
	struct pool_item_header *ph, tmp;

	if ((pp->pr_roflags & PR_PHINPAGE) != 0) {
		caddr_t page;

		page = (caddr_t)((vaddr_t)v & pp->pr_alloc->pa_pagemask);

		return ((struct pool_item_header *)(page + pp->pr_phoffset));
	}

	/*
	 * The trick we're using in the tree compare function is to compare
	 * two elements equal when they overlap. We want to return the
	 * page header that belongs to the element just before this address.
	 * We don't want this element to compare equal to the next element,
	 * so the compare function takes the pagesize from the lower element.
	 * If this header is the lower, its pagesize is zero, so it can't
	 * overlap with the next header. But if the header we're looking for
	 * is lower, we'll use its pagesize and it will overlap and return
	 * equal.
	 */
	tmp.ph_page = v;
	tmp.ph_pagesize = 0;
	ph = RB_FIND(phtree, &pp->pr_phtree, &tmp);

	if (ph) {
		KASSERT(ph->ph_page <= (caddr_t)v);
		KASSERT(ph->ph_page + ph->ph_pagesize > (caddr_t)v);
	}
	return ph;
}

/*
 * Remove a page from the pool.
 */
void
pr_rmpage(struct pool *pp, struct pool_item_header *ph,
    struct pool_pagelist *pq)
{

	/*
	 * If the page was idle, decrement the idle page count.
	 */
	if (ph->ph_nmissing == 0) {
#ifdef DIAGNOSTIC
		if (pp->pr_nidle == 0)
			panic("pr_rmpage: nidle inconsistent");
		if (pp->pr_nitems < pp->pr_itemsperpage)
			panic("pr_rmpage: nitems inconsistent");
#endif
		pp->pr_nidle--;
	}

	pp->pr_nitems -= pp->pr_itemsperpage;

	/*
	 * Unlink a page from the pool and release it (or queue it for
	 * release).
	 */
	LIST_REMOVE(ph, ph_pagelist);
	if ((pp->pr_roflags & PR_PHINPAGE) == 0)
		RB_REMOVE(phtree, &pp->pr_phtree, ph);
	if (pq) {
		LIST_INSERT_HEAD(pq, ph, ph_pagelist);
	} else {
		pool_allocator_free(pp, ph->ph_page);
		if ((pp->pr_roflags & PR_PHINPAGE) == 0)
			pool_put(&phpool, ph);
	}
	pp->pr_npages--;
	pp->pr_npagefree++;

	pool_update_curpage(pp);
}
/*
 * Initialize the given pool resource structure.
 *
 * We export this routine to allow other kernel parts to declare
 * static pools that must be initialized before malloc() is available.
 */
void
pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags,
    const char *wchan, struct pool_allocator *palloc)
{
	int off, slack;

#ifdef MALLOC_DEBUG
	if ((flags & PR_DEBUG) && (ioff != 0 || align != 0))
		flags &= ~PR_DEBUG;
#endif
	/*
	 * Check arguments and construct default values.
	 */
	if (palloc == NULL) {
		if (size > PAGE_SIZE) {
			int psize;

			/*
			 * XXX - should take align into account as well.
			 */
			if (size == round_page(size))
				psize = size / PAGE_SIZE;
			else
				psize = PAGE_SIZE / roundup(size % PAGE_SIZE,
				    1024);
			if (psize > POOL_LARGE_MAXPAGES)
				psize = POOL_LARGE_MAXPAGES;
			if (flags & PR_WAITOK)
				palloc = &pool_allocator_large_ni[psize-1];
			else
				palloc = &pool_allocator_large[psize-1];
			if (palloc->pa_pagesz == 0) {
				palloc->pa_pagesz = psize * PAGE_SIZE;
				if (flags & PR_WAITOK) {
					palloc->pa_alloc = pool_large_alloc_ni;
					palloc->pa_free = pool_large_free_ni;
				} else {
					palloc->pa_alloc = pool_large_alloc;
					palloc->pa_free = pool_large_free;
				}
			}
		} else {
			palloc = &pool_allocator_nointr;
		}
	}
	if (palloc->pa_pagesz == 0) {
		palloc->pa_pagesz = PAGE_SIZE;
	}
	if (palloc->pa_pagemask == 0) {
		palloc->pa_pagemask = ~(palloc->pa_pagesz - 1);
		palloc->pa_pageshift = ffs(palloc->pa_pagesz) - 1;
	}

	if (align == 0)
		align = ALIGN(1);

	if (size < sizeof(struct pool_item))
		size = sizeof(struct pool_item);

	size = roundup(size, align);
#ifdef DIAGNOSTIC
	if (size > palloc->pa_pagesz)
		panic("pool_init: pool item size (%lu) too large",
		    (u_long)size);
#endif

	/*
	 * Initialize the pool structure.
	 */
	LIST_INIT(&pp->pr_emptypages);
	LIST_INIT(&pp->pr_fullpages);
	LIST_INIT(&pp->pr_partpages);
	pp->pr_curpage = NULL;
	pp->pr_npages = 0;
	pp->pr_minitems = 0;
	pp->pr_minpages = 0;
	pp->pr_maxpages = 8;
	pp->pr_roflags = flags;
	pp->pr_flags = 0;
	pp->pr_size = size;
	pp->pr_align = align;
	pp->pr_wchan = wchan;
	pp->pr_alloc = palloc;
	pp->pr_nitems = 0;
	pp->pr_nout = 0;
	pp->pr_hardlimit = UINT_MAX;
	pp->pr_hardlimit_warning = NULL;
	pp->pr_hardlimit_ratecap.tv_sec = 0;
	pp->pr_hardlimit_ratecap.tv_usec = 0;
	pp->pr_hardlimit_warning_last.tv_sec = 0;
	pp->pr_hardlimit_warning_last.tv_usec = 0;
	pp->pr_serial = ++pool_serial;
	if (pool_serial == 0)
		panic("pool_init: too much uptime");

	/* constructor, destructor, and arg */
	pp->pr_ctor = NULL;
	pp->pr_dtor = NULL;
	pp->pr_arg = NULL;

	/*
	 * Decide whether to put the page header off page to avoid
	 * wasting too large a part of the page. Off-page page headers
	 * go into an RB tree, so we can match a returned item with
	 * its header based on the page address.
	 * We use 1/16 of the page size as the threshold (XXX: tune)
	 */
	if (pp->pr_size < palloc->pa_pagesz/16 && pp->pr_size < PAGE_SIZE) {
		/* Use the end of the page for the page header */
		pp->pr_roflags |= PR_PHINPAGE;
		pp->pr_phoffset = off = palloc->pa_pagesz -
		    ALIGN(sizeof(struct pool_item_header));
	} else {
		/* The page header will be taken from our page header pool */
		pp->pr_phoffset = 0;
		off = palloc->pa_pagesz;
		RB_INIT(&pp->pr_phtree);
	}

	/*
	 * Alignment is to take place at `ioff' within the item. This means
	 * we must reserve up to `align - 1' bytes on the page to allow
	 * appropriate positioning of each item.
	 *
	 * Silently enforce `0 <= ioff < align'.
	 */
	pp->pr_itemoffset = ioff = ioff % align;
	pp->pr_itemsperpage = (off - ((align - ioff) % align)) / pp->pr_size;
	KASSERT(pp->pr_itemsperpage != 0);

	/*
	 * Use the slack between the chunks and the page header
	 * for "cache coloring".
	 */
	slack = off - pp->pr_itemsperpage * pp->pr_size;
	pp->pr_maxcolor = (slack / align) * align;
	pp->pr_curcolor = 0;

	pp->pr_nget = 0;
	pp->pr_nfail = 0;
	pp->pr_nput = 0;
	pp->pr_npagealloc = 0;
	pp->pr_npagefree = 0;
	pp->pr_hiwat = 0;
	pp->pr_nidle = 0;

	pp->pr_ipl = -1;
	mtx_init(&pp->pr_mtx, IPL_NONE);

	if (phpool.pr_size == 0) {
		pool_init(&phpool, sizeof(struct pool_item_header), 0, 0,
		    0, "phpool", NULL);
		pool_setipl(&phpool, IPL_HIGH);
	}

	/* pglistalloc/constraint parameters */
	pp->pr_crange = &no_constraint;
	pp->pr_pa_nsegs = 0;

	/* Insert this into the list of all pools. */
	TAILQ_INSERT_HEAD(&pool_head, pp, pr_poollist);
}
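/*
 * A worked example of the layout arithmetic above, with illustrative
 * numbers (a 4096-byte page, a 96-byte item, align == 8, ioff == 0, and
 * an in-page header occupying the last 64 bytes of the page): `off' is
 * 4096 - 64 = 4032, so pr_itemsperpage is 4032 / 96 = 42 and the slack
 * is 4032 - 42 * 96 = 0.  With a 100-byte item rounded up to 104,
 * pr_itemsperpage is 38, the slack is 4032 - 38 * 104 = 80, and
 * pr_maxcolor is (80 / 8) * 8 = 80, so successive pages start their
 * items at offsets 0, 8, 16, ... 80 to spread cache-line pressure.
 */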
void
pool_setipl(struct pool *pp, int ipl)
{
	pp->pr_ipl = ipl;
	mtx_init(&pp->pr_mtx, ipl);
}

/*
 * Decommission a pool resource.
 */
void
pool_destroy(struct pool *pp)
{
	struct pool_item_header *ph;

#ifdef DIAGNOSTIC
	if (pp->pr_nout != 0)
		panic("pool_destroy: pool busy: still out: %u", pp->pr_nout);
#endif

	/* Remove all pages */
	while ((ph = LIST_FIRST(&pp->pr_emptypages)) != NULL)
		pr_rmpage(pp, ph, NULL);
	KASSERT(LIST_EMPTY(&pp->pr_fullpages));
	KASSERT(LIST_EMPTY(&pp->pr_partpages));

	/* Remove from global pool list */
	TAILQ_REMOVE(&pool_head, pp, pr_poollist);
}

struct pool_item_header *
pool_alloc_item_header(struct pool *pp, caddr_t storage, int flags)
{
	struct pool_item_header *ph;

	if ((pp->pr_roflags & PR_PHINPAGE) != 0)
		ph = (struct pool_item_header *)(storage + pp->pr_phoffset);
	else
		ph = pool_get(&phpool, (flags & ~(PR_WAITOK | PR_ZERO)) |
		    PR_NOWAIT);
	if (pool_debug)
		ph->ph_magic = PI_MAGIC;
	return (ph);
}

/*
 * Grab an item from the pool; must be called at appropriate spl level
 */
void *
pool_get(struct pool *pp, int flags)
{
	void *v;

	KASSERT(flags & (PR_WAITOK | PR_NOWAIT));

#ifdef DIAGNOSTIC
	if ((flags & PR_WAITOK) != 0)
		assertwaitok();
#endif /* DIAGNOSTIC */

	mtx_enter(&pp->pr_mtx);
	v = pool_do_get(pp, flags);
	mtx_leave(&pp->pr_mtx);
	if (v == NULL)
		return (v);

	if (pp->pr_ctor) {
		if (flags & PR_ZERO)
			panic("pool_get: PR_ZERO when ctor set");
		if (pp->pr_ctor(pp->pr_arg, v, flags)) {
			mtx_enter(&pp->pr_mtx);
			pool_do_put(pp, v);
			mtx_leave(&pp->pr_mtx);
			v = NULL;
		}
	} else {
		if (flags & PR_ZERO)
			memset(v, 0, pp->pr_size);
	}
	if (v != NULL)
		pp->pr_nget++;
	return (v);
}
void *
pool_do_get(struct pool *pp, int flags)
{
	struct pool_item *pi;
	struct pool_item_header *ph;
	void *v;
	int slowdown = 0;
#if defined(DIAGNOSTIC) && defined(POOL_DEBUG)
	int i, *ip;
#endif

#ifdef MALLOC_DEBUG
	if (pp->pr_roflags & PR_DEBUG) {
		void *addr;

		addr = NULL;
		debug_malloc(pp->pr_size, M_DEBUG,
		    (flags & PR_WAITOK) ? M_WAITOK : M_NOWAIT, &addr);
		return (addr);
	}
#endif

startover:
	/*
	 * Check to see if we've reached the hard limit. If we have,
	 * and we can wait, then wait until an item has been returned to
	 * the pool.
	 */
#ifdef DIAGNOSTIC
	if (__predict_false(pp->pr_nout > pp->pr_hardlimit))
		panic("pool_do_get: %s: crossed hard limit", pp->pr_wchan);
#endif
	if (__predict_false(pp->pr_nout == pp->pr_hardlimit)) {
		if ((flags & PR_WAITOK) && !(flags & PR_LIMITFAIL)) {
			/*
			 * XXX: A warning isn't logged in this case. Should
			 * it be?
			 */
			pp->pr_flags |= PR_WANTED;
			pool_sleep(pp);
			goto startover;
		}

		/*
		 * Log a message that the hard limit has been hit.
		 */
		if (pp->pr_hardlimit_warning != NULL &&
		    ratecheck(&pp->pr_hardlimit_warning_last,
		    &pp->pr_hardlimit_ratecap))
			log(LOG_ERR, "%s\n", pp->pr_hardlimit_warning);

		pp->pr_nfail++;
		return (NULL);
	}

	/*
	 * The convention we use is that if `curpage' is not NULL, then
	 * it points at a non-empty bucket. In particular, `curpage'
	 * never points at a page header which has PR_PHINPAGE set and
	 * has no items in its bucket.
	 */
	if ((ph = pp->pr_curpage) == NULL) {
#ifdef DIAGNOSTIC
		if (pp->pr_nitems != 0) {
			printf("pool_do_get: %s: curpage NULL, nitems %u\n",
			    pp->pr_wchan, pp->pr_nitems);
			panic("pool_do_get: nitems inconsistent");
		}
#endif

		/*
		 * Call the back-end page allocator for more memory.
		 */
		v = pool_allocator_alloc(pp, flags, &slowdown);
		if (__predict_true(v != NULL))
			ph = pool_alloc_item_header(pp, v, flags);

		if (__predict_false(v == NULL || ph == NULL)) {
			if (v != NULL)
				pool_allocator_free(pp, v);

			if ((flags & PR_WAITOK) == 0) {
				pp->pr_nfail++;
				return (NULL);
			}

			/*
			 * Wait for items to be returned to this pool.
			 *
			 * XXX: maybe we should wake up once a second and
			 * try again?
			 */
			pp->pr_flags |= PR_WANTED;
			pool_sleep(pp);
			goto startover;
		}

		/* We have more memory; add it to the pool */
		pool_prime_page(pp, v, ph);
		pp->pr_npagealloc++;

		if (slowdown && (flags & PR_WAITOK)) {
			mtx_leave(&pp->pr_mtx);
			yield();
			mtx_enter(&pp->pr_mtx);
		}

		/* Start the allocation process over. */
		goto startover;
	}
	if (__predict_false((v = pi = TAILQ_FIRST(&ph->ph_itemlist)) == NULL)) {
		panic("pool_do_get: %s: page empty", pp->pr_wchan);
	}
#ifdef DIAGNOSTIC
	if (__predict_false(pp->pr_nitems == 0)) {
		printf("pool_do_get: %s: items on itemlist, nitems %u\n",
		    pp->pr_wchan, pp->pr_nitems);
		panic("pool_do_get: nitems inconsistent");
	}
#endif

#ifdef DIAGNOSTIC
	if (__predict_false(pi->pi_magic != PI_MAGIC))
		panic("pool_do_get(%s): free list modified: "
		    "page %p; item addr %p; offset 0x%x=0x%x",
		    pp->pr_wchan, ph->ph_page, pi, 0, pi->pi_magic);
#ifdef POOL_DEBUG
	if (pool_debug && ph->ph_magic) {
		for (ip = (int *)pi, i = sizeof(*pi) / sizeof(int);
		    i < pp->pr_size / sizeof(int); i++) {
			if (ip[i] != ph->ph_magic) {
				panic("pool_do_get(%s): free list modified: "
				    "page %p; item addr %p; offset 0x%x=0x%x",
				    pp->pr_wchan, ph->ph_page, pi,
				    i * sizeof(int), ip[i]);
			}
		}
	}
#endif /* POOL_DEBUG */
#endif /* DIAGNOSTIC */

	/*
	 * Remove from item list.
	 */
	TAILQ_REMOVE(&ph->ph_itemlist, pi, pi_list);
	pp->pr_nitems--;
	pp->pr_nout++;
	if (ph->ph_nmissing == 0) {
#ifdef DIAGNOSTIC
		if (__predict_false(pp->pr_nidle == 0))
			panic("pool_do_get: nidle inconsistent");
#endif
		pp->pr_nidle--;
		/*
		 * This page was previously empty.  Move it to the list of
		 * partially-full pages.  This page is already curpage.
		 */
		LIST_REMOVE(ph, ph_pagelist);
		LIST_INSERT_HEAD(&pp->pr_partpages, ph, ph_pagelist);
	}
	ph->ph_nmissing++;
	if (TAILQ_EMPTY(&ph->ph_itemlist)) {
#ifdef DIAGNOSTIC
		if (__predict_false(ph->ph_nmissing != pp->pr_itemsperpage)) {
			panic("pool_do_get: %s: nmissing inconsistent",
			    pp->pr_wchan);
		}
#endif
		/*
		 * This page is now full. Move it to the full list
		 * and select a new current page.
		 */
		LIST_REMOVE(ph, ph_pagelist);
		LIST_INSERT_HEAD(&pp->pr_fullpages, ph, ph_pagelist);
		pool_update_curpage(pp);
	}

	/*
	 * If we have a low water mark and we are now below that low
	 * water mark, add more items to the pool.
	 */
	if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
		/*
		 * XXX: Should we log a warning? Should we set up a timeout
		 * to try again in a second or so? The latter could break
		 * a caller's assumptions about interrupt protection, etc.
		 */
	}
	return (v);
}

/*
 * Return resource to the pool; must be called at appropriate spl level
 */
void
pool_put(struct pool *pp, void *v)
{
	if (pp->pr_dtor)
		pp->pr_dtor(pp->pr_arg, v);
	mtx_enter(&pp->pr_mtx);
	pool_do_put(pp, v);
	mtx_leave(&pp->pr_mtx);
	pp->pr_nput++;
}

/*
 * Internal version of pool_put().
 */
void
pool_do_put(struct pool *pp, void *v)
{
	struct pool_item *pi = v;
	struct pool_item_header *ph;
#if defined(DIAGNOSTIC) && defined(POOL_DEBUG)
	int i, *ip;
#endif

	if (v == NULL)
		panic("pool_put of NULL");

#ifdef MALLOC_DEBUG
	if (pp->pr_roflags & PR_DEBUG) {
		debug_free(v, M_DEBUG);
		return;
	}
#endif

#ifdef DIAGNOSTIC
	if (pp->pr_ipl != -1)
		splassert(pp->pr_ipl);

	if (__predict_false(pp->pr_nout == 0)) {
		printf("pool %s: putting with none out\n",
		    pp->pr_wchan);
		panic("pool_do_put");
	}
#endif

	if (__predict_false((ph = pr_find_pagehead(pp, v)) == NULL)) {
		panic("pool_do_put: %s: page header missing", pp->pr_wchan);
	}

	/*
	 * Return to item list.
	 */
#ifdef DIAGNOSTIC
	pi->pi_magic = PI_MAGIC;
#ifdef POOL_DEBUG
	if (ph->ph_magic) {
		for (ip = (int *)pi, i = sizeof(*pi)/sizeof(int);
		    i < pp->pr_size / sizeof(int); i++)
			ip[i] = ph->ph_magic;
	}
#endif /* POOL_DEBUG */
#endif /* DIAGNOSTIC */

	TAILQ_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
	ph->ph_nmissing--;
	pp->pr_nitems++;
	pp->pr_nout--;

	/* Cancel "pool empty" condition if it exists */
	if (pp->pr_curpage == NULL)
		pp->pr_curpage = ph;

	if (pp->pr_flags & PR_WANTED) {
		pp->pr_flags &= ~PR_WANTED;
		if (ph->ph_nmissing == 0)
			pp->pr_nidle++;
		wakeup(pp);
		return;
	}

	/*
	 * If this page is now empty, do one of two things:
	 *
	 * (1) If we have more pages than the page high water mark,
	 *     free the page back to the system.
	 *
	 * (2) Otherwise, move the page to the empty page list.
	 *
	 * Either way, select a new current page (so we use a partially-full
	 * page if one is available).
	 */
	if (ph->ph_nmissing == 0) {
		pp->pr_nidle++;
		if (pp->pr_nidle > pp->pr_maxpages) {
			pr_rmpage(pp, ph, NULL);
		} else {
			LIST_REMOVE(ph, ph_pagelist);
			LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist);
		}
		pool_update_curpage(pp);
	}

	/*
	 * If the page was previously completely full, move it to the
	 * partially-full list and make it the current page. The next
	 * allocation will get the item from this page, instead of
	 * further fragmenting the pool.
	 */
	else if (ph->ph_nmissing == (pp->pr_itemsperpage - 1)) {
		LIST_REMOVE(ph, ph_pagelist);
		LIST_INSERT_HEAD(&pp->pr_partpages, ph, ph_pagelist);
		pp->pr_curpage = ph;
	}
}

/*
 * Add N items to the pool.
 */
int
pool_prime(struct pool *pp, int n)
{
	struct pool_item_header *ph;
	caddr_t cp;
	int newpages;
	int slowdown;

	mtx_enter(&pp->pr_mtx);
	newpages = roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;

	while (newpages-- > 0) {
		cp = pool_allocator_alloc(pp, PR_NOWAIT, &slowdown);
		if (__predict_true(cp != NULL))
			ph = pool_alloc_item_header(pp, cp, PR_NOWAIT);
		if (__predict_false(cp == NULL || ph == NULL)) {
			if (cp != NULL)
				pool_allocator_free(pp, cp);
			break;
		}

		pool_prime_page(pp, cp, ph);
		pp->pr_npagealloc++;
		pp->pr_minpages++;
	}

	if (pp->pr_minpages >= pp->pr_maxpages)
		pp->pr_maxpages = pp->pr_minpages + 1;	/* XXX */

	mtx_leave(&pp->pr_mtx);
	return (0);
}
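/*
 * Illustrative note: pool_prime(&foo_pool, 64) (with `foo_pool' as in
 * the sketch near the top of this file) preallocates enough whole pages
 * to hold 64 items.  Priming is best-effort: a page allocation failure
 * simply stops the loop, and the function returns 0 regardless.
 */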
/*
 * Add a page worth of items to the pool.
 *
 * Note, we must be called with the pool descriptor LOCKED.
 */
void
pool_prime_page(struct pool *pp, caddr_t storage, struct pool_item_header *ph)
{
	struct pool_item *pi;
	caddr_t cp = storage;
	unsigned int align = pp->pr_align;
	unsigned int ioff = pp->pr_itemoffset;
	int n;
#if defined(DIAGNOSTIC) && defined(POOL_DEBUG)
	int i, *ip;
#endif

	/*
	 * Insert page header.
	 */
	LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist);
	TAILQ_INIT(&ph->ph_itemlist);
	ph->ph_page = storage;
	ph->ph_pagesize = pp->pr_alloc->pa_pagesz;
	ph->ph_nmissing = 0;
	if ((pp->pr_roflags & PR_PHINPAGE) == 0)
		RB_INSERT(phtree, &pp->pr_phtree, ph);

	pp->pr_nidle++;

	/*
	 * Color this page.
	 */
	cp = (caddr_t)(cp + pp->pr_curcolor);
	if ((pp->pr_curcolor += align) > pp->pr_maxcolor)
		pp->pr_curcolor = 0;

	/*
	 * Adjust storage to apply alignment to `pr_itemoffset' in each item.
	 */
	if (ioff != 0)
		cp = (caddr_t)(cp + (align - ioff));
	ph->ph_colored = cp;

	/*
	 * Insert remaining chunks on the bucket list.
	 */
	n = pp->pr_itemsperpage;
	pp->pr_nitems += n;

	while (n--) {
		pi = (struct pool_item *)cp;

		KASSERT(((((vaddr_t)pi) + ioff) & (align - 1)) == 0);

		/* Insert on page list */
		TAILQ_INSERT_TAIL(&ph->ph_itemlist, pi, pi_list);

#ifdef DIAGNOSTIC
		pi->pi_magic = PI_MAGIC;
#ifdef POOL_DEBUG
		if (ph->ph_magic) {
			for (ip = (int *)pi, i = sizeof(*pi)/sizeof(int);
			    i < pp->pr_size / sizeof(int); i++)
				ip[i] = ph->ph_magic;
		}
#endif /* POOL_DEBUG */
#endif /* DIAGNOSTIC */
		cp = (caddr_t)(cp + pp->pr_size);
	}

	/*
	 * If the pool was depleted, point at the new page.
	 */
	if (pp->pr_curpage == NULL)
		pp->pr_curpage = ph;

	if (++pp->pr_npages > pp->pr_hiwat)
		pp->pr_hiwat = pp->pr_npages;
}

/*
 * Used by pool_get() when nitems drops below the low water mark. This
 * is used to catch up pr_nitems with the low water mark.
 *
 * Note we never wait for memory here, we let the caller decide what to do.
 */
int
pool_catchup(struct pool *pp)
{
	struct pool_item_header *ph;
	caddr_t cp;
	int error = 0;
	int slowdown;

	while (POOL_NEEDS_CATCHUP(pp)) {
		/*
		 * Call the page back-end allocator for more memory.
		 */
		cp = pool_allocator_alloc(pp, PR_NOWAIT, &slowdown);
		if (__predict_true(cp != NULL))
			ph = pool_alloc_item_header(pp, cp, PR_NOWAIT);
		if (__predict_false(cp == NULL || ph == NULL)) {
			if (cp != NULL)
				pool_allocator_free(pp, cp);
			error = ENOMEM;
			break;
		}
		pool_prime_page(pp, cp, ph);
		pp->pr_npagealloc++;
	}

	return (error);
}

void
pool_update_curpage(struct pool *pp)
{

	pp->pr_curpage = LIST_FIRST(&pp->pr_partpages);
	if (pp->pr_curpage == NULL) {
		pp->pr_curpage = LIST_FIRST(&pp->pr_emptypages);
	}
}

void
pool_setlowat(struct pool *pp, int n)
{

	pp->pr_minitems = n;
	pp->pr_minpages = (n == 0)
		? 0
		: roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;

	mtx_enter(&pp->pr_mtx);
	/* Make sure we're caught up with the newly-set low water mark. */
	if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
		/*
		 * XXX: Should we log a warning? Should we set up a timeout
		 * to try again in a second or so? The latter could break
		 * a caller's assumptions about interrupt protection, etc.
		 */
	}
	mtx_leave(&pp->pr_mtx);
}

void
pool_sethiwat(struct pool *pp, int n)
{

	pp->pr_maxpages = (n == 0)
		? 0
		: roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
}

int
pool_sethardlimit(struct pool *pp, u_int n, const char *warnmsg, int ratecap)
{
	int error = 0;

	if (n < pp->pr_nout) {
		error = EINVAL;
		goto done;
	}

	pp->pr_hardlimit = n;
	pp->pr_hardlimit_warning = warnmsg;
	pp->pr_hardlimit_ratecap.tv_sec = ratecap;
	pp->pr_hardlimit_warning_last.tv_sec = 0;
	pp->pr_hardlimit_warning_last.tv_usec = 0;

done:
	return (error);
}

void
pool_set_constraints(struct pool *pp, struct uvm_constraint_range *range,
    int nsegs)
{
	/*
	 * Subsequent changes to the constraints are only
	 * allowed to make them _more_ strict.
	 */
	KASSERT(pp->pr_crange->ucr_high >= range->ucr_high &&
	    pp->pr_crange->ucr_low <= range->ucr_low);

	pp->pr_crange = range;
	pp->pr_pa_nsegs = nsegs;
}

void
pool_set_ctordtor(struct pool *pp, int (*ctor)(void *, void *, int),
    void (*dtor)(void *, void *), void *arg)
{
	pp->pr_ctor = ctor;
	pp->pr_dtor = dtor;
	pp->pr_arg = arg;
}
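/*
 * Illustrative tuning sketch (the values and warning string are made up):
 *
 *	pool_setlowat(&foo_pool, 32);
 *	pool_sethiwat(&foo_pool, 1024);
 *	pool_sethardlimit(&foo_pool, 4096, "out of foo structures", 60);
 *
 * keeps at least 32 items primed, lets idle pages above the high water
 * mark be released, and fails allocations (or sleeps, for PR_WAITOK
 * callers without PR_LIMITFAIL) once 4096 items are outstanding,
 * logging the warning at most once a minute.
 */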
/*
 * Release all complete pages that have not been used recently.
 *
 * Returns non-zero if any pages have been reclaimed.
 */
int
pool_reclaim(struct pool *pp)
{
	struct pool_item_header *ph, *phnext;
	struct pool_pagelist pq;

	LIST_INIT(&pq);

	mtx_enter(&pp->pr_mtx);
	for (ph = LIST_FIRST(&pp->pr_emptypages); ph != NULL; ph = phnext) {
		phnext = LIST_NEXT(ph, ph_pagelist);

		/* Check our minimum page claim */
		if (pp->pr_npages <= pp->pr_minpages)
			break;

		KASSERT(ph->ph_nmissing == 0);

		/*
		 * If freeing this page would put us below
		 * the low water mark, stop now.
		 */
		if ((pp->pr_nitems - pp->pr_itemsperpage) <
		    pp->pr_minitems)
			break;

		pr_rmpage(pp, ph, &pq);
	}
	mtx_leave(&pp->pr_mtx);

	if (LIST_EMPTY(&pq))
		return (0);
	while ((ph = LIST_FIRST(&pq)) != NULL) {
		LIST_REMOVE(ph, ph_pagelist);
		pool_allocator_free(pp, ph->ph_page);
		if (pp->pr_roflags & PR_PHINPAGE)
			continue;
		pool_put(&phpool, ph);
	}

	return (1);
}

/*
 * Release all complete pages that have not been used recently
 * from all pools.
 */
void
pool_reclaim_all(void)
{
	struct pool	*pp;
	TAILQ_FOREACH(pp, &pool_head, pr_poollist)
		pool_reclaim(pp);
}

#ifdef DDB
#include <machine/db_machdep.h>
#include <ddb/db_interface.h>
#include <ddb/db_output.h>

/*
 * Diagnostic helpers.
 */
void
pool_printit(struct pool *pp, const char *modif, int (*pr)(const char *, ...))
{
	pool_print1(pp, modif, pr);
}

void
pool_print_pagelist(struct pool_pagelist *pl, int (*pr)(const char *, ...))
{
	struct pool_item_header *ph;
#ifdef DIAGNOSTIC
	struct pool_item *pi;
#endif

	LIST_FOREACH(ph, pl, ph_pagelist) {
		(*pr)("\t\tpage %p, nmissing %d\n",
		    ph->ph_page, ph->ph_nmissing);
#ifdef DIAGNOSTIC
		TAILQ_FOREACH(pi, &ph->ph_itemlist, pi_list) {
			if (pi->pi_magic != PI_MAGIC) {
				(*pr)("\t\t\titem %p, magic 0x%x\n",
				    pi, pi->pi_magic);
			}
		}
#endif
	}
}

void
pool_print1(struct pool *pp, const char *modif, int (*pr)(const char *, ...))
{
	struct pool_item_header *ph;
	int print_pagelist = 0;
	char c;

	/*
	 * Consume one modifier character per iteration; the previous
	 * version advanced `modif' twice per loop and could read past
	 * the terminating NUL.
	 */
	while ((c = *modif++) != '\0') {
		if (c == 'p')
			print_pagelist = 1;
	}

	(*pr)("POOL %s: size %u, align %u, ioff %u, roflags 0x%08x\n",
	    pp->pr_wchan, pp->pr_size, pp->pr_align, pp->pr_itemoffset,
	    pp->pr_roflags);
	(*pr)("\talloc %p\n", pp->pr_alloc);
	(*pr)("\tminitems %u, minpages %u, maxpages %u, npages %u\n",
	    pp->pr_minitems, pp->pr_minpages, pp->pr_maxpages, pp->pr_npages);
	(*pr)("\titemsperpage %u, nitems %u, nout %u, hardlimit %u\n",
	    pp->pr_itemsperpage, pp->pr_nitems, pp->pr_nout, pp->pr_hardlimit);

	(*pr)("\n\tnget %lu, nfail %lu, nput %lu\n",
	    pp->pr_nget, pp->pr_nfail, pp->pr_nput);
	(*pr)("\tnpagealloc %lu, npagefree %lu, hiwat %u, nidle %lu\n",
	    pp->pr_npagealloc, pp->pr_npagefree, pp->pr_hiwat, pp->pr_nidle);

	if (print_pagelist == 0)
		return;

	if ((ph = LIST_FIRST(&pp->pr_emptypages)) != NULL)
		(*pr)("\n\tempty page list:\n");
	pool_print_pagelist(&pp->pr_emptypages, pr);
	if ((ph = LIST_FIRST(&pp->pr_fullpages)) != NULL)
		(*pr)("\n\tfull page list:\n");
	pool_print_pagelist(&pp->pr_fullpages, pr);
	if ((ph = LIST_FIRST(&pp->pr_partpages)) != NULL)
		(*pr)("\n\tpartial-page list:\n");
	pool_print_pagelist(&pp->pr_partpages, pr);

	if (pp->pr_curpage == NULL)
		(*pr)("\tno current page\n");
	else
		(*pr)("\tcurpage %p\n", pp->pr_curpage->ph_page);
}

void
db_show_all_pools(db_expr_t expr, int haddr, db_expr_t count, char *modif)
{
	struct pool *pp;
	char maxp[16];
	int ovflw;
	char mode;

	mode = modif[0];
	if (mode != '\0' && mode != 'a') {
		db_printf("usage: show all pools [/a]\n");
		return;
	}

	if (mode == '\0')
		db_printf("%-10s%4s%9s%5s%9s%6s%6s%6s%6s%6s%6s%5s\n",
		    "Name",
		    "Size",
		    "Requests",
		    "Fail",
		    "Releases",
		    "Pgreq",
		    "Pgrel",
		    "Npage",
		    "Hiwat",
		    "Minpg",
		    "Maxpg",
		    "Idle");
	else
		db_printf("%-10s %18s %18s\n",
		    "Name", "Address", "Allocator");

	TAILQ_FOREACH(pp, &pool_head, pr_poollist) {
		if (mode == 'a') {
			db_printf("%-10s %18p %18p\n", pp->pr_wchan, pp,
			    pp->pr_alloc);
			continue;
		}

		if (!pp->pr_nget)
			continue;

		if (pp->pr_maxpages == UINT_MAX)
			snprintf(maxp, sizeof maxp, "inf");
		else
			snprintf(maxp, sizeof maxp, "%u", pp->pr_maxpages);

#define PRWORD(ovflw, fmt, width, fixed, val) do {	\
	(ovflw) += db_printf((fmt),			\
	    (width) - (fixed) - (ovflw) > 0 ?		\
	    (width) - (fixed) - (ovflw) : 0,		\
	    (val)) - (width);				\
	if ((ovflw) < 0)				\
		(ovflw) = 0;				\
} while (/* CONSTCOND */0)

		ovflw = 0;
		PRWORD(ovflw, "%-*s", 10, 0, pp->pr_wchan);
		PRWORD(ovflw, " %*u", 4, 1, pp->pr_size);
		PRWORD(ovflw, " %*lu", 9, 1, pp->pr_nget);
		PRWORD(ovflw, " %*lu", 5, 1, pp->pr_nfail);
		PRWORD(ovflw, " %*lu", 9, 1, pp->pr_nput);
		PRWORD(ovflw, " %*lu", 6, 1, pp->pr_npagealloc);
		PRWORD(ovflw, " %*lu", 6, 1, pp->pr_npagefree);
		PRWORD(ovflw, " %*d", 6, 1, pp->pr_npages);
		PRWORD(ovflw, " %*d", 6, 1, pp->pr_hiwat);
		PRWORD(ovflw, " %*d", 6, 1, pp->pr_minpages);
		PRWORD(ovflw, " %*s", 6, 1, maxp);
		PRWORD(ovflw, " %*lu\n", 5, 1, pp->pr_nidle);

		pool_chk(pp, pp->pr_wchan);
	}
}
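/*
 * From the ddb prompt, `show all pools' prints the statistics table
 * above, and `show all pools /a' prints each pool's address and
 * allocator instead; any other modifier produces the usage message.
 */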
int
pool_chk_page(struct pool *pp, const char *label, struct pool_item_header *ph)
{
	struct pool_item *pi;
	caddr_t page;
	int n;
#if defined(DIAGNOSTIC) && defined(POOL_DEBUG)
	int i, *ip;
#endif

	page = (caddr_t)((u_long)ph & pp->pr_alloc->pa_pagemask);
	if (page != ph->ph_page &&
	    (pp->pr_roflags & PR_PHINPAGE) != 0) {
		if (label != NULL)
			printf("%s: ", label);
		printf("pool(%p:%s): page inconsistency: page %p; "
		    "at page head addr %p (p %p)\n",
		    pp, pp->pr_wchan, ph->ph_page, ph, page);
		return 1;
	}

	for (pi = TAILQ_FIRST(&ph->ph_itemlist), n = 0;
	     pi != NULL;
	     pi = TAILQ_NEXT(pi, pi_list), n++) {

#ifdef DIAGNOSTIC
		if (pi->pi_magic != PI_MAGIC) {
			if (label != NULL)
				printf("%s: ", label);
			printf("pool(%s): free list modified: "
			    "page %p; item ordinal %d; addr %p "
			    "(p %p); offset 0x%x=0x%x\n",
			    pp->pr_wchan, ph->ph_page, n, pi, page,
			    0, pi->pi_magic);
		}
#ifdef POOL_DEBUG
		if (pool_debug && ph->ph_magic) {
			for (ip = (int *)pi, i = sizeof(*pi) / sizeof(int);
			    i < pp->pr_size / sizeof(int); i++) {
				if (ip[i] != ph->ph_magic) {
					printf("pool(%s): free list modified: "
					    "page %p; item ordinal %d; addr %p "
					    "(p %p); offset 0x%x=0x%x\n",
					    pp->pr_wchan, ph->ph_page, n, pi,
					    page, i * sizeof(int), ip[i]);
				}
			}
		}

#endif /* POOL_DEBUG */
#endif /* DIAGNOSTIC */
		page =
		    (caddr_t)((u_long)pi & pp->pr_alloc->pa_pagemask);
		if (page == ph->ph_page)
			continue;

		if (label != NULL)
			printf("%s: ", label);
		printf("pool(%p:%s): page inconsistency: page %p;"
		    " item ordinal %d; addr %p (p %p)\n", pp,
		    pp->pr_wchan, ph->ph_page, n, pi, page);
		return 1;
	}
	return 0;
}

int
pool_chk(struct pool *pp, const char *label)
{
	struct pool_item_header *ph;
	int r = 0;

	LIST_FOREACH(ph, &pp->pr_emptypages, ph_pagelist)
		r += pool_chk_page(pp, label, ph);
	LIST_FOREACH(ph, &pp->pr_fullpages, ph_pagelist)
		r += pool_chk_page(pp, label, ph);
	LIST_FOREACH(ph, &pp->pr_partpages, ph_pagelist)
		r += pool_chk_page(pp, label, ph);

	return (r);
}

void
pool_walk(struct pool *pp, int full, int (*pr)(const char *, ...),
    void (*func)(void *, int, int (*)(const char *, ...)))
{
	struct pool_item_header *ph;
	struct pool_item *pi;
	caddr_t cp;
	int n;

	LIST_FOREACH(ph, &pp->pr_fullpages, ph_pagelist) {
		cp = ph->ph_colored;
		n = ph->ph_nmissing;

		while (n--) {
			func(cp, full, pr);
			cp += pp->pr_size;
		}
	}

	LIST_FOREACH(ph, &pp->pr_partpages, ph_pagelist) {
		cp = ph->ph_colored;
		n = ph->ph_nmissing;

		do {
			TAILQ_FOREACH(pi, &ph->ph_itemlist, pi_list) {
				if (cp == (caddr_t)pi)
					break;
			}
			if (cp != (caddr_t)pi) {
				func(cp, full, pr);
				n--;
			}

			cp += pp->pr_size;
		} while (n > 0);
	}
}
#endif

/*
 * We have three different sysctls.
 * kern.pool.npools - the number of pools.
 * kern.pool.pool.<pool#> - the pool struct for the pool#.
 * kern.pool.name.<pool#> - the name for pool#.
 */
int
sysctl_dopool(int *name, u_int namelen, char *where, size_t *sizep)
{
	struct pool *pp, *foundpool = NULL;
	size_t buflen = where != NULL ? *sizep : 0;
	int npools = 0, s;
	unsigned int lookfor;
	size_t len;

	switch (*name) {
	case KERN_POOL_NPOOLS:
		if (namelen != 1 || buflen != sizeof(int))
			return (EINVAL);
		lookfor = 0;
		break;
	case KERN_POOL_NAME:
		if (namelen != 2 || buflen < 1)
			return (EINVAL);
		lookfor = name[1];
		break;
	case KERN_POOL_POOL:
		if (namelen != 2 || buflen != sizeof(struct pool))
			return (EINVAL);
		lookfor = name[1];
		break;
	default:
		return (EINVAL);
	}

	s = splvm();

	TAILQ_FOREACH(pp, &pool_head, pr_poollist) {
		npools++;
		if (lookfor == pp->pr_serial) {
			foundpool = pp;
			break;
		}
	}

	splx(s);

	if (*name != KERN_POOL_NPOOLS && foundpool == NULL)
		return (ENOENT);

	switch (*name) {
	case KERN_POOL_NPOOLS:
		return copyout(&npools, where, buflen);
	case KERN_POOL_NAME:
		len = strlen(foundpool->pr_wchan) + 1;
		if (*sizep < len)
			return (ENOMEM);
		*sizep = len;
		return copyout(foundpool->pr_wchan, where, len);
	case KERN_POOL_POOL:
		return copyout(foundpool, where, buflen);
	}
	/* NOTREACHED */
	return (0); /* XXX - Stupid gcc */
}
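/*
 * Illustrative userland sketch (assumes the standard KERN_POOL MIB
 * node; error handling omitted): reading kern.pool.npools via
 * sysctl(3) looks like
 *
 *	int mib[3] = { CTL_KERN, KERN_POOL, KERN_POOL_NPOOLS };
 *	int npools;
 *	size_t len = sizeof(npools);
 *	sysctl(mib, 3, &npools, &len, NULL, 0);
 *
 * and kern.pool.name.<pool#> adds the pool's serial number as a
 * fourth MIB element.
 */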

/*
 * Pool backend allocators.
 *
 * Each pool has a backend allocator that handles allocation and
 * deallocation of pool pages.
 */
void	*pool_page_alloc(struct pool *, int, int *);
void	pool_page_free(struct pool *, void *);

/*
 * Safe for interrupts; the name is preserved for compatibility.
 * This is the default allocator.
 */
struct pool_allocator pool_allocator_nointr = {
	pool_page_alloc, pool_page_free, 0,
};

/*
 * XXX - we have at least three different resources for the same allocation
 * and each resource can be depleted. First we have the ready elements in
 * the pool. Then we have the resource (typically a vm_map) for this
 * allocator, then we have physical memory. Waiting for any of these can
 * be unnecessary when any other is freed, but the kernel doesn't support
 * sleeping on multiple addresses, so we have to fake. The caller sleeps on
 * the pool (so that we can be awakened when an item is returned to the pool),
 * but we set PA_WANT on the allocator. When a page is returned to
 * the allocator and PA_WANT is set pool_allocator_free will wakeup all
 * sleeping pools belonging to this allocator. (XXX - thundering herd).
 * We also wake up the allocator in case someone without a pool (malloc)
 * is sleeping waiting for this allocator.
 */

void *
pool_allocator_alloc(struct pool *pp, int flags, int *slowdown)
{
	boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;
	void *v;

	if (waitok)
		mtx_leave(&pp->pr_mtx);
	v = pp->pr_alloc->pa_alloc(pp, flags, slowdown);
	if (waitok)
		mtx_enter(&pp->pr_mtx);

	return (v);
}

void
pool_allocator_free(struct pool *pp, void *v)
{
	struct pool_allocator *pa = pp->pr_alloc;

	(*pa->pa_free)(pp, v);
}

void *
pool_page_alloc(struct pool *pp, int flags, int *slowdown)
{
	int kfl = (flags & PR_WAITOK) ? 0 : UVM_KMF_NOWAIT;

	return (uvm_km_getpage_pla(kfl, slowdown, pp->pr_crange->ucr_low,
	    pp->pr_crange->ucr_high, 0, 0));
}

void
pool_page_free(struct pool *pp, void *v)
{
	uvm_km_putpage(v);
}

void *
pool_large_alloc(struct pool *pp, int flags, int *slowdown)
{
	int kfl = (flags & PR_WAITOK) ? 0 : UVM_KMF_NOWAIT;
	vaddr_t va;
	int s;

	s = splvm();
	va = uvm_km_kmemalloc_pla(kmem_map, NULL, pp->pr_alloc->pa_pagesz, 0,
	    kfl, pp->pr_crange->ucr_low, pp->pr_crange->ucr_high,
	    0, 0, pp->pr_pa_nsegs);
	splx(s);

	return ((void *)va);
}

void
pool_large_free(struct pool *pp, void *v)
{
	int s;

	s = splvm();
	uvm_km_free(kmem_map, (vaddr_t)v, pp->pr_alloc->pa_pagesz);
	splx(s);
}

void *
pool_large_alloc_ni(struct pool *pp, int flags, int *slowdown)
{
	int kfl = (flags & PR_WAITOK) ? 0 : UVM_KMF_NOWAIT;

	return ((void *)uvm_km_kmemalloc_pla(kernel_map, uvm.kernel_object,
	    pp->pr_alloc->pa_pagesz, 0, kfl,
	    pp->pr_crange->ucr_low, pp->pr_crange->ucr_high,
	    0, 0, pp->pr_pa_nsegs));
}

void
pool_large_free_ni(struct pool *pp, void *v)
{
	uvm_km_free(kernel_map, (vaddr_t)v, pp->pr_alloc->pa_pagesz);
}
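/*
 * Illustrative sketch of a custom backend (names are hypothetical):
 * a subsystem that wants its pages drawn from its own source can
 * define
 *
 *	void *foo_page_alloc(struct pool *, int, int *);
 *	void foo_page_free(struct pool *, void *);
 *
 *	struct pool_allocator foo_allocator = {
 *		foo_page_alloc, foo_page_free, 0,
 *	};
 *
 * and pass &foo_allocator to pool_init(); a pa_pagesz of 0 makes
 * pool_init() default the page size to PAGE_SIZE.
 */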