/*	$OpenBSD: subr_pool.c,v 1.91 2010/01/16 03:08:00 tedu Exp $	*/
/*	$NetBSD: subr_pool.c,v 1.61 2001/09/26 07:14:56 chs Exp $	*/

/*-
 * Copyright (c) 1997, 1999, 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Paul Kranenburg; by Jason R. Thorpe of the Numerical Aerospace
 * Simulation Facility, NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/syslog.h>
#include <sys/sysctl.h>

#include <uvm/uvm.h>

/*
 * Pool resource management utility.
 *
 * Memory is allocated in pages which are split into pieces according to
 * the pool item size. Each page is kept on one of three lists in the
 * pool structure: `pr_emptypages', `pr_fullpages' and `pr_partpages',
 * for empty, full and partially-full pages respectively. The individual
 * pool items are on a linked list headed by `ph_itemlist' in each page
 * header. The memory for building the page list is either taken from
 * the allocated pages themselves (for small pool items) or taken from
 * an internal pool of page headers (`phpool').
 */
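/*
 * Example usage (illustrative only; `struct foo', `foo_pool' and the
 * pool name are hypothetical). A subsystem typically declares one pool
 * per object type and allocates items from it instead of calling
 * malloc(9) directly:
 *
 *	struct pool foo_pool;
 *
 *	pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0, "foopl", NULL);
 *
 *	struct foo *f = pool_get(&foo_pool, PR_WAITOK);
 *	...
 *	pool_put(&foo_pool, f);
 */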
/* List of all pools */
TAILQ_HEAD(,pool) pool_head = TAILQ_HEAD_INITIALIZER(pool_head);

/* Private pool for page header structures */
struct pool phpool;

struct pool_item_header {
	/* Page headers */
	LIST_ENTRY(pool_item_header)
				ph_pagelist;	/* pool page list */
	TAILQ_HEAD(,pool_item)	ph_itemlist;	/* chunk list for this page */
	RB_ENTRY(pool_item_header)
				ph_node;	/* Off-page page headers */
	int			ph_nmissing;	/* # of chunks in use */
	caddr_t			ph_page;	/* this page's address */
	caddr_t			ph_colored;	/* page's colored address */
	int			ph_pagesize;
};

struct pool_item {
#ifdef DIAGNOSTIC
	u_int32_t pi_magic;
#endif
	/* Other entries use only this list entry */
	TAILQ_ENTRY(pool_item)	pi_list;
};

#ifdef DEADBEEF1
#define	PI_MAGIC DEADBEEF1
#else
#define	PI_MAGIC 0xdeadbeef
#endif

#define	POOL_NEEDS_CATCHUP(pp)						\
	((pp)->pr_nitems < (pp)->pr_minitems)

/*
 * Every pool gets a unique serial number assigned to it. If this counter
 * wraps, we're screwed, but we shouldn't create so many pools anyway.
 */
unsigned int pool_serial;

int	 pool_catchup(struct pool *);
void	 pool_prime_page(struct pool *, caddr_t, struct pool_item_header *);
void	 pool_update_curpage(struct pool *);
void	*pool_do_get(struct pool *, int);
void	 pool_do_put(struct pool *, void *);
void	 pr_rmpage(struct pool *, struct pool_item_header *,
	    struct pool_pagelist *);
int	 pool_chk_page(struct pool *, const char *, struct pool_item_header *);
struct pool_item_header *pool_alloc_item_header(struct pool *, caddr_t, int);

void	*pool_allocator_alloc(struct pool *, int, int *);
void	 pool_allocator_free(struct pool *, void *);

/*
 * XXX - quick hack. For pools with large items we want to use a special
 *       allocator. For now, instead of having the allocator figure out
 *       the allocation size from the pool (which can be done trivially
 *       with round_page(pr_itemsperpage * pr_size)) which would require
 *       lots of changes everywhere, we just create allocators for each
 *       size. We limit those to 128 pages.
 */
#define	POOL_LARGE_MAXPAGES 128
struct pool_allocator pool_allocator_large[POOL_LARGE_MAXPAGES];
struct pool_allocator pool_allocator_large_ni[POOL_LARGE_MAXPAGES];
void	*pool_large_alloc(struct pool *, int, int *);
void	 pool_large_free(struct pool *, void *);
void	*pool_large_alloc_ni(struct pool *, int, int *);
void	 pool_large_free_ni(struct pool *, void *);

#ifdef DDB
void	 pool_print_pagelist(struct pool_pagelist *,
	    int (*)(const char *, ...));
void	 pool_print1(struct pool *, const char *, int (*)(const char *, ...));
#endif

#define	pool_sleep(pl) msleep(pl, &pl->pr_mtx, PSWP, pl->pr_wchan, 0)

static __inline int
phtree_compare(struct pool_item_header *a, struct pool_item_header *b)
{
	long diff = (vaddr_t)a->ph_page - (vaddr_t)b->ph_page;
	if (diff < 0)
		return -(-diff >= a->ph_pagesize);
	else if (diff > 0)
		return (diff >= b->ph_pagesize);
	else
		return (0);
}

RB_PROTOTYPE(phtree, pool_item_header, ph_node, phtree_compare);
RB_GENERATE(phtree, pool_item_header, ph_node, phtree_compare);

/*
 * Return the pool page header based on page address.
 */
static __inline struct pool_item_header *
pr_find_pagehead(struct pool *pp, void *v)
{
	struct pool_item_header *ph, tmp;

	if ((pp->pr_roflags & PR_PHINPAGE) != 0) {
		caddr_t page;

		page = (caddr_t)((vaddr_t)v & pp->pr_alloc->pa_pagemask);

		return ((struct pool_item_header *)(page + pp->pr_phoffset));
	}

	/*
	 * The trick we're using in the tree compare function is to compare
	 * two elements as equal when they overlap. We want to return the
	 * page header that belongs to the element just before this address.
	 * We don't want this element to compare equal to the next element,
	 * so the compare function takes the pagesize from the lower element.
	 * If this header is the lower, its pagesize is zero, so it can't
	 * overlap with the next header. But if the header we're looking for
	 * is lower, we'll use its pagesize and it will overlap and return
	 * equal.
	 */
	tmp.ph_page = v;
	tmp.ph_pagesize = 0;
	ph = RB_FIND(phtree, &pp->pr_phtree, &tmp);

	if (ph) {
		KASSERT(ph->ph_page <= (caddr_t)v);
		KASSERT(ph->ph_page + ph->ph_pagesize > (caddr_t)v);
	}
	return ph;
}
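/*
 * A worked example of the compare trick above (hypothetical addresses):
 * suppose a header in the tree has ph_page == 0x1000 and
 * ph_pagesize == 0x1000, and we look up the item address v == 0x1830.
 * The search key is { ph_page = 0x1830, ph_pagesize = 0 }, so the
 * compare computes diff = 0x1830 - 0x1000 = 0x830; since 0x830 is less
 * than the header's pagesize the two "overlap", compare equal, and
 * RB_FIND() returns that header. Had v been 0x2010, diff would be
 * 0x1010 >= 0x1000 and the search would continue to the right; and
 * because the key's own pagesize is zero, it can never falsely overlap
 * a header that starts above it.
 */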
/*
 * Remove a page from the pool.
 */
void
pr_rmpage(struct pool *pp, struct pool_item_header *ph,
    struct pool_pagelist *pq)
{
	/*
	 * If the page was idle, decrement the idle page count.
	 */
	if (ph->ph_nmissing == 0) {
#ifdef DIAGNOSTIC
		if (pp->pr_nidle == 0)
			panic("pr_rmpage: nidle inconsistent");
		if (pp->pr_nitems < pp->pr_itemsperpage)
			panic("pr_rmpage: nitems inconsistent");
#endif
		pp->pr_nidle--;
	}

	pp->pr_nitems -= pp->pr_itemsperpage;

	/*
	 * Unlink a page from the pool and release it (or queue it for
	 * release).
	 */
	LIST_REMOVE(ph, ph_pagelist);
	if ((pp->pr_roflags & PR_PHINPAGE) == 0)
		RB_REMOVE(phtree, &pp->pr_phtree, ph);
	if (pq) {
		LIST_INSERT_HEAD(pq, ph, ph_pagelist);
	} else {
		pool_allocator_free(pp, ph->ph_page);
		if ((pp->pr_roflags & PR_PHINPAGE) == 0)
			pool_put(&phpool, ph);
	}
	pp->pr_npages--;
	pp->pr_npagefree++;

	pool_update_curpage(pp);
}
/*
 * Initialize the given pool resource structure.
 *
 * We export this routine to allow other kernel parts to declare
 * static pools that must be initialized before malloc() is available.
 */
void
pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags,
    const char *wchan, struct pool_allocator *palloc)
{
	int off, slack;

#ifdef MALLOC_DEBUG
	if ((flags & PR_DEBUG) && (ioff != 0 || align != 0))
		flags &= ~PR_DEBUG;
#endif
	/*
	 * Check arguments and construct default values.
	 */
	if (palloc == NULL) {
		if (size > PAGE_SIZE) {
			int psize;

			/*
			 * XXX - should take align into account as well.
			 */
			if (size == round_page(size))
				psize = size / PAGE_SIZE;
			else
				psize = PAGE_SIZE / roundup(size % PAGE_SIZE,
				    1024);
			if (psize > POOL_LARGE_MAXPAGES)
				psize = POOL_LARGE_MAXPAGES;
			if (flags & PR_WAITOK)
				palloc = &pool_allocator_large_ni[psize-1];
			else
				palloc = &pool_allocator_large[psize-1];
			if (palloc->pa_pagesz == 0) {
				palloc->pa_pagesz = psize * PAGE_SIZE;
				if (flags & PR_WAITOK) {
					palloc->pa_alloc = pool_large_alloc_ni;
					palloc->pa_free = pool_large_free_ni;
				} else {
					palloc->pa_alloc = pool_large_alloc;
					palloc->pa_free = pool_large_free;
				}
			}
		} else {
			palloc = &pool_allocator_nointr;
		}
	}
	if (palloc->pa_pagesz == 0) {
		palloc->pa_pagesz = PAGE_SIZE;
	}
	if (palloc->pa_pagemask == 0) {
		palloc->pa_pagemask = ~(palloc->pa_pagesz - 1);
		palloc->pa_pageshift = ffs(palloc->pa_pagesz) - 1;
	}

	if (align == 0)
		align = ALIGN(1);

	if (size < sizeof(struct pool_item))
		size = sizeof(struct pool_item);

	size = roundup(size, align);
#ifdef DIAGNOSTIC
	if (size > palloc->pa_pagesz)
		panic("pool_init: pool item size (%lu) too large",
		    (u_long)size);
#endif

	/*
	 * Initialize the pool structure.
	 */
	LIST_INIT(&pp->pr_emptypages);
	LIST_INIT(&pp->pr_fullpages);
	LIST_INIT(&pp->pr_partpages);
	pp->pr_curpage = NULL;
	pp->pr_npages = 0;
	pp->pr_minitems = 0;
	pp->pr_minpages = 0;
	pp->pr_maxpages = 8;
	pp->pr_roflags = flags;
	pp->pr_flags = 0;
	pp->pr_size = size;
	pp->pr_align = align;
	pp->pr_wchan = wchan;
	pp->pr_alloc = palloc;
	pp->pr_nitems = 0;
	pp->pr_nout = 0;
	pp->pr_hardlimit = UINT_MAX;
	pp->pr_hardlimit_warning = NULL;
	pp->pr_hardlimit_ratecap.tv_sec = 0;
	pp->pr_hardlimit_ratecap.tv_usec = 0;
	pp->pr_hardlimit_warning_last.tv_sec = 0;
	pp->pr_hardlimit_warning_last.tv_usec = 0;
	pp->pr_serial = ++pool_serial;
	if (pool_serial == 0)
		panic("pool_init: too much uptime");

	/* constructor, destructor, and arg */
	pp->pr_ctor = NULL;
	pp->pr_dtor = NULL;
	pp->pr_arg = NULL;

	/*
	 * Decide whether to put the page header off page to avoid
	 * wasting too large a part of the page. Off-page page headers
	 * go into an RB tree, so we can match a returned item with
	 * its header based on the page address.
	 * We use 1/16 of the page size as the threshold (XXX: tune)
	 */
	if (pp->pr_size < palloc->pa_pagesz/16 && pp->pr_size < PAGE_SIZE) {
		/* Use the end of the page for the page header */
		pp->pr_roflags |= PR_PHINPAGE;
		pp->pr_phoffset = off = palloc->pa_pagesz -
		    ALIGN(sizeof(struct pool_item_header));
	} else {
		/* The page header will be taken from our page header pool */
		pp->pr_phoffset = 0;
		off = palloc->pa_pagesz;
		RB_INIT(&pp->pr_phtree);
	}

	/*
	 * Alignment is to take place at `ioff' within the item. This means
	 * we must reserve up to `align - 1' bytes on the page to allow
	 * appropriate positioning of each item.
	 *
	 * Silently enforce `0 <= ioff < align'.
	 */
	pp->pr_itemoffset = ioff = ioff % align;
	pp->pr_itemsperpage = (off - ((align - ioff) % align)) / pp->pr_size;
	KASSERT(pp->pr_itemsperpage != 0);

	/*
	 * Use the slack between the chunks and the page header
	 * for "cache coloring".
	 */
	slack = off - pp->pr_itemsperpage * pp->pr_size;
	pp->pr_maxcolor = (slack / align) * align;
	pp->pr_curcolor = 0;

	pp->pr_nget = 0;
	pp->pr_nfail = 0;
	pp->pr_nput = 0;
	pp->pr_npagealloc = 0;
	pp->pr_npagefree = 0;
	pp->pr_hiwat = 0;
	pp->pr_nidle = 0;

	pp->pr_ipl = -1;
	mtx_init(&pp->pr_mtx, IPL_NONE);

	if (phpool.pr_size == 0) {
		pool_init(&phpool, sizeof(struct pool_item_header), 0, 0,
		    0, "phpool", NULL);
		pool_setipl(&phpool, IPL_HIGH);
	}

	/* Insert this into the list of all pools. */
	TAILQ_INSERT_HEAD(&pool_head, pp, pr_poollist);
}
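/*
 * A worked example of the geometry computed above (hypothetical
 * numbers, assuming a 4096-byte page, ALIGN(1) == 8 and a page header
 * that rounds to 40 bytes): a pool with pr_size 96 keeps its header
 * in-page, so off = 4096 - 40 = 4056 and pr_itemsperpage =
 * 4056 / 96 = 42. The slack is 4056 - 42 * 96 = 24 bytes, giving
 * pr_maxcolor = 24, so successive pages start their items at offsets
 * 0, 8, 16, 24, 0, ... This "cache coloring" spreads items on
 * different pages across different cache lines.
 */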
void
pool_setipl(struct pool *pp, int ipl)
{
	pp->pr_ipl = ipl;
	mtx_init(&pp->pr_mtx, ipl);
}

/*
 * Decommission a pool resource.
 */
void
pool_destroy(struct pool *pp)
{
	struct pool_item_header *ph;

#ifdef DIAGNOSTIC
	if (pp->pr_nout != 0)
		panic("pool_destroy: pool busy: still out: %u", pp->pr_nout);
#endif

	/* Remove all pages */
	while ((ph = LIST_FIRST(&pp->pr_emptypages)) != NULL)
		pr_rmpage(pp, ph, NULL);
	KASSERT(LIST_EMPTY(&pp->pr_fullpages));
	KASSERT(LIST_EMPTY(&pp->pr_partpages));

	/* Remove from global pool list */
	TAILQ_REMOVE(&pool_head, pp, pr_poollist);
}

struct pool_item_header *
pool_alloc_item_header(struct pool *pp, caddr_t storage, int flags)
{
	struct pool_item_header *ph;

	if ((pp->pr_roflags & PR_PHINPAGE) != 0)
		ph = (struct pool_item_header *)(storage + pp->pr_phoffset);
	else
		ph = pool_get(&phpool, flags & ~(PR_WAITOK | PR_ZERO));

	return (ph);
}

/*
 * Grab an item from the pool; must be called at appropriate spl level
 */
void *
pool_get(struct pool *pp, int flags)
{
	void *v;

#ifdef DIAGNOSTIC
	if ((flags & PR_WAITOK) != 0)
		splassert(IPL_NONE);
#endif /* DIAGNOSTIC */

	mtx_enter(&pp->pr_mtx);
	v = pool_do_get(pp, flags);
	mtx_leave(&pp->pr_mtx);
	if (v == NULL)
		return (v);

	if (pp->pr_ctor) {
		if (flags & PR_ZERO)
			panic("pool_get: PR_ZERO when ctor set");
		if (pp->pr_ctor(pp->pr_arg, v, flags)) {
			mtx_enter(&pp->pr_mtx);
			pool_do_put(pp, v);
			mtx_leave(&pp->pr_mtx);
			v = NULL;
		}
	} else {
		if (flags & PR_ZERO)
			memset(v, 0, pp->pr_size);
	}
	if (v != NULL)
		pp->pr_nget++;
	return (v);
}
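/*
 * Illustrative pool_get() flag usage (pool and type hypothetical).
 * From process context, PR_WAITOK may sleep until an item is
 * available; from interrupt context, PR_NOWAIT returns NULL instead of
 * sleeping; PR_ZERO additionally clears the item and, as enforced
 * above, may not be combined with a constructor:
 *
 *	f = pool_get(&foo_pool, PR_WAITOK);
 *	f = pool_get(&foo_pool, PR_NOWAIT);
 *	f = pool_get(&foo_pool, PR_WAITOK | PR_ZERO);
 */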
void *
pool_do_get(struct pool *pp, int flags)
{
	struct pool_item *pi;
	struct pool_item_header *ph;
	void *v;
	int slowdown = 0;
#if defined(DIAGNOSTIC) && defined(POOL_DEBUG)
	int i, *ip;
#endif

#ifdef MALLOC_DEBUG
	if (pp->pr_roflags & PR_DEBUG) {
		void *addr;

		addr = NULL;
		debug_malloc(pp->pr_size, M_DEBUG,
		    (flags & PR_WAITOK) ? M_WAITOK : M_NOWAIT, &addr);
		return (addr);
	}
#endif

startover:
	/*
	 * Check to see if we've reached the hard limit. If we have,
	 * and we can wait, then wait until an item has been returned to
	 * the pool.
	 */
#ifdef DIAGNOSTIC
	if (__predict_false(pp->pr_nout > pp->pr_hardlimit))
		panic("pool_do_get: %s: crossed hard limit", pp->pr_wchan);
#endif
	if (__predict_false(pp->pr_nout == pp->pr_hardlimit)) {
		if ((flags & PR_WAITOK) && !(flags & PR_LIMITFAIL)) {
			/*
			 * XXX: A warning isn't logged in this case. Should
			 * it be?
			 */
			pp->pr_flags |= PR_WANTED;
			pool_sleep(pp);
			goto startover;
		}

		/*
		 * Log a message that the hard limit has been hit.
		 */
		if (pp->pr_hardlimit_warning != NULL &&
		    ratecheck(&pp->pr_hardlimit_warning_last,
		    &pp->pr_hardlimit_ratecap))
			log(LOG_ERR, "%s\n", pp->pr_hardlimit_warning);

		pp->pr_nfail++;
		return (NULL);
	}

	/*
	 * The convention we use is that if `curpage' is not NULL, then
	 * it points at a non-empty bucket. In particular, `curpage'
	 * never points at a page header which has PR_PHINPAGE set and
	 * has no items in its bucket.
	 */
	if ((ph = pp->pr_curpage) == NULL) {
#ifdef DIAGNOSTIC
		if (pp->pr_nitems != 0) {
			printf("pool_do_get: %s: curpage NULL, nitems %u\n",
			    pp->pr_wchan, pp->pr_nitems);
			panic("pool_do_get: nitems inconsistent");
		}
#endif

		/*
		 * Call the back-end page allocator for more memory.
		 */
		v = pool_allocator_alloc(pp, flags, &slowdown);
		if (__predict_true(v != NULL))
			ph = pool_alloc_item_header(pp, v, flags);

		if (__predict_false(v == NULL || ph == NULL)) {
			if (v != NULL)
				pool_allocator_free(pp, v);

			if ((flags & PR_WAITOK) == 0) {
				pp->pr_nfail++;
				return (NULL);
			}

			/*
			 * Wait for items to be returned to this pool.
			 *
			 * XXX: maybe we should wake up once a second and
			 * try again?
			 */
			pp->pr_flags |= PR_WANTED;
			pool_sleep(pp);
			goto startover;
		}

		/* We have more memory; add it to the pool */
		pool_prime_page(pp, v, ph);
		pp->pr_npagealloc++;

		if (slowdown && (flags & PR_WAITOK)) {
			mtx_leave(&pp->pr_mtx);
			yield();
			mtx_enter(&pp->pr_mtx);
		}

		/* Start the allocation process over. */
		goto startover;
	}
	if (__predict_false((v = pi = TAILQ_FIRST(&ph->ph_itemlist)) == NULL)) {
		panic("pool_do_get: %s: page empty", pp->pr_wchan);
	}
#ifdef DIAGNOSTIC
	if (__predict_false(pp->pr_nitems == 0)) {
		printf("pool_do_get: %s: items on itemlist, nitems %u\n",
		    pp->pr_wchan, pp->pr_nitems);
		panic("pool_do_get: nitems inconsistent");
	}
#endif

#ifdef DIAGNOSTIC
	if (__predict_false(pi->pi_magic != PI_MAGIC))
		panic("pool_do_get(%s): free list modified: "
		    "page %p; item addr %p; offset 0x%x=0x%x",
		    pp->pr_wchan, ph->ph_page, pi, 0, pi->pi_magic);
#ifdef POOL_DEBUG
	for (ip = (int *)pi, i = sizeof(*pi) / sizeof(int);
	    i < pp->pr_size / sizeof(int); i++) {
		if (ip[i] != PI_MAGIC) {
			panic("pool_do_get(%s): free list modified: "
			    "page %p; item addr %p; offset 0x%x=0x%x",
			    pp->pr_wchan, ph->ph_page, pi,
			    i * sizeof(int), ip[i]);
		}
	}
#endif /* POOL_DEBUG */
#endif /* DIAGNOSTIC */

	/*
	 * Remove from item list.
	 */
	TAILQ_REMOVE(&ph->ph_itemlist, pi, pi_list);
	pp->pr_nitems--;
	pp->pr_nout++;
	if (ph->ph_nmissing == 0) {
#ifdef DIAGNOSTIC
		if (__predict_false(pp->pr_nidle == 0))
			panic("pool_do_get: nidle inconsistent");
#endif
		pp->pr_nidle--;

		/*
		 * This page was previously empty. Move it to the list of
		 * partially-full pages. This page is already curpage.
		 */
		LIST_REMOVE(ph, ph_pagelist);
		LIST_INSERT_HEAD(&pp->pr_partpages, ph, ph_pagelist);
	}
	ph->ph_nmissing++;
	if (TAILQ_EMPTY(&ph->ph_itemlist)) {
#ifdef DIAGNOSTIC
		if (__predict_false(ph->ph_nmissing != pp->pr_itemsperpage)) {
			panic("pool_do_get: %s: nmissing inconsistent",
			    pp->pr_wchan);
		}
#endif
		/*
		 * This page is now full. Move it to the full list
		 * and select a new current page.
		 */
		LIST_REMOVE(ph, ph_pagelist);
		LIST_INSERT_HEAD(&pp->pr_fullpages, ph, ph_pagelist);
		pool_update_curpage(pp);
	}

	/*
	 * If we have a low water mark and we are now below that low
	 * water mark, add more items to the pool.
	 */
	if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
		/*
		 * XXX: Should we log a warning? Should we set up a timeout
		 * to try again in a second or so? The latter could break
		 * a caller's assumptions about interrupt protection, etc.
		 */
	}
	return (v);
}
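/*
 * Illustration of the free-list poisoning checked in pool_do_get()
 * above (hypothetical 32-byte item, 32-bit ints): pool_do_put() below
 * stamps pi_magic with PI_MAGIC and, under POOL_DEBUG, also fills
 * every int of the item beyond struct pool_item with PI_MAGIC, so a
 * free item looks like
 *
 *	0xdeadbeef | tqe_next | tqe_prev | 0xdeadbeef ... 0xdeadbeef
 *
 * pool_do_get() re-verifies this pattern before handing the item out;
 * a mismatch means something wrote to the item after it was freed,
 * and the panic reports the offending offset and the value found
 * there.
 */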
/*
 * Return resource to the pool; must be called at appropriate spl level
 */
void
pool_put(struct pool *pp, void *v)
{
	if (pp->pr_dtor)
		pp->pr_dtor(pp->pr_arg, v);
	mtx_enter(&pp->pr_mtx);
	pool_do_put(pp, v);
	mtx_leave(&pp->pr_mtx);
	pp->pr_nput++;
}

/*
 * Internal version of pool_put().
 */
void
pool_do_put(struct pool *pp, void *v)
{
	struct pool_item *pi = v;
	struct pool_item_header *ph;
#if defined(DIAGNOSTIC) && defined(POOL_DEBUG)
	int i, *ip;
#endif

	if (v == NULL)
		panic("pool_put of NULL");

#ifdef MALLOC_DEBUG
	if (pp->pr_roflags & PR_DEBUG) {
		debug_free(v, M_DEBUG);
		return;
	}
#endif

#ifdef DIAGNOSTIC
	if (pp->pr_ipl != -1)
		splassert(pp->pr_ipl);

	if (__predict_false(pp->pr_nout == 0)) {
		printf("pool %s: putting with none out\n",
		    pp->pr_wchan);
		panic("pool_do_put");
	}
#endif

	if (__predict_false((ph = pr_find_pagehead(pp, v)) == NULL)) {
		panic("pool_do_put: %s: page header missing", pp->pr_wchan);
	}

	/*
	 * Return to item list.
	 */
#ifdef DIAGNOSTIC
	pi->pi_magic = PI_MAGIC;
#ifdef POOL_DEBUG
	for (ip = (int *)pi, i = sizeof(*pi)/sizeof(int);
	    i < pp->pr_size / sizeof(int); i++)
		ip[i] = PI_MAGIC;
#endif /* POOL_DEBUG */
#endif /* DIAGNOSTIC */

	TAILQ_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
	ph->ph_nmissing--;
	pp->pr_nitems++;
	pp->pr_nout--;

	/* Cancel "pool empty" condition if it exists */
	if (pp->pr_curpage == NULL)
		pp->pr_curpage = ph;

	if (pp->pr_flags & PR_WANTED) {
		pp->pr_flags &= ~PR_WANTED;
		if (ph->ph_nmissing == 0)
			pp->pr_nidle++;
		wakeup(pp);
		return;
	}

	/*
	 * If this page is now empty, do one of two things:
	 *
	 * (1) If we have more pages than the page high water mark,
	 *     free the page back to the system.
	 *
	 * (2) Otherwise, move the page to the empty page list.
	 *
	 * Either way, select a new current page (so we use a partially-full
	 * page if one is available).
	 */
	if (ph->ph_nmissing == 0) {
		pp->pr_nidle++;
		if (pp->pr_nidle > pp->pr_maxpages) {
			pr_rmpage(pp, ph, NULL);
		} else {
			LIST_REMOVE(ph, ph_pagelist);
			LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist);
		}
		pool_update_curpage(pp);
	}

	/*
	 * If the page was previously completely full, move it to the
	 * partially-full list and make it the current page. The next
	 * allocation will get the item from this page, instead of
	 * further fragmenting the pool.
	 */
	else if (ph->ph_nmissing == (pp->pr_itemsperpage - 1)) {
		LIST_REMOVE(ph, ph_pagelist);
		LIST_INSERT_HEAD(&pp->pr_partpages, ph, ph_pagelist);
		pp->pr_curpage = ph;
	}
}
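/*
 * A worked trace of the page-state transitions implemented above
 * (hypothetical pool with pr_itemsperpage == 2):
 *
 *	pool_get()	page leaves pr_emptypages for pr_partpages
 *	pool_get()	last item taken; page moves to pr_fullpages
 *	pool_put()	nmissing drops to itemsperpage - 1, so the page
 *			moves back to pr_partpages and becomes curpage
 *	pool_put()	nmissing reaches 0; the page goes to
 *			pr_emptypages, or is freed outright once
 *			pr_nidle exceeds pr_maxpages
 */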
/*
 * Add N items to the pool.
 */
int
pool_prime(struct pool *pp, int n)
{
	struct pool_item_header *ph;
	caddr_t cp;
	int newpages;
	int slowdown;

	mtx_enter(&pp->pr_mtx);
	newpages = roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;

	while (newpages-- > 0) {
		cp = pool_allocator_alloc(pp, PR_NOWAIT, &slowdown);
		if (__predict_true(cp != NULL))
			ph = pool_alloc_item_header(pp, cp, PR_NOWAIT);
		if (__predict_false(cp == NULL || ph == NULL)) {
			if (cp != NULL)
				pool_allocator_free(pp, cp);
			break;
		}

		pool_prime_page(pp, cp, ph);
		pp->pr_npagealloc++;
		pp->pr_minpages++;
	}

	if (pp->pr_minpages >= pp->pr_maxpages)
		pp->pr_maxpages = pp->pr_minpages + 1;	/* XXX */

	mtx_leave(&pp->pr_mtx);
	return (0);
}
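/*
 * Illustrative use (pool, type, and names hypothetical): a driver that
 * must allocate from interrupt context can prime its pool at attach
 * time, so that early PR_NOWAIT allocations don't fail while the pool
 * is still empty:
 *
 *	pool_init(&sc_rxpool, sizeof(struct rxbuf), 0, 0, 0, "rxpl", NULL);
 *	pool_setipl(&sc_rxpool, IPL_NET);
 *	pool_prime(&sc_rxpool, 64);
 */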
/*
 * Add a page worth of items to the pool.
 *
 * Note, we must be called with the pool descriptor LOCKED.
 */
void
pool_prime_page(struct pool *pp, caddr_t storage, struct pool_item_header *ph)
{
	struct pool_item *pi;
	caddr_t cp = storage;
	unsigned int align = pp->pr_align;
	unsigned int ioff = pp->pr_itemoffset;
	int n;
#if defined(DIAGNOSTIC) && defined(POOL_DEBUG)
	int i, *ip;
#endif

	/*
	 * Insert page header.
	 */
	LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist);
	TAILQ_INIT(&ph->ph_itemlist);
	ph->ph_page = storage;
	ph->ph_pagesize = pp->pr_alloc->pa_pagesz;
	ph->ph_nmissing = 0;
	if ((pp->pr_roflags & PR_PHINPAGE) == 0)
		RB_INSERT(phtree, &pp->pr_phtree, ph);

	pp->pr_nidle++;

	/*
	 * Color this page.
	 */
	cp = (caddr_t)(cp + pp->pr_curcolor);
	if ((pp->pr_curcolor += align) > pp->pr_maxcolor)
		pp->pr_curcolor = 0;

	/*
	 * Adjust storage to apply alignment to `pr_itemoffset' in each item.
	 */
	if (ioff != 0)
		cp = (caddr_t)(cp + (align - ioff));
	ph->ph_colored = cp;

	/*
	 * Insert remaining chunks on the bucket list.
	 */
	n = pp->pr_itemsperpage;
	pp->pr_nitems += n;

	while (n--) {
		pi = (struct pool_item *)cp;

		KASSERT(((((vaddr_t)pi) + ioff) & (align - 1)) == 0);

		/* Insert on page list */
		TAILQ_INSERT_TAIL(&ph->ph_itemlist, pi, pi_list);

#ifdef DIAGNOSTIC
		pi->pi_magic = PI_MAGIC;
#ifdef POOL_DEBUG
		for (ip = (int *)pi, i = sizeof(*pi)/sizeof(int);
		    i < pp->pr_size / sizeof(int); i++)
			ip[i] = PI_MAGIC;
#endif /* POOL_DEBUG */
#endif /* DIAGNOSTIC */
		cp = (caddr_t)(cp + pp->pr_size);
	}

	/*
	 * If the pool was depleted, point at the new page.
	 */
	if (pp->pr_curpage == NULL)
		pp->pr_curpage = ph;

	if (++pp->pr_npages > pp->pr_hiwat)
		pp->pr_hiwat = pp->pr_npages;
}

/*
 * Used by pool_get() when nitems drops below the low water mark, to
 * bring pr_nitems back up to that mark.
 *
 * Note we never wait for memory here, we let the caller decide what to do.
 */
int
pool_catchup(struct pool *pp)
{
	struct pool_item_header *ph;
	caddr_t cp;
	int error = 0;
	int slowdown;

	while (POOL_NEEDS_CATCHUP(pp)) {
		/*
		 * Call the page back-end allocator for more memory.
		 */
		cp = pool_allocator_alloc(pp, PR_NOWAIT, &slowdown);
		if (__predict_true(cp != NULL))
			ph = pool_alloc_item_header(pp, cp, PR_NOWAIT);
		if (__predict_false(cp == NULL || ph == NULL)) {
			if (cp != NULL)
				pool_allocator_free(pp, cp);
			error = ENOMEM;
			break;
		}
		pool_prime_page(pp, cp, ph);
		pp->pr_npagealloc++;
	}

	return (error);
}

void
pool_update_curpage(struct pool *pp)
{
	pp->pr_curpage = LIST_FIRST(&pp->pr_partpages);
	if (pp->pr_curpage == NULL) {
		pp->pr_curpage = LIST_FIRST(&pp->pr_emptypages);
	}
}

void
pool_setlowat(struct pool *pp, int n)
{
	pp->pr_minitems = n;
	pp->pr_minpages = (n == 0)
	    ? 0
	    : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;

	mtx_enter(&pp->pr_mtx);
	/* Make sure we're caught up with the newly-set low water mark. */
	if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
		/*
		 * XXX: Should we log a warning? Should we set up a timeout
		 * to try again in a second or so? The latter could break
		 * a caller's assumptions about interrupt protection, etc.
		 */
	}
	mtx_leave(&pp->pr_mtx);
}

void
pool_sethiwat(struct pool *pp, int n)
{
	pp->pr_maxpages = (n == 0)
	    ? 0
	    : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
}

int
pool_sethardlimit(struct pool *pp, u_int n, const char *warnmsg, int ratecap)
{
	int error = 0;

	if (n < pp->pr_nout) {
		error = EINVAL;
		goto done;
	}

	pp->pr_hardlimit = n;
	pp->pr_hardlimit_warning = warnmsg;
	pp->pr_hardlimit_ratecap.tv_sec = ratecap;
	pp->pr_hardlimit_warning_last.tv_sec = 0;
	pp->pr_hardlimit_warning_last.tv_usec = 0;

	/*
	 * In-line version of pool_sethiwat().
	 */
	pp->pr_maxpages = (n == 0 || n == UINT_MAX)
	    ? n
	    : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;

done:
	return (error);
}

void
pool_set_ctordtor(struct pool *pp, int (*ctor)(void *, void *, int),
    void (*dtor)(void *, void *), void *arg)
{
	pp->pr_ctor = ctor;
	pp->pr_dtor = dtor;
	pp->pr_arg = arg;
}
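/*
 * Illustrative tuning calls (pool, type, and message hypothetical):
 * keep at least 16 items ready, cap the cached pages at enough for
 * about 256 items, limit the pool to 1024 outstanding items with a
 * warning logged at most once every 30 seconds, and initialize items
 * via a constructor as they leave the pool. pool_get() fails if the
 * ctor returns nonzero, and PR_ZERO may not be combined with a ctor:
 *
 *	int
 *	foo_ctor(void *arg, void *v, int flags)
 *	{
 *		memset(v, 0, sizeof(struct foo));
 *		return (0);
 *	}
 *
 *	pool_setlowat(&foo_pool, 16);
 *	pool_sethiwat(&foo_pool, 256);
 *	pool_sethardlimit(&foo_pool, 1024, "out of foos", 30);
 *	pool_set_ctordtor(&foo_pool, foo_ctor, NULL, NULL);
 */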
/*
 * Release all complete pages that have not been used recently.
 *
 * Returns non-zero if any pages have been reclaimed.
 */
int
pool_reclaim(struct pool *pp)
{
	struct pool_item_header *ph, *phnext;
	struct pool_pagelist pq;

	LIST_INIT(&pq);

	mtx_enter(&pp->pr_mtx);
	for (ph = LIST_FIRST(&pp->pr_emptypages); ph != NULL; ph = phnext) {
		phnext = LIST_NEXT(ph, ph_pagelist);

		/* Check our minimum page claim */
		if (pp->pr_npages <= pp->pr_minpages)
			break;

		KASSERT(ph->ph_nmissing == 0);

		/*
		 * If freeing this page would put us below
		 * the low water mark, stop now.
		 */
		if ((pp->pr_nitems - pp->pr_itemsperpage) <
		    pp->pr_minitems)
			break;

		pr_rmpage(pp, ph, &pq);
	}
	mtx_leave(&pp->pr_mtx);

	if (LIST_EMPTY(&pq))
		return (0);
	while ((ph = LIST_FIRST(&pq)) != NULL) {
		LIST_REMOVE(ph, ph_pagelist);
		pool_allocator_free(pp, ph->ph_page);
		if (pp->pr_roflags & PR_PHINPAGE)
			continue;
		pool_put(&phpool, ph);
	}

	return (1);
}

#ifdef DDB
#include <machine/db_machdep.h>
#include <ddb/db_interface.h>
#include <ddb/db_output.h>

/*
 * Diagnostic helpers.
 */
void
pool_printit(struct pool *pp, const char *modif, int (*pr)(const char *, ...))
{
	pool_print1(pp, modif, pr);
}

void
pool_print_pagelist(struct pool_pagelist *pl, int (*pr)(const char *, ...))
{
	struct pool_item_header *ph;
#ifdef DIAGNOSTIC
	struct pool_item *pi;
#endif

	LIST_FOREACH(ph, pl, ph_pagelist) {
		(*pr)("\t\tpage %p, nmissing %d\n",
		    ph->ph_page, ph->ph_nmissing);
#ifdef DIAGNOSTIC
		TAILQ_FOREACH(pi, &ph->ph_itemlist, pi_list) {
			if (pi->pi_magic != PI_MAGIC) {
				(*pr)("\t\t\titem %p, magic 0x%x\n",
				    pi, pi->pi_magic);
			}
		}
#endif
	}
}

void
pool_print1(struct pool *pp, const char *modif, int (*pr)(const char *, ...))
{
	struct pool_item_header *ph;
	int print_pagelist = 0;
	char c;

	while ((c = *modif++) != '\0') {
		if (c == 'p')
			print_pagelist = 1;
	}

	(*pr)("POOL %s: size %u, align %u, ioff %u, roflags 0x%08x\n",
	    pp->pr_wchan, pp->pr_size, pp->pr_align, pp->pr_itemoffset,
	    pp->pr_roflags);
	(*pr)("\talloc %p\n", pp->pr_alloc);
	(*pr)("\tminitems %u, minpages %u, maxpages %u, npages %u\n",
	    pp->pr_minitems, pp->pr_minpages, pp->pr_maxpages, pp->pr_npages);
	(*pr)("\titemsperpage %u, nitems %u, nout %u, hardlimit %u\n",
	    pp->pr_itemsperpage, pp->pr_nitems, pp->pr_nout, pp->pr_hardlimit);

	(*pr)("\n\tnget %lu, nfail %lu, nput %lu\n",
	    pp->pr_nget, pp->pr_nfail, pp->pr_nput);
	(*pr)("\tnpagealloc %lu, npagefree %lu, hiwat %u, nidle %lu\n",
	    pp->pr_npagealloc, pp->pr_npagefree, pp->pr_hiwat, pp->pr_nidle);

	if (print_pagelist == 0)
		return;

	if ((ph = LIST_FIRST(&pp->pr_emptypages)) != NULL)
		(*pr)("\n\tempty page list:\n");
	pool_print_pagelist(&pp->pr_emptypages, pr);
	if ((ph = LIST_FIRST(&pp->pr_fullpages)) != NULL)
		(*pr)("\n\tfull page list:\n");
	pool_print_pagelist(&pp->pr_fullpages, pr);
	if ((ph = LIST_FIRST(&pp->pr_partpages)) != NULL)
		(*pr)("\n\tpartial-page list:\n");
	pool_print_pagelist(&pp->pr_partpages, pr);

	if (pp->pr_curpage == NULL)
		(*pr)("\tno current page\n");
	else
		(*pr)("\tcurpage %p\n", pp->pr_curpage->ph_page);
}
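/*
 * db_show_all_pools() below implements the ddb "show all pools"
 * command: with no modifier it prints the per-pool statistics table,
 * and with the /a modifier it prints each pool's address and
 * allocator instead, per its usage message.
 */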
%18s\n", 1169 "Name", "Address", "Allocator"); 1170 1171 TAILQ_FOREACH(pp, &pool_head, pr_poollist) { 1172 if (mode == 'a') { 1173 db_printf("%-10s %18p %18p\n", pp->pr_wchan, pp, 1174 pp->pr_alloc); 1175 continue; 1176 } 1177 1178 if (!pp->pr_nget) 1179 continue; 1180 1181 if (pp->pr_maxpages == UINT_MAX) 1182 snprintf(maxp, sizeof maxp, "inf"); 1183 else 1184 snprintf(maxp, sizeof maxp, "%u", pp->pr_maxpages); 1185 1186 #define PRWORD(ovflw, fmt, width, fixed, val) do { \ 1187 (ovflw) += db_printf((fmt), \ 1188 (width) - (fixed) - (ovflw) > 0 ? \ 1189 (width) - (fixed) - (ovflw) : 0, \ 1190 (val)) - (width); \ 1191 if ((ovflw) < 0) \ 1192 (ovflw) = 0; \ 1193 } while (/* CONSTCOND */0) 1194 1195 ovflw = 0; 1196 PRWORD(ovflw, "%-*s", 10, 0, pp->pr_wchan); 1197 PRWORD(ovflw, " %*u", 4, 1, pp->pr_size); 1198 PRWORD(ovflw, " %*lu", 9, 1, pp->pr_nget); 1199 PRWORD(ovflw, " %*lu", 5, 1, pp->pr_nfail); 1200 PRWORD(ovflw, " %*lu", 9, 1, pp->pr_nput); 1201 PRWORD(ovflw, " %*lu", 6, 1, pp->pr_npagealloc); 1202 PRWORD(ovflw, " %*lu", 6, 1, pp->pr_npagefree); 1203 PRWORD(ovflw, " %*d", 6, 1, pp->pr_npages); 1204 PRWORD(ovflw, " %*d", 6, 1, pp->pr_hiwat); 1205 PRWORD(ovflw, " %*d", 6, 1, pp->pr_minpages); 1206 PRWORD(ovflw, " %*s", 6, 1, maxp); 1207 PRWORD(ovflw, " %*lu\n", 5, 1, pp->pr_nidle); 1208 1209 pool_chk(pp, pp->pr_wchan); 1210 } 1211 } 1212 1213 int 1214 pool_chk_page(struct pool *pp, const char *label, struct pool_item_header *ph) 1215 { 1216 struct pool_item *pi; 1217 caddr_t page; 1218 int n; 1219 #if defined(DIAGNOSTIC) && defined(POOL_DEBUG) 1220 int i, *ip; 1221 #endif 1222 1223 page = (caddr_t)((u_long)ph & pp->pr_alloc->pa_pagemask); 1224 if (page != ph->ph_page && 1225 (pp->pr_roflags & PR_PHINPAGE) != 0) { 1226 if (label != NULL) 1227 printf("%s: ", label); 1228 printf("pool(%p:%s): page inconsistency: page %p; " 1229 "at page head addr %p (p %p)\n", 1230 pp, pp->pr_wchan, ph->ph_page, ph, page); 1231 return 1; 1232 } 1233 1234 for (pi = TAILQ_FIRST(&ph->ph_itemlist), n = 0; 1235 pi != NULL; 1236 pi = TAILQ_NEXT(pi,pi_list), n++) { 1237 1238 #ifdef DIAGNOSTIC 1239 if (pi->pi_magic != PI_MAGIC) { 1240 if (label != NULL) 1241 printf("%s: ", label); 1242 printf("pool(%s): free list modified: " 1243 "page %p; item ordinal %d; addr %p " 1244 "(p %p); offset 0x%x=0x%x\n", 1245 pp->pr_wchan, ph->ph_page, n, pi, page, 1246 0, pi->pi_magic); 1247 } 1248 #ifdef POOL_DEBUG 1249 for (ip = (int *)pi, i = sizeof(*pi) / sizeof(int); 1250 i < pp->pr_size / sizeof(int); i++) { 1251 if (ip[i] != PI_MAGIC) { 1252 printf("pool(%s): free list modified: " 1253 "page %p; item ordinal %d; addr %p " 1254 "(p %p); offset 0x%x=0x%x\n", 1255 pp->pr_wchan, ph->ph_page, n, pi, 1256 page, i * sizeof(int), ip[i]); 1257 } 1258 } 1259 1260 #endif /* POOL_DEBUG */ 1261 #endif /* DIAGNOSTIC */ 1262 page = 1263 (caddr_t)((u_long)pi & pp->pr_alloc->pa_pagemask); 1264 if (page == ph->ph_page) 1265 continue; 1266 1267 if (label != NULL) 1268 printf("%s: ", label); 1269 printf("pool(%p:%s): page inconsistency: page %p;" 1270 " item ordinal %d; addr %p (p %p)\n", pp, 1271 pp->pr_wchan, ph->ph_page, n, pi, page); 1272 return 1; 1273 } 1274 return 0; 1275 } 1276 1277 int 1278 pool_chk(struct pool *pp, const char *label) 1279 { 1280 struct pool_item_header *ph; 1281 int r = 0; 1282 1283 LIST_FOREACH(ph, &pp->pr_emptypages, ph_pagelist) 1284 r += pool_chk_page(pp, label, ph); 1285 LIST_FOREACH(ph, &pp->pr_fullpages, ph_pagelist) 1286 r += pool_chk_page(pp, label, ph); 1287 LIST_FOREACH(ph, &pp->pr_partpages, ph_pagelist) 1288 
void
pool_walk(struct pool *pp, int full, int (*pr)(const char *, ...),
    void (*func)(void *, int, int (*)(const char *, ...)))
{
	struct pool_item_header *ph;
	struct pool_item *pi;
	caddr_t cp;
	int n;

	LIST_FOREACH(ph, &pp->pr_fullpages, ph_pagelist) {
		cp = ph->ph_colored;
		n = ph->ph_nmissing;

		while (n--) {
			func(cp, full, pr);
			cp += pp->pr_size;
		}
	}

	LIST_FOREACH(ph, &pp->pr_partpages, ph_pagelist) {
		cp = ph->ph_colored;
		n = ph->ph_nmissing;

		do {
			TAILQ_FOREACH(pi, &ph->ph_itemlist, pi_list) {
				if (cp == (caddr_t)pi)
					break;
			}
			if (cp != (caddr_t)pi) {
				func(cp, full, pr);
				n--;
			}

			cp += pp->pr_size;
		} while (n > 0);
	}
}
#endif

/*
 * We have three different sysctls.
 * kern.pool.npools - the number of pools.
 * kern.pool.pool.<pool#> - the pool struct for the pool#.
 * kern.pool.name.<pool#> - the name for pool#.
 */
int
sysctl_dopool(int *name, u_int namelen, char *where, size_t *sizep)
{
	struct pool *pp, *foundpool = NULL;
	size_t buflen = where != NULL ? *sizep : 0;
	int npools = 0, s;
	unsigned int lookfor;
	size_t len;

	switch (*name) {
	case KERN_POOL_NPOOLS:
		if (namelen != 1 || buflen != sizeof(int))
			return (EINVAL);
		lookfor = 0;
		break;
	case KERN_POOL_NAME:
		if (namelen != 2 || buflen < 1)
			return (EINVAL);
		lookfor = name[1];
		break;
	case KERN_POOL_POOL:
		if (namelen != 2 || buflen != sizeof(struct pool))
			return (EINVAL);
		lookfor = name[1];
		break;
	default:
		return (EINVAL);
	}

	s = splvm();

	TAILQ_FOREACH(pp, &pool_head, pr_poollist) {
		npools++;
		if (lookfor == pp->pr_serial) {
			foundpool = pp;
			break;
		}
	}

	splx(s);

	if (*name != KERN_POOL_NPOOLS && foundpool == NULL)
		return (ENOENT);

	switch (*name) {
	case KERN_POOL_NPOOLS:
		return copyout(&npools, where, buflen);
	case KERN_POOL_NAME:
		len = strlen(foundpool->pr_wchan) + 1;
		if (*sizep < len)
			return (ENOMEM);
		*sizep = len;
		return copyout(foundpool->pr_wchan, where, len);
	case KERN_POOL_POOL:
		return copyout(foundpool, where, buflen);
	}
	/* NOTREACHED */
	return (0);	/* XXX - Stupid gcc */
}
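/*
 * Illustrative userland use of the interface above (error handling
 * omitted; assumes the usual CTL_KERN/KERN_POOL sysctl dispatch, which
 * lives outside this file): fetch the number of pools, then the name
 * of the pool with serial number 1:
 *
 *	int mib[3] = { CTL_KERN, KERN_POOL, KERN_POOL_NPOOLS };
 *	int npools;
 *	size_t len = sizeof(npools);
 *	sysctl(mib, 3, &npools, &len, NULL, 0);
 *
 *	int mib2[4] = { CTL_KERN, KERN_POOL, KERN_POOL_NAME, 1 };
 *	char name[32];
 *	len = sizeof(name);
 *	sysctl(mib2, 4, name, &len, NULL, 0);
 */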
/*
 * Pool backend allocators.
 *
 * Each pool has a backend allocator that handles the allocation and
 * deallocation of its pages.
 */
void	*pool_page_alloc(struct pool *, int, int *);
void	 pool_page_free(struct pool *, void *);

/*
 * Safe for interrupts; the name is preserved for compatibility.
 * This is the default allocator.
 */
struct pool_allocator pool_allocator_nointr = {
	pool_page_alloc, pool_page_free, 0,
};

/*
 * XXX - we have at least three different resources for the same allocation
 * and each resource can be depleted. First we have the ready elements in
 * the pool. Then we have the resource (typically a vm_map) for this
 * allocator, then we have physical memory. Waiting for any of these can
 * be unnecessary when any other is freed, but the kernel doesn't support
 * sleeping on multiple addresses, so we have to fake it. The caller sleeps
 * on the pool (so that we can be awakened when an item is returned to the
 * pool), but we set PA_WANT on the allocator. When a page is returned to
 * the allocator and PA_WANT is set, pool_allocator_free will wake up all
 * sleeping pools belonging to this allocator. (XXX - thundering herd.)
 * We also wake up the allocator in case someone without a pool (malloc)
 * is sleeping waiting for this allocator.
 */

void *
pool_allocator_alloc(struct pool *pp, int flags, int *slowdown)
{
	boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;
	void *v;

	if (waitok)
		mtx_leave(&pp->pr_mtx);
	v = pp->pr_alloc->pa_alloc(pp, flags, slowdown);
	if (waitok)
		mtx_enter(&pp->pr_mtx);

	return (v);
}

void
pool_allocator_free(struct pool *pp, void *v)
{
	struct pool_allocator *pa = pp->pr_alloc;

	(*pa->pa_free)(pp, v);
}

void *
pool_page_alloc(struct pool *pp, int flags, int *slowdown)
{
	boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;

	return (uvm_km_getpage(waitok, slowdown));
}

void
pool_page_free(struct pool *pp, void *v)
{
	uvm_km_putpage(v);
}

void *
pool_large_alloc(struct pool *pp, int flags, int *slowdown)
{
	int kfl = (flags & PR_WAITOK) ? 0 : UVM_KMF_NOWAIT;
	vaddr_t va;
	int s;

	s = splvm();
	va = uvm_km_kmemalloc(kmem_map, NULL, pp->pr_alloc->pa_pagesz, kfl);
	splx(s);

	return ((void *)va);
}

void
pool_large_free(struct pool *pp, void *v)
{
	int s;

	s = splvm();
	uvm_km_free(kmem_map, (vaddr_t)v, pp->pr_alloc->pa_pagesz);
	splx(s);
}

void *
pool_large_alloc_ni(struct pool *pp, int flags, int *slowdown)
{
	int kfl = (flags & PR_WAITOK) ? 0 : UVM_KMF_NOWAIT;

	return ((void *)uvm_km_kmemalloc(kernel_map, uvm.kernel_object,
	    pp->pr_alloc->pa_pagesz, kfl));
}

void
pool_large_free_ni(struct pool *pp, void *v)
{
	uvm_km_free(kernel_map, (vaddr_t)v, pp->pr_alloc->pa_pagesz);
}
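/*
 * Illustrative custom backend allocator (all `foo' names and foo_map
 * are hypothetical): a subsystem that wants its pool pages to come
 * from its own submap can supply a pool_allocator of its own instead
 * of relying on the defaults chosen by pool_init(), mirroring the
 * pool_large_alloc()/pool_large_free() pair above:
 *
 *	void *
 *	foo_pool_alloc(struct pool *pp, int flags, int *slowdown)
 *	{
 *		int kfl = (flags & PR_WAITOK) ? 0 : UVM_KMF_NOWAIT;
 *
 *		return ((void *)uvm_km_kmemalloc(foo_map, NULL,
 *		    pp->pr_alloc->pa_pagesz, kfl));
 *	}
 *
 *	void
 *	foo_pool_free(struct pool *pp, void *v)
 *	{
 *		uvm_km_free(foo_map, (vaddr_t)v, pp->pr_alloc->pa_pagesz);
 *	}
 *
 *	struct pool_allocator foo_allocator = {
 *		foo_pool_alloc, foo_pool_free, 0,
 *	};
 *
 *	pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0, "foopl",
 *	    &foo_allocator);
 */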