1 /* $NetBSD: subr_pool.c,v 1.31 2000/02/14 19:28:19 thorpej Exp $ */ 2 3 /*- 4 * Copyright (c) 1997, 1999 The NetBSD Foundation, Inc. 5 * All rights reserved. 6 * 7 * This code is derived from software contributed to The NetBSD Foundation 8 * by Paul Kranenburg; by Jason R. Thorpe of the Numerical Aerospace 9 * Simulation Facility, NASA Ames Research Center. 10 * 11 * Redistribution and use in source and binary forms, with or without 12 * modification, are permitted provided that the following conditions 13 * are met: 14 * 1. Redistributions of source code must retain the above copyright 15 * notice, this list of conditions and the following disclaimer. 16 * 2. Redistributions in binary form must reproduce the above copyright 17 * notice, this list of conditions and the following disclaimer in the 18 * documentation and/or other materials provided with the distribution. 19 * 3. All advertising materials mentioning features or use of this software 20 * must display the following acknowledgement: 21 * This product includes software developed by the NetBSD 22 * Foundation, Inc. and its contributors. 23 * 4. Neither the name of The NetBSD Foundation nor the names of its 24 * contributors may be used to endorse or promote products derived 25 * from this software without specific prior written permission. 26 * 27 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 28 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 29 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 30 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 31 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 32 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 33 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 34 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 35 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 36 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 37 * POSSIBILITY OF SUCH DAMAGE. 38 */ 39 40 #include "opt_pool.h" 41 #include "opt_poollog.h" 42 #include "opt_lockdebug.h" 43 44 #include <sys/param.h> 45 #include <sys/systm.h> 46 #include <sys/proc.h> 47 #include <sys/errno.h> 48 #include <sys/kernel.h> 49 #include <sys/malloc.h> 50 #include <sys/lock.h> 51 #include <sys/pool.h> 52 #include <sys/syslog.h> 53 54 #include <vm/vm.h> 55 #include <vm/vm_kern.h> 56 57 #include <uvm/uvm.h> 58 59 /* 60 * Pool resource management utility. 61 * 62 * Memory is allocated in pages which are split into pieces according 63 * to the pool item size. Each page is kept on a list headed by `pr_pagelist' 64 * in the pool structure and the individual pool items are on a linked list 65 * headed by `ph_itemlist' in each page header. The memory for building 66 * the page list is either taken from the allocated pages themselves (for 67 * small pool items) or taken from an internal pool of page headers (`phpool'). 68 */ 69 70 /* List of all pools */ 71 TAILQ_HEAD(,pool) pool_head = TAILQ_HEAD_INITIALIZER(pool_head); 72 73 /* Private pool for page header structures */ 74 static struct pool phpool; 75 76 /* # of seconds to retain page after last use */ 77 int pool_inactive_time = 10; 78 79 /* Next candidate for drainage (see pool_drain()) */ 80 static struct pool *drainpp; 81 82 /* This spin lock protects both pool_head and drainpp. 
*/ 83 struct simplelock pool_head_slock = SIMPLELOCK_INITIALIZER; 84 85 struct pool_item_header { 86 /* Page headers */ 87 TAILQ_ENTRY(pool_item_header) 88 ph_pagelist; /* pool page list */ 89 TAILQ_HEAD(,pool_item) ph_itemlist; /* chunk list for this page */ 90 LIST_ENTRY(pool_item_header) 91 ph_hashlist; /* Off-page page headers */ 92 int ph_nmissing; /* # of chunks in use */ 93 caddr_t ph_page; /* this page's address */ 94 struct timeval ph_time; /* last referenced */ 95 }; 96 97 struct pool_item { 98 #ifdef DIAGNOSTIC 99 int pi_magic; 100 #define PI_MAGIC 0xdeadbeef 101 #endif 102 /* Other entries use only this list entry */ 103 TAILQ_ENTRY(pool_item) pi_list; 104 }; 105 106 107 #define PR_HASH_INDEX(pp,addr) \ 108 (((u_long)(addr) >> (pp)->pr_pageshift) & (PR_HASHTABSIZE - 1)) 109 110 111 112 static struct pool_item_header 113 *pr_find_pagehead __P((struct pool *, caddr_t)); 114 static void pr_rmpage __P((struct pool *, struct pool_item_header *)); 115 static int pool_catchup __P((struct pool *)); 116 static void pool_prime_page __P((struct pool *, caddr_t)); 117 static void *pool_page_alloc __P((unsigned long, int, int)); 118 static void pool_page_free __P((void *, unsigned long, int)); 119 120 static void pool_print1 __P((struct pool *, const char *, 121 void (*)(const char *, ...))); 122 123 /* 124 * Pool log entry. An array of these is allocated in pool_create(). 125 */ 126 struct pool_log { 127 const char *pl_file; 128 long pl_line; 129 int pl_action; 130 #define PRLOG_GET 1 131 #define PRLOG_PUT 2 132 void *pl_addr; 133 }; 134 135 /* Number of entries in pool log buffers */ 136 #ifndef POOL_LOGSIZE 137 #define POOL_LOGSIZE 10 138 #endif 139 140 int pool_logsize = POOL_LOGSIZE; 141 142 #ifdef DIAGNOSTIC 143 static void pr_log __P((struct pool *, void *, int, const char *, long)); 144 static void pr_printlog __P((struct pool *, struct pool_item *, 145 void (*)(const char *, ...))); 146 static void pr_enter __P((struct pool *, const char *, long)); 147 static void pr_leave __P((struct pool *)); 148 static void pr_enter_check __P((struct pool *, 149 void (*)(const char *, ...))); 150 151 static __inline__ void 152 pr_log(pp, v, action, file, line) 153 struct pool *pp; 154 void *v; 155 int action; 156 const char *file; 157 long line; 158 { 159 int n = pp->pr_curlogentry; 160 struct pool_log *pl; 161 162 if ((pp->pr_roflags & PR_LOGGING) == 0) 163 return; 164 165 /* 166 * Fill in the current entry. Wrap around and overwrite 167 * the oldest entry if necessary. 168 */ 169 pl = &pp->pr_log[n]; 170 pl->pl_file = file; 171 pl->pl_line = line; 172 pl->pl_action = action; 173 pl->pl_addr = v; 174 if (++n >= pp->pr_logsize) 175 n = 0; 176 pp->pr_curlogentry = n; 177 } 178 179 static void 180 pr_printlog(pp, pi, pr) 181 struct pool *pp; 182 struct pool_item *pi; 183 void (*pr) __P((const char *, ...)); 184 { 185 int i = pp->pr_logsize; 186 int n = pp->pr_curlogentry; 187 188 if ((pp->pr_roflags & PR_LOGGING) == 0) 189 return; 190 191 /* 192 * Print all entries in this pool's log. 193 */ 194 while (i-- > 0) { 195 struct pool_log *pl = &pp->pr_log[n]; 196 if (pl->pl_action != 0) { 197 if (pi == NULL || pi == pl->pl_addr) { 198 (*pr)("\tlog entry %d:\n", i); 199 (*pr)("\t\taction = %s, addr = %p\n", 200 pl->pl_action == PRLOG_GET ? 
"get" : "put", 201 pl->pl_addr); 202 (*pr)("\t\tfile: %s at line %lu\n", 203 pl->pl_file, pl->pl_line); 204 } 205 } 206 if (++n >= pp->pr_logsize) 207 n = 0; 208 } 209 } 210 211 static __inline__ void 212 pr_enter(pp, file, line) 213 struct pool *pp; 214 const char *file; 215 long line; 216 { 217 218 if (pp->pr_entered_file != NULL) { 219 printf("pool %s: reentrancy at file %s line %ld\n", 220 pp->pr_wchan, file, line); 221 printf(" previous entry at file %s line %ld\n", 222 pp->pr_entered_file, pp->pr_entered_line); 223 panic("pr_enter"); 224 } 225 226 pp->pr_entered_file = file; 227 pp->pr_entered_line = line; 228 } 229 230 static __inline__ void 231 pr_leave(pp) 232 struct pool *pp; 233 { 234 235 if (pp->pr_entered_file == NULL) { 236 printf("pool %s not entered?\n", pp->pr_wchan); 237 panic("pr_leave"); 238 } 239 240 pp->pr_entered_file = NULL; 241 pp->pr_entered_line = 0; 242 } 243 244 static __inline__ void 245 pr_enter_check(pp, pr) 246 struct pool *pp; 247 void (*pr) __P((const char *, ...)); 248 { 249 250 if (pp->pr_entered_file != NULL) 251 (*pr)("\n\tcurrently entered from file %s line %ld\n", 252 pp->pr_entered_file, pp->pr_entered_line); 253 } 254 #else 255 #define pr_log(pp, v, action, file, line) 256 #define pr_printlog(pp, pi, pr) 257 #define pr_enter(pp, file, line) 258 #define pr_leave(pp) 259 #define pr_enter_check(pp, pr) 260 #endif /* DIAGNOSTIC */ 261 262 /* 263 * Return the pool page header based on page address. 264 */ 265 static __inline__ struct pool_item_header * 266 pr_find_pagehead(pp, page) 267 struct pool *pp; 268 caddr_t page; 269 { 270 struct pool_item_header *ph; 271 272 if ((pp->pr_roflags & PR_PHINPAGE) != 0) 273 return ((struct pool_item_header *)(page + pp->pr_phoffset)); 274 275 for (ph = LIST_FIRST(&pp->pr_hashtab[PR_HASH_INDEX(pp, page)]); 276 ph != NULL; 277 ph = LIST_NEXT(ph, ph_hashlist)) { 278 if (ph->ph_page == page) 279 return (ph); 280 } 281 return (NULL); 282 } 283 284 /* 285 * Remove a page from the pool. 286 */ 287 static __inline__ void 288 pr_rmpage(pp, ph) 289 struct pool *pp; 290 struct pool_item_header *ph; 291 { 292 293 /* 294 * If the page was idle, decrement the idle page count. 295 */ 296 if (ph->ph_nmissing == 0) { 297 #ifdef DIAGNOSTIC 298 if (pp->pr_nidle == 0) 299 panic("pr_rmpage: nidle inconsistent"); 300 if (pp->pr_nitems < pp->pr_itemsperpage) 301 panic("pr_rmpage: nitems inconsistent"); 302 #endif 303 pp->pr_nidle--; 304 } 305 306 pp->pr_nitems -= pp->pr_itemsperpage; 307 308 /* 309 * Unlink a page from the pool and release it. 310 */ 311 TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist); 312 (*pp->pr_free)(ph->ph_page, pp->pr_pagesz, pp->pr_mtype); 313 pp->pr_npages--; 314 pp->pr_npagefree++; 315 316 if ((pp->pr_roflags & PR_PHINPAGE) == 0) { 317 int s; 318 LIST_REMOVE(ph, ph_hashlist); 319 s = splhigh(); 320 pool_put(&phpool, ph); 321 splx(s); 322 } 323 324 if (pp->pr_curpage == ph) { 325 /* 326 * Find a new non-empty page header, if any. 327 * Start search from the page head, to increase the 328 * chance for "high water" pages to be freed. 329 */ 330 for (ph = TAILQ_FIRST(&pp->pr_pagelist); ph != NULL; 331 ph = TAILQ_NEXT(ph, ph_pagelist)) 332 if (TAILQ_FIRST(&ph->ph_itemlist) != NULL) 333 break; 334 335 pp->pr_curpage = ph; 336 } 337 } 338 339 /* 340 * Allocate and initialize a pool. 
 */
struct pool *
pool_create(size, align, ioff, nitems, wchan, pagesz, alloc, release, mtype)
	size_t size;
	u_int align;
	u_int ioff;
	int nitems;
	const char *wchan;
	size_t pagesz;
	void *(*alloc) __P((unsigned long, int, int));
	void (*release) __P((void *, unsigned long, int));
	int mtype;
{
	struct pool *pp;
	int flags;

	pp = (struct pool *)malloc(sizeof(*pp), M_POOL, M_NOWAIT);
	if (pp == NULL)
		return (NULL);

	flags = PR_FREEHEADER;
	pool_init(pp, size, align, ioff, flags, wchan, pagesz,
	    alloc, release, mtype);

	if (nitems != 0) {
		if (pool_prime(pp, nitems, NULL) != 0) {
			pool_destroy(pp);
			return (NULL);
		}
	}

	return (pp);
}

/*
 * Initialize the given pool resource structure.
 *
 * We export this routine to allow other kernel parts to declare
 * static pools that must be initialized before malloc() is available.
 */
void
pool_init(pp, size, align, ioff, flags, wchan, pagesz, alloc, release, mtype)
	struct pool *pp;
	size_t size;
	u_int align;
	u_int ioff;
	int flags;
	const char *wchan;
	size_t pagesz;
	void *(*alloc) __P((unsigned long, int, int));
	void (*release) __P((void *, unsigned long, int));
	int mtype;
{
	int off, slack, i;

#ifdef POOL_DIAGNOSTIC
	/*
	 * Always log if POOL_DIAGNOSTIC is defined.
	 */
	if (pool_logsize != 0)
		flags |= PR_LOGGING;
#endif

	/*
	 * Check arguments and construct default values.
	 */
	if (!powerof2(pagesz) || pagesz > PAGE_SIZE)
		panic("pool_init: page size invalid (%lx)\n", (u_long)pagesz);

	if (alloc == NULL && release == NULL) {
		alloc = pool_page_alloc;
		release = pool_page_free;
		pagesz = PAGE_SIZE;	/* Rounds to PAGE_SIZE anyhow. */
	} else if ((alloc != NULL && release != NULL) == 0) {
		/* If you specify one, you must specify both. */
		panic("pool_init: must specify alloc and release together");
	}

	if (pagesz == 0)
		pagesz = PAGE_SIZE;

	if (align == 0)
		align = ALIGN(1);

	if (size < sizeof(struct pool_item))
		size = sizeof(struct pool_item);

	/*
	 * Initialize the pool structure.
	 */
	TAILQ_INIT(&pp->pr_pagelist);
	pp->pr_curpage = NULL;
	pp->pr_npages = 0;
	pp->pr_minitems = 0;
	pp->pr_minpages = 0;
	pp->pr_maxpages = UINT_MAX;
	pp->pr_roflags = flags;
	pp->pr_flags = 0;
	pp->pr_size = ALIGN(size);
	pp->pr_align = align;
	pp->pr_wchan = wchan;
	pp->pr_mtype = mtype;
	pp->pr_alloc = alloc;
	pp->pr_free = release;
	pp->pr_pagesz = pagesz;
	pp->pr_pagemask = ~(pagesz - 1);
	pp->pr_pageshift = ffs(pagesz) - 1;
	pp->pr_nitems = 0;
	pp->pr_nout = 0;
	pp->pr_hardlimit = UINT_MAX;
	pp->pr_hardlimit_warning = NULL;
	pp->pr_hardlimit_ratecap.tv_sec = 0;
	pp->pr_hardlimit_ratecap.tv_usec = 0;
	pp->pr_hardlimit_warning_last.tv_sec = 0;
	pp->pr_hardlimit_warning_last.tv_usec = 0;

	/*
	 * Decide whether to put the page header off page to avoid
	 * wasting too large a part of the page. Off-page page headers
	 * go on a hash table, so we can match a returned item
	 * with its header based on the page address.
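	 * (pool_put() masks an item's address with pr_pagemask to recover
	 * its page address; pr_find_pagehead() then hashes that address via
	 * PR_HASH_INDEX() and walks the chain comparing ph_page.)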
	 * We use 1/16 of the page size as the threshold (XXX: tune)
	 */
	if (pp->pr_size < pagesz/16) {
		/* Use the end of the page for the page header */
		pp->pr_roflags |= PR_PHINPAGE;
		pp->pr_phoffset = off =
		    pagesz - ALIGN(sizeof(struct pool_item_header));
	} else {
		/* The page header will be taken from our page header pool */
		pp->pr_phoffset = 0;
		off = pagesz;
		for (i = 0; i < PR_HASHTABSIZE; i++) {
			LIST_INIT(&pp->pr_hashtab[i]);
		}
	}

	/*
	 * Alignment is to take place at `ioff' within the item. This means
	 * we must reserve up to `align - 1' bytes on the page to allow
	 * appropriate positioning of each item.
	 *
	 * Silently enforce `0 <= ioff < align'.
	 */
	pp->pr_itemoffset = ioff = ioff % align;
	pp->pr_itemsperpage = (off - ((align - ioff) % align)) / pp->pr_size;

	/*
	 * Use the slack between the chunks and the page header
	 * for "cache coloring".
	 */
	slack = off - pp->pr_itemsperpage * pp->pr_size;
	pp->pr_maxcolor = (slack / align) * align;
	pp->pr_curcolor = 0;

	pp->pr_nget = 0;
	pp->pr_nfail = 0;
	pp->pr_nput = 0;
	pp->pr_npagealloc = 0;
	pp->pr_npagefree = 0;
	pp->pr_hiwat = 0;
	pp->pr_nidle = 0;

	if (flags & PR_LOGGING) {
		if (kmem_map == NULL ||
		    (pp->pr_log = malloc(pool_logsize * sizeof(struct pool_log),
		     M_TEMP, M_NOWAIT)) == NULL)
			pp->pr_roflags &= ~PR_LOGGING;
		pp->pr_curlogentry = 0;
		pp->pr_logsize = pool_logsize;
	}

	pp->pr_entered_file = NULL;
	pp->pr_entered_line = 0;

	simple_lock_init(&pp->pr_slock);

	/*
	 * Initialize private page header pool if we haven't done so yet.
	 * XXX LOCKING.
	 */
	if (phpool.pr_size == 0) {
		pool_init(&phpool, sizeof(struct pool_item_header), 0, 0,
		    0, "phpool", 0, 0, 0, 0);
	}

	/* Insert into the list of all pools. */
	simple_lock(&pool_head_slock);
	TAILQ_INSERT_TAIL(&pool_head, pp, pr_poollist);
	simple_unlock(&pool_head_slock);
}

/*
 * De-commission a pool resource.
 */
void
pool_destroy(pp)
	struct pool *pp;
{
	struct pool_item_header *ph;

#ifdef DIAGNOSTIC
	if (pp->pr_nout != 0) {
		pr_printlog(pp, NULL, printf);
		panic("pool_destroy: pool busy: still out: %u\n",
		    pp->pr_nout);
	}
#endif

	/* Remove all pages */
	if ((pp->pr_roflags & PR_STATIC) == 0)
		while ((ph = pp->pr_pagelist.tqh_first) != NULL)
			pr_rmpage(pp, ph);

	/* Remove from global pool list */
	simple_lock(&pool_head_slock);
	TAILQ_REMOVE(&pool_head, pp, pr_poollist);
	/* XXX Only clear this if we were drainpp?
*/ 559 drainpp = NULL; 560 simple_unlock(&pool_head_slock); 561 562 if ((pp->pr_roflags & PR_LOGGING) != 0) 563 free(pp->pr_log, M_TEMP); 564 565 if (pp->pr_roflags & PR_FREEHEADER) 566 free(pp, M_POOL); 567 } 568 569 570 /* 571 * Grab an item from the pool; must be called at appropriate spl level 572 */ 573 void * 574 _pool_get(pp, flags, file, line) 575 struct pool *pp; 576 int flags; 577 const char *file; 578 long line; 579 { 580 void *v; 581 struct pool_item *pi; 582 struct pool_item_header *ph; 583 584 #ifdef DIAGNOSTIC 585 if ((pp->pr_roflags & PR_STATIC) && (flags & PR_MALLOCOK)) { 586 pr_printlog(pp, NULL, printf); 587 panic("pool_get: static"); 588 } 589 #endif 590 591 if (curproc == NULL && (flags & PR_WAITOK) != 0) 592 panic("pool_get: must have NOWAIT"); 593 594 simple_lock(&pp->pr_slock); 595 pr_enter(pp, file, line); 596 597 startover: 598 /* 599 * Check to see if we've reached the hard limit. If we have, 600 * and we can wait, then wait until an item has been returned to 601 * the pool. 602 */ 603 #ifdef DIAGNOSTIC 604 if (pp->pr_nout > pp->pr_hardlimit) { 605 pr_leave(pp); 606 simple_unlock(&pp->pr_slock); 607 panic("pool_get: %s: crossed hard limit", pp->pr_wchan); 608 } 609 #endif 610 if (pp->pr_nout == pp->pr_hardlimit) { 611 if ((flags & PR_WAITOK) && !(flags & PR_LIMITFAIL)) { 612 /* 613 * XXX: A warning isn't logged in this case. Should 614 * it be? 615 */ 616 pp->pr_flags |= PR_WANTED; 617 pr_leave(pp); 618 simple_unlock(&pp->pr_slock); 619 tsleep((caddr_t)pp, PSWP, pp->pr_wchan, 0); 620 simple_lock(&pp->pr_slock); 621 pr_enter(pp, file, line); 622 goto startover; 623 } 624 625 /* 626 * Log a message that the hard limit has been hit. 627 */ 628 if (pp->pr_hardlimit_warning != NULL && 629 ratecheck(&pp->pr_hardlimit_warning_last, 630 &pp->pr_hardlimit_ratecap)) 631 log(LOG_ERR, "%s\n", pp->pr_hardlimit_warning); 632 633 if (flags & PR_URGENT) 634 panic("pool_get: urgent"); 635 636 pp->pr_nfail++; 637 638 pr_leave(pp); 639 simple_unlock(&pp->pr_slock); 640 return (NULL); 641 } 642 643 /* 644 * The convention we use is that if `curpage' is not NULL, then 645 * it points at a non-empty bucket. In particular, `curpage' 646 * never points at a page header which has PR_PHINPAGE set and 647 * has no items in its bucket. 648 */ 649 if ((ph = pp->pr_curpage) == NULL) { 650 void *v; 651 652 #ifdef DIAGNOSTIC 653 if (pp->pr_nitems != 0) { 654 simple_unlock(&pp->pr_slock); 655 printf("pool_get: %s: curpage NULL, nitems %u\n", 656 pp->pr_wchan, pp->pr_nitems); 657 panic("pool_get: nitems inconsistent\n"); 658 } 659 #endif 660 661 /* 662 * Call the back-end page allocator for more memory. 663 * Release the pool lock, as the back-end page allocator 664 * may block. 665 */ 666 pr_leave(pp); 667 simple_unlock(&pp->pr_slock); 668 v = (*pp->pr_alloc)(pp->pr_pagesz, flags, pp->pr_mtype); 669 simple_lock(&pp->pr_slock); 670 pr_enter(pp, file, line); 671 672 if (v == NULL) { 673 /* 674 * We were unable to allocate a page, but 675 * we released the lock during allocation, 676 * so perhaps items were freed back to the 677 * pool. Check for this case. 678 */ 679 if (pp->pr_curpage != NULL) 680 goto startover; 681 682 if (flags & PR_URGENT) 683 panic("pool_get: urgent"); 684 685 if ((flags & PR_WAITOK) == 0) { 686 pp->pr_nfail++; 687 pr_leave(pp); 688 simple_unlock(&pp->pr_slock); 689 return (NULL); 690 } 691 692 /* 693 * Wait for items to be returned to this pool. 694 * 695 * XXX: we actually want to wait just until 696 * the page allocator has memory again. 
Depending 697 * on this pool's usage, we might get stuck here 698 * for a long time. 699 * 700 * XXX: maybe we should wake up once a second and 701 * try again? 702 */ 703 pp->pr_flags |= PR_WANTED; 704 pr_leave(pp); 705 simple_unlock(&pp->pr_slock); 706 tsleep((caddr_t)pp, PSWP, pp->pr_wchan, 0); 707 simple_lock(&pp->pr_slock); 708 pr_enter(pp, file, line); 709 goto startover; 710 } 711 712 /* We have more memory; add it to the pool */ 713 pp->pr_npagealloc++; 714 pool_prime_page(pp, v); 715 716 /* Start the allocation process over. */ 717 goto startover; 718 } 719 720 if ((v = pi = TAILQ_FIRST(&ph->ph_itemlist)) == NULL) { 721 pr_leave(pp); 722 simple_unlock(&pp->pr_slock); 723 panic("pool_get: %s: page empty", pp->pr_wchan); 724 } 725 #ifdef DIAGNOSTIC 726 if (pp->pr_nitems == 0) { 727 pr_leave(pp); 728 simple_unlock(&pp->pr_slock); 729 printf("pool_get: %s: items on itemlist, nitems %u\n", 730 pp->pr_wchan, pp->pr_nitems); 731 panic("pool_get: nitems inconsistent\n"); 732 } 733 #endif 734 pr_log(pp, v, PRLOG_GET, file, line); 735 736 #ifdef DIAGNOSTIC 737 if (pi->pi_magic != PI_MAGIC) { 738 pr_printlog(pp, pi, printf); 739 panic("pool_get(%s): free list modified: magic=%x; page %p;" 740 " item addr %p\n", 741 pp->pr_wchan, pi->pi_magic, ph->ph_page, pi); 742 } 743 #endif 744 745 /* 746 * Remove from item list. 747 */ 748 TAILQ_REMOVE(&ph->ph_itemlist, pi, pi_list); 749 pp->pr_nitems--; 750 pp->pr_nout++; 751 if (ph->ph_nmissing == 0) { 752 #ifdef DIAGNOSTIC 753 if (pp->pr_nidle == 0) 754 panic("pool_get: nidle inconsistent"); 755 #endif 756 pp->pr_nidle--; 757 } 758 ph->ph_nmissing++; 759 if (TAILQ_FIRST(&ph->ph_itemlist) == NULL) { 760 #ifdef DIAGNOSTIC 761 if (ph->ph_nmissing != pp->pr_itemsperpage) { 762 pr_leave(pp); 763 simple_unlock(&pp->pr_slock); 764 panic("pool_get: %s: nmissing inconsistent", 765 pp->pr_wchan); 766 } 767 #endif 768 /* 769 * Find a new non-empty page header, if any. 770 * Start search from the page head, to increase 771 * the chance for "high water" pages to be freed. 772 * 773 * Migrate empty pages to the end of the list. This 774 * will speed the update of curpage as pages become 775 * idle. Empty pages intermingled with idle pages 776 * is no big deal. As soon as a page becomes un-empty, 777 * it will move back to the head of the list. 778 */ 779 TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist); 780 TAILQ_INSERT_TAIL(&pp->pr_pagelist, ph, ph_pagelist); 781 for (ph = TAILQ_FIRST(&pp->pr_pagelist); ph != NULL; 782 ph = TAILQ_NEXT(ph, ph_pagelist)) 783 if (TAILQ_FIRST(&ph->ph_itemlist) != NULL) 784 break; 785 786 pp->pr_curpage = ph; 787 } 788 789 pp->pr_nget++; 790 791 /* 792 * If we have a low water mark and we are now below that low 793 * water mark, add more items to the pool. 794 */ 795 if (pp->pr_nitems < pp->pr_minitems && pool_catchup(pp) != 0) { 796 /* 797 * XXX: Should we log a warning? Should we set up a timeout 798 * to try again in a second or so? The latter could break 799 * a caller's assumptions about interrupt protection, etc. 
800 */ 801 } 802 803 pr_leave(pp); 804 simple_unlock(&pp->pr_slock); 805 return (v); 806 } 807 808 /* 809 * Return resource to the pool; must be called at appropriate spl level 810 */ 811 void 812 _pool_put(pp, v, file, line) 813 struct pool *pp; 814 void *v; 815 const char *file; 816 long line; 817 { 818 struct pool_item *pi = v; 819 struct pool_item_header *ph; 820 caddr_t page; 821 int s; 822 823 page = (caddr_t)((u_long)v & pp->pr_pagemask); 824 825 simple_lock(&pp->pr_slock); 826 pr_enter(pp, file, line); 827 828 #ifdef DIAGNOSTIC 829 if (pp->pr_nout == 0) { 830 printf("pool %s: putting with none out\n", 831 pp->pr_wchan); 832 panic("pool_put"); 833 } 834 #endif 835 836 pr_log(pp, v, PRLOG_PUT, file, line); 837 838 if ((ph = pr_find_pagehead(pp, page)) == NULL) { 839 pr_printlog(pp, NULL, printf); 840 panic("pool_put: %s: page header missing", pp->pr_wchan); 841 } 842 843 #ifdef LOCKDEBUG 844 /* 845 * Check if we're freeing a locked simple lock. 846 */ 847 simple_lock_freecheck((caddr_t)pi, ((caddr_t)pi) + pp->pr_size); 848 #endif 849 850 /* 851 * Return to item list. 852 */ 853 #ifdef DIAGNOSTIC 854 /* XXX Should fill the item. */ 855 pi->pi_magic = PI_MAGIC; 856 #endif 857 TAILQ_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list); 858 ph->ph_nmissing--; 859 pp->pr_nput++; 860 pp->pr_nitems++; 861 pp->pr_nout--; 862 863 /* Cancel "pool empty" condition if it exists */ 864 if (pp->pr_curpage == NULL) 865 pp->pr_curpage = ph; 866 867 if (pp->pr_flags & PR_WANTED) { 868 pp->pr_flags &= ~PR_WANTED; 869 if (ph->ph_nmissing == 0) 870 pp->pr_nidle++; 871 pr_leave(pp); 872 simple_unlock(&pp->pr_slock); 873 wakeup((caddr_t)pp); 874 return; 875 } 876 877 /* 878 * If this page is now complete, do one of two things: 879 * 880 * (1) If we have more pages than the page high water 881 * mark, free the page back to the system. 882 * 883 * (2) Move it to the end of the page list, so that 884 * we minimize our chances of fragmenting the 885 * pool. Idle pages migrate to the end (along with 886 * completely empty pages, so that we find un-empty 887 * pages more quickly when we update curpage) of the 888 * list so they can be more easily swept up by 889 * the pagedaemon when pages are scarce. 890 */ 891 if (ph->ph_nmissing == 0) { 892 pp->pr_nidle++; 893 if (pp->pr_npages > pp->pr_maxpages) { 894 pr_rmpage(pp, ph); 895 } else { 896 TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist); 897 TAILQ_INSERT_TAIL(&pp->pr_pagelist, ph, ph_pagelist); 898 899 /* 900 * Update the timestamp on the page. A page must 901 * be idle for some period of time before it can 902 * be reclaimed by the pagedaemon. This minimizes 903 * ping-pong'ing for memory. 904 */ 905 s = splclock(); 906 ph->ph_time = mono_time; 907 splx(s); 908 909 /* 910 * Update the current page pointer. Just look for 911 * the first page with any free items. 912 * 913 * XXX: Maybe we want an option to look for the 914 * page with the fewest available items, to minimize 915 * fragmentation? 916 */ 917 for (ph = TAILQ_FIRST(&pp->pr_pagelist); ph != NULL; 918 ph = TAILQ_NEXT(ph, ph_pagelist)) 919 if (TAILQ_FIRST(&ph->ph_itemlist) != NULL) 920 break; 921 922 pp->pr_curpage = ph; 923 } 924 } 925 /* 926 * If the page has just become un-empty, move it to the head of 927 * the list, and make it the current page. The next allocation 928 * will get the item from this page, instead of further fragmenting 929 * the pool. 
930 */ 931 else if (ph->ph_nmissing == (pp->pr_itemsperpage - 1)) { 932 TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist); 933 TAILQ_INSERT_HEAD(&pp->pr_pagelist, ph, ph_pagelist); 934 pp->pr_curpage = ph; 935 } 936 937 pr_leave(pp); 938 simple_unlock(&pp->pr_slock); 939 940 } 941 942 /* 943 * Add N items to the pool. 944 */ 945 int 946 pool_prime(pp, n, storage) 947 struct pool *pp; 948 int n; 949 caddr_t storage; 950 { 951 caddr_t cp; 952 int newnitems, newpages; 953 954 #ifdef DIAGNOSTIC 955 if (storage && !(pp->pr_roflags & PR_STATIC)) 956 panic("pool_prime: static"); 957 /* !storage && static caught below */ 958 #endif 959 960 simple_lock(&pp->pr_slock); 961 962 newnitems = pp->pr_minitems + n; 963 newpages = 964 roundup(newnitems, pp->pr_itemsperpage) / pp->pr_itemsperpage 965 - pp->pr_minpages; 966 967 while (newpages-- > 0) { 968 if (pp->pr_roflags & PR_STATIC) { 969 cp = storage; 970 storage += pp->pr_pagesz; 971 } else { 972 simple_unlock(&pp->pr_slock); 973 cp = (*pp->pr_alloc)(pp->pr_pagesz, 0, pp->pr_mtype); 974 simple_lock(&pp->pr_slock); 975 } 976 977 if (cp == NULL) { 978 simple_unlock(&pp->pr_slock); 979 return (ENOMEM); 980 } 981 982 pp->pr_npagealloc++; 983 pool_prime_page(pp, cp); 984 pp->pr_minpages++; 985 } 986 987 pp->pr_minitems = newnitems; 988 989 if (pp->pr_minpages >= pp->pr_maxpages) 990 pp->pr_maxpages = pp->pr_minpages + 1; /* XXX */ 991 992 simple_unlock(&pp->pr_slock); 993 return (0); 994 } 995 996 /* 997 * Add a page worth of items to the pool. 998 * 999 * Note, we must be called with the pool descriptor LOCKED. 1000 */ 1001 static void 1002 pool_prime_page(pp, storage) 1003 struct pool *pp; 1004 caddr_t storage; 1005 { 1006 struct pool_item *pi; 1007 struct pool_item_header *ph; 1008 caddr_t cp = storage; 1009 unsigned int align = pp->pr_align; 1010 unsigned int ioff = pp->pr_itemoffset; 1011 int s, n; 1012 1013 if ((pp->pr_roflags & PR_PHINPAGE) != 0) { 1014 ph = (struct pool_item_header *)(cp + pp->pr_phoffset); 1015 } else { 1016 s = splhigh(); 1017 ph = pool_get(&phpool, PR_URGENT); 1018 splx(s); 1019 LIST_INSERT_HEAD(&pp->pr_hashtab[PR_HASH_INDEX(pp, cp)], 1020 ph, ph_hashlist); 1021 } 1022 1023 /* 1024 * Insert page header. 1025 */ 1026 TAILQ_INSERT_HEAD(&pp->pr_pagelist, ph, ph_pagelist); 1027 TAILQ_INIT(&ph->ph_itemlist); 1028 ph->ph_page = storage; 1029 ph->ph_nmissing = 0; 1030 memset(&ph->ph_time, 0, sizeof(ph->ph_time)); 1031 1032 pp->pr_nidle++; 1033 1034 /* 1035 * Color this page. 1036 */ 1037 cp = (caddr_t)(cp + pp->pr_curcolor); 1038 if ((pp->pr_curcolor += align) > pp->pr_maxcolor) 1039 pp->pr_curcolor = 0; 1040 1041 /* 1042 * Adjust storage to apply aligment to `pr_itemoffset' in each item. 1043 */ 1044 if (ioff != 0) 1045 cp = (caddr_t)(cp + (align - ioff)); 1046 1047 /* 1048 * Insert remaining chunks on the bucket list. 1049 */ 1050 n = pp->pr_itemsperpage; 1051 pp->pr_nitems += n; 1052 1053 while (n--) { 1054 pi = (struct pool_item *)cp; 1055 1056 /* Insert on page list */ 1057 TAILQ_INSERT_TAIL(&ph->ph_itemlist, pi, pi_list); 1058 #ifdef DIAGNOSTIC 1059 pi->pi_magic = PI_MAGIC; 1060 #endif 1061 cp = (caddr_t)(cp + pp->pr_size); 1062 } 1063 1064 /* 1065 * If the pool was depleted, point at the new page. 1066 */ 1067 if (pp->pr_curpage == NULL) 1068 pp->pr_curpage = ph; 1069 1070 if (++pp->pr_npages > pp->pr_hiwat) 1071 pp->pr_hiwat = pp->pr_npages; 1072 } 1073 1074 /* 1075 * Like pool_prime(), except this is used by pool_get() when nitems 1076 * drops below the low water mark. 
This is used to catch up nitmes 1077 * with the low water mark. 1078 * 1079 * Note 1, we never wait for memory here, we let the caller decide what to do. 1080 * 1081 * Note 2, this doesn't work with static pools. 1082 * 1083 * Note 3, we must be called with the pool already locked, and we return 1084 * with it locked. 1085 */ 1086 static int 1087 pool_catchup(pp) 1088 struct pool *pp; 1089 { 1090 caddr_t cp; 1091 int error = 0; 1092 1093 if (pp->pr_roflags & PR_STATIC) { 1094 /* 1095 * We dropped below the low water mark, and this is not a 1096 * good thing. Log a warning. 1097 * 1098 * XXX: rate-limit this? 1099 */ 1100 printf("WARNING: static pool `%s' dropped below low water " 1101 "mark\n", pp->pr_wchan); 1102 return (0); 1103 } 1104 1105 while (pp->pr_nitems < pp->pr_minitems) { 1106 /* 1107 * Call the page back-end allocator for more memory. 1108 * 1109 * XXX: We never wait, so should we bother unlocking 1110 * the pool descriptor? 1111 */ 1112 simple_unlock(&pp->pr_slock); 1113 cp = (*pp->pr_alloc)(pp->pr_pagesz, 0, pp->pr_mtype); 1114 simple_lock(&pp->pr_slock); 1115 if (cp == NULL) { 1116 error = ENOMEM; 1117 break; 1118 } 1119 pp->pr_npagealloc++; 1120 pool_prime_page(pp, cp); 1121 } 1122 1123 return (error); 1124 } 1125 1126 void 1127 pool_setlowat(pp, n) 1128 pool_handle_t pp; 1129 int n; 1130 { 1131 int error; 1132 1133 simple_lock(&pp->pr_slock); 1134 1135 pp->pr_minitems = n; 1136 pp->pr_minpages = (n == 0) 1137 ? 0 1138 : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage; 1139 1140 /* Make sure we're caught up with the newly-set low water mark. */ 1141 if ((error = pool_catchup(pp)) != 0) { 1142 /* 1143 * XXX: Should we log a warning? Should we set up a timeout 1144 * to try again in a second or so? The latter could break 1145 * a caller's assumptions about interrupt protection, etc. 1146 */ 1147 } 1148 1149 simple_unlock(&pp->pr_slock); 1150 } 1151 1152 void 1153 pool_sethiwat(pp, n) 1154 pool_handle_t pp; 1155 int n; 1156 { 1157 1158 simple_lock(&pp->pr_slock); 1159 1160 pp->pr_maxpages = (n == 0) 1161 ? 0 1162 : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage; 1163 1164 simple_unlock(&pp->pr_slock); 1165 } 1166 1167 void 1168 pool_sethardlimit(pp, n, warnmess, ratecap) 1169 pool_handle_t pp; 1170 int n; 1171 const char *warnmess; 1172 int ratecap; 1173 { 1174 1175 simple_lock(&pp->pr_slock); 1176 1177 pp->pr_hardlimit = n; 1178 pp->pr_hardlimit_warning = warnmess; 1179 pp->pr_hardlimit_ratecap.tv_sec = ratecap; 1180 pp->pr_hardlimit_warning_last.tv_sec = 0; 1181 pp->pr_hardlimit_warning_last.tv_usec = 0; 1182 1183 /* 1184 * In-line version of pool_sethiwat(), because we don't want to 1185 * release the lock. 1186 */ 1187 pp->pr_maxpages = (n == 0) 1188 ? 0 1189 : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage; 1190 1191 simple_unlock(&pp->pr_slock); 1192 } 1193 1194 /* 1195 * Default page allocator. 1196 */ 1197 static void * 1198 pool_page_alloc(sz, flags, mtype) 1199 unsigned long sz; 1200 int flags; 1201 int mtype; 1202 { 1203 boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE; 1204 1205 return ((void *)uvm_km_alloc_poolpage(waitok)); 1206 } 1207 1208 static void 1209 pool_page_free(v, sz, mtype) 1210 void *v; 1211 unsigned long sz; 1212 int mtype; 1213 { 1214 1215 uvm_km_free_poolpage((vaddr_t)v); 1216 } 1217 1218 /* 1219 * Alternate pool page allocator for pools that know they will 1220 * never be accessed in interrupt context. 
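 *
 * A pool wired to these hooks might be set up as in the sketch below
 * (illustrative only; `struct bar', the "barpl" name and the M_TEMP
 * malloc type are assumptions, not taken from this file).  With both
 * hooks supplied and pagesz passed as 0, pool_init() uses PAGE_SIZE:
 *
 *	pool_init(&bar_pool, sizeof(struct bar), 0, 0, 0, "barpl",
 *	    0, pool_page_alloc_nointr, pool_page_free_nointr, M_TEMP);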
1221 */ 1222 void * 1223 pool_page_alloc_nointr(sz, flags, mtype) 1224 unsigned long sz; 1225 int flags; 1226 int mtype; 1227 { 1228 boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE; 1229 1230 return ((void *)uvm_km_alloc_poolpage1(kernel_map, uvm.kernel_object, 1231 waitok)); 1232 } 1233 1234 void 1235 pool_page_free_nointr(v, sz, mtype) 1236 void *v; 1237 unsigned long sz; 1238 int mtype; 1239 { 1240 1241 uvm_km_free_poolpage1(kernel_map, (vaddr_t)v); 1242 } 1243 1244 1245 /* 1246 * Release all complete pages that have not been used recently. 1247 */ 1248 void 1249 _pool_reclaim(pp, file, line) 1250 pool_handle_t pp; 1251 const char *file; 1252 long line; 1253 { 1254 struct pool_item_header *ph, *phnext; 1255 struct timeval curtime; 1256 int s; 1257 1258 if (pp->pr_roflags & PR_STATIC) 1259 return; 1260 1261 if (simple_lock_try(&pp->pr_slock) == 0) 1262 return; 1263 pr_enter(pp, file, line); 1264 1265 s = splclock(); 1266 curtime = mono_time; 1267 splx(s); 1268 1269 for (ph = TAILQ_FIRST(&pp->pr_pagelist); ph != NULL; ph = phnext) { 1270 phnext = TAILQ_NEXT(ph, ph_pagelist); 1271 1272 /* Check our minimum page claim */ 1273 if (pp->pr_npages <= pp->pr_minpages) 1274 break; 1275 1276 if (ph->ph_nmissing == 0) { 1277 struct timeval diff; 1278 timersub(&curtime, &ph->ph_time, &diff); 1279 if (diff.tv_sec < pool_inactive_time) 1280 continue; 1281 1282 /* 1283 * If freeing this page would put us below 1284 * the low water mark, stop now. 1285 */ 1286 if ((pp->pr_nitems - pp->pr_itemsperpage) < 1287 pp->pr_minitems) 1288 break; 1289 1290 pr_rmpage(pp, ph); 1291 } 1292 } 1293 1294 pr_leave(pp); 1295 simple_unlock(&pp->pr_slock); 1296 } 1297 1298 1299 /* 1300 * Drain pools, one at a time. 1301 * 1302 * Note, we must never be called from an interrupt context. 1303 */ 1304 void 1305 pool_drain(arg) 1306 void *arg; 1307 { 1308 struct pool *pp; 1309 int s; 1310 1311 s = splimp(); 1312 simple_lock(&pool_head_slock); 1313 1314 if (drainpp == NULL && (drainpp = TAILQ_FIRST(&pool_head)) == NULL) 1315 goto out; 1316 1317 pp = drainpp; 1318 drainpp = TAILQ_NEXT(pp, pr_poollist); 1319 1320 pool_reclaim(pp); 1321 1322 out: 1323 simple_unlock(&pool_head_slock); 1324 splx(s); 1325 } 1326 1327 1328 /* 1329 * Diagnostic helpers. 1330 */ 1331 void 1332 pool_print(pp, modif) 1333 struct pool *pp; 1334 const char *modif; 1335 { 1336 int s; 1337 1338 s = splimp(); 1339 if (simple_lock_try(&pp->pr_slock) == 0) { 1340 printf("pool %s is locked; try again later\n", 1341 pp->pr_wchan); 1342 splx(s); 1343 return; 1344 } 1345 pool_print1(pp, modif, printf); 1346 simple_unlock(&pp->pr_slock); 1347 splx(s); 1348 } 1349 1350 void 1351 pool_printit(pp, modif, pr) 1352 struct pool *pp; 1353 const char *modif; 1354 void (*pr) __P((const char *, ...)); 1355 { 1356 int didlock = 0; 1357 1358 if (pp == NULL) { 1359 (*pr)("Must specify a pool to print.\n"); 1360 return; 1361 } 1362 1363 /* 1364 * Called from DDB; interrupts should be blocked, and all 1365 * other processors should be paused. We can skip locking 1366 * the pool in this case. 1367 * 1368 * We do a simple_lock_try() just to print the lock 1369 * status, however. 
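	 *
	 * The `l' modifier in "modif" requests the pool's allocation log
	 * and `p' the page list; pool_print1() below parses these.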
	 */

	if (simple_lock_try(&pp->pr_slock) == 0)
		(*pr)("WARNING: pool %s is locked\n", pp->pr_wchan);
	else
		didlock = 1;

	pool_print1(pp, modif, pr);

	if (didlock)
		simple_unlock(&pp->pr_slock);
}

static void
pool_print1(pp, modif, pr)
	struct pool *pp;
	const char *modif;
	void (*pr) __P((const char *, ...));
{
	struct pool_item_header *ph;
#ifdef DIAGNOSTIC
	struct pool_item *pi;
#endif
	int print_log = 0, print_pagelist = 0;
	char c;

	while ((c = *modif++) != '\0') {
		if (c == 'l')
			print_log = 1;
		if (c == 'p')
			print_pagelist = 1;
	}

	(*pr)("POOL %s: size %u, align %u, ioff %u, roflags 0x%08x\n",
	    pp->pr_wchan, pp->pr_size, pp->pr_align, pp->pr_itemoffset,
	    pp->pr_roflags);
	(*pr)("\tpagesz %u, mtype %d\n", pp->pr_pagesz, pp->pr_mtype);
	(*pr)("\talloc %p, release %p\n", pp->pr_alloc, pp->pr_free);
	(*pr)("\tminitems %u, minpages %u, maxpages %u, npages %u\n",
	    pp->pr_minitems, pp->pr_minpages, pp->pr_maxpages, pp->pr_npages);
	(*pr)("\titemsperpage %u, nitems %u, nout %u, hardlimit %u\n",
	    pp->pr_itemsperpage, pp->pr_nitems, pp->pr_nout, pp->pr_hardlimit);

	(*pr)("\n\tnget %lu, nfail %lu, nput %lu\n",
	    pp->pr_nget, pp->pr_nfail, pp->pr_nput);
	(*pr)("\tnpagealloc %lu, npagefree %lu, hiwat %u, nidle %lu\n",
	    pp->pr_npagealloc, pp->pr_npagefree, pp->pr_hiwat, pp->pr_nidle);

	if (print_pagelist == 0)
		goto skip_pagelist;

	if ((ph = TAILQ_FIRST(&pp->pr_pagelist)) != NULL)
		(*pr)("\n\tpage list:\n");
	for (; ph != NULL; ph = TAILQ_NEXT(ph, ph_pagelist)) {
		(*pr)("\t\tpage %p, nmissing %d, time %lu,%lu\n",
		    ph->ph_page, ph->ph_nmissing,
		    (u_long)ph->ph_time.tv_sec,
		    (u_long)ph->ph_time.tv_usec);
#ifdef DIAGNOSTIC
		for (pi = TAILQ_FIRST(&ph->ph_itemlist); pi != NULL;
		     pi = TAILQ_NEXT(pi, pi_list)) {
			if (pi->pi_magic != PI_MAGIC) {
				(*pr)("\t\t\titem %p, magic 0x%x\n",
				    pi, pi->pi_magic);
			}
		}
#endif
	}
	if (pp->pr_curpage == NULL)
		(*pr)("\tno current page\n");
	else
		(*pr)("\tcurpage %p\n", pp->pr_curpage->ph_page);

 skip_pagelist:

	if (print_log == 0)
		goto skip_log;

	(*pr)("\n");
	if ((pp->pr_roflags & PR_LOGGING) == 0)
		(*pr)("\tno log\n");
	else
		pr_printlog(pp, NULL, pr);

 skip_log:

	pr_enter_check(pp, pr);
}

int
pool_chk(pp, label)
	struct pool *pp;
	char *label;
{
	struct pool_item_header *ph;
	int r = 0;

	simple_lock(&pp->pr_slock);

	for (ph = TAILQ_FIRST(&pp->pr_pagelist); ph != NULL;
	     ph = TAILQ_NEXT(ph, ph_pagelist)) {

		struct pool_item *pi;
		int n;
		caddr_t page;

		page = (caddr_t)((u_long)ph & pp->pr_pagemask);
		if (page != ph->ph_page &&
		    (pp->pr_roflags & PR_PHINPAGE) != 0) {
			if (label != NULL)
				printf("%s: ", label);
			printf("pool(%p:%s): page inconsistency: page %p;"
			    " at page head addr %p (p %p)\n", pp,
			    pp->pr_wchan, ph->ph_page,
			    ph, page);
			r++;
			goto out;
		}

		for (pi = TAILQ_FIRST(&ph->ph_itemlist), n = 0;
		     pi != NULL;
		     pi = TAILQ_NEXT(pi,pi_list), n++) {

#ifdef DIAGNOSTIC
			if (pi->pi_magic != PI_MAGIC) {
				if (label != NULL)
					printf("%s: ", label);
				printf("pool(%s): free list modified: magic=%x;"
				    " page %p;
item ordinal %d;" 1500 " addr %p (p %p)\n", 1501 pp->pr_wchan, pi->pi_magic, ph->ph_page, 1502 n, pi, page); 1503 panic("pool"); 1504 } 1505 #endif 1506 page = (caddr_t)((u_long)pi & pp->pr_pagemask); 1507 if (page == ph->ph_page) 1508 continue; 1509 1510 if (label != NULL) 1511 printf("%s: ", label); 1512 printf("pool(%p:%s): page inconsistency: page %p;" 1513 " item ordinal %d; addr %p (p %p)\n", pp, 1514 pp->pr_wchan, ph->ph_page, 1515 n, pi, page); 1516 r++; 1517 goto out; 1518 } 1519 } 1520 out: 1521 simple_unlock(&pp->pr_slock); 1522 return (r); 1523 } 1524
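
/*
 * Illustrative usage sketch (not part of the original file).  The item
 * type `struct foo', the "foopl" name, the M_DEVBUF malloc type and the
 * spl level are assumptions for the example only.  It shows a pool being
 * created with the default page allocator, then an item being allocated
 * and released at the appropriate spl:
 *
 *	struct pool *foo_pool;
 *	struct foo *f;
 *	int s;
 *
 *	foo_pool = pool_create(sizeof(struct foo), 0, 0, 0, "foopl",
 *	    0, NULL, NULL, M_DEVBUF);
 *	if (foo_pool == NULL)
 *		return (ENOMEM);
 *
 *	s = splhigh();
 *	f = pool_get(foo_pool, PR_NOWAIT);
 *	splx(s);
 *	if (f == NULL)
 *		return (ENOMEM);
 *	...
 *	s = splhigh();
 *	pool_put(foo_pool, f);
 *	splx(s);
 */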