/*
 * (MPSAFE)
 *
 * Copyright (c) 1997, 1998 John S. Dyson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. Absolutely no warranty of function or purpose is made by the author
 *    John S. Dyson.
 *
 * $FreeBSD: src/sys/vm/vm_zone.c,v 1.30.2.6 2002/10/10 19:50:16 dillon Exp $
 */

#include <sys/param.h>
#include <sys/queue.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_zone.h>

#include <sys/spinlock2.h>
#include <vm/vm_page2.h>

static MALLOC_DEFINE(M_ZONE, "ZONE", "Zone header");

#define ZONE_ERROR_INVALID	0
#define ZONE_ERROR_NOTFREE	1
#define ZONE_ERROR_ALREADYFREE	2

#define ZONE_ROUNDING		32

#define ZENTRY_FREE		0x12342378

long zone_burst = 128;

static void *zget(vm_zone_t z);

/*
 * Return an item from the specified zone.  This function is non-blocking for
 * ZONE_INTERRUPT zones.
 *
 * No requirements.
 */
void *
zalloc(vm_zone_t z)
{
	globaldata_t gd = mycpu;
	vm_zpcpu_t *zpcpu;
	void *item;
	long n;

#ifdef INVARIANTS
	if (z == NULL)
		zerror(ZONE_ERROR_INVALID);
#endif
	zpcpu = &z->zpcpu[gd->gd_cpuid];
retry:
	/*
	 * Avoid spinlock contention by allocating from a per-cpu queue
	 */
	if (zpcpu->zfreecnt > 0) {
		crit_enter_gd(gd);
		if (zpcpu->zfreecnt > 0) {
			item = zpcpu->zitems;
#ifdef INVARIANTS
			KASSERT(item != NULL,
				("zitems_pcpu unexpectedly NULL"));
			if (((void **)item)[1] != (void *)ZENTRY_FREE)
				zerror(ZONE_ERROR_NOTFREE);
			((void **)item)[1] = NULL;
#endif
			zpcpu->zitems = ((void **)item)[0];
			--zpcpu->zfreecnt;
			++zpcpu->znalloc;
			crit_exit_gd(gd);

			return item;
		}
		crit_exit_gd(gd);
	}

	/*
	 * Per-zone spinlock for the remainder.  Always load at least one
	 * item.
	 */
	spin_lock(&z->zlock);
	if (z->zfreecnt > z->zfreemin) {
		n = zone_burst;
		do {
			item = z->zitems;
#ifdef INVARIANTS
			KASSERT(item != NULL, ("zitems unexpectedly NULL"));
			if (((void **)item)[1] != (void *)ZENTRY_FREE)
				zerror(ZONE_ERROR_NOTFREE);
#endif
			z->zitems = ((void **)item)[0];
			--z->zfreecnt;
			((void **)item)[0] = zpcpu->zitems;
			zpcpu->zitems = item;
			++zpcpu->zfreecnt;
		} while (--n > 0 && z->zfreecnt > z->zfreemin);
		spin_unlock(&z->zlock);
		goto retry;
	} else {
		spin_unlock(&z->zlock);
		item = zget(z);
		/*
		 * PANICFAIL allows the caller to assume that the zalloc()
		 * will always succeed.  If it doesn't, we panic here.
		 */
		if (item == NULL && (z->zflags & ZONE_PANICFAIL))
			panic("zalloc(%s) failed", z->zname);
	}
	return item;
}
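
/*
 * Illustration only (not part of the allocator):  zalloc() and zfree()
 * link free items through the items themselves.  The first pointer-sized
 * word of a free item holds the free-list link and, under INVARIANTS,
 * the second holds the ZENTRY_FREE magic.  A hypothetical view of an
 * item while it sits on a free list:
 *
 *	struct zitem_free {
 *		void	*zf_next;	(((void **)item)[0], next free item)
 *		void	*zf_magic;	(((void **)item)[1], ZENTRY_FREE)
 *		...			(remaining bytes are left untouched)
 *	};
 *
 * This is why only data stored past the first two longwords survives a
 * zfree()/zalloc() cycle (see the type-stability note below).
 */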

/*
 * Free an item to the specified zone.
 *
 * No requirements.
 */
void
zfree(vm_zone_t z, void *item)
{
	globaldata_t gd = mycpu;
	vm_zpcpu_t *zpcpu;
	void *tail_item;
	long count;
	long zmax;

	zpcpu = &z->zpcpu[gd->gd_cpuid];

	/*
	 * Avoid spinlock contention by freeing into a per-cpu queue
	 */
	zmax = z->zmax_pcpu;
	if (zmax < 1024)
		zmax = 1024;

	/*
	 * Add to pcpu cache
	 */
	crit_enter_gd(gd);
	((void **)item)[0] = zpcpu->zitems;
#ifdef INVARIANTS
	if (((void **)item)[1] == (void *)ZENTRY_FREE)
		zerror(ZONE_ERROR_ALREADYFREE);
	((void **)item)[1] = (void *)ZENTRY_FREE;
#endif
	zpcpu->zitems = item;
	++zpcpu->zfreecnt;

	if (zpcpu->zfreecnt < zmax) {
		crit_exit_gd(gd);
		return;
	}

	/*
	 * Hysteresis: move (zmax) items, recalculated below, back to the
	 * pool.
	 */
	zmax = zmax / 2;
	if (zmax > zone_burst)
		zmax = zone_burst;
	tail_item = item;
	count = 1;

	while (count < zmax) {
		tail_item = ((void **)tail_item)[0];
		++count;
	}
	zpcpu->zitems = ((void **)tail_item)[0];
	zpcpu->zfreecnt -= count;

	/*
	 * Per-zone spinlock for the remainder.
	 *
	 * Also implement hysteresis by freeing a number of pcpu
	 * entries.
	 */
	spin_lock(&z->zlock);
	((void **)tail_item)[0] = z->zitems;
	z->zitems = item;
	z->zfreecnt += count;
	spin_unlock(&z->zlock);

	crit_exit_gd(gd);
}

/*
 * This file comprises a very simple zone allocator.  This is used
 * in lieu of the malloc allocator, where needed or more optimal.
 *
 * Note that the initial implementation of this had coloring, and
 * absolutely no improvement (actually perf degradation) occurred.
 *
 * Note also that the zones are type stable.  The only restriction is
 * that the first two longwords of a data structure can be changed
 * between allocations.  Any data that must be stable between allocations
 * must reside in areas after the first two longwords.
 *
 * zinitna, zinit, and zbootinit are the initialization routines.
 * zalloc and zfree are the allocation/free routines.
 */

LIST_HEAD(zlist, vm_zone) zlist = LIST_HEAD_INITIALIZER(zlist);
static int sysctl_vm_zone(SYSCTL_HANDLER_ARGS);
static vm_pindex_t zone_kmem_pages, zone_kern_pages;
static long zone_kmem_kvaspace;
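
/*
 * A minimal usage sketch (hypothetical names, for illustration only):
 * a subsystem normally creates a zone once with zinit() and then uses
 * zalloc()/zfree() for its fixed-size items:
 *
 *	static vm_zone_t foo_zone;
 *
 *	foo_zone = zinit("FOO", sizeof(struct foo), 0, ZONE_PANICFAIL);
 *	...
 *	struct foo *fp = zalloc(foo_zone);
 *	...
 *	zfree(foo_zone, fp);
 *
 * Keep in mind that the first two longwords of *fp are overwritten
 * while the item sits on a free list.
 */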

/*
 * Create a zone, but don't allocate the zone structure.  If the
 * zone had been previously created by the zone boot code, initialize
 * various parts of the zone code.
 *
 * If waits are not allowed during allocation (e.g. during interrupt
 * code), a-priori allocate the kernel virtual space, and allocate
 * only pages when needed.
 *
 * Arguments:
 *	z		pointer to zone structure.
 *	obj		pointer to VM object (opt).
 *	name		name of zone.
 *	size		size of zone entries.
 *	nentries	number of zone entries allocated (only ZONE_INTERRUPT.)
 *	flags		ZONE_INTERRUPT -- items can be allocated at interrupt
 *			time.
 *
 * (zalloc, the number of pages allocated when memory is needed, is set
 * internally below rather than passed as an argument.)
 *
 * Note that when using ZONE_INTERRUPT, the size of the zone is limited
 * by the nentries argument.  The size of the memory allocatable is
 * unlimited if ZONE_INTERRUPT is not set.
 *
 * No requirements.
 */
int
zinitna(vm_zone_t z, vm_object_t obj, char *name, size_t size,
	long nentries, uint32_t flags)
{
	size_t totsize;

	/*
	 * Only zones created with zinit() are destroyable.
	 */
	if (z->zflags & ZONE_DESTROYABLE)
		panic("zinitna: can't create destroyable zone");

	/*
	 * NOTE: We can only adjust zsize if we previously did not
	 *	 use zbootinit().
	 */
	if ((z->zflags & ZONE_BOOT) == 0) {
		z->zsize = roundup2(size, ZONE_ROUNDING);
		spin_init(&z->zlock, "zinitna");
		z->zfreecnt = 0;
		z->ztotal = 0;
		z->zmax = 0;
		z->zname = name;
		z->zitems = NULL;

		lwkt_gettoken(&vm_token);
		LIST_INSERT_HEAD(&zlist, z, zlink);
		lwkt_reltoken(&vm_token);

		bzero(z->zpcpu, sizeof(z->zpcpu));
	}

	z->zkmvec = NULL;
	z->zkmcur = z->zkmmax = 0;
	z->zflags |= flags;

	/*
	 * If we cannot wait, allocate KVA space up front, and we will fill
	 * in pages as needed.  This is particularly required when creating
	 * an allocation space for map entries in kernel_map, because we
	 * do not want to go into a recursion deadlock with
	 * vm_map_entry_reserve().
	 */
	if (z->zflags & ZONE_INTERRUPT) {
		totsize = round_page((size_t)z->zsize * nentries);
		atomic_add_long(&zone_kmem_kvaspace, totsize);

		z->zkva = kmem_alloc_pageable(&kernel_map, totsize,
					      VM_SUBSYS_ZALLOC);
		if (z->zkva == 0) {
			LIST_REMOVE(z, zlink);
			return 0;
		}

		z->zpagemax = totsize / PAGE_SIZE;
		if (obj == NULL) {
			z->zobj = vm_object_allocate(OBJT_DEFAULT, z->zpagemax);
		} else {
			z->zobj = obj;
			_vm_object_allocate(OBJT_DEFAULT, z->zpagemax, obj);
			vm_object_drop(obj);
		}
		z->zallocflag = VM_ALLOC_SYSTEM | VM_ALLOC_INTERRUPT |
				VM_ALLOC_NORMAL | VM_ALLOC_RETRY;
		z->zmax += nentries;
		z->zmax_pcpu = z->zmax / ncpus / 16;

		/*
		 * Set reasonable pcpu cache bounds.  Low-memory systems
		 * might try to cache too little, large-memory systems
		 * might try to cache more than necessary.
		 *
		 * In particular, pvzone can wind up being excessive and
		 * waste memory unnecessarily.
		 */
		if (z->zmax_pcpu < 1024)
			z->zmax_pcpu = 1024;
		if (z->zmax_pcpu * z->zsize > 16*1024*1024)
			z->zmax_pcpu = 16*1024*1024 / z->zsize;
	} else {
		z->zallocflag = VM_ALLOC_NORMAL | VM_ALLOC_SYSTEM;
		z->zmax = 0;
		z->zmax_pcpu = 8192;
	}

	if (z->zsize > PAGE_SIZE)
		z->zfreemin = 1;
	else
		z->zfreemin = PAGE_SIZE / z->zsize;

	z->zpagecount = 0;

	/*
	 * Reduce kernel_map spam by allocating in chunks of 4 pages.
	 */
	z->zalloc = 4;

	/*
	 * Populate the interrupt zone at creation time rather than
	 * on first allocation, as this is a potentially long operation.
	 */
	if (z->zflags & ZONE_INTERRUPT) {
		void *buf;

		buf = zget(z);
		zfree(z, buf);
	}

	return 1;
}
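
/*
 * A sketch of the ZONE_INTERRUPT case described above (hypothetical
 * names, for illustration only):  the caller supplies static zone and
 * object storage plus a hard item limit, and zinitna() reserves the
 * KVA up front so zget() never has to dig into the kernel_map at
 * interrupt time:
 *
 *	static struct vm_zone	bar_zone_store;
 *	static struct vm_object	bar_obj_store;
 *
 *	zinitna(&bar_zone_store, &bar_obj_store, "BAR",
 *		sizeof(struct bar), nbar_items, ZONE_INTERRUPT);
 *
 * Such a zone can never hold more than nbar_items items.  A normal
 * zone (flags == 0) is unlimited instead and grows z->zalloc pages at
 * a time as needed.
 */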

/*
 * Subroutine same as zinitna, except zone data structure is allocated
 * automatically by malloc.  This routine should normally be used, except
 * in certain tricky startup conditions in the VM system -- then
 * zbootinit and zinitna can be used.  Zinit is the standard zone
 * initialization call.
 *
 * No requirements.
 */
vm_zone_t
zinit(char *name, size_t size, long nentries, uint32_t flags)
{
	vm_zone_t z;

	z = (vm_zone_t)kmalloc(sizeof(struct vm_zone), M_ZONE, M_NOWAIT);
	if (z == NULL)
		return NULL;

	z->zflags = 0;
	if (zinitna(z, NULL, name, size, nentries,
		    flags & ~ZONE_DESTROYABLE) == 0) {
		kfree(z, M_ZONE);
		return NULL;
	}

	if (flags & ZONE_DESTROYABLE)
		z->zflags |= ZONE_DESTROYABLE;

	return z;
}

/*
 * Initialize a zone before the system is fully up.  This routine should
 * only be called before full VM startup.
 *
 * Called from the low level boot code only.
 */
void
zbootinit(vm_zone_t z, char *name, size_t size, void *item, long nitems)
{
	long i;

	spin_init(&z->zlock, "zbootinit");
	bzero(z->zpcpu, sizeof(z->zpcpu));
	z->zname = name;
	z->zsize = size;
	z->zpagemax = 0;
	z->zobj = NULL;
	z->zflags = ZONE_BOOT;
	z->zfreemin = 0;
	z->zallocflag = 0;
	z->zpagecount = 0;
	z->zalloc = 0;

	bzero(item, (size_t)nitems * z->zsize);
	z->zitems = NULL;
	for (i = 0; i < nitems; i++) {
		((void **)item)[0] = z->zitems;
#ifdef INVARIANTS
		((void **)item)[1] = (void *)ZENTRY_FREE;
#endif
		z->zitems = item;
		item = (uint8_t *)item + z->zsize;
	}
	z->zfreecnt = nitems;
	z->zmax = nitems;
	z->ztotal = nitems;

	lwkt_gettoken(&vm_token);
	LIST_INSERT_HEAD(&zlist, z, zlink);
	lwkt_reltoken(&vm_token);
}
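
/*
 * A sketch of the boot-time path (hypothetical names, for illustration
 * only):  very early code hands zbootinit() a static array of items,
 * and once the VM system is up the same zone can be finished off with
 * zinitna() -- the ZONE_BOOT flag set above keeps zinitna() from
 * reinitializing the fields already set up here:
 *
 *	static struct vm_zone	baz_zone_store;
 *	static struct baz	baz_boot_items[BAZ_BOOT_COUNT];
 *
 *	zbootinit(&baz_zone_store, "BAZ", sizeof(struct baz),
 *		  baz_boot_items, BAZ_BOOT_COUNT);
 *	...
 *	zinitna(&baz_zone_store, NULL, "BAZ", sizeof(struct baz), 0, 0);
 */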

/*
 * Release all resources owned by a zone created with zinit().
 *
 * No requirements.
 */
void
zdestroy(vm_zone_t z)
{
	vm_page_t m;
	vm_pindex_t i;

	if (z == NULL)
		panic("zdestroy: null zone");
	if ((z->zflags & ZONE_DESTROYABLE) == 0)
		panic("zdestroy: undestroyable zone");

	lwkt_gettoken(&vm_token);
	LIST_REMOVE(z, zlink);
	lwkt_reltoken(&vm_token);

	/*
	 * Release virtual mappings, physical memory and update sysctl stats.
	 */
	if (z->zflags & ZONE_INTERRUPT) {
		/*
		 * Pages mapped via pmap_kenter() must be removed from the
		 * kernel_pmap before calling kmem_free() to avoid issues
		 * with kernel_pmap.pm_stats.resident_count.
		 */
		pmap_qremove(z->zkva, z->zpagemax);
		vm_object_hold(z->zobj);
		for (i = 0; i < z->zpagecount; ++i) {
			m = vm_page_lookup_busy_wait(z->zobj, i, TRUE, "vmzd");
			vm_page_unwire(m, 0);
			vm_page_free(m);
		}

		/*
		 * Free the mapping.
		 */
		kmem_free(&kernel_map, z->zkva,
			  (size_t)z->zpagemax * PAGE_SIZE);
		atomic_subtract_long(&zone_kmem_kvaspace,
				     (size_t)z->zpagemax * PAGE_SIZE);

		/*
		 * Free the backing object and physical pages.
		 */
		vm_object_deallocate(z->zobj);
		vm_object_drop(z->zobj);
		atomic_subtract_long(&zone_kmem_pages, z->zpagecount);
	} else {
		for (i = 0; i < z->zkmcur; i++) {
			kmem_free(&kernel_map, z->zkmvec[i],
				  (size_t)z->zalloc * PAGE_SIZE);
			atomic_subtract_long(&zone_kern_pages, z->zalloc);
		}
		if (z->zkmvec != NULL)
			kfree(z->zkmvec, M_ZONE);
	}

	spin_uninit(&z->zlock);
	kfree(z, M_ZONE);
}

/*
 * void *zalloc(vm_zone_t zone) --
 *	Returns an item from a specified zone.  May not be called from a
 *	FAST interrupt or IPI function.
 *
 * void zfree(vm_zone_t zone, void *item) --
 *	Frees an item back to a specified zone.  May not be called from a
 *	FAST interrupt or IPI function.
 */

/*
 * Internal zone routine.  Not to be called from external (non vm_zone)
 * code.
 *
 * No requirements.
 */
static void *
zget(vm_zone_t z)
{
	vm_page_t m;
	long nitems;
	long savezpc;
	size_t nbytes;
	size_t noffset;
	void *item;
	vm_pindex_t npages;
	vm_pindex_t i;

	if (z == NULL)
		panic("zget: null zone");

	if (z->zflags & ZONE_INTERRUPT) {
		/*
		 * Interrupt zones do not mess with the kernel_map, they
		 * simply populate an existing mapping.
		 *
		 * First reserve the required space.
		 */
		vm_object_hold(z->zobj);
		noffset = (size_t)z->zpagecount * PAGE_SIZE;
		noffset -= noffset % z->zsize;
		savezpc = z->zpagecount;
		if (z->zpagecount + z->zalloc > z->zpagemax)
			z->zpagecount = z->zpagemax;
		else
			z->zpagecount += z->zalloc;
		item = (char *)z->zkva + noffset;
		npages = z->zpagecount - savezpc;
		nitems = ((size_t)(savezpc + npages) * PAGE_SIZE - noffset) /
			 z->zsize;
		atomic_add_long(&zone_kmem_pages, npages);

		/*
		 * Now allocate the pages.  Note that we can block in the
		 * loop, so we've already done all the necessary calculations
		 * and reservations above.
		 */
		for (i = 0; i < npages; ++i) {
			vm_offset_t zkva;

			m = vm_page_alloc(z->zobj, savezpc + i, z->zallocflag);
			KKASSERT(m != NULL);
			/* note: z might be modified due to blocking */

			KKASSERT(m->queue == PQ_NONE);
			m->valid = VM_PAGE_BITS_ALL;
			vm_page_wire(m);
			vm_page_wakeup(m);

			zkva = z->zkva + (size_t)(savezpc + i) * PAGE_SIZE;
			pmap_kenter(zkva, VM_PAGE_TO_PHYS(m));
			bzero((void *)zkva, PAGE_SIZE);
		}
		vm_object_drop(z->zobj);
	} else if (z->zflags & ZONE_SPECIAL) {
		/*
		 * The special zone is the one used for vm_map_entry_t's.
		 * We have to avoid an infinite recursion in
		 * vm_map_entry_reserve() by using vm_map_entry_kreserve()
		 * instead.  The map entries are pre-reserved by the kernel
		 * by vm_map_entry_reserve_cpu_init().
		 */
		nbytes = (size_t)z->zalloc * PAGE_SIZE;

		item = (void *)kmem_alloc3(&kernel_map, nbytes,
					   VM_SUBSYS_ZALLOC, KM_KRESERVE);

		/* note: z might be modified due to blocking */
		if (item != NULL) {
			atomic_add_long(&zone_kern_pages, z->zalloc);
			bzero(item, nbytes);
		} else {
			nbytes = 0;
		}
		nitems = nbytes / z->zsize;
	} else {
		/*
		 * Otherwise allocate KVA from the kernel_map.
		 */
		nbytes = (size_t)z->zalloc * PAGE_SIZE;

		item = (void *)kmem_alloc3(&kernel_map, nbytes,
					   VM_SUBSYS_ZALLOC, 0);

		/* note: z might be modified due to blocking */
		if (item != NULL) {
			atomic_add_long(&zone_kern_pages, z->zalloc);
			bzero(item, nbytes);

			if (z->zflags & ZONE_DESTROYABLE) {
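				/*
				 * Remember each kernel_map chunk in zkmvec,
				 * doubling the array as needed, so that
				 * zdestroy() can kmem_free() the chunks
				 * later.
				 */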
				if (z->zkmcur == z->zkmmax) {
					z->zkmmax = z->zkmmax == 0 ?
						    1 : z->zkmmax * 2;
					z->zkmvec = krealloc(z->zkmvec,
					    z->zkmmax * sizeof(z->zkmvec[0]),
					    M_ZONE, M_WAITOK);
				}
				z->zkmvec[z->zkmcur++] = (vm_offset_t)item;
			}
		} else {
			nbytes = 0;
		}
		nitems = nbytes / z->zsize;
	}

	spin_lock(&z->zlock);
	z->ztotal += nitems;

	/*
	 * Save one for immediate allocation
	 */
	if (nitems != 0) {
		nitems -= 1;
		for (i = 0; i < nitems; i++) {
			((void **)item)[0] = z->zitems;
#ifdef INVARIANTS
			((void **)item)[1] = (void *)ZENTRY_FREE;
#endif
			z->zitems = item;
			item = (uint8_t *)item + z->zsize;
		}
		z->zfreecnt += nitems;
		++z->znalloc;
	} else if (z->zfreecnt > 0) {
		item = z->zitems;
		z->zitems = ((void **)item)[0];
#ifdef INVARIANTS
		if (((void **)item)[1] != (void *)ZENTRY_FREE)
			zerror(ZONE_ERROR_NOTFREE);
		((void **)item)[1] = NULL;
#endif
		--z->zfreecnt;
		++z->znalloc;
	} else {
		item = NULL;
	}
	spin_unlock(&z->zlock);

	/*
	 * A special zone may have used a kernel-reserved vm_map_entry.  If
	 * so we have to be sure to recover our reserve so we don't run out.
	 * We will panic if we run out.
	 */
	if (z->zflags & ZONE_SPECIAL)
		vm_map_entry_reserve(0);

	return item;
}

/*
 * No requirements.
 */
static int
sysctl_vm_zone(SYSCTL_HANDLER_ARGS)
{
	vm_zone_t curzone;
	char tmpbuf[128];
	char tmpname[14];
	int error = 0;

	ksnprintf(tmpbuf, sizeof(tmpbuf),
		  "\nITEM            SIZE     LIMIT    USED    FREE  REQUESTS\n");
	error = SYSCTL_OUT(req, tmpbuf, strlen(tmpbuf));
	if (error)
		return (error);

	lwkt_gettoken(&vm_token);
	LIST_FOREACH(curzone, &zlist, zlink) {
		size_t i;
		size_t len;
		int offset;
		long freecnt;
		long znalloc;
		int n;

		len = strlen(curzone->zname);
		if (len >= (sizeof(tmpname) - 1))
			len = (sizeof(tmpname) - 1);
		for (i = 0; i < sizeof(tmpname) - 1; i++)
			tmpname[i] = ' ';
		tmpname[i] = 0;
		memcpy(tmpname, curzone->zname, len);
		tmpname[len] = ':';
		offset = 0;
		if (curzone == LIST_FIRST(&zlist)) {
			offset = 1;
			tmpbuf[0] = '\n';
		}
		freecnt = curzone->zfreecnt;
		znalloc = curzone->znalloc;
		for (n = 0; n < ncpus; ++n) {
			freecnt += curzone->zpcpu[n].zfreecnt;
			znalloc += curzone->zpcpu[n].znalloc;
		}

		ksnprintf(tmpbuf + offset, sizeof(tmpbuf) - offset,
			  "%s %6.6lu, %8.8lu, %6.6lu, %6.6lu, %8.8lu\n",
			  tmpname, curzone->zsize, curzone->zmax,
			  (curzone->ztotal - freecnt),
			  freecnt, znalloc);

		len = strlen((char *)tmpbuf);
		if (LIST_NEXT(curzone, zlink) == NULL)
			tmpbuf[len - 1] = 0;

		error = SYSCTL_OUT(req, tmpbuf, len);

		if (error)
			break;
	}
	lwkt_reltoken(&vm_token);
	return (error);
}
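
/*
 * The handler above backs the string node registered as vm.zone at the
 * bottom of this file.  A minimal userland sketch (illustration only,
 * fixed-size buffer assumed) for reading it, equivalent to running
 * "sysctl vm.zone":
 *
 *	char buf[65536];
 *	size_t len = sizeof(buf);
 *
 *	if (sysctlbyname("vm.zone", buf, &len, NULL, 0) == 0)
 *		printf("%.*s\n", (int)len, buf);
 */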

#if defined(INVARIANTS)

/*
 * Debugging only.
 */
void
zerror(int error)
{
	char *msg;

	switch (error) {
	case ZONE_ERROR_INVALID:
		msg = "zone: invalid zone";
		break;
	case ZONE_ERROR_NOTFREE:
		msg = "zone: entry not free";
		break;
	case ZONE_ERROR_ALREADYFREE:
		msg = "zone: freeing free entry";
		break;
	default:
		msg = "zone: invalid error";
		break;
	}
	panic("%s", msg);
}
#endif

SYSCTL_OID(_vm, OID_AUTO, zone, CTLTYPE_STRING|CTLFLAG_RD, \
	   NULL, 0, sysctl_vm_zone, "A", "Zone Info");

SYSCTL_LONG(_vm, OID_AUTO, zone_kmem_pages,
	    CTLFLAG_RD, &zone_kmem_pages, 0,
	    "Number of interrupt safe pages allocated by zone");
SYSCTL_LONG(_vm, OID_AUTO, zone_burst,
	    CTLFLAG_RW, &zone_burst, 0,
	    "Burst from depot to pcpu cache");
SYSCTL_LONG(_vm, OID_AUTO, zone_kmem_kvaspace,
	    CTLFLAG_RD, &zone_kmem_kvaspace, 0,
	    "KVA space allocated by zone");
SYSCTL_LONG(_vm, OID_AUTO, zone_kern_pages,
	    CTLFLAG_RD, &zone_kern_pages, 0,
	    "Number of non-interrupt safe pages allocated by zone");