/*
 * (MPSAFE)
 *
 * Copyright (c) 1997, 1998 John S. Dyson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. Absolutely no warranty of function or purpose is made by the author
 *    John S. Dyson.
 *
 * $FreeBSD: src/sys/vm/vm_zone.c,v 1.30.2.6 2002/10/10 19:50:16 dillon Exp $
 * $DragonFly: src/sys/vm/vm_zone.c,v 1.28 2008/01/23 17:35:48 nth Exp $
 */

#include <sys/param.h>
#include <sys/queue.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_zone.h>

#include <sys/spinlock2.h>

static MALLOC_DEFINE(M_ZONE, "ZONE", "Zone header");

#define ZONE_ERROR_INVALID      0
#define ZONE_ERROR_NOTFREE      1
#define ZONE_ERROR_ALREADYFREE  2

#define ZONE_ROUNDING   32

#define ZENTRY_FREE     0x12342378

int zone_burst = 32;

static void *zget(vm_zone_t z);

/*
 * Return an item from the specified zone.  This function is non-blocking for
 * ZONE_INTERRUPT zones.
 *
 * No requirements.
 */
void *
zalloc(vm_zone_t z)
{
        globaldata_t gd = mycpu;
        void *item;
        int n;

#ifdef INVARIANTS
        if (z == NULL)
                zerror(ZONE_ERROR_INVALID);
#endif
retry:
        /*
         * Avoid spinlock contention by allocating from a per-cpu queue
         */
        if (z->zfreecnt_pcpu[gd->gd_cpuid] > 0) {
                crit_enter_gd(gd);
                if (z->zfreecnt_pcpu[gd->gd_cpuid] > 0) {
                        item = z->zitems_pcpu[gd->gd_cpuid];
#ifdef INVARIANTS
                        KASSERT(item != NULL,
                                ("zitems_pcpu unexpectedly NULL"));
                        if (((void **)item)[1] != (void *)ZENTRY_FREE)
                                zerror(ZONE_ERROR_NOTFREE);
                        ((void **)item)[1] = 0;
#endif
                        z->zitems_pcpu[gd->gd_cpuid] = ((void **)item)[0];
                        --z->zfreecnt_pcpu[gd->gd_cpuid];
                        z->znalloc++;
                        crit_exit_gd(gd);
                        return item;
                }
                crit_exit_gd(gd);
        }

        /*
         * Per-zone spinlock for the remainder.  Always load at least one
         * item.
         */
        spin_lock(&z->zlock);
        if (z->zfreecnt > z->zfreemin) {
                n = zone_burst;
                do {
                        item = z->zitems;
#ifdef INVARIANTS
                        KASSERT(item != NULL, ("zitems unexpectedly NULL"));
                        if (((void **)item)[1] != (void *)ZENTRY_FREE)
                                zerror(ZONE_ERROR_NOTFREE);
#endif
                        z->zitems = ((void **)item)[0];
                        z->zfreecnt--;
                        ((void **)item)[0] = z->zitems_pcpu[gd->gd_cpuid];
                        z->zitems_pcpu[gd->gd_cpuid] = item;
                        ++z->zfreecnt_pcpu[gd->gd_cpuid];
                } while (--n > 0 && z->zfreecnt > z->zfreemin);
                spin_unlock(&z->zlock);
                goto retry;
        } else {
                spin_unlock(&z->zlock);
                item = zget(z);
                /*
                 * PANICFAIL allows the caller to assume that the zalloc()
                 * will always succeed.  If it doesn't, we panic here.
                 */
                if (item == NULL && (z->zflags & ZONE_PANICFAIL))
                        panic("zalloc(%s) failed", z->zname);
        }
        return item;
}
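
/*
 * Example (illustrative sketch only, not part of this file): a typical
 * consumer creates a zone once with zinit() and then allocates and frees
 * fixed-size items with zalloc()/zfree().  "struct foo" and foo_zone are
 * hypothetical names; the calls match the interfaces defined in this file.
 *
 *      static vm_zone_t foo_zone;
 *
 *      foo_zone = zinit("FOO", sizeof(struct foo), 0, 0, 1);
 *      fp = zalloc(foo_zone);     (may block; NULL unless ZONE_PANICFAIL)
 *      bzero(fp, sizeof(*fp));    (first two longwords were used as free-
 *                                  list linkage and must be reinitialized)
 *      ...
 *      zfree(foo_zone, fp);       (item returns to the per-cpu cache)
 */
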
/*
 * Free an item to the specified zone.
 *
 * No requirements.
 */
void
zfree(vm_zone_t z, void *item)
{
        globaldata_t gd = mycpu;
        int zmax;

        /*
         * Avoid spinlock contention by freeing into a per-cpu queue
         */
        if ((zmax = z->zmax) != 0)
                zmax = zmax / ncpus / 16;
        if (zmax < 64)
                zmax = 64;

        if (z->zfreecnt_pcpu[gd->gd_cpuid] < zmax) {
                crit_enter_gd(gd);
                ((void **)item)[0] = z->zitems_pcpu[gd->gd_cpuid];
#ifdef INVARIANTS
                if (((void **)item)[1] == (void *)ZENTRY_FREE)
                        zerror(ZONE_ERROR_ALREADYFREE);
                ((void **)item)[1] = (void *)ZENTRY_FREE;
#endif
                z->zitems_pcpu[gd->gd_cpuid] = item;
                ++z->zfreecnt_pcpu[gd->gd_cpuid];
                crit_exit_gd(gd);
                return;
        }

        /*
         * Per-zone spinlock for the remainder.
         */
        spin_lock(&z->zlock);
        ((void **)item)[0] = z->zitems;
#ifdef INVARIANTS
        if (((void **)item)[1] == (void *)ZENTRY_FREE)
                zerror(ZONE_ERROR_ALREADYFREE);
        ((void **)item)[1] = (void *)ZENTRY_FREE;
#endif
        z->zitems = item;
        z->zfreecnt++;
        spin_unlock(&z->zlock);
}

/*
 * This file comprises a very simple zone allocator.  This is used
 * in lieu of the malloc allocator, where needed or more optimal.
 *
 * Note that the initial implementation of this had coloring, and
 * absolutely no improvement (actually perf degradation) occurred.
 *
 * Note also that the zones are type stable.  The only restriction is
 * that the first two longwords of a data structure can be changed
 * between allocations.  Any data that must be stable between allocations
 * must reside in areas after the first two longwords.
 *
 * zinitna, zinit, and zbootinit are the initialization routines.
 * zalloc and zfree are the allocation/free routines.
 */

LIST_HEAD(zlist, vm_zone) zlist = LIST_HEAD_INITIALIZER(zlist);
static int sysctl_vm_zone(SYSCTL_HANDLER_ARGS);
static int zone_kmem_pages, zone_kern_pages;
static long zone_kmem_kvaspace;
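
/*
 * Illustrative layout sketch (assumption: a hypothetical consumer struct,
 * not part of this file).  Because the free-list link and the INVARIANTS
 * magic occupy the first two pointer-sized words of a free item, only data
 * placed after those words survives a free/alloc cycle in a type-stable
 * zone:
 *
 *      struct foo {
 *              void    *f_scratch1;    (clobbered while the item is free)
 *              void    *f_scratch2;    (clobbered while the item is free)
 *              struct spinlock f_lock; (stable across free/alloc cycles)
 *              ...
 *      };
 */
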
/*
 * Create a zone, but don't allocate the zone structure.  If the
 * zone had been previously created by the zone boot code, initialize
 * various parts of the zone code.
 *
 * If waits are not allowed during allocation (e.g. during interrupt
 * code), a-priori allocate the kernel virtual space, and allocate
 * only pages when needed.
 *
 * Arguments:
 *      z               pointer to zone structure.
 *      obj             pointer to VM object (opt).
 *      name            name of zone.
 *      size            size of zone entries.
 *      nentries        number of zone entries allocated (only ZONE_INTERRUPT).
 *      flags           ZONE_INTERRUPT -- items can be allocated at interrupt time.
 *      zalloc          number of pages allocated when memory is needed.
 *
 * Note that when using ZONE_INTERRUPT, the size of the zone is limited
 * by the nentries argument.  The size of the memory allocatable is
 * unlimited if ZONE_INTERRUPT is not set.
 *
 * No requirements.
 */
int
zinitna(vm_zone_t z, vm_object_t obj, char *name, int size,
        int nentries, int flags, int zalloc)
{
        size_t totsize;

        /*
         * Only zones created with zinit() are destroyable.
         */
        if (z->zflags & ZONE_DESTROYABLE)
                panic("zinitna: can't create destroyable zone");

        /*
         * NOTE: We can only adjust zsize if we previously did not
         *       use zbootinit().
         */
        if ((z->zflags & ZONE_BOOT) == 0) {
                z->zsize = (size + ZONE_ROUNDING - 1) & ~(ZONE_ROUNDING - 1);
                spin_init(&z->zlock);
                z->zfreecnt = 0;
                z->ztotal = 0;
                z->zmax = 0;
                z->zname = name;
                z->znalloc = 0;
                z->zitems = NULL;

                lwkt_gettoken(&vm_token);
                LIST_INSERT_HEAD(&zlist, z, zlink);
                lwkt_reltoken(&vm_token);

                bzero(z->zitems_pcpu, sizeof(z->zitems_pcpu));
                bzero(z->zfreecnt_pcpu, sizeof(z->zfreecnt_pcpu));
        }

        z->zkmvec = NULL;
        z->zkmcur = z->zkmmax = 0;
        z->zflags |= flags;

        /*
         * If we cannot wait, allocate KVA space up front, and we will fill
         * in pages as needed.  This is particularly required when creating
         * an allocation space for map entries in kernel_map, because we
         * do not want to go into a recursion deadlock with
         * vm_map_entry_reserve().
         */
        if (z->zflags & ZONE_INTERRUPT) {
                totsize = round_page((size_t)z->zsize * nentries);
                atomic_add_long(&zone_kmem_kvaspace, totsize);

                z->zkva = kmem_alloc_pageable(&kernel_map, totsize);
                if (z->zkva == 0) {
                        LIST_REMOVE(z, zlink);
                        return 0;
                }

                z->zpagemax = totsize / PAGE_SIZE;
                if (obj == NULL) {
                        z->zobj = vm_object_allocate(OBJT_DEFAULT, z->zpagemax);
                } else {
                        z->zobj = obj;
                        _vm_object_allocate(OBJT_DEFAULT, z->zpagemax, obj);
                }
                z->zallocflag = VM_ALLOC_SYSTEM | VM_ALLOC_INTERRUPT |
                                VM_ALLOC_NORMAL | VM_ALLOC_RETRY;
                z->zmax += nentries;
        } else {
                z->zallocflag = VM_ALLOC_NORMAL | VM_ALLOC_SYSTEM;
                z->zmax = 0;
        }

        if (z->zsize > PAGE_SIZE)
                z->zfreemin = 1;
        else
                z->zfreemin = PAGE_SIZE / z->zsize;

        z->zpagecount = 0;
        if (zalloc)
                z->zalloc = zalloc;
        else
                z->zalloc = 1;

        /*
         * Populate the interrupt zone at creation time rather than
         * on first allocation, as this is a potentially long operation.
         */
        if (z->zflags & ZONE_INTERRUPT) {
                void *buf;

                buf = zget(z);
                zfree(z, buf);
        }

        return 1;
}

/*
 * Subroutine same as zinitna, except the zone data structure is allocated
 * automatically by malloc.  This routine should normally be used, except
 * in certain tricky startup conditions in the VM system -- then
 * zbootinit and zinitna can be used.  Zinit is the standard zone
 * initialization call.
 *
 * No requirements.
 */
vm_zone_t
zinit(char *name, int size, int nentries, int flags, int zalloc)
{
        vm_zone_t z;

        z = (vm_zone_t)kmalloc(sizeof (struct vm_zone), M_ZONE, M_NOWAIT);
        if (z == NULL)
                return NULL;

        z->zflags = 0;
        if (zinitna(z, NULL, name, size, nentries,
                    flags & ~ZONE_DESTROYABLE, zalloc) == 0) {
                kfree(z, M_ZONE);
                return NULL;
        }

        if (flags & ZONE_DESTROYABLE)
                z->zflags |= ZONE_DESTROYABLE;

        return z;
}
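
/*
 * Example (illustrative sketch only; the "bar" names are hypothetical): an
 * interrupt-safe zone must be given an nentries limit because all of its
 * KVA is reserved up front, whereas an ordinary zone passes nentries = 0
 * and grows on demand:
 *
 *      static struct vm_zone bar_zone_store;
 *
 *      (interrupt-time allocations, limited to roughly 4096 items,
 *       4 pages added per refill, zone structure supplied by the caller)
 *      zinitna(&bar_zone_store, NULL, "BAR", sizeof(struct bar),
 *              4096, ZONE_INTERRUPT, 4);
 *
 *      (ordinary blocking zone, unlimited, zone structure kmalloc'd)
 *      bar_zone = zinit("BAR", sizeof(struct bar), 0, 0, 1);
 */
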
/*
 * Initialize a zone before the system is fully up.  This routine should
 * only be called before full VM startup.
 *
 * Called from the low level boot code only.
 */
void
zbootinit(vm_zone_t z, char *name, int size, void *item, int nitems)
{
        int i;

        bzero(z->zitems_pcpu, sizeof(z->zitems_pcpu));
        bzero(z->zfreecnt_pcpu, sizeof(z->zfreecnt_pcpu));

        z->zname = name;
        z->zsize = size;
        z->zpagemax = 0;
        z->zobj = NULL;
        z->zflags = ZONE_BOOT;
        z->zfreemin = 0;
        z->zallocflag = 0;
        z->zpagecount = 0;
        z->zalloc = 0;
        z->znalloc = 0;
        spin_init(&z->zlock);

        bzero(item, (size_t)nitems * z->zsize);
        z->zitems = NULL;
        for (i = 0; i < nitems; i++) {
                ((void **)item)[0] = z->zitems;
#ifdef INVARIANTS
                ((void **)item)[1] = (void *)ZENTRY_FREE;
#endif
                z->zitems = item;
                item = (uint8_t *)item + z->zsize;
        }
        z->zfreecnt = nitems;
        z->zmax = nitems;
        z->ztotal = nitems;

        lwkt_gettoken(&vm_token);
        LIST_INSERT_HEAD(&zlist, z, zlink);
        lwkt_reltoken(&vm_token);
}

/*
 * Release all resources owned by zone created with zinit().
 *
 * No requirements.
 */
void
zdestroy(vm_zone_t z)
{
        vm_page_t m;
        int i;

        if (z == NULL)
                panic("zdestroy: null zone");
        if ((z->zflags & ZONE_DESTROYABLE) == 0)
                panic("zdestroy: undestroyable zone");

        lwkt_gettoken(&vm_token);
        LIST_REMOVE(z, zlink);
        lwkt_reltoken(&vm_token);

        /*
         * Release virtual mappings, physical memory and update sysctl stats.
         */
        if (z->zflags & ZONE_INTERRUPT) {
                /*
                 * Pages mapped via pmap_kenter() must be removed from the
                 * kernel_pmap() before calling kmem_free() to avoid issues
                 * with kernel_pmap.pm_stats.resident_count.
                 */
                pmap_qremove(z->zkva, z->zpagemax);
                vm_object_hold(z->zobj);
                for (i = 0; i < z->zpagecount; ++i) {
                        m = vm_page_lookup_busy_wait(z->zobj, i, TRUE, "vmzd");
                        vm_page_unwire(m, 0);
                        vm_page_free(m);
                }

                /*
                 * Free the mapping.
                 */
                kmem_free(&kernel_map, z->zkva,
                          (size_t)z->zpagemax * PAGE_SIZE);
                atomic_subtract_long(&zone_kmem_kvaspace,
                                     (size_t)z->zpagemax * PAGE_SIZE);

                /*
                 * Free the backing object and physical pages.
                 */
                vm_object_deallocate(z->zobj);
                vm_object_drop(z->zobj);
                atomic_subtract_int(&zone_kmem_pages, z->zpagecount);
        } else {
                for (i = 0; i < z->zkmcur; i++) {
                        kmem_free(&kernel_map, z->zkmvec[i],
                                  (size_t)z->zalloc * PAGE_SIZE);
                        atomic_subtract_int(&zone_kern_pages, z->zalloc);
                }
                if (z->zkmvec != NULL)
                        kfree(z->zkmvec, M_ZONE);
        }

        spin_uninit(&z->zlock);
        kfree(z, M_ZONE);
}

/*
 * void *zalloc(vm_zone_t zone) --
 *      Returns an item from a specified zone.  May not be called from a
 *      FAST interrupt or IPI function.
 *
 * void zfree(vm_zone_t zone, void *item) --
 *      Frees an item back to a specified zone.  May not be called from a
 *      FAST interrupt or IPI function.
 */
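
/*
 * Example (illustrative sketch only; the "tmp" names are hypothetical): only
 * zones created through zinit() with ZONE_DESTROYABLE may later be torn
 * down with zdestroy(), which unlinks the zone and returns its pages and
 * KVA to the system:
 *
 *      tmp_zone = zinit("TMPOBJ", sizeof(struct tmpobj), 0,
 *                       ZONE_DESTROYABLE, 4);
 *      p = zalloc(tmp_zone);
 *      ...
 *      zfree(tmp_zone, p);
 *      zdestroy(tmp_zone);     (no items may remain in use at this point)
 */
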
/*
 * Internal zone routine.  Not to be called from external (non vm_zone) code.
 *
 * No requirements.
 */
static void *
zget(vm_zone_t z)
{
        int i;
        vm_page_t m;
        int nitems;
        int npages;
        int savezpc;
        size_t nbytes;
        size_t noffset;
        void *item;

        if (z == NULL)
                panic("zget: null zone");

        if (z->zflags & ZONE_INTERRUPT) {
                /*
                 * Interrupt zones do not mess with the kernel_map, they
                 * simply populate an existing mapping.
                 *
                 * First reserve the required space.
                 */
                vm_object_hold(z->zobj);
                noffset = (size_t)z->zpagecount * PAGE_SIZE;
                noffset -= noffset % z->zsize;
                savezpc = z->zpagecount;
                if (z->zpagecount + z->zalloc > z->zpagemax)
                        z->zpagecount = z->zpagemax;
                else
                        z->zpagecount += z->zalloc;
                item = (char *)z->zkva + noffset;
                npages = z->zpagecount - savezpc;
                nitems = ((size_t)(savezpc + npages) * PAGE_SIZE - noffset) /
                         z->zsize;
                atomic_add_int(&zone_kmem_pages, npages);

                /*
                 * Now allocate the pages.  Note that we can block in the
                 * loop, so we've already done all the necessary calculations
                 * and reservations above.
                 */
                for (i = 0; i < npages; ++i) {
                        vm_offset_t zkva;

                        m = vm_page_alloc(z->zobj, savezpc + i, z->zallocflag);
                        KKASSERT(m != NULL);
                        /* note: z might be modified due to blocking */

                        KKASSERT(m->queue == PQ_NONE);
                        m->valid = VM_PAGE_BITS_ALL;
                        vm_page_wire(m);
                        vm_page_wakeup(m);

                        zkva = z->zkva + (size_t)(savezpc + i) * PAGE_SIZE;
                        pmap_kenter(zkva, VM_PAGE_TO_PHYS(m));
                        bzero((void *)zkva, PAGE_SIZE);
                }
                vm_object_drop(z->zobj);
        } else if (z->zflags & ZONE_SPECIAL) {
                /*
                 * The special zone is the one used for vm_map_entry_t's.
                 * We have to avoid an infinite recursion in
                 * vm_map_entry_reserve() by using vm_map_entry_kreserve()
                 * instead.  The map entries are pre-reserved by the kernel
                 * by vm_map_entry_reserve_cpu_init().
                 */
                nbytes = (size_t)z->zalloc * PAGE_SIZE;

                item = (void *)kmem_alloc3(&kernel_map, nbytes, KM_KRESERVE);

                /* note: z might be modified due to blocking */
                if (item != NULL) {
                        zone_kern_pages += z->zalloc;   /* not MP-safe XXX */
                        bzero(item, nbytes);
                } else {
                        nbytes = 0;
                }
                nitems = nbytes / z->zsize;
        } else {
                /*
                 * Otherwise allocate KVA from the kernel_map.
                 */
                nbytes = (size_t)z->zalloc * PAGE_SIZE;

                item = (void *)kmem_alloc3(&kernel_map, nbytes, 0);

                /* note: z might be modified due to blocking */
                if (item != NULL) {
                        zone_kern_pages += z->zalloc;   /* not MP-safe XXX */
                        bzero(item, nbytes);

                        if (z->zflags & ZONE_DESTROYABLE) {
                                if (z->zkmcur == z->zkmmax) {
                                        z->zkmmax =
                                            z->zkmmax == 0 ? 1 : z->zkmmax * 2;
                                        z->zkmvec = krealloc(z->zkmvec,
                                            z->zkmmax * sizeof(z->zkmvec[0]),
                                            M_ZONE, M_WAITOK);
                                }
                                z->zkmvec[z->zkmcur++] = (vm_offset_t)item;
                        }
                } else {
                        nbytes = 0;
                }
                nitems = nbytes / z->zsize;
        }

        spin_lock(&z->zlock);
        z->ztotal += nitems;
        /*
         * Save one for immediate allocation
         */
        if (nitems != 0) {
                nitems -= 1;
                for (i = 0; i < nitems; i++) {
                        ((void **)item)[0] = z->zitems;
#ifdef INVARIANTS
                        ((void **)item)[1] = (void *)ZENTRY_FREE;
#endif
                        z->zitems = item;
                        item = (uint8_t *)item + z->zsize;
                }
                z->zfreecnt += nitems;
                z->znalloc++;
        } else if (z->zfreecnt > 0) {
                item = z->zitems;
                z->zitems = ((void **)item)[0];
#ifdef INVARIANTS
                if (((void **)item)[1] != (void *)ZENTRY_FREE)
                        zerror(ZONE_ERROR_NOTFREE);
                ((void **)item)[1] = 0;
#endif
                z->zfreecnt--;
                z->znalloc++;
        } else {
                item = NULL;
        }
        spin_unlock(&z->zlock);

        /*
         * A special zone may have used a kernel-reserved vm_map_entry.  If
         * so we have to be sure to recover our reserve so we don't run out.
         * We will panic if we run out.
         */
        if (z->zflags & ZONE_SPECIAL)
                vm_map_entry_reserve(0);

        return item;
}

/*
 * No requirements.
 */
static int
sysctl_vm_zone(SYSCTL_HANDLER_ARGS)
{
        int error = 0;
        vm_zone_t curzone;
        char tmpbuf[128];
        char tmpname[14];

        ksnprintf(tmpbuf, sizeof(tmpbuf),
            "\nITEM SIZE LIMIT USED FREE REQUESTS\n");
        error = SYSCTL_OUT(req, tmpbuf, strlen(tmpbuf));
        if (error)
                return (error);

        lwkt_gettoken(&vm_token);
        LIST_FOREACH(curzone, &zlist, zlink) {
                int i;
                int len;
                int offset;

                len = strlen(curzone->zname);
                if (len >= (sizeof(tmpname) - 1))
                        len = (sizeof(tmpname) - 1);
                for (i = 0; i < sizeof(tmpname) - 1; i++)
                        tmpname[i] = ' ';
                tmpname[i] = 0;
                memcpy(tmpname, curzone->zname, len);
                tmpname[len] = ':';
                offset = 0;
                if (curzone == LIST_FIRST(&zlist)) {
                        offset = 1;
                        tmpbuf[0] = '\n';
                }

                ksnprintf(tmpbuf + offset, sizeof(tmpbuf) - offset,
                          "%s %6.6u, %8.8u, %6.6u, %6.6u, %8.8u\n",
                          tmpname, curzone->zsize, curzone->zmax,
                          (curzone->ztotal - curzone->zfreecnt),
                          curzone->zfreecnt, curzone->znalloc);

                len = strlen((char *)tmpbuf);
                if (LIST_NEXT(curzone, zlink) == NULL)
                        tmpbuf[len - 1] = 0;

                error = SYSCTL_OUT(req, tmpbuf, len);

                if (error)
                        break;
        }
        lwkt_reltoken(&vm_token);
        return (error);
}

#if defined(INVARIANTS)

/*
 * Debugging only.
 */
void
zerror(int error)
{
        char *msg;

        switch (error) {
        case ZONE_ERROR_INVALID:
                msg = "zone: invalid zone";
                break;
        case ZONE_ERROR_NOTFREE:
                msg = "zone: entry not free";
                break;
        case ZONE_ERROR_ALREADYFREE:
                msg = "zone: freeing free entry";
                break;
        default:
                msg = "zone: invalid error";
                break;
        }
        panic(msg);
}
#endif

SYSCTL_OID(_vm, OID_AUTO, zone, CTLTYPE_STRING|CTLFLAG_RD, \
           NULL, 0, sysctl_vm_zone, "A", "Zone Info");

SYSCTL_INT(_vm, OID_AUTO, zone_kmem_pages,
           CTLFLAG_RD, &zone_kmem_pages, 0,
           "Number of interrupt safe pages allocated by zone");
SYSCTL_INT(_vm, OID_AUTO, zone_burst,
           CTLFLAG_RW, &zone_burst, 0,
           "Burst from depot to pcpu cache");
SYSCTL_LONG(_vm, OID_AUTO, zone_kmem_kvaspace,
            CTLFLAG_RD, &zone_kmem_kvaspace, 0,
            "KVA space allocated by zone");
SYSCTL_INT(_vm, OID_AUTO, zone_kern_pages,
           CTLFLAG_RD, &zone_kern_pages, 0,
           "Number of non-interrupt safe pages allocated by zone");
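
/*
 * Example (illustrative userland sketch, not part of this kernel file):
 * the zone table exported above can be read as a single string through
 * the vm.zone sysctl, e.g.
 *
 *      #include <sys/types.h>
 *      #include <sys/sysctl.h>
 *      #include <stdio.h>
 *      #include <stdlib.h>
 *
 *      size_t len = 0;
 *      char *buf;
 *
 *      if (sysctlbyname("vm.zone", NULL, &len, NULL, 0) == 0 &&
 *          (buf = malloc(len + 1)) != NULL &&
 *          sysctlbyname("vm.zone", buf, &len, NULL, 0) == 0) {
 *              buf[len] = '\0';
 *              printf("%s\n", buf);
 *      }
 *
 * which is equivalent to running "sysctl vm.zone" from the shell.
 */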