/*	$OpenBSD: kern_malloc.c,v 1.81 2009/08/25 18:02:42 miod Exp $	*/
/*	$NetBSD: kern_malloc.c,v 1.15.4.2 1996/06/13 17:10:56 cgd Exp $	*/

/*
 * Copyright (c) 1987, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_malloc.c	8.3 (Berkeley) 1/4/94
 */

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/time.h>
#include <sys/rwlock.h>

#include <uvm/uvm_extern.h>

static __inline__ long
BUCKETINDX(size_t sz)
{
#ifdef SMALL_KERNEL
        long b;

        if (sz-- == 0)
                return MINBUCKET;

        for (b = MINBUCKET; b < MINBUCKET + 15; b++)
                if ((sz >> b) == 0)
                        break;
#else
        long b, d;

        /* note that this relies upon MINALLOCSIZE being 1 << MINBUCKET */
        b = 7 + MINBUCKET; d = 4;
        while (d != 0) {
                if (sz <= (1 << b))
                        b -= d;
                else
                        b += d;
                d >>= 1;
        }
        if (sz <= (1 << b))
                b += 0;
        else
                b += 1;
#endif

        return b;
}
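
/*
 * Worked example (illustrative, assuming MINBUCKET == 4 so the smallest
 * bucket holds 16-byte objects): BUCKETINDX(100) starts the binary
 * search at b = 11 (2048 bytes).  100 <= 2048 drops b to 7; 100 <= 128
 * drops b to 5; 100 > 32 raises b to 6; the final comparison 100 > 64
 * bumps b to 7, so a 100-byte request is served from the 128-byte
 * bucket.  The SMALL_KERNEL linear scan computes the same index.
 */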

static struct vm_map kmem_map_store;
struct vm_map *kmem_map = NULL;

#ifdef NKMEMCLUSTERS
#error NKMEMCLUSTERS is obsolete; remove it from your kernel config file and use NKMEMPAGES instead or let the kernel auto-size
#endif

/*
 * Default number of pages in kmem_map.  We attempt to calculate this
 * at run-time, but allow it to be either patched or set in the kernel
 * config file.
 */
#ifndef NKMEMPAGES
#define NKMEMPAGES 0
#endif
u_int nkmempages = NKMEMPAGES;

/*
 * Defaults for the lower and upper bounds of the kmem_map page count.
 * Can be overridden by kernel config options.
 */
#ifndef NKMEMPAGES_MIN
#define NKMEMPAGES_MIN 0
#endif
u_int nkmempages_min = 0;

#ifndef NKMEMPAGES_MAX
#define NKMEMPAGES_MAX NKMEMPAGES_MAX_DEFAULT
#endif
u_int nkmempages_max = 0;

struct kmembuckets bucket[MINBUCKET + 16];
struct kmemstats kmemstats[M_LAST];
struct kmemusage *kmemusage;
char *kmembase, *kmemlimit;
char buckstring[16 * sizeof("123456,")];
int buckstring_init = 0;
#if defined(KMEMSTATS) || defined(DIAGNOSTIC) || defined(FFS_SOFTUPDATES)
char *memname[] = INITKMEMNAMES;
char *memall = NULL;
struct rwlock sysctl_kmemlock = RWLOCK_INITIALIZER("sysctlklk");
#endif

#ifdef DIAGNOSTIC
/*
 * This structure provides a set of masks to catch unaligned frees.
 */
const long addrmask[] = { 0,
        0x00000001, 0x00000003, 0x00000007, 0x0000000f,
        0x0000001f, 0x0000003f, 0x0000007f, 0x000000ff,
        0x000001ff, 0x000003ff, 0x000007ff, 0x00000fff,
        0x00001fff, 0x00003fff, 0x00007fff, 0x0000ffff,
};
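
/*
 * Example (illustrative): each mask covers the low bits of its bucket's
 * object size, so addrmask[6] == 0x3f guards the 64-byte bucket.  If a
 * caller frees an interior pointer such as base + 16, free() below
 * computes (addr & 0x3f) != 0 and panics with "unaligned addr" instead
 * of corrupting the freelist.
 */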

/*
 * The WEIRD_ADDR is used as known text to copy into free objects so
 * that modifications after frees can be detected.
 */
#ifdef DEADBEEF0
#define WEIRD_ADDR ((unsigned) DEADBEEF0)
#else
#define WEIRD_ADDR ((unsigned) 0xdeadbeef)
#endif
#define MAX_COPY 32

/*
 * Normally the freelist structure is used only to hold the list pointer
 * for free objects.  However, when running with diagnostics, the first
 * 8 bytes of the structure are unused except for diagnostic information,
 * and the free list pointer is at offset 8 in the structure.  Since the
 * first 8 bytes are the portion of the structure most often modified, this
 * helps to detect memory reuse problems and avoid free list corruption.
 */
struct freelist {
        int32_t spare0;
        int16_t type;
        int16_t spare1;
        caddr_t next;
};
#else /* !DIAGNOSTIC */
struct freelist {
        caddr_t next;
};
#endif /* DIAGNOSTIC */

#ifndef SMALL_KERNEL
struct timeval malloc_errintvl = { 5, 0 };
struct timeval malloc_lasterr;
#endif

/*
 * Allocate a block of memory
 */
void *
malloc(unsigned long size, int type, int flags)
{
        struct kmembuckets *kbp;
        struct kmemusage *kup;
        struct freelist *freep;
        long indx, npg, allocsize;
        int s;
        caddr_t va, cp, savedlist;
#ifdef DIAGNOSTIC
        int32_t *end, *lp;
        int copysize, freshalloc;
        char *savedtype;
#endif
#ifdef KMEMSTATS
        struct kmemstats *ksp = &kmemstats[type];

        if (((unsigned long)type) >= M_LAST)
                panic("malloc - bogus type");
#endif

#ifdef MALLOC_DEBUG
        if (debug_malloc(size, type, flags, (void **)&va)) {
                if ((flags & M_ZERO) && va != NULL)
                        memset(va, 0, size);
                return (va);
        }
#endif

        if (size > 65535 * PAGE_SIZE) {
                if (flags & M_CANFAIL) {
#ifndef SMALL_KERNEL
                        if (ratecheck(&malloc_lasterr, &malloc_errintvl))
                                printf("malloc(): allocation too large, "
                                    "type = %d, size = %lu\n", type, size);
#endif
                        return (NULL);
                } else
                        panic("malloc: allocation too large");
        }

        indx = BUCKETINDX(size);
        kbp = &bucket[indx];
        s = splvm();
#ifdef KMEMSTATS
        while (ksp->ks_memuse >= ksp->ks_limit) {
                if (flags & M_NOWAIT) {
                        splx(s);
                        return (NULL);
                }
                if (ksp->ks_limblocks < 65535)
                        ksp->ks_limblocks++;
                tsleep(ksp, PSWP+2, memname[type], 0);
        }
        ksp->ks_size |= 1 << indx;
#endif
#ifdef DIAGNOSTIC
        copysize = 1 << indx < MAX_COPY ? 1 << indx : MAX_COPY;
#endif
        if (kbp->kb_next == NULL) {
                if (size > MAXALLOCSAVE)
                        allocsize = round_page(size);
                else
                        allocsize = 1 << indx;
                npg = atop(round_page(allocsize));
                va = (caddr_t) uvm_km_kmemalloc(kmem_map, NULL,
                    (vsize_t)ptoa(npg),
                    ((flags & M_NOWAIT) ? UVM_KMF_NOWAIT : 0) |
                    ((flags & M_CANFAIL) ? UVM_KMF_CANFAIL : 0));
                if (va == NULL) {
                        /*
                         * uvm_km_kmemalloc() can return NULL, even if it
                         * can wait, if there is no map space available,
                         * because it can't fix that problem.  Neither can
                         * we, right now.  (We should release pages which
                         * are completely free and which are in buckets
                         * with too many free elements.)
                         */
                        if ((flags & (M_NOWAIT|M_CANFAIL)) == 0)
                                panic("malloc: out of space in kmem_map");
                        splx(s);
                        return (NULL);
                }
#ifdef KMEMSTATS
                kbp->kb_total += kbp->kb_elmpercl;
#endif
                kup = btokup(va);
                kup->ku_indx = indx;
#ifdef DIAGNOSTIC
                freshalloc = 1;
#endif
                if (allocsize > MAXALLOCSAVE) {
                        kup->ku_pagecnt = npg;
#ifdef KMEMSTATS
                        ksp->ks_memuse += allocsize;
#endif
                        goto out;
                }
#ifdef KMEMSTATS
                kup->ku_freecnt = kbp->kb_elmpercl;
                kbp->kb_totalfree += kbp->kb_elmpercl;
#endif
                /*
                 * Just in case we blocked while allocating memory,
                 * and someone else also allocated memory for this
                 * bucket, don't assume the list is still empty.
                 */
                savedlist = kbp->kb_next;
                kbp->kb_next = cp = va + (npg * PAGE_SIZE) - allocsize;
                for (;;) {
                        freep = (struct freelist *)cp;
#ifdef DIAGNOSTIC
                        /*
                         * Copy in known text to detect modification
                         * after freeing.
                         */
                        end = (int32_t *)&cp[copysize];
                        for (lp = (int32_t *)cp; lp < end; lp++)
                                *lp = WEIRD_ADDR;
                        freep->type = M_FREE;
#endif /* DIAGNOSTIC */
                        if (cp <= va)
                                break;
                        cp -= allocsize;
                        freep->next = cp;
                }
                freep->next = savedlist;
                if (savedlist == NULL)
                        kbp->kb_last = (caddr_t)freep;
        } else {
#ifdef DIAGNOSTIC
                freshalloc = 0;
#endif
        }
        va = kbp->kb_next;
        kbp->kb_next = ((struct freelist *)va)->next;
#ifdef DIAGNOSTIC
        freep = (struct freelist *)va;
        savedtype = (unsigned)freep->type < M_LAST ?
            memname[freep->type] : "???";
        if (freshalloc == 0 && kbp->kb_next) {
                int rv;
                vaddr_t addr = (vaddr_t)kbp->kb_next;

                vm_map_lock(kmem_map);
                rv = uvm_map_checkprot(kmem_map, addr,
                    addr + sizeof(struct freelist), VM_PROT_WRITE);
                vm_map_unlock(kmem_map);

                if (!rv) {
                        printf("%s %d of object %p size 0x%lx %s %s"
                            " (invalid addr %p)\n",
                            "Data modified on freelist: word",
                            (int32_t *)&kbp->kb_next - (int32_t *)kbp, va, size,
                            "previous type", savedtype, addr);
                        kbp->kb_next = NULL;
                }
        }

        /* Fill the fields that we've used with WEIRD_ADDR */
#if BYTE_ORDER == BIG_ENDIAN
        freep->type = WEIRD_ADDR >> 16;
#endif
#if BYTE_ORDER == LITTLE_ENDIAN
        freep->type = (short)WEIRD_ADDR;
#endif
        end = (int32_t *)&freep->next +
            (sizeof(freep->next) / sizeof(int32_t));
        for (lp = (int32_t *)&freep->next; lp < end; lp++)
                *lp = WEIRD_ADDR;

        /* and check that the data hasn't been modified. */
        if (freshalloc == 0) {
                end = (int32_t *)&va[copysize];
                for (lp = (int32_t *)va; lp < end; lp++) {
                        if (*lp == WEIRD_ADDR)
                                continue;
                        printf("%s %d of object %p size 0x%lx %s %s"
                            " (0x%x != 0x%x)\n",
                            "Data modified on freelist: word",
                            lp - (int32_t *)va, va, size,
                            "previous type", savedtype, *lp, WEIRD_ADDR);
                        break;
                }
        }

        freep->spare0 = 0;
#endif /* DIAGNOSTIC */
#ifdef KMEMSTATS
        kup = btokup(va);
        if (kup->ku_indx != indx)
                panic("malloc: wrong bucket");
        if (kup->ku_freecnt == 0)
                panic("malloc: lost data");
        kup->ku_freecnt--;
        kbp->kb_totalfree--;
        ksp->ks_memuse += 1 << indx;
out:
        kbp->kb_calls++;
        ksp->ks_inuse++;
        ksp->ks_calls++;
        if (ksp->ks_memuse > ksp->ks_maxused)
                ksp->ks_maxused = ksp->ks_memuse;
#else
out:
#endif
        splx(s);

        if ((flags & M_ZERO) && va != NULL)
                memset(va, 0, size);
        return (va);
}
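
/*
 * Typical usage (illustrative sketch; M_TEMP is just one of the malloc
 * types from <sys/malloc.h>):
 *
 *	buf = malloc(len, M_TEMP, M_NOWAIT | M_ZERO);
 *	if (buf == NULL)
 *		return (ENOMEM);
 *
 * As implemented above, M_NOWAIT returns NULL instead of sleeping on a
 * type limit or waiting for map space, M_CANFAIL turns the "allocation
 * too large" and "out of space in kmem_map" panics into a NULL return,
 * and M_ZERO clears the block before it is handed back.
 */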

/*
 * Free a block of memory allocated by malloc.
 */
void
free(void *addr, int type)
{
        struct kmembuckets *kbp;
        struct kmemusage *kup;
        struct freelist *freep;
        long size;
        int s;
#ifdef DIAGNOSTIC
        caddr_t cp;
        int32_t *end, *lp;
        long alloc, copysize;
#endif
#ifdef KMEMSTATS
        struct kmemstats *ksp = &kmemstats[type];
#endif

#ifdef MALLOC_DEBUG
        if (debug_free(addr, type))
                return;
#endif

#ifdef DIAGNOSTIC
        if (addr < (void *)kmembase || addr >= (void *)kmemlimit)
                panic("free: non-malloced addr %p type %s", addr,
                    memname[type]);
#endif

        kup = btokup(addr);
        size = 1 << kup->ku_indx;
        kbp = &bucket[kup->ku_indx];
        s = splvm();
#ifdef DIAGNOSTIC
        /*
         * Check for returns of data that do not point to the
         * beginning of the allocation.
         */
        if (size > PAGE_SIZE)
                alloc = addrmask[BUCKETINDX(PAGE_SIZE)];
        else
                alloc = addrmask[kup->ku_indx];
        if (((u_long)addr & alloc) != 0)
                panic("free: unaligned addr %p, size %ld, type %s, mask %ld",
                    addr, size, memname[type], alloc);
#endif /* DIAGNOSTIC */
        if (size > MAXALLOCSAVE) {
                uvm_km_free(kmem_map, (vaddr_t)addr, ptoa(kup->ku_pagecnt));
#ifdef KMEMSTATS
                size = kup->ku_pagecnt << PGSHIFT;
                ksp->ks_memuse -= size;
                kup->ku_indx = 0;
                kup->ku_pagecnt = 0;
                if (ksp->ks_memuse + size >= ksp->ks_limit &&
                    ksp->ks_memuse < ksp->ks_limit)
                        wakeup(ksp);
                ksp->ks_inuse--;
                kbp->kb_total -= 1;
#endif
                splx(s);
                return;
        }
        freep = (struct freelist *)addr;
#ifdef DIAGNOSTIC
        /*
         * Check for multiple frees.  Use a quick check to see if
         * it looks free before laboriously searching the freelist.
         */
        if (freep->spare0 == WEIRD_ADDR) {
                for (cp = kbp->kb_next; cp;
                    cp = ((struct freelist *)cp)->next) {
                        if (addr != cp)
                                continue;
                        printf("multiply freed item %p\n", addr);
                        panic("free: duplicated free");
                }
        }
        /*
         * Copy in known text to detect modification after freeing
         * and to make it look free.  Also, save the type being freed
         * so we can list the likely culprit if modification is detected
         * when the object is reallocated.
         */
        copysize = size < MAX_COPY ? size : MAX_COPY;
        end = (int32_t *)&((caddr_t)addr)[copysize];
        for (lp = (int32_t *)addr; lp < end; lp++)
                *lp = WEIRD_ADDR;
        freep->type = type;
#endif /* DIAGNOSTIC */
#ifdef KMEMSTATS
        kup->ku_freecnt++;
        if (kup->ku_freecnt >= kbp->kb_elmpercl) {
                if (kup->ku_freecnt > kbp->kb_elmpercl)
                        panic("free: multiple frees");
                else if (kbp->kb_totalfree > kbp->kb_highwat)
                        kbp->kb_couldfree++;
        }
        kbp->kb_totalfree++;
        ksp->ks_memuse -= size;
        if (ksp->ks_memuse + size >= ksp->ks_limit &&
            ksp->ks_memuse < ksp->ks_limit)
                wakeup(ksp);
        ksp->ks_inuse--;
#endif
        if (kbp->kb_next == NULL)
                kbp->kb_next = addr;
        else
                ((struct freelist *)kbp->kb_last)->next = addr;
        freep->next = NULL;
        kbp->kb_last = addr;
        splx(s);
}
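
/*
 * Note (illustrative): free() must be given the exact pointer malloc()
 * returned, paired with the same type, e.g.
 *
 *	free(buf, M_TEMP);
 *
 * With DIAGNOSTIC, an interior pointer trips the alignment check above,
 * and a double free is caught either by the freelist search or, with
 * KMEMSTATS, by the ku_freecnt overflow check.
 */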

/*
 * Compute the number of pages that kmem_map will map, that is,
 * the size of the kernel malloc arena.
 */
void
kmeminit_nkmempages(void)
{
        u_int npages;

        if (nkmempages != 0) {
                /*
                 * It's already been set (by us being here before, or
                 * by patching or kernel config options); bail out now.
                 */
                return;
        }

        /*
         * We can't initialize these variables at compilation time, since
         * the page size may not be known (on sparc GENERIC kernels, for
         * example).  But we still want the MD code to be able to provide
         * better values.
         */
        if (nkmempages_min == 0)
                nkmempages_min = NKMEMPAGES_MIN;
        if (nkmempages_max == 0)
                nkmempages_max = NKMEMPAGES_MAX;

        /*
         * We use the following (simple) formula:
         *
         * - Starting point is physical memory / 4.
         *
         * - Clamp it down to nkmempages_max.
         *
         * - Raise it to at least nkmempages_min.
         */
        npages = physmem / 4;

        if (npages > nkmempages_max)
                npages = nkmempages_max;

        if (npages < nkmempages_min)
                npages = nkmempages_min;

        nkmempages = npages;
}
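
/*
 * Worked example (illustrative): with 512MB of RAM and 4KB pages,
 * physmem is 131072 pages, so the starting point is 32768 pages
 * (128MB) of kmem_map; that figure is then clamped to the
 * machine-dependent nkmempages_max and raised to nkmempages_min if
 * either bound is exceeded.
 */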

/*
 * Initialize the kernel memory allocator
 */
void
kmeminit(void)
{
        vaddr_t base, limit;
#ifdef KMEMSTATS
        long indx;
#endif

#ifdef DIAGNOSTIC
        if (sizeof(struct freelist) > (1 << MINBUCKET))
                panic("kmeminit: minbucket too small/struct freelist too big");
#endif

        /*
         * Compute the number of kmem_map pages, if we have not
         * done so already.
         */
        kmeminit_nkmempages();
        base = vm_map_min(kernel_map);
        kmem_map = uvm_km_suballoc(kernel_map, &base, &limit,
            (vsize_t)(nkmempages * PAGE_SIZE), VM_MAP_INTRSAFE, FALSE,
            &kmem_map_store);
        kmembase = (char *)base;
        kmemlimit = (char *)limit;
        kmemusage = (struct kmemusage *) uvm_km_zalloc(kernel_map,
            (vsize_t)(nkmempages * sizeof(struct kmemusage)));
#ifdef KMEMSTATS
        for (indx = 0; indx < MINBUCKET + 16; indx++) {
                if (1 << indx >= PAGE_SIZE)
                        bucket[indx].kb_elmpercl = 1;
                else
                        bucket[indx].kb_elmpercl = PAGE_SIZE / (1 << indx);
                bucket[indx].kb_highwat = 5 * bucket[indx].kb_elmpercl;
        }
        for (indx = 0; indx < M_LAST; indx++)
                kmemstats[indx].ks_limit = nkmempages * PAGE_SIZE * 6 / 10;
#endif
#ifdef MALLOC_DEBUG
        debug_malloc_init();
#endif
}

/*
 * Return kernel malloc statistics information.
 */
int
sysctl_malloc(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen, struct proc *p)
{
        struct kmembuckets kb;
        int i, siz;

        if (namelen != 2 && name[0] != KERN_MALLOC_BUCKETS &&
            name[0] != KERN_MALLOC_KMEMNAMES)
                return (ENOTDIR);		/* overloaded */

        switch (name[0]) {
        case KERN_MALLOC_BUCKETS:
                /* Initialize the first time */
                if (buckstring_init == 0) {
                        buckstring_init = 1;
                        bzero(buckstring, sizeof(buckstring));
                        for (siz = 0, i = MINBUCKET; i < MINBUCKET + 16; i++) {
                                snprintf(buckstring + siz,
                                    sizeof buckstring - siz,
                                    "%d,", (u_int)(1<<i));
                                siz += strlen(buckstring + siz);
                        }
                        /* Remove trailing comma */
                        if (siz)
                                buckstring[siz - 1] = '\0';
                }
                return (sysctl_rdstring(oldp, oldlenp, newp, buckstring));

        case KERN_MALLOC_BUCKET:
                bcopy(&bucket[BUCKETINDX(name[1])], &kb, sizeof(kb));
                kb.kb_next = kb.kb_last = 0;
                return (sysctl_rdstruct(oldp, oldlenp, newp, &kb, sizeof(kb)));
        case KERN_MALLOC_KMEMSTATS:
#ifdef KMEMSTATS
                if ((name[1] < 0) || (name[1] >= M_LAST))
                        return (EINVAL);
                return (sysctl_rdstruct(oldp, oldlenp, newp,
                    &kmemstats[name[1]], sizeof(struct kmemstats)));
#else
                return (EOPNOTSUPP);
#endif
        case KERN_MALLOC_KMEMNAMES:
#if defined(KMEMSTATS) || defined(DIAGNOSTIC) || defined(FFS_SOFTUPDATES)
                if (memall == NULL) {
                        int totlen;

                        i = rw_enter(&sysctl_kmemlock, RW_WRITE|RW_INTR);
                        if (i)
                                return (i);

                        /* Figure out how large a buffer we need */
                        for (totlen = 0, i = 0; i < M_LAST; i++) {
                                if (memname[i])
                                        totlen += strlen(memname[i]);
                                totlen++;
                        }
                        memall = malloc(totlen + M_LAST, M_SYSCTL,
                            M_WAITOK|M_ZERO);
                        for (siz = 0, i = 0; i < M_LAST; i++) {
                                snprintf(memall + siz,
                                    totlen + M_LAST - siz,
                                    "%s,", memname[i] ?
                                    memname[i] : "");
                                siz += strlen(memall + siz);
                        }
                        /* Remove trailing comma */
                        if (siz)
                                memall[siz - 1] = '\0';

                        /* Now, convert all spaces to underscores */
                        for (i = 0; i < totlen; i++)
                                if (memall[i] == ' ')
                                        memall[i] = '_';
                        rw_exit_write(&sysctl_kmemlock);
                }
                return (sysctl_rdstring(oldp, oldlenp, newp, memall));
#else
                return (EOPNOTSUPP);
#endif
        default:
                return (EOPNOTSUPP);
        }
        /* NOTREACHED */
}

/*
 * Round up a size to how much malloc would actually allocate.
 */
size_t
malloc_roundup(size_t sz)
{
        if (sz > MAXALLOCSAVE)
                return round_page(sz);

        return (1 << BUCKETINDX(sz));
}
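
/*
 * Examples (illustrative, assuming MINBUCKET == 4, PAGE_SIZE == 4096
 * and MAXALLOCSAVE == 2 * PAGE_SIZE): malloc_roundup(100) == 128, the
 * bucket a 100-byte malloc() draws from, while malloc_roundup(100000)
 * == 102400, i.e. 25 full pages.  Under the same assumptions the
 * kern.malloc.buckets string built above reads "16,32,64,...,524288".
 */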

#if defined(DDB)
#include <machine/db_machdep.h>
#include <ddb/db_interface.h>
#include <ddb/db_output.h>

void
malloc_printit(int (*pr)(const char *, ...))
{
#ifdef KMEMSTATS
        struct kmemstats *km;
        int i;

        (*pr)("%15s %5s %6s %7s %6s %9s %8s %8s\n",
            "Type", "InUse", "MemUse", "HighUse", "Limit", "Requests",
            "Type Lim", "Kern Lim");
        for (i = 0, km = kmemstats; i < M_LAST; i++, km++) {
                if (!km->ks_calls || !memname[i])
                        continue;

                (*pr)("%15s %5ld %6ldK %7ldK %6ldK %9ld %8d %8d\n",
                    memname[i], km->ks_inuse, km->ks_memuse / 1024,
                    km->ks_maxused / 1024, km->ks_limit / 1024,
                    km->ks_calls, km->ks_limblocks, km->ks_mapblocks);
        }
#else
        (*pr)("No KMEMSTATS compiled in\n");
#endif
}
#endif /* DDB */
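
/*
 * Note (illustrative): malloc_printit() is normally reached via the
 * ddb "show malloc" command and prints one line per malloc type that
 * has seen at least one request.
 */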