/*	$OpenBSD: kern_malloc.c,v 1.148 2022/08/14 01:58:27 jsg Exp $	*/
/*	$NetBSD: kern_malloc.c,v 1.15.4.2 1996/06/13 17:10:56 cgd Exp $	*/

/*
 * Copyright (c) 1987, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_malloc.c	8.3 (Berkeley) 1/4/94
 */

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/stdint.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/time.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/tracepoint.h>

#include <uvm/uvm_extern.h>

#if defined(DDB)
#include <machine/db_machdep.h>
#include <ddb/db_output.h>
#endif

static
#ifndef SMALL_KERNEL
__inline__
#endif
long BUCKETINDX(size_t sz)
{
	long b, d;

	/* note that this relies upon MINALLOCSIZE being 1 << MINBUCKET */
	b = 7 + MINBUCKET; d = 4;
	while (d != 0) {
		if (sz <= (1 << b))
			b -= d;
		else
			b += d;
		d >>= 1;
	}
	if (sz <= (1 << b))
		b += 0;
	else
		b += 1;
	return b;
}
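
/*
 * Illustrative trace of the binary search above, assuming MINBUCKET == 4
 * (a 16-byte minimum allocation, the usual configuration):
 *
 *	BUCKETINDX(100):
 *	  b = 11, d = 4:  100 <= 2048, so b = 7, d = 2
 *	  b = 7,  d = 2:  100 <= 128,  so b = 5, d = 1
 *	  b = 5,  d = 1:  100 >  32,   so b = 6, d = 0
 *	  final check:    100 >  64,   so b = 7
 *
 * i.e. a 100-byte request is served from the 2^7 = 128-byte bucket, the
 * smallest bucket that fits it.
 */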

static struct vm_map kmem_map_store;
struct vm_map *kmem_map = NULL;

/*
 * Default number of pages in kmem_map.  We attempt to calculate this
 * at run-time, but allow it to be either patched or set in the kernel
 * config file.
 */
#ifndef NKMEMPAGES
#define	NKMEMPAGES	0
#endif
u_int	nkmempages = NKMEMPAGES;

/*
 * Defaults for lower- and upper-bounds for the kmem_map page count.
 * Can be overridden by kernel config options.
 */
#ifndef NKMEMPAGES_MIN
#define	NKMEMPAGES_MIN	0
#endif
u_int	nkmempages_min = 0;

#ifndef NKMEMPAGES_MAX
#define	NKMEMPAGES_MAX	NKMEMPAGES_MAX_DEFAULT
#endif
u_int	nkmempages_max = 0;

struct mutex malloc_mtx = MUTEX_INITIALIZER(IPL_VM);
struct kmembuckets bucket[MINBUCKET + 16];
#ifdef KMEMSTATS
struct kmemstats kmemstats[M_LAST];
#endif
struct kmemusage *kmemusage;
char *kmembase, *kmemlimit;
char buckstring[16 * sizeof("123456,")];
int buckstring_init = 0;
#if defined(KMEMSTATS) || defined(DIAGNOSTIC)
char *memname[] = INITKMEMNAMES;
char *memall = NULL;
struct rwlock sysctl_kmemlock = RWLOCK_INITIALIZER("sysctlklk");
#endif

/*
 * Normally the freelist structure is used only to hold the list pointer
 * for free objects.  However, when running with diagnostics, the first
 * 8 bytes of the structure is unused except for diagnostic information,
 * and the free list pointer is at offset 8 in the structure.  Since the
 * first 8 bytes is the portion of the structure most often modified, this
 * helps to detect memory reuse problems and avoid free list corruption.
 */
struct kmem_freelist {
	int32_t	kf_spare0;
	int16_t	kf_type;
	int16_t	kf_spare1;
	XSIMPLEQ_ENTRY(kmem_freelist) kf_flist;
};

#ifdef DIAGNOSTIC
/*
 * This structure provides a set of masks to catch unaligned frees.
 */
const long addrmask[] = { 0,
	0x00000001, 0x00000003, 0x00000007, 0x0000000f,
	0x0000001f, 0x0000003f, 0x0000007f, 0x000000ff,
	0x000001ff, 0x000003ff, 0x000007ff, 0x00000fff,
	0x00001fff, 0x00003fff, 0x00007fff, 0x0000ffff,
};

#endif /* DIAGNOSTIC */

#ifndef SMALL_KERNEL
struct timeval malloc_errintvl = { 5, 0 };
struct timeval malloc_lasterr;
#endif

/*
 * Allocate a block of memory
 */
void *
malloc(size_t size, int type, int flags)
{
	struct kmembuckets *kbp;
	struct kmemusage *kup;
	struct kmem_freelist *freep;
	long indx, npg, allocsize;
	caddr_t va, cp;
	int s;
#ifdef DIAGNOSTIC
	int freshalloc;
	char *savedtype;
#endif
#ifdef KMEMSTATS
	struct kmemstats *ksp = &kmemstats[type];
	int wake;

	if (((unsigned long)type) <= 1 || ((unsigned long)type) >= M_LAST)
		panic("malloc: bogus type %d", type);
#endif

	KASSERT(flags & (M_WAITOK | M_NOWAIT));

#ifdef DIAGNOSTIC
	if ((flags & M_NOWAIT) == 0) {
		extern int pool_debug;
		assertwaitok();
		if (pool_debug == 2)
			yield();
	}
#endif

	if (size > 65535 * PAGE_SIZE) {
		if (flags & M_CANFAIL) {
#ifndef SMALL_KERNEL
			if (ratecheck(&malloc_lasterr, &malloc_errintvl))
				printf("malloc(): allocation too large, "
				    "type = %d, size = %lu\n", type, size);
#endif
			return (NULL);
		} else
			panic("malloc: allocation too large, "
			    "type = %d, size = %lu", type, size);
	}

	indx = BUCKETINDX(size);
	if (size > MAXALLOCSAVE)
		allocsize = round_page(size);
	else
		allocsize = 1 << indx;
	kbp = &bucket[indx];
	mtx_enter(&malloc_mtx);
#ifdef KMEMSTATS
	while (ksp->ks_memuse >= ksp->ks_limit) {
		if (flags & M_NOWAIT) {
			mtx_leave(&malloc_mtx);
			return (NULL);
		}
#ifdef DIAGNOSTIC
		if (ISSET(flags, M_WAITOK) && curproc == &proc0)
			panic("%s: cannot sleep for memory during boot",
			    __func__);
#endif
		if (ksp->ks_limblocks < 65535)
			ksp->ks_limblocks++;
		msleep_nsec(ksp, &malloc_mtx, PSWP+2, memname[type], INFSLP);
	}
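	/*
	 * Charge the allocation to this type while malloc_mtx is still
	 * held; if the backing uvm_km_kmemalloc_pla() call below fails,
	 * the charge is backed out and any limit waiters are woken.
	 */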
	ksp->ks_memuse += allocsize;	/* account for this early */
	ksp->ks_size |= 1 << indx;
#endif
	if (XSIMPLEQ_FIRST(&kbp->kb_freelist) == NULL) {
		mtx_leave(&malloc_mtx);
		npg = atop(round_page(allocsize));
		s = splvm();
		va = (caddr_t)uvm_km_kmemalloc_pla(kmem_map, NULL,
		    (vsize_t)ptoa(npg), 0,
		    ((flags & M_NOWAIT) ? UVM_KMF_NOWAIT : 0) |
		    ((flags & M_CANFAIL) ? UVM_KMF_CANFAIL : 0),
		    no_constraint.ucr_low, no_constraint.ucr_high,
		    0, 0, 0);
		splx(s);
		if (va == NULL) {
			/*
			 * Kmem_malloc() can return NULL, even if it can
			 * wait, if there is no map space available, because
			 * it can't fix that problem.  Neither can we,
			 * right now.  (We should release pages which
			 * are completely free and which are in buckets
			 * with too many free elements.)
			 */
			if ((flags & (M_NOWAIT|M_CANFAIL)) == 0)
				panic("malloc: out of space in kmem_map");

#ifdef KMEMSTATS
			mtx_enter(&malloc_mtx);
			ksp->ks_memuse -= allocsize;
			wake = ksp->ks_memuse + allocsize >= ksp->ks_limit &&
			    ksp->ks_memuse < ksp->ks_limit;
			mtx_leave(&malloc_mtx);
			if (wake)
				wakeup(ksp);
#endif
			return (NULL);
		}
		mtx_enter(&malloc_mtx);
#ifdef KMEMSTATS
		kbp->kb_total += kbp->kb_elmpercl;
#endif
		kup = btokup(va);
		kup->ku_indx = indx;
#ifdef DIAGNOSTIC
		freshalloc = 1;
#endif
		if (allocsize > MAXALLOCSAVE) {
			kup->ku_pagecnt = npg;
			goto out;
		}
#ifdef KMEMSTATS
		kup->ku_freecnt = kbp->kb_elmpercl;
		kbp->kb_totalfree += kbp->kb_elmpercl;
#endif
		cp = va + (npg * PAGE_SIZE) - allocsize;
		for (;;) {
			freep = (struct kmem_freelist *)cp;
#ifdef DIAGNOSTIC
			/*
			 * Copy in known text to detect modification
			 * after freeing.
			 */
			poison_mem(cp, allocsize);
			freep->kf_type = M_FREE;
#endif /* DIAGNOSTIC */
			XSIMPLEQ_INSERT_HEAD(&kbp->kb_freelist, freep,
			    kf_flist);
			if (cp <= va)
				break;
			cp -= allocsize;
		}
	} else {
#ifdef DIAGNOSTIC
		freshalloc = 0;
#endif
	}
	freep = XSIMPLEQ_FIRST(&kbp->kb_freelist);
	XSIMPLEQ_REMOVE_HEAD(&kbp->kb_freelist, kf_flist);
	va = (caddr_t)freep;
#ifdef DIAGNOSTIC
	savedtype = (unsigned)freep->kf_type < M_LAST ?
	    memname[freep->kf_type] : "???";
	if (freshalloc == 0 && XSIMPLEQ_FIRST(&kbp->kb_freelist)) {
		int rv;
		vaddr_t addr = (vaddr_t)XSIMPLEQ_FIRST(&kbp->kb_freelist);

		vm_map_lock(kmem_map);
		rv = uvm_map_checkprot(kmem_map, addr,
		    addr + sizeof(struct kmem_freelist), PROT_WRITE);
		vm_map_unlock(kmem_map);

		if (!rv) {
			printf("%s %zd of object %p size 0x%lx %s %s"
			    " (invalid addr %p)\n",
			    "Data modified on freelist: word",
			    (int32_t *)&addr - (int32_t *)kbp, va, size,
			    "previous type", savedtype, (void *)addr);
		}
	}

	/* Fill the fields that we've used with poison */
	poison_mem(freep, sizeof(*freep));

	/* and check that the data hasn't been modified. */
	if (freshalloc == 0) {
		size_t pidx;
		uint32_t pval;
		if (poison_check(va, allocsize, &pidx, &pval)) {
			panic("%s %zd of object %p size 0x%lx %s %s"
			    " (0x%x != 0x%x)\n",
			    "Data modified on freelist: word",
			    pidx, va, size, "previous type",
			    savedtype, ((int32_t*)va)[pidx], pval);
		}
	}

	freep->kf_spare0 = 0;
#endif /* DIAGNOSTIC */
#ifdef KMEMSTATS
	kup = btokup(va);
	if (kup->ku_indx != indx)
		panic("malloc: wrong bucket");
	if (kup->ku_freecnt == 0)
		panic("malloc: lost data");
	kup->ku_freecnt--;
	kbp->kb_totalfree--;
out:
	kbp->kb_calls++;
	ksp->ks_inuse++;
	ksp->ks_calls++;
	if (ksp->ks_memuse > ksp->ks_maxused)
		ksp->ks_maxused = ksp->ks_memuse;
#else
out:
#endif
	mtx_leave(&malloc_mtx);

	if ((flags & M_ZERO) && va != NULL)
		memset(va, 0, size);

	TRACEPOINT(uvm, malloc, type, va, size, flags);

	return (va);
}
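
/*
 * Illustrative caller pattern for this interface (struct foo and the
 * M_DEVBUF type below are examples only, not taken from this file):
 *
 *	struct foo *fp;
 *
 *	fp = malloc(sizeof(*fp), M_DEVBUF, M_WAITOK | M_ZERO);
 *	...
 *	free(fp, M_DEVBUF, sizeof(*fp));
 *
 * Passing the allocation size back to free() lets the DIAGNOSTIC checks
 * below verify that it matches the bucket the memory came from.
 */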

/*
 * Free a block of memory allocated by malloc.
 */
void
free(void *addr, int type, size_t freedsize)
{
	struct kmembuckets *kbp;
	struct kmemusage *kup;
	struct kmem_freelist *freep;
	long size;
	int s;
#ifdef DIAGNOSTIC
	long alloc;
#endif
#ifdef KMEMSTATS
	struct kmemstats *ksp = &kmemstats[type];
	int wake;
#endif

	if (addr == NULL)
		return;

#ifdef DIAGNOSTIC
	if (addr < (void *)kmembase || addr >= (void *)kmemlimit)
		panic("free: non-malloced addr %p type %s", addr,
		    memname[type]);
#endif

	TRACEPOINT(uvm, free, type, addr, freedsize);

	mtx_enter(&malloc_mtx);
	kup = btokup(addr);
	size = 1 << kup->ku_indx;
	kbp = &bucket[kup->ku_indx];
	if (size > MAXALLOCSAVE)
		size = kup->ku_pagecnt << PAGE_SHIFT;
#ifdef DIAGNOSTIC
#if 0
	if (freedsize == 0) {
		static int zerowarnings;
		if (zerowarnings < 5) {
			zerowarnings++;
			printf("free with zero size: (%d)\n", type);
#ifdef DDB
			db_stack_dump();
#endif
		}
	}
#endif
	if (freedsize != 0 && freedsize > size)
		panic("free: size too large %zu > %ld (%p) type %s",
		    freedsize, size, addr, memname[type]);
	if (freedsize != 0 && size > MINALLOCSIZE && freedsize <= size / 2)
		panic("free: size too small %zu <= %ld / 2 (%p) type %s",
		    freedsize, size, addr, memname[type]);
	/*
	 * Check for returns of data that do not point to the
	 * beginning of the allocation.
	 */
	if (size > PAGE_SIZE)
		alloc = addrmask[BUCKETINDX(PAGE_SIZE)];
	else
		alloc = addrmask[kup->ku_indx];
	if (((u_long)addr & alloc) != 0)
		panic("free: unaligned addr %p, size %ld, type %s, mask %ld",
		    addr, size, memname[type], alloc);
#endif /* DIAGNOSTIC */
	if (size > MAXALLOCSAVE) {
		u_short pagecnt = kup->ku_pagecnt;

		kup->ku_indx = 0;
		kup->ku_pagecnt = 0;
		mtx_leave(&malloc_mtx);
		s = splvm();
		uvm_km_free(kmem_map, (vaddr_t)addr, ptoa(pagecnt));
		splx(s);
#ifdef KMEMSTATS
		mtx_enter(&malloc_mtx);
		ksp->ks_memuse -= size;
		wake = ksp->ks_memuse + size >= ksp->ks_limit &&
		    ksp->ks_memuse < ksp->ks_limit;
		ksp->ks_inuse--;
		kbp->kb_total -= 1;
		mtx_leave(&malloc_mtx);
		if (wake)
			wakeup(ksp);
#endif
		return;
	}
	freep = (struct kmem_freelist *)addr;
#ifdef DIAGNOSTIC
	/*
	 * Check for multiple frees. Use a quick check to see if
	 * it looks free before laboriously searching the freelist.
	 */
	if (freep->kf_spare0 == poison_value(freep)) {
		struct kmem_freelist *fp;
		XSIMPLEQ_FOREACH(fp, &kbp->kb_freelist, kf_flist) {
			if (addr != fp)
				continue;
			printf("multiply freed item %p\n", addr);
			panic("free: duplicated free");
		}
	}
	/*
	 * Copy in known text to detect modification after freeing
	 * and to make it look free.  Also, save the type being freed
	 * so we can list likely culprit if modification is detected
	 * when the object is reallocated.
	 */
	poison_mem(addr, size);
	freep->kf_spare0 = poison_value(freep);

	freep->kf_type = type;
#endif /* DIAGNOSTIC */
#ifdef KMEMSTATS
	kup->ku_freecnt++;
	if (kup->ku_freecnt >= kbp->kb_elmpercl) {
		if (kup->ku_freecnt > kbp->kb_elmpercl)
			panic("free: multiple frees");
		else if (kbp->kb_totalfree > kbp->kb_highwat)
			kbp->kb_couldfree++;
	}
	kbp->kb_totalfree++;
	ksp->ks_memuse -= size;
	wake = ksp->ks_memuse + size >= ksp->ks_limit &&
	    ksp->ks_memuse < ksp->ks_limit;
	ksp->ks_inuse--;
#endif
	XSIMPLEQ_INSERT_TAIL(&kbp->kb_freelist, freep, kf_flist);
	mtx_leave(&malloc_mtx);
#ifdef KMEMSTATS
	if (wake)
		wakeup(ksp);
#endif
}

/*
 * Compute the number of pages that kmem_map will map, that is,
 * the size of the kernel malloc arena.
 */
void
kmeminit_nkmempages(void)
{
	u_int npages;

	if (nkmempages != 0) {
		/*
		 * It's already been set (by us being here before, or
		 * by patching or kernel config options), bail out now.
		 */
		return;
	}

	/*
	 * We can't initialize these variables at compilation time, since
	 * the page size may not be known (on sparc GENERIC kernels, for
	 * example).  But we still want the MD code to be able to provide
	 * better values.
	 */
	if (nkmempages_min == 0)
		nkmempages_min = NKMEMPAGES_MIN;
	if (nkmempages_max == 0)
		nkmempages_max = NKMEMPAGES_MAX;

	/*
	 * We use the following (simple) formula:
	 *
	 *	- Starting point is physical memory / 4.
	 *
	 *	- Clamp it down to nkmempages_max.
	 *
	 *	- Round it up to nkmempages_min.
	 */
	npages = physmem / 4;

	if (npages > nkmempages_max)
		npages = nkmempages_max;

	if (npages < nkmempages_min)
		npages = nkmempages_min;

	nkmempages = npages;
}
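
/*
 * Worked example of the formula above (illustrative figures): with 4 KB
 * pages and 4 GB of physical memory, physmem is 1048576 pages, so the
 * starting point is 262144 pages (1 GB of kernel malloc arena), which is
 * then clamped to the nkmempages_min/nkmempages_max bounds chosen above.
 */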

/*
 * Initialize the kernel memory allocator
 */
void
kmeminit(void)
{
	vaddr_t base, limit;
	long indx;

#ifdef DIAGNOSTIC
	if (sizeof(struct kmem_freelist) > (1 << MINBUCKET))
		panic("kmeminit: minbucket too small/struct freelist too big");
#endif

	/*
	 * Compute the number of kmem_map pages, if we have not
	 * done so already.
	 */
	kmeminit_nkmempages();
	base = vm_map_min(kernel_map);
	kmem_map = uvm_km_suballoc(kernel_map, &base, &limit,
	    (vsize_t)nkmempages << PAGE_SHIFT,
#ifdef KVA_GUARDPAGES
	    VM_MAP_INTRSAFE | VM_MAP_GUARDPAGES,
#else
	    VM_MAP_INTRSAFE,
#endif
	    FALSE, &kmem_map_store);
	kmembase = (char *)base;
	kmemlimit = (char *)limit;
	kmemusage = km_alloc(round_page(nkmempages * sizeof(struct kmemusage)),
	    &kv_any, &kp_zero, &kd_waitok);
	for (indx = 0; indx < MINBUCKET + 16; indx++) {
		XSIMPLEQ_INIT(&bucket[indx].kb_freelist);
	}
#ifdef KMEMSTATS
	for (indx = 0; indx < MINBUCKET + 16; indx++) {
		if (1 << indx >= PAGE_SIZE)
			bucket[indx].kb_elmpercl = 1;
		else
			bucket[indx].kb_elmpercl = PAGE_SIZE / (1 << indx);
		bucket[indx].kb_highwat = 5 * bucket[indx].kb_elmpercl;
	}
	for (indx = 0; indx < M_LAST; indx++)
		kmemstats[indx].ks_limit = nkmempages * PAGE_SIZE * 6 / 10;
#endif
}

/*
 * Return kernel malloc statistics information.
 */
int
sysctl_malloc(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen, struct proc *p)
{
	struct kmembuckets kb;
#ifdef KMEMSTATS
	struct kmemstats km;
#endif
#if defined(KMEMSTATS) || defined(DIAGNOSTIC)
	int error;
#endif
	int i, siz;

	if (namelen != 2 && name[0] != KERN_MALLOC_BUCKETS &&
	    name[0] != KERN_MALLOC_KMEMNAMES)
		return (ENOTDIR);		/* overloaded */

	switch (name[0]) {
	case KERN_MALLOC_BUCKETS:
		/* Initialize the first time */
		if (buckstring_init == 0) {
			buckstring_init = 1;
			memset(buckstring, 0, sizeof(buckstring));
			for (siz = 0, i = MINBUCKET; i < MINBUCKET + 16; i++) {
				snprintf(buckstring + siz,
				    sizeof buckstring - siz,
				    "%d,", (u_int)(1<<i));
				siz += strlen(buckstring + siz);
			}
			/* Remove trailing comma */
			if (siz)
				buckstring[siz - 1] = '\0';
		}
		return (sysctl_rdstring(oldp, oldlenp, newp, buckstring));

	case KERN_MALLOC_BUCKET:
		mtx_enter(&malloc_mtx);
		memcpy(&kb, &bucket[BUCKETINDX(name[1])], sizeof(kb));
		mtx_leave(&malloc_mtx);
		memset(&kb.kb_freelist, 0, sizeof(kb.kb_freelist));
		return (sysctl_rdstruct(oldp, oldlenp, newp, &kb, sizeof(kb)));
	case KERN_MALLOC_KMEMSTATS:
#ifdef KMEMSTATS
		if ((name[1] < 0) || (name[1] >= M_LAST))
			return (EINVAL);
		mtx_enter(&malloc_mtx);
		memcpy(&km, &kmemstats[name[1]], sizeof(km));
		mtx_leave(&malloc_mtx);
		return (sysctl_rdstruct(oldp, oldlenp, newp, &km, sizeof(km)));
#else
		return (EOPNOTSUPP);
#endif
	case KERN_MALLOC_KMEMNAMES:
#if defined(KMEMSTATS) || defined(DIAGNOSTIC)
		error = rw_enter(&sysctl_kmemlock, RW_WRITE|RW_INTR);
		if (error)
			return (error);
		if (memall == NULL) {
			int totlen;

			/* Figure out how large a buffer we need */
			for (totlen = 0, i = 0; i < M_LAST; i++) {
				if (memname[i])
					totlen += strlen(memname[i]);
				totlen++;
			}
			memall = malloc(totlen + M_LAST, M_SYSCTL,
			    M_WAITOK|M_ZERO);
			for (siz = 0, i = 0; i < M_LAST; i++) {
				snprintf(memall + siz,
				    totlen + M_LAST - siz,
				    "%s,", memname[i] ? memname[i] : "");
				siz += strlen(memall + siz);
			}
			/* Remove trailing comma */
			if (siz)
				memall[siz - 1] = '\0';

			/* Now, convert all spaces to underscores */
			for (i = 0; i < totlen; i++)
				if (memall[i] == ' ')
					memall[i] = '_';
		}
		rw_exit_write(&sysctl_kmemlock);
		return (sysctl_rdstring(oldp, oldlenp, newp, memall));
#else
		return (EOPNOTSUPP);
#endif
	default:
		return (EOPNOTSUPP);
	}
	/* NOTREACHED */
}

#if defined(DDB)

void
malloc_printit(
    int (*pr)(const char *, ...) __attribute__((__format__(__kprintf__,1,2))))
{
#ifdef KMEMSTATS
	struct kmemstats *km;
	int i;

	(*pr)("%15s %5s %6s %7s %6s %9s %8s\n",
	    "Type", "InUse", "MemUse", "HighUse", "Limit", "Requests",
	    "Type Lim");
	for (i = 0, km = kmemstats; i < M_LAST; i++, km++) {
		if (!km->ks_calls || !memname[i])
			continue;

		(*pr)("%15s %5ld %6ldK %7ldK %6ldK %9ld %8d\n",
		    memname[i], km->ks_inuse, km->ks_memuse / 1024,
		    km->ks_maxused / 1024, km->ks_limit / 1024,
		    km->ks_calls, km->ks_limblocks);
	}
#else
	(*pr)("No KMEMSTATS compiled in\n");
#endif
}
#endif /* DDB */

/*
 * Copyright (c) 2008 Otto Moerbeek <otto@drijf.net>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * This is sqrt(SIZE_MAX+1), as s1*s2 <= SIZE_MAX
 * if both s1 < MUL_NO_OVERFLOW and s2 < MUL_NO_OVERFLOW
 */
#define MUL_NO_OVERFLOW	(1UL << (sizeof(size_t) * 4))

void *
mallocarray(size_t nmemb, size_t size, int type, int flags)
{
	if ((nmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
	    nmemb > 0 && SIZE_MAX / nmemb < size) {
		if (flags & M_CANFAIL)
			return (NULL);
		panic("mallocarray: overflow %zu * %zu", nmemb, size);
	}
	return (malloc(size * nmemb, type, flags));
}
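
/*
 * Illustration of the check above (example values, 64-bit size_t assumed,
 * so MUL_NO_OVERFLOW is 1UL << 32): mallocarray(100, 64, ...) has both
 * operands below that bound, so the product cannot exceed SIZE_MAX and no
 * division is needed.  mallocarray(1UL << 33, 1UL << 33, ...) takes the
 * slow path, where SIZE_MAX / nmemb is smaller than size, so the call
 * fails (or panics without M_CANFAIL) instead of allocating a
 * wrapped-around, undersized buffer.
 */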