/*	$OpenBSD: kern_malloc.c,v 1.128 2015/03/14 03:38:50 jsg Exp $	*/
/*	$NetBSD: kern_malloc.c,v 1.15.4.2 1996/06/13 17:10:56 cgd Exp $	*/

/*
 * Copyright (c) 1987, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_malloc.c	8.3 (Berkeley) 1/4/94
 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/stdint.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/time.h>
#include <sys/rwlock.h>

#include <uvm/uvm_extern.h>

static
#ifndef SMALL_KERNEL
__inline__
#endif
long BUCKETINDX(size_t sz)
{
	long b, d;

	/* note that this relies upon MINALLOCSIZE being 1 << MINBUCKET */
	b = 7 + MINBUCKET; d = 4;
	while (d != 0) {
		if (sz <= (1 << b))
			b -= d;
		else
			b += d;
		d >>= 1;
	}
	if (sz <= (1 << b))
		b += 0;
	else
		b += 1;
	return b;
}
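
/*
 * Worked example (illustrative; assumes MINBUCKET == 4, its usual value):
 * for sz == 100 the binary search above compares against 2048, 128 and
 * 32 (b = 11, 7, 5), leaving the loop at b == 6; the final comparison
 * against 64 bumps b to 7, so the caller is handed the 128-byte bucket,
 * the smallest power of two that can hold the request.
 */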

static struct vm_map kmem_map_store;
struct vm_map *kmem_map = NULL;

/*
 * Default number of pages in kmem_map.  We attempt to calculate this
 * at run-time, but allow it to be either patched or set in the kernel
 * config file.
 */
#ifndef NKMEMPAGES
#define NKMEMPAGES	0
#endif
u_int nkmempages = NKMEMPAGES;

/*
 * Defaults for lower- and upper-bounds for the kmem_map page count.
 * Can be overridden by kernel config options.
 */
#ifndef NKMEMPAGES_MIN
#define NKMEMPAGES_MIN	0
#endif
u_int nkmempages_min = 0;

#ifndef NKMEMPAGES_MAX
#define NKMEMPAGES_MAX	NKMEMPAGES_MAX_DEFAULT
#endif
u_int nkmempages_max = 0;

struct kmembuckets bucket[MINBUCKET + 16];
#ifdef KMEMSTATS
struct kmemstats kmemstats[M_LAST];
#endif
struct kmemusage *kmemusage;
char *kmembase, *kmemlimit;
char buckstring[16 * sizeof("123456,")];
int buckstring_init = 0;
#if defined(KMEMSTATS) || defined(DIAGNOSTIC) || defined(FFS_SOFTUPDATES)
char *memname[] = INITKMEMNAMES;
char *memall = NULL;
struct rwlock sysctl_kmemlock = RWLOCK_INITIALIZER("sysctlklk");
#endif

/*
 * Normally the freelist structure is used only to hold the list pointer
 * for free objects.  However, when running with diagnostics, the first
 * 8 bytes of the structure are unused except for diagnostic information,
 * and the free list pointer is at offset 8 in the structure.  Since the
 * first 8 bytes are the portion of the structure most often modified,
 * this helps to detect memory reuse problems and avoid free list
 * corruption.
 */
struct kmem_freelist {
	int32_t	kf_spare0;
	int16_t	kf_type;
	int16_t	kf_spare1;
	XSIMPLEQ_ENTRY(kmem_freelist) kf_flist;
};
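
/*
 * Layout sketch (illustrative; assumes LP64 and MINBUCKET == 4): the
 * 4 + 2 + 2 bytes of diagnostic space above are followed by the 8-byte
 * XSIMPLEQ link, 16 bytes in all, so the structure still fits in the
 * smallest (1 << MINBUCKET == 16 byte) bucket.  kmeminit() panics if
 * this invariant is ever violated.
 */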

#ifdef DIAGNOSTIC
/*
 * This structure provides a set of masks to catch unaligned frees.
 */
const long addrmask[] = { 0,
	0x00000001, 0x00000003, 0x00000007, 0x0000000f,
	0x0000001f, 0x0000003f, 0x0000007f, 0x000000ff,
	0x000001ff, 0x000003ff, 0x000007ff, 0x00000fff,
	0x00001fff, 0x00003fff, 0x00007fff, 0x0000ffff,
};
#endif /* DIAGNOSTIC */

#ifndef SMALL_KERNEL
struct timeval malloc_errintvl = { 5, 0 };
struct timeval malloc_lasterr;
#endif

/*
 * Allocate a block of memory
 */
void *
malloc(size_t size, int type, int flags)
{
	struct kmembuckets *kbp;
	struct kmemusage *kup;
	struct kmem_freelist *freep;
	long indx, npg, allocsize;
	int s;
	caddr_t va, cp;
#ifdef DIAGNOSTIC
	int freshalloc;
	char *savedtype;
#endif
#ifdef KMEMSTATS
	struct kmemstats *ksp = &kmemstats[type];

	if (((unsigned long)type) <= 1 || ((unsigned long)type) >= M_LAST)
		panic("malloc: bogus type %d", type);
#endif

	KASSERT(flags & (M_WAITOK | M_NOWAIT));

	if ((flags & M_NOWAIT) == 0) {
		extern int pool_debug;
#ifdef DIAGNOSTIC
		assertwaitok();
		if (pool_debug == 2)
			yield();
#endif
		if (!cold && pool_debug) {
			KERNEL_UNLOCK();
			KERNEL_LOCK();
		}
	}

#ifdef MALLOC_DEBUG
	if (debug_malloc(size, type, flags, (void **)&va)) {
		if ((flags & M_ZERO) && va != NULL)
			memset(va, 0, size);
		return (va);
	}
#endif

	if (size > 65535 * PAGE_SIZE) {
		if (flags & M_CANFAIL) {
#ifndef SMALL_KERNEL
			if (ratecheck(&malloc_lasterr, &malloc_errintvl))
				printf("malloc(): allocation too large, "
				    "type = %d, size = %lu\n", type, size);
#endif
			return (NULL);
		} else
			panic("malloc: allocation too large, "
			    "type = %d, size = %lu\n", type, size);
	}

	indx = BUCKETINDX(size);
	kbp = &bucket[indx];
	s = splvm();
#ifdef KMEMSTATS
	while (ksp->ks_memuse >= ksp->ks_limit) {
		if (flags & M_NOWAIT) {
			splx(s);
			return (NULL);
		}
		if (ksp->ks_limblocks < 65535)
			ksp->ks_limblocks++;
		tsleep(ksp, PSWP+2, memname[type], 0);
	}
	ksp->ks_size |= 1 << indx;
#endif
	if (size > MAXALLOCSAVE)
		allocsize = round_page(size);
	else
		allocsize = 1 << indx;
	if (XSIMPLEQ_FIRST(&kbp->kb_freelist) == NULL) {
		npg = atop(round_page(allocsize));
		va = (caddr_t)uvm_km_kmemalloc_pla(kmem_map, NULL,
		    (vsize_t)ptoa(npg), 0,
		    ((flags & M_NOWAIT) ? UVM_KMF_NOWAIT : 0) |
		    ((flags & M_CANFAIL) ? UVM_KMF_CANFAIL : 0),
		    no_constraint.ucr_low, no_constraint.ucr_high,
		    0, 0, 0);
		if (va == NULL) {
			/*
			 * uvm_km_kmemalloc_pla() can return NULL, even
			 * if it can wait, if there is no map space
			 * available, because it can't fix that problem.
			 * Neither can we, right now.  (We should release
			 * pages which are completely free and which are
			 * in buckets with too many free elements.)
			 */
			if ((flags & (M_NOWAIT|M_CANFAIL)) == 0)
				panic("malloc: out of space in kmem_map");
			splx(s);
			return (NULL);
		}
#ifdef KMEMSTATS
		kbp->kb_total += kbp->kb_elmpercl;
#endif
		kup = btokup(va);
		kup->ku_indx = indx;
#ifdef DIAGNOSTIC
		freshalloc = 1;
#endif
		if (allocsize > MAXALLOCSAVE) {
			kup->ku_pagecnt = npg;
#ifdef KMEMSTATS
			ksp->ks_memuse += allocsize;
#endif
			goto out;
		}
#ifdef KMEMSTATS
		kup->ku_freecnt = kbp->kb_elmpercl;
		kbp->kb_totalfree += kbp->kb_elmpercl;
#endif
		cp = va + (npg * PAGE_SIZE) - allocsize;
		for (;;) {
			freep = (struct kmem_freelist *)cp;
#ifdef DIAGNOSTIC
			/*
			 * Copy in known text to detect modification
			 * after freeing.
			 */
			poison_mem(cp, allocsize);
			freep->kf_type = M_FREE;
#endif /* DIAGNOSTIC */
			XSIMPLEQ_INSERT_HEAD(&kbp->kb_freelist, freep,
			    kf_flist);
			if (cp <= va)
				break;
			cp -= allocsize;
		}
	} else {
#ifdef DIAGNOSTIC
		freshalloc = 0;
#endif
	}
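
	/*
	 * For illustration, assuming a 4 KB PAGE_SIZE: a fresh page for
	 * the 128-byte bucket is carved above into 32 elements, working
	 * from the end of the page back toward va; under DIAGNOSTIC each
	 * element is poisoned before it is pushed onto the freelist, so
	 * a write after free can be detected when the element is handed
	 * out again below.
	 */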
	freep = XSIMPLEQ_FIRST(&kbp->kb_freelist);
	XSIMPLEQ_REMOVE_HEAD(&kbp->kb_freelist, kf_flist);
	va = (caddr_t)freep;
#ifdef DIAGNOSTIC
	savedtype = (unsigned)freep->kf_type < M_LAST ?
	    memname[freep->kf_type] : "???";
	if (freshalloc == 0 && XSIMPLEQ_FIRST(&kbp->kb_freelist)) {
		int rv;
		vaddr_t addr = (vaddr_t)XSIMPLEQ_FIRST(&kbp->kb_freelist);

		vm_map_lock(kmem_map);
		rv = uvm_map_checkprot(kmem_map, addr,
		    addr + sizeof(struct kmem_freelist), PROT_WRITE);
		vm_map_unlock(kmem_map);

		if (!rv) {
			printf("%s %zd of object %p size 0x%lx %s %s"
			    " (invalid addr %p)\n",
			    "Data modified on freelist: word",
			    (int32_t *)&addr - (int32_t *)kbp, va, size,
			    "previous type", savedtype, (void *)addr);
		}
	}

	/* Fill the fields that we've used with poison */
	poison_mem(freep, sizeof(*freep));

	/* and check that the data hasn't been modified. */
	if (freshalloc == 0) {
		size_t pidx;
		uint32_t pval;
		if (poison_check(va, allocsize, &pidx, &pval)) {
			panic("%s %zd of object %p size 0x%lx %s %s"
			    " (0x%x != 0x%x)\n",
			    "Data modified on freelist: word",
			    pidx, va, size, "previous type",
			    savedtype, ((int32_t*)va)[pidx], pval);
		}
	}

	freep->kf_spare0 = 0;
#endif /* DIAGNOSTIC */
#ifdef KMEMSTATS
	kup = btokup(va);
	if (kup->ku_indx != indx)
		panic("malloc: wrong bucket");
	if (kup->ku_freecnt == 0)
		panic("malloc: lost data");
	kup->ku_freecnt--;
	kbp->kb_totalfree--;
	ksp->ks_memuse += 1 << indx;
out:
	kbp->kb_calls++;
	ksp->ks_inuse++;
	ksp->ks_calls++;
	if (ksp->ks_memuse > ksp->ks_maxused)
		ksp->ks_maxused = ksp->ks_memuse;
#else
out:
#endif
	splx(s);

	if ((flags & M_ZERO) && va != NULL)
		memset(va, 0, size);
	return (va);
}

/*
 * Free a block of memory allocated by malloc.
 */
void
free(void *addr, int type, size_t freedsize)
{
	struct kmembuckets *kbp;
	struct kmemusage *kup;
	struct kmem_freelist *freep;
	long size;
	int s;
#ifdef DIAGNOSTIC
	long alloc;
#endif
#ifdef KMEMSTATS
	struct kmemstats *ksp = &kmemstats[type];
#endif

	if (addr == NULL)
		return;

#ifdef MALLOC_DEBUG
	if (debug_free(addr, type))
		return;
#endif

#ifdef DIAGNOSTIC
	if (addr < (void *)kmembase || addr >= (void *)kmemlimit)
		panic("free: non-malloced addr %p type %s", addr,
		    memname[type]);
#endif

	kup = btokup(addr);
	size = 1 << kup->ku_indx;
	kbp = &bucket[kup->ku_indx];
	if (size > MAXALLOCSAVE)
		size = kup->ku_pagecnt << PAGE_SHIFT;
	s = splvm();
#ifdef DIAGNOSTIC
	if (freedsize != 0 && freedsize > size)
		panic("free: size too large %zu > %ld (%p) type %s",
		    freedsize, size, addr, memname[type]);
	if (freedsize != 0 && size > MINALLOCSIZE && freedsize < size / 2)
		panic("free: size too small %zu < %ld / 2 (%p) type %s",
		    freedsize, size, addr, memname[type]);
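
	/*
	 * Worked example (hypothetical values): a 100-byte allocation
	 * lives in the 128-byte bucket, so free(addr, type, 100) passes
	 * both checks above (100 <= 128 and 100 >= 128 / 2), while a
	 * freedsize of 50 or 200 would panic, catching callers that
	 * misreport the size they allocated.
	 */
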
	/*
	 * Check for returns of data that do not point to the
	 * beginning of the allocation.
	 */
	if (size > PAGE_SIZE)
		alloc = addrmask[BUCKETINDX(PAGE_SIZE)];
	else
		alloc = addrmask[kup->ku_indx];
	if (((u_long)addr & alloc) != 0)
		panic("free: unaligned addr %p, size %ld, type %s, mask %ld",
		    addr, size, memname[type], alloc);
#endif /* DIAGNOSTIC */
	if (size > MAXALLOCSAVE) {
		uvm_km_free(kmem_map, (vaddr_t)addr, ptoa(kup->ku_pagecnt));
#ifdef KMEMSTATS
		ksp->ks_memuse -= size;
		kup->ku_indx = 0;
		kup->ku_pagecnt = 0;
		if (ksp->ks_memuse + size >= ksp->ks_limit &&
		    ksp->ks_memuse < ksp->ks_limit)
			wakeup(ksp);
		ksp->ks_inuse--;
		kbp->kb_total -= 1;
#endif
		splx(s);
		return;
	}
	freep = (struct kmem_freelist *)addr;
#ifdef DIAGNOSTIC
	/*
	 * Check for multiple frees.  Use a quick check to see if
	 * it looks free before laboriously searching the freelist.
	 */
	if (freep->kf_spare0 == poison_value(freep)) {
		struct kmem_freelist *fp;
		XSIMPLEQ_FOREACH(fp, &kbp->kb_freelist, kf_flist) {
			if (addr != fp)
				continue;
			printf("multiply freed item %p\n", addr);
			panic("free: duplicated free");
		}
	}
	/*
	 * Copy in known text to detect modification after freeing
	 * and to make it look free.  Also, save the type being freed
	 * so we can list the likely culprit if modification is detected
	 * when the object is reallocated.
	 */
	poison_mem(addr, size);
	freep->kf_spare0 = poison_value(freep);

	freep->kf_type = type;
#endif /* DIAGNOSTIC */
#ifdef KMEMSTATS
	kup->ku_freecnt++;
	if (kup->ku_freecnt >= kbp->kb_elmpercl) {
		if (kup->ku_freecnt > kbp->kb_elmpercl)
			panic("free: multiple frees");
		else if (kbp->kb_totalfree > kbp->kb_highwat)
			kbp->kb_couldfree++;
	}
	kbp->kb_totalfree++;
	ksp->ks_memuse -= size;
	if (ksp->ks_memuse + size >= ksp->ks_limit &&
	    ksp->ks_memuse < ksp->ks_limit)
		wakeup(ksp);
	ksp->ks_inuse--;
#endif
	XSIMPLEQ_INSERT_TAIL(&kbp->kb_freelist, freep, kf_flist);
	splx(s);
}

/*
 * Compute the number of pages that kmem_map will map, that is,
 * the size of the kernel malloc arena.
 */
void
kmeminit_nkmempages(void)
{
	u_int npages;

	if (nkmempages != 0) {
		/*
		 * It's already been set (by us being here before, or
		 * by patching or kernel config options); bail out now.
		 */
		return;
	}

	/*
	 * We can't initialize these variables at compilation time, since
	 * the page size may not be known (on sparc GENERIC kernels, for
	 * example).  But we still want the MD code to be able to provide
	 * better values.
	 */
	if (nkmempages_min == 0)
		nkmempages_min = NKMEMPAGES_MIN;
	if (nkmempages_max == 0)
		nkmempages_max = NKMEMPAGES_MAX;

	/*
	 * We use the following (simple) formula:
	 *
	 * - Starting point is physical memory / 4.
	 *
	 * - Clamp it down to nkmempages_max.
	 *
	 * - Round it up to nkmempages_min.
	 */
	npages = physmem / 4;

	if (npages > nkmempages_max)
		npages = nkmempages_max;

	if (npages < nkmempages_min)
		npages = nkmempages_min;

	nkmempages = npages;
}
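
/*
 * For illustration (hypothetical machine): with 4 KB pages and 1 GB of
 * physical memory, physmem is 262144 pages, so the starting point is
 * 65536 pages (256 MB of kmem_map) before the min/max clamping above
 * is applied.
 */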

/*
 * Initialize the kernel memory allocator
 */
void
kmeminit(void)
{
	vaddr_t base, limit;
	long indx;

#ifdef DIAGNOSTIC
	if (sizeof(struct kmem_freelist) > (1 << MINBUCKET))
		panic("kmeminit: minbucket too small/struct freelist too big");
#endif

	/*
	 * Compute the number of kmem_map pages, if we have not
	 * done so already.
	 */
	kmeminit_nkmempages();
	base = vm_map_min(kernel_map);
	kmem_map = uvm_km_suballoc(kernel_map, &base, &limit,
	    (vsize_t)nkmempages << PAGE_SHIFT,
#ifdef KVA_GUARDPAGES
	    VM_MAP_INTRSAFE | VM_MAP_GUARDPAGES,
#else
	    VM_MAP_INTRSAFE,
#endif
	    FALSE, &kmem_map_store);
	kmembase = (char *)base;
	kmemlimit = (char *)limit;
	kmemusage = (struct kmemusage *) uvm_km_zalloc(kernel_map,
	    (vsize_t)(nkmempages * sizeof(struct kmemusage)));
	for (indx = 0; indx < MINBUCKET + 16; indx++) {
		XSIMPLEQ_INIT(&bucket[indx].kb_freelist);
	}
#ifdef KMEMSTATS
	for (indx = 0; indx < MINBUCKET + 16; indx++) {
		if (1 << indx >= PAGE_SIZE)
			bucket[indx].kb_elmpercl = 1;
		else
			bucket[indx].kb_elmpercl = PAGE_SIZE / (1 << indx);
		bucket[indx].kb_highwat = 5 * bucket[indx].kb_elmpercl;
	}
	for (indx = 0; indx < M_LAST; indx++)
		kmemstats[indx].ks_limit = nkmempages * PAGE_SIZE * 6 / 10;
#endif
#ifdef MALLOC_DEBUG
	debug_malloc_init();
#endif
}

/*
 * Return kernel malloc statistics information.
 */
int
sysctl_malloc(int *name, u_int namelen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen, struct proc *p)
{
	struct kmembuckets kb;
#if defined(KMEMSTATS) || defined(DIAGNOSTIC) || defined(FFS_SOFTUPDATES)
	int error;
#endif
	int i, siz;

	if (namelen != 2 && name[0] != KERN_MALLOC_BUCKETS &&
	    name[0] != KERN_MALLOC_KMEMNAMES)
		return (ENOTDIR);		/* overloaded */

	switch (name[0]) {
	case KERN_MALLOC_BUCKETS:
		/* Initialize the first time */
		if (buckstring_init == 0) {
			buckstring_init = 1;
			memset(buckstring, 0, sizeof(buckstring));
			for (siz = 0, i = MINBUCKET; i < MINBUCKET + 16; i++) {
				snprintf(buckstring + siz,
				    sizeof buckstring - siz,
				    "%d,", (u_int)(1<<i));
				siz += strlen(buckstring + siz);
			}
			/* Remove trailing comma */
			if (siz)
				buckstring[siz - 1] = '\0';
		}
		return (sysctl_rdstring(oldp, oldlenp, newp, buckstring));

	case KERN_MALLOC_BUCKET:
		memcpy(&kb, &bucket[BUCKETINDX(name[1])], sizeof(kb));
		memset(&kb.kb_freelist, 0, sizeof(kb.kb_freelist));
		return (sysctl_rdstruct(oldp, oldlenp, newp, &kb, sizeof(kb)));
	case KERN_MALLOC_KMEMSTATS:
#ifdef KMEMSTATS
		if ((name[1] < 0) || (name[1] >= M_LAST))
			return (EINVAL);
		return (sysctl_rdstruct(oldp, oldlenp, newp,
		    &kmemstats[name[1]], sizeof(struct kmemstats)));
#else
		return (EOPNOTSUPP);
#endif
	case KERN_MALLOC_KMEMNAMES:
#if defined(KMEMSTATS) || defined(DIAGNOSTIC) || defined(FFS_SOFTUPDATES)
		error = rw_enter(&sysctl_kmemlock, RW_WRITE|RW_INTR);
		if (error)
			return (error);
		if (memall == NULL) {
			int totlen;

			/* Figure out how large a buffer we need */
			for (totlen = 0, i = 0; i < M_LAST; i++) {
				if (memname[i])
					totlen += strlen(memname[i]);
				totlen++;
			}
			memall = malloc(totlen + M_LAST, M_SYSCTL,
			    M_WAITOK|M_ZERO);
			for (siz = 0, i = 0; i < M_LAST; i++) {
				snprintf(memall + siz,
				    totlen + M_LAST - siz,
				    "%s,", memname[i] ? memname[i] : "");
				siz += strlen(memall + siz);
			}
			/* Remove trailing comma */
			if (siz)
				memall[siz - 1] = '\0';

			/* Now, convert all spaces to underscores */
			for (i = 0; i < totlen; i++)
				if (memall[i] == ' ')
					memall[i] = '_';
		}
		rw_exit_write(&sysctl_kmemlock);
		return (sysctl_rdstring(oldp, oldlenp, newp, memall));
#else
		return (EOPNOTSUPP);
#endif
	default:
		return (EOPNOTSUPP);
	}
	/* NOTREACHED */
}

/*
 * Round up a size to how much malloc would actually allocate.
 */
size_t
malloc_roundup(size_t sz)
{
	if (sz > MAXALLOCSAVE)
		return round_page(sz);

	return (1 << BUCKETINDX(sz));
}
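
/*
 * For illustration, assuming MINBUCKET == 4: malloc_roundup(100)
 * returns 128, the size of the bucket that malloc(100, ...) would
 * draw from, while requests above MAXALLOCSAVE round up to a whole
 * number of pages.
 */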

#if defined(DDB)
#include <machine/db_machdep.h>
#include <ddb/db_output.h>

void
malloc_printit(
    int (*pr)(const char *, ...) __attribute__((__format__(__kprintf__,1,2))))
{
#ifdef KMEMSTATS
	struct kmemstats *km;
	int i;

	(*pr)("%15s %5s %6s %7s %6s %9s %8s %8s\n",
	    "Type", "InUse", "MemUse", "HighUse", "Limit", "Requests",
	    "Type Lim", "Kern Lim");
	for (i = 0, km = kmemstats; i < M_LAST; i++, km++) {
		if (!km->ks_calls || !memname[i])
			continue;

		(*pr)("%15s %5ld %6ldK %7ldK %6ldK %9ld %8d %8d\n",
		    memname[i], km->ks_inuse, km->ks_memuse / 1024,
		    km->ks_maxused / 1024, km->ks_limit / 1024,
		    km->ks_calls, km->ks_limblocks, km->ks_mapblocks);
	}
#else
	(*pr)("No KMEMSTATS compiled in\n");
#endif
}
#endif /* DDB */

/*
 * Copyright (c) 2008 Otto Moerbeek <otto@drijf.net>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * This is sqrt(SIZE_MAX+1), as s1*s2 <= SIZE_MAX
 * if both s1 < MUL_NO_OVERFLOW and s2 < MUL_NO_OVERFLOW
 */
#define MUL_NO_OVERFLOW	(1UL << (sizeof(size_t) * 4))

void *
mallocarray(size_t nmemb, size_t size, int type, int flags)
{
	if ((nmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
	    nmemb > 0 && SIZE_MAX / nmemb < size) {
		if (flags & M_CANFAIL)
			return (NULL);
		panic("mallocarray: overflow %zu * %zu", nmemb, size);
	}
	return (malloc(size * nmemb, type, flags));
}
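
/*
 * For illustration (assuming a 64-bit size_t): MUL_NO_OVERFLOW is 2^32,
 * so when both operands fit in 32 bits the product cannot exceed
 * SIZE_MAX and the division is skipped entirely.  A call such as
 * mallocarray(SIZE_MAX, 2, M_TEMP, M_NOWAIT | M_CANFAIL) takes the
 * slow path, finds SIZE_MAX / nmemb (== 1) < size, and returns NULL;
 * without M_CANFAIL it would panic instead.
 */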