/*	$NetBSD: kern_malloc.c,v 1.79 2003/05/06 18:07:57 fvdl Exp $	*/

/*
 * Copyright (c) 1996 Christopher G. Demetriou.  All rights reserved.
 * Copyright (c) 1987, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_malloc.c	8.4 (Berkeley) 5/20/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_malloc.c,v 1.79 2003/05/06 18:07:57 fvdl Exp $");

#include "opt_lockdebug.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/systm.h>

#include <uvm/uvm_extern.h>

static struct vm_map kmem_map_store;
struct vm_map *kmem_map = NULL;

#include "opt_kmempages.h"

#ifdef NKMEMCLUSTERS
#error NKMEMCLUSTERS is obsolete; remove it from your kernel config file and use NKMEMPAGES instead or let the kernel auto-size
#endif

/*
 * Default number of pages in kmem_map.  We attempt to calculate this
 * at run-time, but allow it to be either patched or set in the kernel
 * config file.
 */
#ifndef NKMEMPAGES
#define	NKMEMPAGES	0
#endif
int	nkmempages = NKMEMPAGES;

/*
 * Defaults for lower- and upper-bounds for the kmem_map page count.
 * Can be overridden by kernel config options.
 */
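/*
 * For illustration only: an override in a kernel config file would look
 * like the following (option names as above; the values are made-up
 * examples, not recommendations):
 *
 *	options NKMEMPAGES_MIN=2048
 *	options NKMEMPAGES_MAX=32768
 */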
#ifndef NKMEMPAGES_MIN
#define	NKMEMPAGES_MIN	NKMEMPAGES_MIN_DEFAULT
#endif

#ifndef NKMEMPAGES_MAX
#define	NKMEMPAGES_MAX	NKMEMPAGES_MAX_DEFAULT
#endif

#include "opt_kmemstats.h"
#include "opt_malloclog.h"
#include "opt_malloc_debug.h"

struct kmembuckets bucket[MINBUCKET + 16];
struct kmemusage *kmemusage;
char *kmembase, *kmemlimit;

struct malloc_type *kmemstatistics;

#ifdef MALLOCLOG
#ifndef MALLOCLOGSIZE
#define	MALLOCLOGSIZE	100000
#endif

struct malloclog {
	void *addr;
	long size;
	struct malloc_type *type;
	int action;
	const char *file;
	long line;
} malloclog[MALLOCLOGSIZE];

long	malloclogptr;

static void
domlog(void *a, long size, struct malloc_type *type, int action,
    const char *file, long line)
{

	malloclog[malloclogptr].addr = a;
	malloclog[malloclogptr].size = size;
	malloclog[malloclogptr].type = type;
	malloclog[malloclogptr].action = action;
	malloclog[malloclogptr].file = file;
	malloclog[malloclogptr].line = line;
	malloclogptr++;
	if (malloclogptr >= MALLOCLOGSIZE)
		malloclogptr = 0;
}

static void
hitmlog(void *a)
{
	struct malloclog *lp;
	long l;

#define	PRT do { \
	if (malloclog[l].addr == a && malloclog[l].action) { \
		lp = &malloclog[l]; \
		printf("malloc log entry %ld:\n", l); \
		printf("\taddr = %p\n", lp->addr); \
		printf("\tsize = %ld\n", lp->size); \
		printf("\ttype = %s\n", lp->type->ks_shortdesc); \
		printf("\taction = %s\n", lp->action == 1 ? "alloc" : "free"); \
		printf("\tfile = %s\n", lp->file); \
		printf("\tline = %ld\n", lp->line); \
	} \
} while (/* CONSTCOND */0)

	for (l = malloclogptr; l < MALLOCLOGSIZE; l++)
		PRT;

	for (l = 0; l < malloclogptr; l++)
		PRT;
}
#endif /* MALLOCLOG */

#ifdef DIAGNOSTIC
/*
 * This structure provides a set of masks to catch unaligned frees.
 */
const long addrmask[] = { 0,
	0x00000001, 0x00000003, 0x00000007, 0x0000000f,
	0x0000001f, 0x0000003f, 0x0000007f, 0x000000ff,
	0x000001ff, 0x000003ff, 0x000007ff, 0x00000fff,
	0x00001fff, 0x00003fff, 0x00007fff, 0x0000ffff,
};

/*
 * The WEIRD_ADDR is used as known text to copy into free objects so
 * that modifications after frees can be detected.
 */
#define	WEIRD_ADDR	((uint32_t) 0xdeadbeef)
#ifdef DEBUG
#define	MAX_COPY	PAGE_SIZE
#else
#define	MAX_COPY	32
#endif

/*
 * Normally the freelist structure is used only to hold the list pointer
 * for free objects.  However, when running with diagnostics, the first
 * 8/16 bytes of the structure are used only for diagnostic information,
 * and the free list pointer is at offset 8/16 in the structure.  Since
 * the first 8 bytes are the portion of the structure most often modified,
 * this helps to detect memory reuse problems and avoid free list
 * corruption.
 */
struct freelist {
	uint32_t spare0;
#ifdef _LP64
	uint32_t spare1;		/* explicit padding */
#endif
	struct malloc_type *type;
	caddr_t	next;
};
#else /* !DIAGNOSTIC */
struct freelist {
	caddr_t	next;
};
#endif /* DIAGNOSTIC */

/*
 * The following are standard, built-in malloc types that are not
 * specific to any one subsystem.
 */
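/*
 * Each MALLOC_DEFINE() below creates a struct malloc_type and enters it
 * in the "malloc_types" link set, from which kmeminit() attaches it at
 * boot.  As a sketch, a subsystem would define and use its own type the
 * same way (M_FROBOZZ and the variable names here are hypothetical):
 *
 *	MALLOC_DEFINE(M_FROBOZZ, "frobozz", "frobozz subsystem buffers");
 *	...
 *	struct frobozz *fp = malloc(sizeof(*fp), M_FROBOZZ, M_WAITOK);
 *	...
 *	free(fp, M_FROBOZZ);
 */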
MALLOC_DEFINE(M_DEVBUF, "devbuf", "device driver memory");
MALLOC_DEFINE(M_DMAMAP, "DMA map", "bus_dma(9) structures");
MALLOC_DEFINE(M_FREE, "free", "should be on free list");
MALLOC_DEFINE(M_PCB, "pcb", "protocol control block");
MALLOC_DEFINE(M_SOFTINTR, "softintr", "Softinterrupt structures");
MALLOC_DEFINE(M_TEMP, "temp", "misc. temporary data buffers");

/* XXX These should all be elsewhere. */
MALLOC_DEFINE(M_RTABLE, "routetbl", "routing tables");
MALLOC_DEFINE(M_FTABLE, "fragtbl", "fragment reassembly header");
MALLOC_DEFINE(M_UFSMNT, "UFS mount", "UFS mount structure");
MALLOC_DEFINE(M_NETADDR, "Export Host", "Export host address structure");
MALLOC_DEFINE(M_IPMOPTS, "ip_moptions", "internet multicast options");
MALLOC_DEFINE(M_IPMADDR, "in_multi", "internet multicast address");
MALLOC_DEFINE(M_MRTABLE, "mrt", "multicast routing tables");
MALLOC_DEFINE(M_1394DATA, "1394data", "IEEE 1394 data buffers");

struct simplelock malloc_slock = SIMPLELOCK_INITIALIZER;

/*
 * Allocate a block of memory.
 */
#ifdef MALLOCLOG
void *
_malloc(unsigned long size, struct malloc_type *ksp, int flags,
    const char *file, long line)
#else
void *
malloc(unsigned long size, struct malloc_type *ksp, int flags)
#endif /* MALLOCLOG */
{
	struct kmembuckets *kbp;
	struct kmemusage *kup;
	struct freelist *freep;
	long indx, npg, allocsize;
	int s;
	caddr_t va, cp, savedlist;
#ifdef DIAGNOSTIC
	uint32_t *end, *lp;
	int copysize;
	const char *savedtype;
#endif

#ifdef LOCKDEBUG
	if ((flags & M_NOWAIT) == 0)
		simple_lock_only_held(NULL, "malloc");
#endif
#ifdef MALLOC_DEBUG
	if (debug_malloc(size, ksp, flags, (void **) &va))
		return ((void *) va);
#endif
	indx = BUCKETINDX(size);
	kbp = &bucket[indx];
	s = splvm();
	simple_lock(&malloc_slock);
#ifdef KMEMSTATS
	while (ksp->ks_memuse >= ksp->ks_limit) {
		if (flags & M_NOWAIT) {
			simple_unlock(&malloc_slock);
			splx(s);
			return ((void *) NULL);
		}
		if (ksp->ks_limblocks < 65535)
			ksp->ks_limblocks++;
		ltsleep((caddr_t)ksp, PSWP+2, ksp->ks_shortdesc, 0,
		    &malloc_slock);
	}
	ksp->ks_size |= 1 << indx;
#endif
#ifdef DIAGNOSTIC
	copysize = 1 << indx < MAX_COPY ? 1 << indx : MAX_COPY;
#endif
	if (kbp->kb_next == NULL) {
		kbp->kb_last = NULL;
		if (size > MAXALLOCSAVE)
			allocsize = round_page(size);
		else
			allocsize = 1 << indx;
		npg = btoc(allocsize);
		simple_unlock(&malloc_slock);
		va = (caddr_t) uvm_km_kmemalloc(kmem_map, NULL,
		    (vsize_t)ctob(npg),
		    ((flags & M_NOWAIT) ? UVM_KMF_NOWAIT : 0) |
		    ((flags & M_CANFAIL) ? UVM_KMF_CANFAIL : 0));
		if (__predict_false(va == NULL)) {
			/*
			 * Kmem_malloc() can return NULL, even if it can
			 * wait, if there is no map space available, because
			 * it can't fix that problem.  Neither can we,
			 * right now.  (We should release pages which
			 * are completely free and which are in buckets
			 * with too many free elements.)
			 */
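			/*
			 * Callers passing M_CANFAIL also reach this
			 * point: they ask for a NULL return instead of
			 * a panic even when they could have waited.
			 */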
			if ((flags & (M_NOWAIT|M_CANFAIL)) == 0)
				panic("malloc: out of space in kmem_map");
			splx(s);
			return (NULL);
		}
		simple_lock(&malloc_slock);
#ifdef KMEMSTATS
		kbp->kb_total += kbp->kb_elmpercl;
#endif
		kup = btokup(va);
		kup->ku_indx = indx;
		if (allocsize > MAXALLOCSAVE) {
			if (npg > 65535)
				panic("malloc: allocation too large");
			kup->ku_pagecnt = npg;
#ifdef KMEMSTATS
			ksp->ks_memuse += allocsize;
#endif
			goto out;
		}
#ifdef KMEMSTATS
		kup->ku_freecnt = kbp->kb_elmpercl;
		kbp->kb_totalfree += kbp->kb_elmpercl;
#endif
		/*
		 * Just in case we blocked while allocating memory,
		 * and someone else also allocated memory for this
		 * bucket, don't assume the list is still empty.
		 */
		savedlist = kbp->kb_next;
		kbp->kb_next = cp = va + (npg << PAGE_SHIFT) - allocsize;
		for (;;) {
			freep = (struct freelist *)cp;
#ifdef DIAGNOSTIC
			/*
			 * Copy in known text to detect modification
			 * after freeing.
			 */
			end = (uint32_t *)&cp[copysize];
			for (lp = (uint32_t *)cp; lp < end; lp++)
				*lp = WEIRD_ADDR;
			freep->type = M_FREE;
#endif /* DIAGNOSTIC */
			if (cp <= va)
				break;
			cp -= allocsize;
			freep->next = cp;
		}
		freep->next = savedlist;
		if (kbp->kb_last == NULL)
			kbp->kb_last = (caddr_t)freep;
	}
	va = kbp->kb_next;
	kbp->kb_next = ((struct freelist *)va)->next;
#ifdef DIAGNOSTIC
	freep = (struct freelist *)va;
	/* XXX potential to get garbage pointer here. */
	savedtype = freep->type->ks_shortdesc;
	if (kbp->kb_next) {
		int rv;
		vaddr_t addr = (vaddr_t)kbp->kb_next;

		vm_map_lock(kmem_map);
		rv = uvm_map_checkprot(kmem_map, addr,
		    addr + sizeof(struct freelist), VM_PROT_WRITE);
		vm_map_unlock(kmem_map);

		if (__predict_false(rv == 0)) {
			printf("Data modified on freelist: "
			    "word %ld of object %p size %ld previous type %s "
			    "(invalid addr %p)\n",
			    (long)((int32_t *)&kbp->kb_next - (int32_t *)kbp),
			    va, size, savedtype, kbp->kb_next);
#ifdef MALLOCLOG
			hitmlog(va);
#endif
			kbp->kb_next = NULL;
		}
	}

	/* Fill the fields that we've used with WEIRD_ADDR */
#ifdef _LP64
	freep->type = (struct malloc_type *)
	    (WEIRD_ADDR | (((u_long) WEIRD_ADDR) << 32));
#else
	freep->type = (struct malloc_type *) WEIRD_ADDR;
#endif
	end = (uint32_t *)&freep->next +
	    (sizeof(freep->next) / sizeof(uint32_t));
	for (lp = (uint32_t *)&freep->next; lp < end; lp++)
		*lp = WEIRD_ADDR;

	/* and check that the data hasn't been modified. */
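	/*
	 * Only the first copysize bytes (at most MAX_COPY) were poisoned
	 * when the object was freed, so only that window can be verified;
	 * corruption beyond it goes undetected.
	 */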
	end = (uint32_t *)&va[copysize];
	for (lp = (uint32_t *)va; lp < end; lp++) {
		if (__predict_true(*lp == WEIRD_ADDR))
			continue;
		printf("Data modified on freelist: "
		    "word %ld of object %p size %ld previous type %s "
		    "(0x%x != 0x%x)\n",
		    (long)(lp - (uint32_t *)va), va, size,
		    savedtype, *lp, WEIRD_ADDR);
#ifdef MALLOCLOG
		hitmlog(va);
#endif
		break;
	}

	freep->spare0 = 0;
#endif /* DIAGNOSTIC */
#ifdef KMEMSTATS
	kup = btokup(va);
	if (kup->ku_indx != indx)
		panic("malloc: wrong bucket");
	if (kup->ku_freecnt == 0)
		panic("malloc: lost data");
	kup->ku_freecnt--;
	kbp->kb_totalfree--;
	ksp->ks_memuse += 1 << indx;
out:
	kbp->kb_calls++;
	ksp->ks_inuse++;
	ksp->ks_calls++;
	if (ksp->ks_memuse > ksp->ks_maxused)
		ksp->ks_maxused = ksp->ks_memuse;
#else
out:
#endif
#ifdef MALLOCLOG
	domlog(va, size, ksp, 1, file, line);
#endif
	simple_unlock(&malloc_slock);
	splx(s);
	if ((flags & M_ZERO) != 0)
		memset(va, 0, size);
	return ((void *) va);
}

/*
 * Free a block of memory allocated by malloc.
 */
#ifdef MALLOCLOG
void
_free(void *addr, struct malloc_type *ksp, const char *file, long line)
#else
void
free(void *addr, struct malloc_type *ksp)
#endif /* MALLOCLOG */
{
	struct kmembuckets *kbp;
	struct kmemusage *kup;
	struct freelist *freep;
	long size;
	int s;
#ifdef DIAGNOSTIC
	caddr_t cp;
	int32_t *end, *lp;
	long alloc, copysize;
#endif

#ifdef MALLOC_DEBUG
	if (debug_free(addr, ksp))
		return;
#endif

#ifdef DIAGNOSTIC
	/*
	 * Ensure that we're freeing something that we could
	 * have allocated in the first place.  That is, check
	 * to see that the address is within kmem_map.
	 */
	if (__predict_false((vaddr_t)addr < kmem_map->header.start ||
	    (vaddr_t)addr >= kmem_map->header.end))
		panic("free: addr %p not within kmem_map", addr);
#endif

	kup = btokup(addr);
	size = 1 << kup->ku_indx;
	kbp = &bucket[kup->ku_indx];
	s = splvm();
	simple_lock(&malloc_slock);
#ifdef MALLOCLOG
	domlog(addr, 0, ksp, 2, file, line);
#endif
#ifdef DIAGNOSTIC
	/*
	 * Check for returns of data that do not point to the
	 * beginning of the allocation.
	 */
	if (size > PAGE_SIZE)
		alloc = addrmask[BUCKETINDX(PAGE_SIZE)];
	else
		alloc = addrmask[kup->ku_indx];
	if (((u_long)addr & alloc) != 0)
		panic("free: unaligned addr %p, size %ld, type %s, mask %ld",
		    addr, size, ksp->ks_shortdesc, alloc);
#endif /* DIAGNOSTIC */
	if (size > MAXALLOCSAVE) {
		uvm_km_free(kmem_map, (vaddr_t)addr, ctob(kup->ku_pagecnt));
#ifdef KMEMSTATS
		size = kup->ku_pagecnt << PGSHIFT;
		ksp->ks_memuse -= size;
		kup->ku_indx = 0;
		kup->ku_pagecnt = 0;
		if (ksp->ks_memuse + size >= ksp->ks_limit &&
		    ksp->ks_memuse < ksp->ks_limit)
			wakeup((caddr_t)ksp);
#ifdef DIAGNOSTIC
		if (ksp->ks_inuse == 0)
			panic("free 1: inuse 0, probable double free");
#endif
		ksp->ks_inuse--;
		kbp->kb_total -= 1;
#endif
		simple_unlock(&malloc_slock);
		splx(s);
		return;
	}
	freep = (struct freelist *)addr;
#ifdef DIAGNOSTIC
	/*
	 * Check for multiple frees.  Use a quick check to see if
	 * it looks free before laboriously searching the freelist.
	 */
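	/*
	 * The spare0 test below is only a heuristic: a live object can
	 * legitimately contain the WEIRD_ADDR pattern, which is why the
	 * whole freelist is searched before declaring a duplicated free.
	 */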
	if (__predict_false(freep->spare0 == WEIRD_ADDR)) {
		for (cp = kbp->kb_next; cp;
		    cp = ((struct freelist *)cp)->next) {
			if (addr != cp)
				continue;
			printf("multiply freed item %p\n", addr);
#ifdef MALLOCLOG
			hitmlog(addr);
#endif
			panic("free: duplicated free");
		}
	}
#ifdef LOCKDEBUG
	/*
	 * Check if we're freeing a locked simple lock.
	 */
	simple_lock_freecheck(addr, (char *)addr + size);
#endif
	/*
	 * Copy in known text to detect modification after freeing
	 * and to make it look free.  Also, save the type being freed
	 * so we can list the likely culprit if modification is detected
	 * when the object is reallocated.
	 */
	copysize = size < MAX_COPY ? size : MAX_COPY;
	end = (int32_t *)&((caddr_t)addr)[copysize];
	for (lp = (int32_t *)addr; lp < end; lp++)
		*lp = WEIRD_ADDR;
	freep->type = ksp;
#endif /* DIAGNOSTIC */
#ifdef KMEMSTATS
	kup->ku_freecnt++;
	if (kup->ku_freecnt >= kbp->kb_elmpercl) {
		if (kup->ku_freecnt > kbp->kb_elmpercl)
			panic("free: multiple frees");
		else if (kbp->kb_totalfree > kbp->kb_highwat)
			kbp->kb_couldfree++;
	}
	kbp->kb_totalfree++;
	ksp->ks_memuse -= size;
	if (ksp->ks_memuse + size >= ksp->ks_limit &&
	    ksp->ks_memuse < ksp->ks_limit)
		wakeup((caddr_t)ksp);
#ifdef DIAGNOSTIC
	if (ksp->ks_inuse == 0)
		panic("free 2: inuse 0, probable double free");
#endif
	ksp->ks_inuse--;
#endif
	if (kbp->kb_next == NULL)
		kbp->kb_next = addr;
	else
		((struct freelist *)kbp->kb_last)->next = addr;
	freep->next = NULL;
	kbp->kb_last = addr;
	simple_unlock(&malloc_slock);
	splx(s);
}

/*
 * Change the size of a block of memory.
 */
void *
realloc(void *curaddr, unsigned long newsize, struct malloc_type *ksp,
    int flags)
{
	struct kmemusage *kup;
	unsigned long cursize;
	void *newaddr;
#ifdef DIAGNOSTIC
	long alloc;
#endif

	/*
	 * realloc() with a NULL pointer is the same as malloc().
	 */
	if (curaddr == NULL)
		return (malloc(newsize, ksp, flags));

	/*
	 * realloc() with zero size is the same as free().
	 */
	if (newsize == 0) {
		free(curaddr, ksp);
		return (NULL);
	}

#ifdef LOCKDEBUG
	if ((flags & M_NOWAIT) == 0)
		simple_lock_only_held(NULL, "realloc");
#endif

	/*
	 * Find out how large the old allocation was (and do some
	 * sanity checking).
	 */
	kup = btokup(curaddr);
	cursize = 1 << kup->ku_indx;

#ifdef DIAGNOSTIC
	/*
	 * Check for returns of data that do not point to the
	 * beginning of the allocation.
	 */
	if (cursize > PAGE_SIZE)
		alloc = addrmask[BUCKETINDX(PAGE_SIZE)];
	else
		alloc = addrmask[kup->ku_indx];
	if (((u_long)curaddr & alloc) != 0)
		panic("realloc: "
		    "unaligned addr %p, size %ld, type %s, mask %ld\n",
		    curaddr, cursize, ksp->ks_shortdesc, alloc);
#endif /* DIAGNOSTIC */

	if (cursize > MAXALLOCSAVE)
		cursize = ctob(kup->ku_pagecnt);

	/*
	 * If we already actually have as much as they want, we're done.
	 */
	if (newsize <= cursize)
		return (curaddr);

	/*
	 * Can't satisfy the allocation with the existing block.
	 * Allocate a new one and copy the data.
	 */
	newaddr = malloc(newsize, ksp, flags);
	if (__predict_false(newaddr == NULL)) {
		/*
		 * malloc() failed, because flags included M_NOWAIT
		 * or M_CANFAIL.  Return NULL to indicate the failure.
		 * The old pointer is still valid.
		 */
		return (NULL);
	}
	memcpy(newaddr, curaddr, cursize);

	/*
	 * We were successful: free the old allocation and return
	 * the new one.
	 */
	free(curaddr, ksp);
	return (newaddr);
}
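/*
 * Because a failed realloc() leaves the old block intact, callers should
 * keep the old pointer until the resize is known to have succeeded.  A
 * sketch (the variables p and np are illustrative, not part of this file):
 *
 *	np = realloc(p, newsize, M_TEMP, M_NOWAIT);
 *	if (np == NULL)
 *		return (ENOMEM);	(p remains valid and owned)
 *	p = np;
 */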
/*
 * Round up a size to the actual allocation size.
 */
unsigned long
malloc_roundup(unsigned long size)
{

	if (size > MAXALLOCSAVE)
		return (roundup(size, PAGE_SIZE));
	else
		return (1 << BUCKETINDX(size));
}

/*
 * Add a malloc type to the system.
 */
void
malloc_type_attach(struct malloc_type *type)
{

	if (nkmempages == 0)
		panic("malloc_type_attach: nkmempages == 0");

	if (type->ks_magic != M_MAGIC)
		panic("malloc_type_attach: bad magic");

#ifdef DIAGNOSTIC
	{
		struct malloc_type *ksp;
		for (ksp = kmemstatistics; ksp != NULL; ksp = ksp->ks_next) {
			if (ksp == type)
				panic("malloc_type_attach: already on list");
		}
	}
#endif

#ifdef KMEMSTATS
	if (type->ks_limit == 0)
		type->ks_limit = ((u_long)nkmempages << PAGE_SHIFT) * 6U / 10U;
#else
	type->ks_limit = 0;
#endif

	type->ks_next = kmemstatistics;
	kmemstatistics = type;
}

/*
 * Remove a malloc type from the system.
 */
void
malloc_type_detach(struct malloc_type *type)
{
	struct malloc_type *ksp;

#ifdef DIAGNOSTIC
	if (type->ks_magic != M_MAGIC)
		panic("malloc_type_detach: bad magic");
#endif

	if (type == kmemstatistics)
		kmemstatistics = type->ks_next;
	else {
		for (ksp = kmemstatistics; ksp->ks_next != NULL;
		    ksp = ksp->ks_next) {
			if (ksp->ks_next == type) {
				ksp->ks_next = type->ks_next;
				break;
			}
		}
#ifdef DIAGNOSTIC
		if (ksp->ks_next == NULL)
			panic("malloc_type_detach: not on list");
#endif
	}
	type->ks_next = NULL;
}

/*
 * Set the limit on a malloc type.
 */
void
malloc_type_setlimit(struct malloc_type *type, u_long limit)
{
#ifdef KMEMSTATS
	int s;

	s = splvm();
	type->ks_limit = limit;
	splx(s);
#endif
}

/*
 * Compute the number of pages that kmem_map will map, that is,
 * the size of the kernel malloc arena.
 */
void
kmeminit_nkmempages(void)
{
	int npages;

	if (nkmempages != 0) {
		/*
		 * It's already been set (by us being here before, or
		 * by patching or kernel config options), bail out now.
		 */
		return;
	}

	/*
	 * We use the following (simple) formula:
	 *
	 *	- Starting point is physical memory / 4.
	 *
	 *	- Clamp it down to NKMEMPAGES_MAX.
	 *
	 *	- Round it up to NKMEMPAGES_MIN.
	 */
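	/*
	 * Worked example (illustrative numbers): with 4 KB pages and
	 * 256 MB of RAM, physmem is 65536 pages, so the starting point
	 * is 16384 pages (64 MB), which the two tests below then clamp
	 * into the [NKMEMPAGES_MIN, NKMEMPAGES_MAX] range.
	 */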
	npages = physmem / 4;

	if (npages > NKMEMPAGES_MAX)
		npages = NKMEMPAGES_MAX;

	if (npages < NKMEMPAGES_MIN)
		npages = NKMEMPAGES_MIN;

	nkmempages = npages;
}

/*
 * Initialize the kernel memory allocator.
 */
void
kmeminit(void)
{
	__link_set_decl(malloc_types, struct malloc_type);
	struct malloc_type * const *ksp;
#ifdef KMEMSTATS
	long indx;
#endif

#if ((MAXALLOCSAVE & (MAXALLOCSAVE - 1)) != 0)
		ERROR!_kmeminit:_MAXALLOCSAVE_not_power_of_2
#endif
#if (MAXALLOCSAVE > MINALLOCSIZE * 32768)
		ERROR!_kmeminit:_MAXALLOCSAVE_too_big
#endif
#if (MAXALLOCSAVE < NBPG)
		ERROR!_kmeminit:_MAXALLOCSAVE_too_small
#endif

	if (sizeof(struct freelist) > (1 << MINBUCKET))
		panic("minbucket too small/struct freelist too big");

	/*
	 * Compute the number of kmem_map pages, if we have not
	 * done so already.
	 */
	kmeminit_nkmempages();

	kmemusage = (struct kmemusage *) uvm_km_zalloc(kernel_map,
	    (vsize_t)(nkmempages * sizeof(struct kmemusage)));
	kmem_map = uvm_km_suballoc(kernel_map, (void *)&kmembase,
	    (void *)&kmemlimit, (vsize_t)(nkmempages << PAGE_SHIFT),
	    VM_MAP_INTRSAFE, FALSE, &kmem_map_store);
#ifdef KMEMSTATS
	for (indx = 0; indx < MINBUCKET + 16; indx++) {
		if (1 << indx >= PAGE_SIZE)
			bucket[indx].kb_elmpercl = 1;
		else
			bucket[indx].kb_elmpercl = PAGE_SIZE / (1 << indx);
		bucket[indx].kb_highwat = 5 * bucket[indx].kb_elmpercl;
	}
#endif

	/* Attach all of the statically-linked malloc types. */
	__link_set_foreach(ksp, malloc_types)
		malloc_type_attach(*ksp);

#ifdef MALLOC_DEBUG
	debug_malloc_init();
#endif
}

#ifdef DDB
#include <ddb/db_output.h>

/*
 * Dump kmem statistics from ddb.
 *
 * usage: call dump_kmemstats
 */
void	dump_kmemstats(void);

void
dump_kmemstats(void)
{
#ifdef KMEMSTATS
	struct malloc_type *ksp;

	for (ksp = kmemstatistics; ksp != NULL; ksp = ksp->ks_next) {
		if (ksp->ks_memuse == 0)
			continue;
		db_printf("%s%.*s %ld\n", ksp->ks_shortdesc,
		    (int)(20 - strlen(ksp->ks_shortdesc)),
		    "                    ",	/* 20 spaces of padding */
		    ksp->ks_memuse);
	}
#else
	db_printf("Kmem stats are not being collected.\n");
#endif /* KMEMSTATS */
}
#endif /* DDB */
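/*
 * A hypothetical ddb session, for illustration (type names and byte
 * counts are made up; the real output depends on the running kernel):
 *
 *	db> call dump_kmemstats
 *	devbuf               40960
 *	temp                 8192
 */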