/*	$OpenBSD: kern_malloc.c,v 1.45 2001/12/19 08:58:06 art Exp $	*/
/*	$NetBSD: kern_malloc.c,v 1.15.4.2 1996/06/13 17:10:56 cgd Exp $	*/

/*
 * Copyright (c) 1987, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_malloc.c	8.3 (Berkeley) 1/4/94
 */

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/map.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/systm.h>
#include <sys/sysctl.h>

#include <uvm/uvm_extern.h>

/*
 * Backing storage for kmem_map; the submap handed back by
 * uvm_km_suballoc() in kmeminit() lives inside this structure.
 */
static struct vm_map_intrsafe kmem_map_store;
struct vm_map *kmem_map = NULL;

#ifdef NKMEMCLUSTERS
#error NKMEMCLUSTERS is obsolete; remove it from your kernel config file and use NKMEMPAGES instead or let the kernel auto-size
#endif

/*
 * Default number of pages in kmem_map.  We attempt to calculate this
 * at run-time, but allow it to be either patched or set in the kernel
 * config file.  A value of 0 means "auto-size in kmeminit_nkmempages()".
 */
#ifndef NKMEMPAGES
#define	NKMEMPAGES	0
#endif
int	nkmempages = NKMEMPAGES;

/*
 * Defaults for lower- and upper-bounds for the kmem_map page count.
 * Can be overridden by kernel config options.
 */
#ifndef	NKMEMPAGES_MIN
#define	NKMEMPAGES_MIN	NKMEMPAGES_MIN_DEFAULT
#endif

#ifndef NKMEMPAGES_MAX
#define	NKMEMPAGES_MAX	NKMEMPAGES_MAX_DEFAULT
#endif

/* One freelist bucket per power-of-two size class, starting at MINBUCKET. */
struct kmembuckets bucket[MINBUCKET + 16];
struct kmemstats kmemstats[M_LAST];	/* per-type usage statistics */
struct kmemusage *kmemusage;		/* one entry per kmem_map page */
char *kmembase, *kmemlimit;		/* bounds of the kmem_map arena */
char buckstring[16 * sizeof("123456,")];	/* sysctl list of bucket sizes */
int buckstring_init = 0;		/* buckstring built lazily, once */
#if defined(KMEMSTATS) || defined(DIAGNOSTIC) || defined(FFS_SOFTUPDATES)
char *memname[] = INITKMEMNAMES;
char *memall = NULL;			/* sysctl type-name list, built lazily */
extern struct lock sysctl_kmemlock;
#endif

#ifdef DIAGNOSTIC
/*
 * This structure provides a set of masks to catch unaligned frees.
 */
long addrmask[] = { 0,
	0x00000001, 0x00000003, 0x00000007, 0x0000000f,
	0x0000001f, 0x0000003f, 0x0000007f, 0x000000ff,
	0x000001ff, 0x000003ff, 0x000007ff, 0x00000fff,
	0x00001fff, 0x00003fff, 0x00007fff, 0x0000ffff,
};

/*
 * The WEIRD_ADDR is used as known text to copy into free objects so
 * that modifications after frees can be detected.
 */
#define WEIRD_ADDR	((unsigned) 0xdeadbeef)
#define MAX_COPY	32

/*
 * Normally the freelist structure is used only to hold the list pointer
 * for free objects.  However, when running with diagnostics, the first
 * 8 bytes of the structure is unused except for diagnostic information,
 * and the free list pointer is at offset 8 in the structure.  Since the
 * first 8 bytes is the portion of the structure most often modified, this
 * helps to detect memory reuse problems and avoid free list corruption.
115 */ 116 struct freelist { 117 int32_t spare0; 118 int16_t type; 119 int16_t spare1; 120 caddr_t next; 121 }; 122 #else /* !DIAGNOSTIC */ 123 struct freelist { 124 caddr_t next; 125 }; 126 #endif /* DIAGNOSTIC */ 127 128 /* 129 * Allocate a block of memory 130 */ 131 void * 132 malloc(size, type, flags) 133 unsigned long size; 134 int type, flags; 135 { 136 register struct kmembuckets *kbp; 137 register struct kmemusage *kup; 138 register struct freelist *freep; 139 long indx, npg, allocsize; 140 int s; 141 caddr_t va, cp, savedlist; 142 #ifdef DIAGNOSTIC 143 int32_t *end, *lp; 144 int copysize; 145 char *savedtype; 146 #endif 147 #ifdef KMEMSTATS 148 register struct kmemstats *ksp = &kmemstats[type]; 149 150 if (((unsigned long)type) > M_LAST) 151 panic("malloc - bogus type"); 152 #endif 153 154 #ifdef MALLOC_DEBUG 155 if (debug_malloc(size, type, flags, (void **)&va)) 156 return ((void *) va); 157 #endif 158 159 indx = BUCKETINDX(size); 160 kbp = &bucket[indx]; 161 s = splimp(); 162 #ifdef KMEMSTATS 163 while (ksp->ks_memuse >= ksp->ks_limit) { 164 if (flags & M_NOWAIT) { 165 splx(s); 166 return ((void *) NULL); 167 } 168 if (ksp->ks_limblocks < 65535) 169 ksp->ks_limblocks++; 170 tsleep((caddr_t)ksp, PSWP+2, memname[type], 0); 171 } 172 ksp->ks_size |= 1 << indx; 173 #endif 174 #ifdef DIAGNOSTIC 175 copysize = 1 << indx < MAX_COPY ? 1 << indx : MAX_COPY; 176 #endif 177 if (kbp->kb_next == NULL) { 178 kbp->kb_last = NULL; 179 if (size > MAXALLOCSAVE) 180 allocsize = round_page(size); 181 else 182 allocsize = 1 << indx; 183 npg = btoc(allocsize); 184 va = (caddr_t) uvm_km_kmemalloc(kmem_map, uvmexp.kmem_object, 185 (vsize_t)ctob(npg), 186 (flags & M_NOWAIT) ? UVM_KMF_NOWAIT : 0); 187 if (va == NULL) { 188 /* 189 * Kmem_malloc() can return NULL, even if it can 190 * wait, if there is no map space available, because 191 * it can't fix that problem. Neither can we, 192 * right now. 
(We should release pages which 193 * are completely free and which are in buckets 194 * with too many free elements.) 195 */ 196 if ((flags & M_NOWAIT) == 0) 197 panic("malloc: out of space in kmem_map"); 198 splx(s); 199 return ((void *) NULL); 200 } 201 #ifdef KMEMSTATS 202 kbp->kb_total += kbp->kb_elmpercl; 203 #endif 204 kup = btokup(va); 205 kup->ku_indx = indx; 206 if (allocsize > MAXALLOCSAVE) { 207 if (npg > 65535) 208 panic("malloc: allocation too large"); 209 kup->ku_pagecnt = npg; 210 #ifdef KMEMSTATS 211 ksp->ks_memuse += allocsize; 212 #endif 213 goto out; 214 } 215 #ifdef KMEMSTATS 216 kup->ku_freecnt = kbp->kb_elmpercl; 217 kbp->kb_totalfree += kbp->kb_elmpercl; 218 #endif 219 /* 220 * Just in case we blocked while allocating memory, 221 * and someone else also allocated memory for this 222 * bucket, don't assume the list is still empty. 223 */ 224 savedlist = kbp->kb_next; 225 kbp->kb_next = cp = va + (npg * PAGE_SIZE) - allocsize; 226 for (;;) { 227 freep = (struct freelist *)cp; 228 #ifdef DIAGNOSTIC 229 /* 230 * Copy in known text to detect modification 231 * after freeing. 232 */ 233 end = (int32_t *)&cp[copysize]; 234 for (lp = (int32_t *)cp; lp < end; lp++) 235 *lp = WEIRD_ADDR; 236 freep->type = M_FREE; 237 #endif /* DIAGNOSTIC */ 238 if (cp <= va) 239 break; 240 cp -= allocsize; 241 freep->next = cp; 242 } 243 freep->next = savedlist; 244 if (kbp->kb_last == NULL) 245 kbp->kb_last = (caddr_t)freep; 246 } 247 va = kbp->kb_next; 248 kbp->kb_next = ((struct freelist *)va)->next; 249 #ifdef DIAGNOSTIC 250 freep = (struct freelist *)va; 251 savedtype = (unsigned)freep->type < M_LAST ? 
252 memname[freep->type] : "???"; 253 if (kbp->kb_next) { 254 int rv; 255 vaddr_t addr = (vaddr_t)kbp->kb_next; 256 257 vm_map_lock(kmem_map); 258 rv = uvm_map_checkprot(kmem_map, addr, 259 addr + sizeof(struct freelist), VM_PROT_WRITE); 260 vm_map_unlock(kmem_map); 261 262 if (!rv) { 263 printf("%s %d of object %p size 0x%lx %s %s (invalid addr %p)\n", 264 "Data modified on freelist: word", 265 (int32_t *)&kbp->kb_next - (int32_t *)kbp, va, size, 266 "previous type", savedtype, kbp->kb_next); 267 kbp->kb_next = NULL; 268 } 269 } 270 271 /* Fill the fields that we've used with WEIRD_ADDR */ 272 #if BYTE_ORDER == BIG_ENDIAN 273 freep->type = WEIRD_ADDR >> 16; 274 #endif 275 #if BYTE_ORDER == LITTLE_ENDIAN 276 freep->type = (short)WEIRD_ADDR; 277 #endif 278 end = (int32_t *)&freep->next + 279 (sizeof(freep->next) / sizeof(int32_t)); 280 for (lp = (int32_t *)&freep->next; lp < end; lp++) 281 *lp = WEIRD_ADDR; 282 283 /* and check that the data hasn't been modified. */ 284 end = (int32_t *)&va[copysize]; 285 for (lp = (int32_t *)va; lp < end; lp++) { 286 if (*lp == WEIRD_ADDR) 287 continue; 288 printf("%s %d of object %p size 0x%lx %s %s (0x%x != 0x%x)\n", 289 "Data modified on freelist: word", lp - (int32_t *)va, 290 va, size, "previous type", savedtype, *lp, WEIRD_ADDR); 291 break; 292 } 293 294 freep->spare0 = 0; 295 #endif /* DIAGNOSTIC */ 296 #ifdef KMEMSTATS 297 kup = btokup(va); 298 if (kup->ku_indx != indx) 299 panic("malloc: wrong bucket"); 300 if (kup->ku_freecnt == 0) 301 panic("malloc: lost data"); 302 kup->ku_freecnt--; 303 kbp->kb_totalfree--; 304 ksp->ks_memuse += 1 << indx; 305 out: 306 kbp->kb_calls++; 307 ksp->ks_inuse++; 308 ksp->ks_calls++; 309 if (ksp->ks_memuse > ksp->ks_maxused) 310 ksp->ks_maxused = ksp->ks_memuse; 311 #else 312 out: 313 #endif 314 splx(s); 315 return ((void *) va); 316 } 317 318 /* 319 * Free a block of memory allocated by malloc. 
 */
void
free(addr, type)
	void *addr;
	int type;
{
	register struct kmembuckets *kbp;
	register struct kmemusage *kup;
	register struct freelist *freep;
	long size;
	int s;
#ifdef DIAGNOSTIC
	caddr_t cp;
	int32_t *end, *lp;
	long alloc, copysize;
#endif
#ifdef KMEMSTATS
	register struct kmemstats *ksp = &kmemstats[type];
#endif

#ifdef MALLOC_DEBUG
	if (debug_free(addr, type))
		return;
#endif

#ifdef DIAGNOSTIC
	/* The pointer must lie inside the kmem_map malloc arena. */
	if (addr < (void *)kmembase || addr >= (void *)kmemlimit)
		panic("free: non-malloced addr %p type %s", addr,
		    memname[type]);
#endif

	/* Recover the bucket index recorded for this page by malloc(). */
	kup = btokup(addr);
	size = 1 << kup->ku_indx;
	kbp = &bucket[kup->ku_indx];
	s = splimp();
#ifdef DIAGNOSTIC
	/*
	 * Check for returns of data that do not point to the
	 * beginning of the allocation.
	 */
	if (size > PAGE_SIZE)
		alloc = addrmask[BUCKETINDX(PAGE_SIZE)];
	else
		alloc = addrmask[kup->ku_indx];
	if (((u_long)addr & alloc) != 0)
		panic("free: unaligned addr %p, size %ld, type %s, mask %ld",
			addr, size, memname[type], alloc);
#endif /* DIAGNOSTIC */
	if (size > MAXALLOCSAVE) {
		/* Oversize allocation: return the pages straight to UVM. */
		uvm_km_free(kmem_map, (vaddr_t)addr, ctob(kup->ku_pagecnt));
#ifdef KMEMSTATS
		size = kup->ku_pagecnt << PGSHIFT;
		ksp->ks_memuse -= size;
		kup->ku_indx = 0;
		kup->ku_pagecnt = 0;
		/*
		 * If this free takes the type back under its limit, wake
		 * any malloc() sleeping on the limit for this type.
		 */
		if (ksp->ks_memuse + size >= ksp->ks_limit &&
		    ksp->ks_memuse < ksp->ks_limit)
			wakeup((caddr_t)ksp);
		ksp->ks_inuse--;
		kbp->kb_total -= 1;
#endif
		splx(s);
		return;
	}
	freep = (struct freelist *)addr;
#ifdef DIAGNOSTIC
	/*
	 * Check for multiple frees. Use a quick check to see if
	 * it looks free before laboriously searching the freelist.
	 */
	if (freep->spare0 == WEIRD_ADDR) {
		for (cp = kbp->kb_next; cp;
		    cp = ((struct freelist *)cp)->next) {
			if (addr != cp)
				continue;
			printf("multiply freed item %p\n", addr);
			panic("free: duplicated free");
		}
	}
	/*
	 * Copy in known text to detect modification after freeing
	 * and to make it look free. Also, save the type being freed
	 * so we can list likely culprit if modification is detected
	 * when the object is reallocated.
	 */
	copysize = size < MAX_COPY ? size : MAX_COPY;
	end = (int32_t *)&((caddr_t)addr)[copysize];
	for (lp = (int32_t *)addr; lp < end; lp++)
		*lp = WEIRD_ADDR;
	freep->type = type;
#endif /* DIAGNOSTIC */
#ifdef KMEMSTATS
	kup->ku_freecnt++;
	if (kup->ku_freecnt >= kbp->kb_elmpercl) {
		if (kup->ku_freecnt > kbp->kb_elmpercl)
			panic("free: multiple frees");
		else if (kbp->kb_totalfree > kbp->kb_highwat)
			kbp->kb_couldfree++;
	}
	kbp->kb_totalfree++;
	ksp->ks_memuse -= size;
	/* Wake malloc() sleepers if we just dropped below the type limit. */
	if (ksp->ks_memuse + size >= ksp->ks_limit &&
	    ksp->ks_memuse < ksp->ks_limit)
		wakeup((caddr_t)ksp);
	ksp->ks_inuse--;
#endif
	/* Append to the bucket freelist; malloc() pops from the head. */
	if (kbp->kb_next == NULL)
		kbp->kb_next = addr;
	else
		((struct freelist *)kbp->kb_last)->next = addr;
	freep->next = NULL;
	kbp->kb_last = addr;
	splx(s);
}

/*
 * Compute the number of pages that kmem_map will map, that is,
 * the size of the kernel malloc arena.
 */
void
kmeminit_nkmempages()
{
	int npages;

	if (nkmempages != 0) {
		/*
		 * It's already been set (by us being here before, or
		 * by patching or kernel config options), bail out now.
		 */
		return;
	}

	/*
	 * We use the following (simple) formula:
	 *
	 *	- Starting point is physical memory / 4.
	 *
	 *	- Clamp it down to NKMEMPAGES_MAX.
	 *
	 *	- Round it up to NKMEMPAGES_MIN.
	 */
	npages = physmem / 4;

	if (npages > NKMEMPAGES_MAX)
		npages = NKMEMPAGES_MAX;

	if (npages < NKMEMPAGES_MIN)
		npages = NKMEMPAGES_MIN;

	nkmempages = npages;
}

/*
 * Initialize the kernel memory allocator
 */
void
kmeminit()
{
#ifdef KMEMSTATS
	long indx;
#endif

#ifdef DIAGNOSTIC
	/* Every free object must be large enough to hold a struct freelist. */
	if (sizeof(struct freelist) > (1 << MINBUCKET))
		panic("kmeminit: minbucket too small/struct freelist too big");
#endif

	/*
	 * Compute the number of kmem_map pages, if we have not
	 * done so already.
	 */
	kmeminit_nkmempages();

	/* Carve the malloc arena out of kernel_map as an intrsafe submap. */
	kmem_map = uvm_km_suballoc(kernel_map, (vaddr_t *)&kmembase,
	    (vaddr_t *)&kmemlimit, (vsize_t)(nkmempages * PAGE_SIZE),
	    VM_MAP_INTRSAFE, FALSE, &kmem_map_store.vmi_map);
	kmemusage = (struct kmemusage *) uvm_km_zalloc(kernel_map,
		(vsize_t)(nkmempages * sizeof(struct kmemusage)));
#ifdef KMEMSTATS
	for (indx = 0; indx < MINBUCKET + 16; indx++) {
		if (1 << indx >= PAGE_SIZE)
			bucket[indx].kb_elmpercl = 1;
		else
			bucket[indx].kb_elmpercl = PAGE_SIZE / (1 << indx);
		bucket[indx].kb_highwat = 5 * bucket[indx].kb_elmpercl;
	}
	/* Each type may use at most 60% of the arena. */
	for (indx = 0; indx < M_LAST; indx++)
		kmemstats[indx].ks_limit = nkmempages * PAGE_SIZE * 6 / 10;
#endif
#ifdef MALLOC_DEBUG
	debug_malloc_init();
#endif
}

/*
 * Return kernel malloc statistics information.
516 */ 517 int 518 sysctl_malloc(name, namelen, oldp, oldlenp, newp, newlen, p) 519 int *name; 520 u_int namelen; 521 void *oldp; 522 size_t *oldlenp; 523 void *newp; 524 size_t newlen; 525 struct proc *p; 526 { 527 struct kmembuckets kb; 528 int i, siz; 529 530 if (namelen != 2 && name[0] != KERN_MALLOC_BUCKETS && 531 name[0] != KERN_MALLOC_KMEMNAMES) 532 return (ENOTDIR); /* overloaded */ 533 534 switch (name[0]) { 535 case KERN_MALLOC_BUCKETS: 536 /* Initialize the first time */ 537 if (buckstring_init == 0) { 538 buckstring_init = 1; 539 bzero(buckstring, sizeof(buckstring)); 540 for (siz = 0, i = MINBUCKET; i < MINBUCKET + 16; i++) 541 siz += sprintf(buckstring + siz, 542 "%d,", (u_int)(1<<i)); 543 /* Remove trailing comma */ 544 if (siz) 545 buckstring[siz - 1] = '\0'; 546 } 547 return (sysctl_rdstring(oldp, oldlenp, newp, buckstring)); 548 549 case KERN_MALLOC_BUCKET: 550 bcopy(&bucket[BUCKETINDX(name[1])], &kb, sizeof(kb)); 551 kb.kb_next = kb.kb_last = 0; 552 return (sysctl_rdstruct(oldp, oldlenp, newp, &kb, sizeof(kb))); 553 case KERN_MALLOC_KMEMSTATS: 554 #ifdef KMEMSTATS 555 if ((name[1] < 0) || (name[1] >= M_LAST)) 556 return (EINVAL); 557 return (sysctl_rdstruct(oldp, oldlenp, newp, 558 &kmemstats[name[1]], sizeof(struct kmemstats))); 559 #else 560 return (EOPNOTSUPP); 561 #endif 562 case KERN_MALLOC_KMEMNAMES: 563 #if defined(KMEMSTATS) || defined(DIAGNOSTIC) || defined(FFS_SOFTUPDATES) 564 if (memall == NULL) { 565 int totlen; 566 567 i = lockmgr(&sysctl_kmemlock, LK_EXCLUSIVE, NULL, p); 568 if (i) 569 return (i); 570 571 /* Figure out how large a buffer we need */ 572 for (totlen = 0, i = 0; i < M_LAST; i++) { 573 if (memname[i]) 574 totlen += strlen(memname[i]); 575 totlen++; 576 } 577 memall = malloc(totlen + M_LAST, M_SYSCTL, M_WAITOK); 578 bzero(memall, totlen + M_LAST); 579 for (siz = 0, i = 0; i < M_LAST; i++) 580 siz += sprintf(memall + siz, "%s,", 581 memname[i] ? 
memname[i] : ""); 582 583 /* Remove trailing comma */ 584 if (siz) 585 memall[siz - 1] = '\0'; 586 587 /* Now, convert all spaces to underscores */ 588 for (i = 0; i < totlen; i++) 589 if (memall[i] == ' ') 590 memall[i] = '_'; 591 lockmgr(&sysctl_kmemlock, LK_RELEASE, NULL, p); 592 } 593 return (sysctl_rdstring(oldp, oldlenp, newp, memall)); 594 #else 595 return (EOPNOTSUPP); 596 #endif 597 default: 598 return (EOPNOTSUPP); 599 } 600 /* NOTREACHED */ 601 } 602