/*	$OpenBSD: kern_malloc.c,v 1.37 2001/08/02 11:06:38 art Exp $	*/
/*	$NetBSD: kern_malloc.c,v 1.15.4.2 1996/06/13 17:10:56 cgd Exp $	*/

/*
 * Copyright (c) 1987, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_malloc.c	8.3 (Berkeley) 1/4/94
 */

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/map.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/systm.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_kern.h>

#include <uvm/uvm_extern.h>

/* Static backing storage for the interrupt-safe kmem submap. */
static struct vm_map_intrsafe kmem_map_store;
/* Submap of kernel_map that all malloc()ed kernel memory comes from. */
vm_map_t kmem_map = NULL;

/* Number of pages in kmem_map; set once at the end of kmeminit(). */
int nkmempages;

/* One freelist bucket per power-of-two size class (MINBUCKET..MINBUCKET+15). */
struct kmembuckets bucket[MINBUCKET + 16];
/* Per-type (M_*) allocation statistics, indexed by malloc type. */
struct kmemstats kmemstats[M_LAST];
/* Per-page usage records (bucket index, page count, free count). */
struct kmemusage *kmemusage;
/* Start/end addresses of kmem_map; free() range-checks against these. */
char *kmembase, *kmemlimit;
/* Lazily built "size,size,..." string served by sysctl KERN_MALLOC_BUCKETS. */
char buckstring[16 * sizeof("123456,")];
int buckstring_init = 0;
#if defined(KMEMSTATS) || defined(DIAGNOSTIC) || defined(FFS_SOFTUPDATES)
/* Human-readable names for each M_* type. */
char *memname[] = INITKMEMNAMES;
/* Lazily built "name,name,..." string served by KERN_MALLOC_KMEMNAMES. */
char *memall = NULL;
extern struct lock sysctl_kmemlock;
#endif

#ifdef MALLOC_DEBUG
extern int debug_malloc __P((unsigned long, int, int, void **));
extern int debug_free __P((void *, int));
extern void debug_malloc_init __P((void));
#endif

#ifdef DIAGNOSTIC
/*
 * This structure provides a set of masks to catch unaligned frees.
78 */ 79 long addrmask[] = { 0, 80 0x00000001, 0x00000003, 0x00000007, 0x0000000f, 81 0x0000001f, 0x0000003f, 0x0000007f, 0x000000ff, 82 0x000001ff, 0x000003ff, 0x000007ff, 0x00000fff, 83 0x00001fff, 0x00003fff, 0x00007fff, 0x0000ffff, 84 }; 85 86 /* 87 * The WEIRD_ADDR is used as known text to copy into free objects so 88 * that modifications after frees can be detected. 89 */ 90 #define WEIRD_ADDR ((unsigned) 0xdeadbeef) 91 #define MAX_COPY 32 92 93 /* 94 * Normally the freelist structure is used only to hold the list pointer 95 * for free objects. However, when running with diagnostics, the first 96 * 8 bytes of the structure is unused except for diagnostic information, 97 * and the free list pointer is at offset 8 in the structure. Since the 98 * first 8 bytes is the portion of the structure most often modified, this 99 * helps to detect memory reuse problems and avoid free list corruption. 100 */ 101 struct freelist { 102 int32_t spare0; 103 int16_t type; 104 int16_t spare1; 105 caddr_t next; 106 }; 107 #else /* !DIAGNOSTIC */ 108 struct freelist { 109 caddr_t next; 110 }; 111 #endif /* DIAGNOSTIC */ 112 113 /* 114 * Allocate a block of memory 115 */ 116 void * 117 malloc(size, type, flags) 118 unsigned long size; 119 int type, flags; 120 { 121 register struct kmembuckets *kbp; 122 register struct kmemusage *kup; 123 register struct freelist *freep; 124 long indx, npg, allocsize; 125 int s; 126 caddr_t va, cp, savedlist; 127 #ifdef DIAGNOSTIC 128 int32_t *end, *lp; 129 int copysize; 130 char *savedtype; 131 #endif 132 #ifdef KMEMSTATS 133 register struct kmemstats *ksp = &kmemstats[type]; 134 135 if (((unsigned long)type) > M_LAST) 136 panic("malloc - bogus type"); 137 #endif 138 139 #ifdef MALLOC_DEBUG 140 if (debug_malloc(size, type, flags, (void **)&va)) 141 return ((void *) va); 142 #endif 143 144 indx = BUCKETINDX(size); 145 kbp = &bucket[indx]; 146 s = splimp(); 147 #ifdef KMEMSTATS 148 while (ksp->ks_memuse >= ksp->ks_limit) { 149 if (flags & M_NOWAIT) 
{ 150 splx(s); 151 return ((void *) NULL); 152 } 153 if (ksp->ks_limblocks < 65535) 154 ksp->ks_limblocks++; 155 tsleep((caddr_t)ksp, PSWP+2, memname[type], 0); 156 } 157 ksp->ks_size |= 1 << indx; 158 #endif 159 #ifdef DIAGNOSTIC 160 copysize = 1 << indx < MAX_COPY ? 1 << indx : MAX_COPY; 161 #endif 162 if (kbp->kb_next == NULL) { 163 kbp->kb_last = NULL; 164 if (size > MAXALLOCSAVE) 165 allocsize = round_page(size); 166 else 167 allocsize = 1 << indx; 168 npg = btoc(allocsize); 169 va = (caddr_t) uvm_km_kmemalloc(kmem_map, uvmexp.kmem_object, 170 (vsize_t)ctob(npg), 171 (flags & M_NOWAIT) ? UVM_KMF_NOWAIT : 0); 172 if (va == NULL) { 173 /* 174 * Kmem_malloc() can return NULL, even if it can 175 * wait, if there is no map space available, because 176 * it can't fix that problem. Neither can we, 177 * right now. (We should release pages which 178 * are completely free and which are in buckets 179 * with too many free elements.) 180 */ 181 if ((flags & M_NOWAIT) == 0) 182 panic("malloc: out of space in kmem_map"); 183 splx(s); 184 return ((void *) NULL); 185 } 186 #ifdef KMEMSTATS 187 kbp->kb_total += kbp->kb_elmpercl; 188 #endif 189 kup = btokup(va); 190 kup->ku_indx = indx; 191 if (allocsize > MAXALLOCSAVE) { 192 if (npg > 65535) 193 panic("malloc: allocation too large"); 194 kup->ku_pagecnt = npg; 195 #ifdef KMEMSTATS 196 ksp->ks_memuse += allocsize; 197 #endif 198 goto out; 199 } 200 #ifdef KMEMSTATS 201 kup->ku_freecnt = kbp->kb_elmpercl; 202 kbp->kb_totalfree += kbp->kb_elmpercl; 203 #endif 204 /* 205 * Just in case we blocked while allocating memory, 206 * and someone else also allocated memory for this 207 * bucket, don't assume the list is still empty. 208 */ 209 savedlist = kbp->kb_next; 210 kbp->kb_next = cp = va + (npg * PAGE_SIZE) - allocsize; 211 for (;;) { 212 freep = (struct freelist *)cp; 213 #ifdef DIAGNOSTIC 214 /* 215 * Copy in known text to detect modification 216 * after freeing. 
217 */ 218 end = (int32_t *)&cp[copysize]; 219 for (lp = (int32_t *)cp; lp < end; lp++) 220 *lp = WEIRD_ADDR; 221 freep->type = M_FREE; 222 #endif /* DIAGNOSTIC */ 223 if (cp <= va) 224 break; 225 cp -= allocsize; 226 freep->next = cp; 227 } 228 freep->next = savedlist; 229 if (kbp->kb_last == NULL) 230 kbp->kb_last = (caddr_t)freep; 231 } 232 va = kbp->kb_next; 233 kbp->kb_next = ((struct freelist *)va)->next; 234 #ifdef DIAGNOSTIC 235 freep = (struct freelist *)va; 236 savedtype = (unsigned)freep->type < M_LAST ? 237 memname[freep->type] : "???"; 238 if (kbp->kb_next) { 239 int rv; 240 vaddr_t addr = (vaddr_t)kbp->kb_next; 241 242 vm_map_lock(kmem_map); 243 rv = uvm_map_checkprot(kmem_map, addr, 244 addr + sizeof(struct freelist), VM_PROT_WRITE); 245 vm_map_unlock(kmem_map); 246 247 if (!rv) { 248 printf("%s %d of object %p size 0x%lx %s %s (invalid addr %p)\n", 249 "Data modified on freelist: word", 250 (int32_t *)&kbp->kb_next - (int32_t *)kbp, va, size, 251 "previous type", savedtype, kbp->kb_next); 252 kbp->kb_next = NULL; 253 } 254 } 255 256 /* Fill the fields that we've used with WEIRD_ADDR */ 257 #if BYTE_ORDER == BIG_ENDIAN 258 freep->type = WEIRD_ADDR >> 16; 259 #endif 260 #if BYTE_ORDER == LITTLE_ENDIAN 261 freep->type = (short)WEIRD_ADDR; 262 #endif 263 end = (int32_t *)&freep->next + 264 (sizeof(freep->next) / sizeof(int32_t)); 265 for (lp = (int32_t *)&freep->next; lp < end; lp++) 266 *lp = WEIRD_ADDR; 267 268 /* and check that the data hasn't been modified. 
*/ 269 end = (int32_t *)&va[copysize]; 270 for (lp = (int32_t *)va; lp < end; lp++) { 271 if (*lp == WEIRD_ADDR) 272 continue; 273 printf("%s %d of object %p size 0x%lx %s %s (0x%x != 0x%x)\n", 274 "Data modified on freelist: word", lp - (int32_t *)va, 275 va, size, "previous type", savedtype, *lp, WEIRD_ADDR); 276 break; 277 } 278 279 freep->spare0 = 0; 280 #endif /* DIAGNOSTIC */ 281 #ifdef KMEMSTATS 282 kup = btokup(va); 283 if (kup->ku_indx != indx) 284 panic("malloc: wrong bucket"); 285 if (kup->ku_freecnt == 0) 286 panic("malloc: lost data"); 287 kup->ku_freecnt--; 288 kbp->kb_totalfree--; 289 ksp->ks_memuse += 1 << indx; 290 out: 291 kbp->kb_calls++; 292 ksp->ks_inuse++; 293 ksp->ks_calls++; 294 if (ksp->ks_memuse > ksp->ks_maxused) 295 ksp->ks_maxused = ksp->ks_memuse; 296 #else 297 out: 298 #endif 299 splx(s); 300 return ((void *) va); 301 } 302 303 /* 304 * Free a block of memory allocated by malloc. 305 */ 306 void 307 free(addr, type) 308 void *addr; 309 int type; 310 { 311 register struct kmembuckets *kbp; 312 register struct kmemusage *kup; 313 register struct freelist *freep; 314 long size; 315 int s; 316 #ifdef DIAGNOSTIC 317 caddr_t cp; 318 int32_t *end, *lp; 319 long alloc, copysize; 320 #endif 321 #ifdef KMEMSTATS 322 register struct kmemstats *ksp = &kmemstats[type]; 323 #endif 324 325 #ifdef MALLOC_DEBUG 326 if (debug_free(addr, type)) 327 return; 328 #endif 329 330 #ifdef DIAGNOSTIC 331 if (addr < (void *)kmembase || addr >= (void *)kmemlimit) 332 panic("free: non-malloced addr %p type %s", addr, 333 memname[type]); 334 #endif 335 336 kup = btokup(addr); 337 size = 1 << kup->ku_indx; 338 kbp = &bucket[kup->ku_indx]; 339 s = splimp(); 340 #ifdef DIAGNOSTIC 341 /* 342 * Check for returns of data that do not point to the 343 * beginning of the allocation. 
	 */
	if (size > PAGE_SIZE)
		alloc = addrmask[BUCKETINDX(PAGE_SIZE)];
	else
		alloc = addrmask[kup->ku_indx];
	if (((u_long)addr & alloc) != 0)
		panic("free: unaligned addr %p, size %ld, type %s, mask %ld",
			addr, size, memname[type], alloc);
#endif /* DIAGNOSTIC */
	if (size > MAXALLOCSAVE) {
		/* Large allocation: hand the whole pages back to the map. */
		uvm_km_free(kmem_map, (vaddr_t)addr, ctob(kup->ku_pagecnt));
#ifdef KMEMSTATS
		size = kup->ku_pagecnt << PGSHIFT;
		ksp->ks_memuse -= size;
		kup->ku_indx = 0;
		kup->ku_pagecnt = 0;
		/*
		 * Wake sleepers in malloc() only on the transition from
		 * at-or-over the limit to under it.
		 */
		if (ksp->ks_memuse + size >= ksp->ks_limit &&
		    ksp->ks_memuse < ksp->ks_limit)
			wakeup((caddr_t)ksp);
		ksp->ks_inuse--;
		kbp->kb_total -= 1;
#endif
		splx(s);
		return;
	}
	freep = (struct freelist *)addr;
#ifdef DIAGNOSTIC
	/*
	 * Check for multiple frees. Use a quick check to see if
	 * it looks free before laboriously searching the freelist.
	 */
	if (freep->spare0 == WEIRD_ADDR) {
		for (cp = kbp->kb_next; cp;
		    cp = ((struct freelist *)cp)->next) {
			if (addr != cp)
				continue;
			printf("multiply freed item %p\n", addr);
			panic("free: duplicated free");
		}
	}
	/*
	 * Copy in known text to detect modification after freeing
	 * and to make it look free. Also, save the type being freed
	 * so we can list likely culprit if modification is detected
	 * when the object is reallocated.
	 */
	copysize = size < MAX_COPY ? size : MAX_COPY;
	end = (int32_t *)&((caddr_t)addr)[copysize];
	for (lp = (int32_t *)addr; lp < end; lp++)
		*lp = WEIRD_ADDR;
	freep->type = type;
#endif /* DIAGNOSTIC */
#ifdef KMEMSTATS
	kup->ku_freecnt++;
	if (kup->ku_freecnt >= kbp->kb_elmpercl) {
		/* More frees than the page ever held elements: corruption. */
		if (kup->ku_freecnt > kbp->kb_elmpercl)
			panic("free: multiple frees");
		else if (kbp->kb_totalfree > kbp->kb_highwat)
			kbp->kb_couldfree++;
	}
	kbp->kb_totalfree++;
	ksp->ks_memuse -= size;
	/* Same limit-crossing wakeup as the large-allocation path above. */
	if (ksp->ks_memuse + size >= ksp->ks_limit &&
	    ksp->ks_memuse < ksp->ks_limit)
		wakeup((caddr_t)ksp);
	ksp->ks_inuse--;
#endif
	/* Append to the bucket's freelist (FIFO: reuse is delayed). */
	if (kbp->kb_next == NULL)
		kbp->kb_next = addr;
	else
		((struct freelist *)kbp->kb_last)->next = addr;
	freep->next = NULL;
	kbp->kb_last = addr;
	splx(s);
}

/*
 * Initialize the kernel memory allocator: allocate the kmemusage array,
 * carve kmem_map out of kernel_map, and set up per-bucket and per-type
 * limits.  Called once at boot.
 */
void
kmeminit()
{
#ifdef KMEMSTATS
	long indx;
#endif
	int npg;

#ifdef DIAGNOSTIC
	/* A freelist header must fit in the smallest bucket. */
	if (sizeof(struct freelist) > (1 << MINBUCKET))
		panic("kmeminit: minbucket too small/struct freelist too big");
#endif

	npg = VM_KMEM_SIZE / PAGE_SIZE;
	kmemusage = (struct kmemusage *) uvm_km_zalloc(kernel_map,
		(vsize_t)(npg * sizeof(struct kmemusage)));
	kmem_map = uvm_km_suballoc(kernel_map, (vaddr_t *)&kmembase,
		(vaddr_t *)&kmemlimit, (vsize_t)(npg * PAGE_SIZE),
			VM_MAP_INTRSAFE, FALSE, &kmem_map_store.vmi_map);
#ifdef KMEMSTATS
	for (indx = 0; indx < MINBUCKET + 16; indx++) {
		if (1 << indx >= PAGE_SIZE)
			bucket[indx].kb_elmpercl = 1;
		else
			bucket[indx].kb_elmpercl = PAGE_SIZE / (1 << indx);
		bucket[indx].kb_highwat = 5 * bucket[indx].kb_elmpercl;
	}
	/* Each type may use at most 60% of the whole kmem arena. */
	for (indx = 0; indx < M_LAST; indx++)
		kmemstats[indx].ks_limit = npg * PAGE_SIZE * 6 / 10;
#endif
#ifdef MALLOC_DEBUG
	debug_malloc_init();
#endif

	nkmempages = npg;
}

/*
 * Return kernel malloc statistics information.
462 */ 463 int 464 sysctl_malloc(name, namelen, oldp, oldlenp, newp, newlen, p) 465 int *name; 466 u_int namelen; 467 void *oldp; 468 size_t *oldlenp; 469 void *newp; 470 size_t newlen; 471 struct proc *p; 472 { 473 struct kmembuckets kb; 474 int i, siz; 475 476 if (namelen != 2 && name[0] != KERN_MALLOC_BUCKETS && 477 name[0] != KERN_MALLOC_KMEMNAMES) 478 return (ENOTDIR); /* overloaded */ 479 480 switch (name[0]) { 481 case KERN_MALLOC_BUCKETS: 482 /* Initialize the first time */ 483 if (buckstring_init == 0) { 484 buckstring_init = 1; 485 bzero(buckstring, sizeof(buckstring)); 486 for (siz = 0, i = MINBUCKET; i < MINBUCKET + 16; i++) 487 siz += sprintf(buckstring + siz, 488 "%d,", (u_int)(1<<i)); 489 /* Remove trailing comma */ 490 if (siz) 491 buckstring[siz - 1] = '\0'; 492 } 493 return (sysctl_rdstring(oldp, oldlenp, newp, buckstring)); 494 495 case KERN_MALLOC_BUCKET: 496 bcopy(&bucket[BUCKETINDX(name[1])], &kb, sizeof(kb)); 497 kb.kb_next = kb.kb_last = 0; 498 return (sysctl_rdstruct(oldp, oldlenp, newp, &kb, sizeof(kb))); 499 case KERN_MALLOC_KMEMSTATS: 500 #ifdef KMEMSTATS 501 if ((name[1] < 0) || (name[1] >= M_LAST)) 502 return (EINVAL); 503 return (sysctl_rdstruct(oldp, oldlenp, newp, 504 &kmemstats[name[1]], sizeof(struct kmemstats))); 505 #else 506 return (EOPNOTSUPP); 507 #endif 508 case KERN_MALLOC_KMEMNAMES: 509 #if defined(KMEMSTATS) || defined(DIAGNOSTIC) || defined(FFS_SOFTUPDATES) 510 if (memall == NULL) { 511 int totlen; 512 513 i = lockmgr(&sysctl_kmemlock, LK_EXCLUSIVE, NULL, p); 514 if (i) 515 return (i); 516 517 /* Figure out how large a buffer we need */ 518 for (totlen = 0, i = 0; i < M_LAST; i++) { 519 if (memname[i]) 520 totlen += strlen(memname[i]); 521 totlen++; 522 } 523 memall = malloc(totlen + M_LAST, M_SYSCTL, M_WAITOK); 524 bzero(memall, totlen + M_LAST); 525 for (siz = 0, i = 0; i < M_LAST; i++) 526 siz += sprintf(memall + siz, "%s,", 527 memname[i] ? 
memname[i] : ""); 528 529 /* Remove trailing comma */ 530 if (siz) 531 memall[siz - 1] = '\0'; 532 533 /* Now, convert all spaces to underscores */ 534 for (i = 0; i < totlen; i++) 535 if (memall[i] == ' ') 536 memall[i] = '_'; 537 lockmgr(&sysctl_kmemlock, LK_RELEASE, NULL, p); 538 } 539 return (sysctl_rdstring(oldp, oldlenp, newp, memall)); 540 #else 541 return (EOPNOTSUPP); 542 #endif 543 default: 544 return (EOPNOTSUPP); 545 } 546 /* NOTREACHED */ 547 } 548