/*	$OpenBSD: kern_malloc.c,v 1.38 2001/08/17 23:39:58 art Exp $	*/
/*	$NetBSD: kern_malloc.c,v 1.15.4.2 1996/06/13 17:10:56 cgd Exp $	*/

/*
 * Copyright (c) 1987, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_malloc.c	8.3 (Berkeley) 1/4/94
 */

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/map.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/systm.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_kern.h>

#include <uvm/uvm_extern.h>

/* Backing storage for the interrupt-safe kmem submap (set up in kmeminit). */
static struct vm_map_intrsafe kmem_map_store;
/* Submap of kernel_map from which all malloc() memory is carved. */
vm_map_t kmem_map = NULL;

/* Number of pages in kmem_map; recorded by kmeminit for other subsystems. */
int nkmempages;

/* One freelist bucket per power-of-two size class from MINBUCKET up. */
struct kmembuckets bucket[MINBUCKET + 16];
/* Per-type usage statistics, indexed by the M_* malloc type. */
struct kmemstats kmemstats[M_LAST];
/* Per-page bookkeeping (bucket index, free count / page count). */
struct kmemusage *kmemusage;
/* Bounds of the kmem_map virtual address range, for sanity checks in free(). */
char *kmembase, *kmemlimit;
/* Cached comma-separated list of bucket sizes for sysctl; built lazily. */
char buckstring[16 * sizeof("123456,")];
int buckstring_init = 0;
#if defined(KMEMSTATS) || defined(DIAGNOSTIC) || defined(FFS_SOFTUPDATES)
/* Human-readable names for the M_* types; memall caches the sysctl string. */
char *memname[] = INITKMEMNAMES;
char *memall = NULL;
extern struct lock sysctl_kmemlock;
#endif

#ifdef DIAGNOSTIC
/*
 * This structure provides a set of masks to catch unaligned frees.
 */
long addrmask[] = { 0,
	0x00000001, 0x00000003, 0x00000007, 0x0000000f,
	0x0000001f, 0x0000003f, 0x0000007f, 0x000000ff,
	0x000001ff, 0x000003ff, 0x000007ff, 0x00000fff,
	0x00001fff, 0x00003fff, 0x00007fff, 0x0000ffff,
};

/*
 * The WEIRD_ADDR is used as known text to copy into free objects so
 * that modifications after frees can be detected.
83 */ 84 #define WEIRD_ADDR ((unsigned) 0xdeadbeef) 85 #define MAX_COPY 32 86 87 /* 88 * Normally the freelist structure is used only to hold the list pointer 89 * for free objects. However, when running with diagnostics, the first 90 * 8 bytes of the structure is unused except for diagnostic information, 91 * and the free list pointer is at offset 8 in the structure. Since the 92 * first 8 bytes is the portion of the structure most often modified, this 93 * helps to detect memory reuse problems and avoid free list corruption. 94 */ 95 struct freelist { 96 int32_t spare0; 97 int16_t type; 98 int16_t spare1; 99 caddr_t next; 100 }; 101 #else /* !DIAGNOSTIC */ 102 struct freelist { 103 caddr_t next; 104 }; 105 #endif /* DIAGNOSTIC */ 106 107 /* 108 * Allocate a block of memory 109 */ 110 void * 111 malloc(size, type, flags) 112 unsigned long size; 113 int type, flags; 114 { 115 register struct kmembuckets *kbp; 116 register struct kmemusage *kup; 117 register struct freelist *freep; 118 long indx, npg, allocsize; 119 int s; 120 caddr_t va, cp, savedlist; 121 #ifdef DIAGNOSTIC 122 int32_t *end, *lp; 123 int copysize; 124 char *savedtype; 125 #endif 126 #ifdef KMEMSTATS 127 register struct kmemstats *ksp = &kmemstats[type]; 128 129 if (((unsigned long)type) > M_LAST) 130 panic("malloc - bogus type"); 131 #endif 132 133 #ifdef MALLOC_DEBUG 134 if (debug_malloc(size, type, flags, (void **)&va)) 135 return ((void *) va); 136 #endif 137 138 indx = BUCKETINDX(size); 139 kbp = &bucket[indx]; 140 s = splimp(); 141 #ifdef KMEMSTATS 142 while (ksp->ks_memuse >= ksp->ks_limit) { 143 if (flags & M_NOWAIT) { 144 splx(s); 145 return ((void *) NULL); 146 } 147 if (ksp->ks_limblocks < 65535) 148 ksp->ks_limblocks++; 149 tsleep((caddr_t)ksp, PSWP+2, memname[type], 0); 150 } 151 ksp->ks_size |= 1 << indx; 152 #endif 153 #ifdef DIAGNOSTIC 154 copysize = 1 << indx < MAX_COPY ? 
1 << indx : MAX_COPY; 155 #endif 156 if (kbp->kb_next == NULL) { 157 kbp->kb_last = NULL; 158 if (size > MAXALLOCSAVE) 159 allocsize = round_page(size); 160 else 161 allocsize = 1 << indx; 162 npg = btoc(allocsize); 163 va = (caddr_t) uvm_km_kmemalloc(kmem_map, uvmexp.kmem_object, 164 (vsize_t)ctob(npg), 165 (flags & M_NOWAIT) ? UVM_KMF_NOWAIT : 0); 166 if (va == NULL) { 167 /* 168 * Kmem_malloc() can return NULL, even if it can 169 * wait, if there is no map space available, because 170 * it can't fix that problem. Neither can we, 171 * right now. (We should release pages which 172 * are completely free and which are in buckets 173 * with too many free elements.) 174 */ 175 if ((flags & M_NOWAIT) == 0) 176 panic("malloc: out of space in kmem_map"); 177 splx(s); 178 return ((void *) NULL); 179 } 180 #ifdef KMEMSTATS 181 kbp->kb_total += kbp->kb_elmpercl; 182 #endif 183 kup = btokup(va); 184 kup->ku_indx = indx; 185 if (allocsize > MAXALLOCSAVE) { 186 if (npg > 65535) 187 panic("malloc: allocation too large"); 188 kup->ku_pagecnt = npg; 189 #ifdef KMEMSTATS 190 ksp->ks_memuse += allocsize; 191 #endif 192 goto out; 193 } 194 #ifdef KMEMSTATS 195 kup->ku_freecnt = kbp->kb_elmpercl; 196 kbp->kb_totalfree += kbp->kb_elmpercl; 197 #endif 198 /* 199 * Just in case we blocked while allocating memory, 200 * and someone else also allocated memory for this 201 * bucket, don't assume the list is still empty. 202 */ 203 savedlist = kbp->kb_next; 204 kbp->kb_next = cp = va + (npg * PAGE_SIZE) - allocsize; 205 for (;;) { 206 freep = (struct freelist *)cp; 207 #ifdef DIAGNOSTIC 208 /* 209 * Copy in known text to detect modification 210 * after freeing. 
211 */ 212 end = (int32_t *)&cp[copysize]; 213 for (lp = (int32_t *)cp; lp < end; lp++) 214 *lp = WEIRD_ADDR; 215 freep->type = M_FREE; 216 #endif /* DIAGNOSTIC */ 217 if (cp <= va) 218 break; 219 cp -= allocsize; 220 freep->next = cp; 221 } 222 freep->next = savedlist; 223 if (kbp->kb_last == NULL) 224 kbp->kb_last = (caddr_t)freep; 225 } 226 va = kbp->kb_next; 227 kbp->kb_next = ((struct freelist *)va)->next; 228 #ifdef DIAGNOSTIC 229 freep = (struct freelist *)va; 230 savedtype = (unsigned)freep->type < M_LAST ? 231 memname[freep->type] : "???"; 232 if (kbp->kb_next) { 233 int rv; 234 vaddr_t addr = (vaddr_t)kbp->kb_next; 235 236 vm_map_lock(kmem_map); 237 rv = uvm_map_checkprot(kmem_map, addr, 238 addr + sizeof(struct freelist), VM_PROT_WRITE); 239 vm_map_unlock(kmem_map); 240 241 if (!rv) { 242 printf("%s %d of object %p size 0x%lx %s %s (invalid addr %p)\n", 243 "Data modified on freelist: word", 244 (int32_t *)&kbp->kb_next - (int32_t *)kbp, va, size, 245 "previous type", savedtype, kbp->kb_next); 246 kbp->kb_next = NULL; 247 } 248 } 249 250 /* Fill the fields that we've used with WEIRD_ADDR */ 251 #if BYTE_ORDER == BIG_ENDIAN 252 freep->type = WEIRD_ADDR >> 16; 253 #endif 254 #if BYTE_ORDER == LITTLE_ENDIAN 255 freep->type = (short)WEIRD_ADDR; 256 #endif 257 end = (int32_t *)&freep->next + 258 (sizeof(freep->next) / sizeof(int32_t)); 259 for (lp = (int32_t *)&freep->next; lp < end; lp++) 260 *lp = WEIRD_ADDR; 261 262 /* and check that the data hasn't been modified. 
*/ 263 end = (int32_t *)&va[copysize]; 264 for (lp = (int32_t *)va; lp < end; lp++) { 265 if (*lp == WEIRD_ADDR) 266 continue; 267 printf("%s %d of object %p size 0x%lx %s %s (0x%x != 0x%x)\n", 268 "Data modified on freelist: word", lp - (int32_t *)va, 269 va, size, "previous type", savedtype, *lp, WEIRD_ADDR); 270 break; 271 } 272 273 freep->spare0 = 0; 274 #endif /* DIAGNOSTIC */ 275 #ifdef KMEMSTATS 276 kup = btokup(va); 277 if (kup->ku_indx != indx) 278 panic("malloc: wrong bucket"); 279 if (kup->ku_freecnt == 0) 280 panic("malloc: lost data"); 281 kup->ku_freecnt--; 282 kbp->kb_totalfree--; 283 ksp->ks_memuse += 1 << indx; 284 out: 285 kbp->kb_calls++; 286 ksp->ks_inuse++; 287 ksp->ks_calls++; 288 if (ksp->ks_memuse > ksp->ks_maxused) 289 ksp->ks_maxused = ksp->ks_memuse; 290 #else 291 out: 292 #endif 293 splx(s); 294 return ((void *) va); 295 } 296 297 /* 298 * Free a block of memory allocated by malloc. 299 */ 300 void 301 free(addr, type) 302 void *addr; 303 int type; 304 { 305 register struct kmembuckets *kbp; 306 register struct kmemusage *kup; 307 register struct freelist *freep; 308 long size; 309 int s; 310 #ifdef DIAGNOSTIC 311 caddr_t cp; 312 int32_t *end, *lp; 313 long alloc, copysize; 314 #endif 315 #ifdef KMEMSTATS 316 register struct kmemstats *ksp = &kmemstats[type]; 317 #endif 318 319 #ifdef MALLOC_DEBUG 320 if (debug_free(addr, type)) 321 return; 322 #endif 323 324 #ifdef DIAGNOSTIC 325 if (addr < (void *)kmembase || addr >= (void *)kmemlimit) 326 panic("free: non-malloced addr %p type %s", addr, 327 memname[type]); 328 #endif 329 330 kup = btokup(addr); 331 size = 1 << kup->ku_indx; 332 kbp = &bucket[kup->ku_indx]; 333 s = splimp(); 334 #ifdef DIAGNOSTIC 335 /* 336 * Check for returns of data that do not point to the 337 * beginning of the allocation. 
338 */ 339 if (size > PAGE_SIZE) 340 alloc = addrmask[BUCKETINDX(PAGE_SIZE)]; 341 else 342 alloc = addrmask[kup->ku_indx]; 343 if (((u_long)addr & alloc) != 0) 344 panic("free: unaligned addr %p, size %ld, type %s, mask %ld", 345 addr, size, memname[type], alloc); 346 #endif /* DIAGNOSTIC */ 347 if (size > MAXALLOCSAVE) { 348 uvm_km_free(kmem_map, (vaddr_t)addr, ctob(kup->ku_pagecnt)); 349 #ifdef KMEMSTATS 350 size = kup->ku_pagecnt << PGSHIFT; 351 ksp->ks_memuse -= size; 352 kup->ku_indx = 0; 353 kup->ku_pagecnt = 0; 354 if (ksp->ks_memuse + size >= ksp->ks_limit && 355 ksp->ks_memuse < ksp->ks_limit) 356 wakeup((caddr_t)ksp); 357 ksp->ks_inuse--; 358 kbp->kb_total -= 1; 359 #endif 360 splx(s); 361 return; 362 } 363 freep = (struct freelist *)addr; 364 #ifdef DIAGNOSTIC 365 /* 366 * Check for multiple frees. Use a quick check to see if 367 * it looks free before laboriously searching the freelist. 368 */ 369 if (freep->spare0 == WEIRD_ADDR) { 370 for (cp = kbp->kb_next; cp; 371 cp = ((struct freelist *)cp)->next) { 372 if (addr != cp) 373 continue; 374 printf("multiply freed item %p\n", addr); 375 panic("free: duplicated free"); 376 } 377 } 378 /* 379 * Copy in known text to detect modification after freeing 380 * and to make it look free. Also, save the type being freed 381 * so we can list likely culprit if modification is detected 382 * when the object is reallocated. 383 */ 384 copysize = size < MAX_COPY ? 
size : MAX_COPY; 385 end = (int32_t *)&((caddr_t)addr)[copysize]; 386 for (lp = (int32_t *)addr; lp < end; lp++) 387 *lp = WEIRD_ADDR; 388 freep->type = type; 389 #endif /* DIAGNOSTIC */ 390 #ifdef KMEMSTATS 391 kup->ku_freecnt++; 392 if (kup->ku_freecnt >= kbp->kb_elmpercl) { 393 if (kup->ku_freecnt > kbp->kb_elmpercl) 394 panic("free: multiple frees"); 395 else if (kbp->kb_totalfree > kbp->kb_highwat) 396 kbp->kb_couldfree++; 397 } 398 kbp->kb_totalfree++; 399 ksp->ks_memuse -= size; 400 if (ksp->ks_memuse + size >= ksp->ks_limit && 401 ksp->ks_memuse < ksp->ks_limit) 402 wakeup((caddr_t)ksp); 403 ksp->ks_inuse--; 404 #endif 405 if (kbp->kb_next == NULL) 406 kbp->kb_next = addr; 407 else 408 ((struct freelist *)kbp->kb_last)->next = addr; 409 freep->next = NULL; 410 kbp->kb_last = addr; 411 splx(s); 412 } 413 414 /* 415 * Initialize the kernel memory allocator 416 */ 417 void 418 kmeminit() 419 { 420 #ifdef KMEMSTATS 421 long indx; 422 #endif 423 int npg; 424 425 #ifdef DIAGNOSTIC 426 if (sizeof(struct freelist) > (1 << MINBUCKET)) 427 panic("kmeminit: minbucket too small/struct freelist too big"); 428 #endif 429 430 npg = VM_KMEM_SIZE / PAGE_SIZE; 431 kmemusage = (struct kmemusage *) uvm_km_zalloc(kernel_map, 432 (vsize_t)(npg * sizeof(struct kmemusage))); 433 kmem_map = uvm_km_suballoc(kernel_map, (vaddr_t *)&kmembase, 434 (vaddr_t *)&kmemlimit, (vsize_t)(npg * PAGE_SIZE), 435 VM_MAP_INTRSAFE, FALSE, &kmem_map_store.vmi_map); 436 #ifdef KMEMSTATS 437 for (indx = 0; indx < MINBUCKET + 16; indx++) { 438 if (1 << indx >= PAGE_SIZE) 439 bucket[indx].kb_elmpercl = 1; 440 else 441 bucket[indx].kb_elmpercl = PAGE_SIZE / (1 << indx); 442 bucket[indx].kb_highwat = 5 * bucket[indx].kb_elmpercl; 443 } 444 for (indx = 0; indx < M_LAST; indx++) 445 kmemstats[indx].ks_limit = npg * PAGE_SIZE * 6 / 10; 446 #endif 447 #ifdef MALLOC_DEBUG 448 debug_malloc_init(); 449 #endif 450 451 nkmempages = npg; 452 } 453 454 /* 455 * Return kernel malloc statistics information. 
 */
int
sysctl_malloc(name, namelen, oldp, oldlenp, newp, newlen, p)
	int *name;	/* sysctl name vector; name[0] selects the query */
	u_int namelen;
	void *oldp;	/* user buffer for the result (may be NULL) */
	size_t *oldlenp;
	void *newp;	/* writes are not supported; non-NULL fails downstream */
	size_t newlen;
	struct proc *p;
{
	struct kmembuckets kb;
	int i, siz;

	/*
	 * Only KERN_MALLOC_BUCKETS and KERN_MALLOC_KMEMNAMES take a
	 * single name component; everything else needs name[1] too.
	 */
	if (namelen != 2 && name[0] != KERN_MALLOC_BUCKETS &&
	    name[0] != KERN_MALLOC_KMEMNAMES)
		return (ENOTDIR);		/* overloaded */

	switch (name[0]) {
	case KERN_MALLOC_BUCKETS:
		/* Initialize the first time */
		if (buckstring_init == 0) {
			buckstring_init = 1;
			bzero(buckstring, sizeof(buckstring));
			for (siz = 0, i = MINBUCKET; i < MINBUCKET + 16; i++)
				siz += sprintf(buckstring + siz,
				    "%d,", (u_int)(1<<i));
			/* Remove trailing comma */
			if (siz)
				buckstring[siz - 1] = '\0';
		}
		return (sysctl_rdstring(oldp, oldlenp, newp, buckstring));

	case KERN_MALLOC_BUCKET:
		/* Copy out one bucket, with its kernel pointers scrubbed. */
		bcopy(&bucket[BUCKETINDX(name[1])], &kb, sizeof(kb));
		kb.kb_next = kb.kb_last = 0;
		return (sysctl_rdstruct(oldp, oldlenp, newp, &kb, sizeof(kb)));
	case KERN_MALLOC_KMEMSTATS:
#ifdef KMEMSTATS
		if ((name[1] < 0) || (name[1] >= M_LAST))
			return (EINVAL);
		return (sysctl_rdstruct(oldp, oldlenp, newp,
		    &kmemstats[name[1]], sizeof(struct kmemstats)));
#else
		return (EOPNOTSUPP);
#endif
	case KERN_MALLOC_KMEMNAMES:
#if defined(KMEMSTATS) || defined(DIAGNOSTIC) || defined(FFS_SOFTUPDATES)
		/*
		 * Build the comma-separated type-name list once, under
		 * the sysctl kmem lock, and cache it in memall.
		 */
		if (memall == NULL) {
			int totlen;

			i = lockmgr(&sysctl_kmemlock, LK_EXCLUSIVE, NULL, p);
			if (i)
				return (i);

			/* Figure out how large a buffer we need */
			for (totlen = 0, i = 0; i < M_LAST; i++) {
				if (memname[i])
					totlen += strlen(memname[i]);
				totlen++;
			}
			memall = malloc(totlen + M_LAST, M_SYSCTL, M_WAITOK);
			bzero(memall, totlen + M_LAST);
			for (siz = 0, i = 0; i < M_LAST; i++)
				siz += sprintf(memall + siz, "%s,",
				    memname[i] ? memname[i] : "");

			/* Remove trailing comma */
			if (siz)
				memall[siz - 1] = '\0';

			/* Now, convert all spaces to underscores */
			for (i = 0; i < totlen; i++)
				if (memall[i] == ' ')
					memall[i] = '_';
			lockmgr(&sysctl_kmemlock, LK_RELEASE, NULL, p);
		}
		return (sysctl_rdstring(oldp, oldlenp, newp, memall));
#else
		return (EOPNOTSUPP);
#endif
	default:
		return (EOPNOTSUPP);
	}
	/* NOTREACHED */
}