/*	$NetBSD: malloc.c,v 1.16 1999/01/29 08:11:36 kleink Exp $	*/

/*
 * Copyright (c) 1983, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#if defined(LIBC_SCCS) && !defined(lint)
#if 0
static char sccsid[] = "@(#)malloc.c	8.1 (Berkeley) 6/4/93";
#else
__RCSID("$NetBSD: malloc.c,v 1.16 1999/01/29 08:11:36 kleink Exp $");
#endif
#endif /* LIBC_SCCS and not lint */

/*
 * malloc.c (Caltech) 2/21/82
 * Chris Kingsley, kingsley@cit-20.
 *
 * This is a very fast storage allocator.  It allocates blocks of a small
 * number of different sizes, and keeps free lists of each size.  Blocks that
 * don't exactly fit are passed up to the next larger size.  In this
 * implementation, the available sizes are 2^n-4 (or 2^n-10) bytes long.
 * This is designed for use in a virtual memory environment.
 */

#include "namespace.h"
#include <sys/types.h>
#if defined(DEBUG) || defined(RCHECK)
#include <sys/uio.h>
#endif
#if defined(RCHECK) || defined(MSTATS)
#include <stdio.h>
#endif
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include "reentrant.h"


/*
 * The overhead on a block is at least 4 bytes.  When free, this space
 * contains a pointer to the next free block, and the bottom two bits must
 * be zero.  When in use, the first byte is set to MAGIC, and the second
 * byte is the size index.  The remaining bytes are for alignment.
 * If range checking is enabled then a second word holds the size of the
 * requested block, less 1, rounded up to a multiple of sizeof(RMAGIC).
 * The order of elements is critical: ov_magic must overlay the low order
 * bits of ov_next, and ov_magic can not be a valid ov_next bit pattern.
 */
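/*
 * Rough sketch of the layout just described (assuming 32-bit pointers and
 * RCHECK disabled; exact sizes are platform dependent):
 *
 *	free block:	[ ov_next (4 bytes)          | unused payload ]
 *	in-use block:	[ MAGIC | index | pad | pad  | caller's data  ]
 *
 * With RCHECK enabled, the header additionally records ovu_rmagic and the
 * rounded request size (ovu_size), and malloc() writes a trailing RMAGIC
 * word just past that size so free()/realloc() can detect overruns.
 */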
union overhead {
        union overhead *ov_next;        /* when free */
        struct {
                u_char  ovu_magic;      /* magic number */
                u_char  ovu_index;      /* bucket # */
#ifdef RCHECK
                u_short ovu_rmagic;     /* range magic number */
                u_long  ovu_size;       /* actual block size */
#endif
        } ovu;
#define ov_magic        ovu.ovu_magic
#define ov_index        ovu.ovu_index
#define ov_rmagic       ovu.ovu_rmagic
#define ov_size         ovu.ovu_size
};

#define MAGIC           0xef            /* magic # on accounting info */
#ifdef RCHECK
#define RMAGIC          0x5555          /* magic # on range info */
#endif

#ifdef RCHECK
#define RSLOP           sizeof (u_short)
#else
#define RSLOP           0
#endif

/*
 * nextf[i] is the pointer to the next free block of size 2^(i+3).  The
 * smallest allocatable block is 8 bytes.  The overhead information
 * precedes the data area returned to the user.
 */
#define NBUCKETS 30
static union overhead *nextf[NBUCKETS];

static long pagesz;                     /* page size */
static int pagebucket;                  /* page size bucket */

#ifdef MSTATS
/*
 * nmalloc[i] is the difference between the number of mallocs and frees
 * for a given block size.
 */
static u_int nmalloc[NBUCKETS];
#endif

#ifdef _REENT
static mutex_t malloc_mutex = MUTEX_INITIALIZER;
#endif

static void morecore __P((int));
static int findbucket __P((union overhead *, int));
#ifdef MSTATS
void mstats __P((const char *));
#endif

#if defined(DEBUG) || defined(RCHECK)
#define ASSERT(p)   if (!(p)) botch(__STRING(p))

static void botch __P((const char *));

/*
 * NOTE: since this may be called while malloc_mutex is locked, stdio must not
 * be used in this function.
 */
static void
botch(s)
        const char *s;
{
        struct iovec iov[3];

        iov[0].iov_base = "\nassertion botched: ";
        iov[0].iov_len  = 20;
        iov[1].iov_base = (void *)s;
        iov[1].iov_len  = strlen(s);
        iov[2].iov_base = "\n";
        iov[2].iov_len  = 1;

        /*
         * This place deserves a word of warning: a cancellation point will
         * occur when executing writev(), and we might be still owning
         * malloc_mutex.  At this point we need to disable cancellation
         * until `after' abort() because i) establishing a cancellation handler
         * might, depending on the implementation, result in another malloc()
         * to be executed, and ii) it is really not desirable to let execution
         * continue.  `Fix me.'
         *
         * Note that holding mutex_lock during abort() is safe.
         */

        (void)writev(STDERR_FILENO, iov, 3);
        abort();
}
#else
#define ASSERT(p)
#endif
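/*
 * Worked example of the bucket selection done in malloc() below, assuming
 * a 4-byte overhead (32-bit pointers, RCHECK off) and a 4096-byte page:
 * a request for 100 bytes needs 104 bytes including the header, so it is
 * rounded up to the 128-byte bucket (index 4, since 2^(4+3) == 128) and
 * the caller can use up to 124 bytes of that block.
 */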
void *
malloc(nbytes)
        size_t nbytes;
{
        union overhead *op;
        int bucket;
        long n;
        unsigned amt;

        mutex_lock(&malloc_mutex);

        /*
         * First time malloc is called, setup page size and
         * align break pointer so all data will be page aligned.
         */
        if (pagesz == 0) {
                pagesz = n = getpagesize();
                ASSERT(pagesz > 0);
                op = (union overhead *)(void *)sbrk(0);
                n = n - sizeof (*op) - ((long)op & (n - 1));
                if (n < 0)
                        n += pagesz;
                if (n) {
                        if (sbrk((int)n) == (void *)-1) {
                                mutex_unlock(&malloc_mutex);
                                return (NULL);
                        }
                }
                bucket = 0;
                amt = 8;
                while (pagesz > amt) {
                        amt <<= 1;
                        bucket++;
                }
                pagebucket = bucket;
        }
        /*
         * Convert amount of memory requested into closest block size
         * stored in hash buckets which satisfies request.
         * Account for space used per block for accounting.
         */
        if (nbytes <= (n = pagesz - sizeof (*op) - RSLOP)) {
#ifndef RCHECK
                amt = 8;        /* size of first bucket */
                bucket = 0;
#else
                amt = 16;       /* size of first bucket */
                bucket = 1;
#endif
                n = -((long)sizeof (*op) + RSLOP);
        } else {
                amt = (unsigned)pagesz;
                bucket = pagebucket;
        }
        while (nbytes > amt + n) {
                amt <<= 1;
                if (amt == 0) {
                        /* block size overflowed: release the lock and fail */
                        mutex_unlock(&malloc_mutex);
                        return (NULL);
                }
                bucket++;
        }
        /*
         * If nothing in hash bucket right now,
         * request more memory from the system.
         */
        if ((op = nextf[bucket]) == NULL) {
                morecore(bucket);
                if ((op = nextf[bucket]) == NULL) {
                        mutex_unlock(&malloc_mutex);
                        return (NULL);
                }
        }
        /* remove from linked list */
        nextf[bucket] = op->ov_next;
        op->ov_magic = MAGIC;
        op->ov_index = bucket;
#ifdef MSTATS
        nmalloc[bucket]++;
#endif
        mutex_unlock(&malloc_mutex);
#ifdef RCHECK
        /*
         * Record allocated size of block and
         * bound space with magic numbers.
         */
        op->ov_size = (nbytes + RSLOP - 1) & ~(RSLOP - 1);
        op->ov_rmagic = RMAGIC;
        *(u_short *)((caddr_t)(op + 1) + op->ov_size) = RMAGIC;
#endif
        return ((void *)(op + 1));
}
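/*
 * Example of what morecore() below does for a small bucket, assuming a
 * 4096-byte page: for bucket 4 (128-byte blocks) it sbrk()s one page,
 * carves it into 4096/128 = 32 blocks, and chains them all onto nextf[4];
 * malloc() then pops blocks off that list until it is empty again.
 */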
/*
 * Allocate more memory to the indicated bucket.
 */
static void
morecore(bucket)
        int bucket;
{
        union overhead *op;
        long sz;                /* size of desired block */
        long amt;               /* amount to allocate */
        long nblks;             /* how many blocks we get */

        /*
         * sbrk_size <= 0 only for big, FLUFFY, requests (about
         * 2^30 bytes on a VAX, I think) or for a negative arg.
         */
        sz = 1 << (bucket + 3);
#ifdef DEBUG
        ASSERT(sz > 0);
#else
        if (sz <= 0)
                return;
#endif
        if (sz < pagesz) {
                amt = pagesz;
                nblks = amt / sz;
        } else {
                amt = sz + pagesz;
                nblks = 1;
        }
        op = (union overhead *)(void *)sbrk((int)amt);
        /* no more room! */
        if ((long)op == -1)
                return;
        /*
         * Add new memory allocated to that on
         * free list for this hash bucket.
         */
        nextf[bucket] = op;
        while (--nblks > 0) {
                op->ov_next =
                    (union overhead *)(void *)((caddr_t)(void *)op+(size_t)sz);
                op = op->ov_next;
        }
}

void
free(cp)
        void *cp;
{
        long size;
        union overhead *op;

        if (cp == NULL)
                return;
        op = (union overhead *)(void *)((caddr_t)cp - sizeof (union overhead));
#ifdef DEBUG
        ASSERT(op->ov_magic == MAGIC);          /* make sure it was in use */
#else
        if (op->ov_magic != MAGIC)
                return;                         /* sanity */
#endif
#ifdef RCHECK
        ASSERT(op->ov_rmagic == RMAGIC);
        ASSERT(*(u_short *)((caddr_t)(op + 1) + op->ov_size) == RMAGIC);
#endif
        size = op->ov_index;
        ASSERT(size < NBUCKETS);
        mutex_lock(&malloc_mutex);
        op->ov_next = nextf[(unsigned int)size];/* also clobbers ov_magic */
        nextf[(unsigned int)size] = op;
#ifdef MSTATS
        nmalloc[(size_t)size]--;
#endif
        mutex_unlock(&malloc_mutex);
}
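/*
 * Note that free() above does no coalescing and returns nothing to the
 * system: a freed block is simply pushed back onto the free list for its
 * bucket (ov_index), with the link pointer overwriting the magic byte.
 * For example, freeing a 128-byte block just makes it the new head of
 * nextf[4].
 */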
/*
 * When a program attempts "storage compaction" as mentioned in the
 * old malloc man page, it realloc's an already freed block.  Usually
 * this is the last block it freed; occasionally it might be farther
 * back.  We have to search all the free lists for the block in order
 * to determine its bucket: 1st we make one pass thru the lists
 * checking only the first block in each; if that fails we search
 * ``__realloc_srchlen'' blocks in each list for a match (the variable
 * is extern so the caller can modify it).  If that fails we just copy
 * however many bytes was given to realloc() and hope it's not huge.
 */
int __realloc_srchlen = 4;      /* 4 should be plenty, -1 =>'s whole list */

void *
realloc(cp, nbytes)
        void *cp;
        size_t nbytes;
{
        u_long onb;
        long i;
        union overhead *op;
        char *res;
        int was_alloced = 0;

        if (cp == NULL)
                return (malloc(nbytes));
        if (nbytes == 0) {
                free (cp);
                return (NULL);
        }
        op = (union overhead *)(void *)((caddr_t)cp - sizeof (union overhead));
        mutex_lock(&malloc_mutex);
        if (op->ov_magic == MAGIC) {
                was_alloced++;
                i = op->ov_index;
        } else {
                /*
                 * Already free, doing "compaction".
                 *
                 * Search for the old block of memory on the
                 * free list.  First, check the most common
                 * case (last element free'd), then (this failing)
                 * the last ``__realloc_srchlen'' items free'd.
                 * If all lookups fail, then assume the size of
                 * the memory block being realloc'd is the
                 * largest possible (so that all "nbytes" of new
                 * memory are copied into).  Note that this could cause
                 * a memory fault if the old area was tiny, and the moon
                 * is gibbous.  However, that is very unlikely.
                 */
                if ((i = findbucket(op, 1)) < 0 &&
                    (i = findbucket(op, __realloc_srchlen)) < 0)
                        i = NBUCKETS;
        }
        onb = (u_long)1 << (u_long)(i + 3);
        if (onb < pagesz)
                onb -= sizeof (*op) + RSLOP;
        else
                onb += pagesz - sizeof (*op) - RSLOP;
        /* avoid the copy if same size block */
        if (was_alloced) {
                if (i) {
                        i = (long)1 << (long)(i + 2);
                        if (i < pagesz)
                                i -= sizeof (*op) + RSLOP;
                        else
                                i += pagesz - sizeof (*op) - RSLOP;
                }
                if (nbytes <= onb && nbytes > i) {
#ifdef RCHECK
                        op->ov_size = (nbytes + RSLOP - 1) & ~(RSLOP - 1);
                        *(u_short *)((caddr_t)(op + 1) + op->ov_size) = RMAGIC;
#endif
                        mutex_unlock(&malloc_mutex);
                        return (cp);

                }
#ifndef _REENT
                else
                        free(cp);
#endif
        }
        mutex_unlock(&malloc_mutex);
        if ((res = malloc(nbytes)) == NULL) {
#ifdef _REENT
                free(cp);
#endif
                return (NULL);
        }
#ifndef _REENT
        if (cp != res)          /* common optimization if "compacting" */
                (void)memmove(res, cp, (size_t)((nbytes < onb) ? nbytes : onb));
#else
        (void)memmove(res, cp, (size_t)((nbytes < onb) ? nbytes : onb));
        free(cp);
#endif
        return (res);
}

/*
 * Search ``srchlen'' elements of each free list for a block whose
 * header starts at ``freep''.  If srchlen is -1 search the whole list.
 * Return bucket number, or -1 if not found.
 */
static int
findbucket(freep, srchlen)
        union overhead *freep;
        int srchlen;
{
        union overhead *p;
        int i, j;

        for (i = 0; i < NBUCKETS; i++) {
                j = 0;
                for (p = nextf[i]; p && j != srchlen; p = p->ov_next) {
                        if (p == freep)
                                return (i);
                        j++;
                }
        }
        return (-1);
}

#ifdef MSTATS
/*
 * mstats - print out statistics about malloc
 *
 * Prints two lines of numbers, one showing the length of the free list
 * for each size category, the second showing the number of mallocs -
 * frees for each size category.
 */
void
mstats(s)
        char *s;
{
        int i, j;
        union overhead *p;
        int totfree = 0,
        totused = 0;

        fprintf(stderr, "Memory allocation statistics %s\nfree:\t", s);
        for (i = 0; i < NBUCKETS; i++) {
                for (j = 0, p = nextf[i]; p; p = p->ov_next, j++)
                        ;
                fprintf(stderr, " %d", j);
                totfree += j * (1 << (i + 3));
        }
        fprintf(stderr, "\nused:\t");
        for (i = 0; i < NBUCKETS; i++) {
                fprintf(stderr, " %d", nmalloc[i]);
                totused += nmalloc[i] * (1 << (i + 3));
        }
        fprintf(stderr, "\n\tTotal in use: %d, total free: %d\n",
            totused, totfree);
}
#endif
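/*
 * Usage sketch (only applies when the library is built with -DMSTATS):
 * a test program could call, e.g.,
 *
 *	mstats("after test run");
 *
 * to print one line with the free-list length for each bucket and one
 * line with the outstanding allocation count for each bucket, followed
 * by the in-use and free byte totals.
 */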