xref: /netbsd-src/lib/libbsdmalloc/malloc.c (revision 001c68bd94f75ce9270b69227c4199fbf34ee396)
1 /*	$NetBSD: malloc.c,v 1.1 2003/04/21 22:21:06 elric Exp $	*/
2 
3 /*
4  * Copyright (c) 1983, 1993
5  *	The Regents of the University of California.  All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. All advertising materials mentioning features or use of this software
16  *    must display the following acknowledgement:
17  *	This product includes software developed by the University of
18  *	California, Berkeley and its contributors.
19  * 4. Neither the name of the University nor the names of its contributors
20  *    may be used to endorse or promote products derived from this software
21  *    without specific prior written permission.
22  *
23  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
24  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
27  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33  * SUCH DAMAGE.
34  */
35 
36 #include <sys/cdefs.h>
37 #if defined(LIBC_SCCS) && !defined(lint)
38 #if 0
39 static char sccsid[] = "@(#)malloc.c	8.1 (Berkeley) 6/4/93";
40 #else
41 __RCSID("$NetBSD: malloc.c,v 1.1 2003/04/21 22:21:06 elric Exp $");
42 #endif
43 #endif /* LIBC_SCCS and not lint */
44 
45 /*
46  * malloc.c (Caltech) 2/21/82
47  * Chris Kingsley, kingsley@cit-20.
48  *
49  * This is a very fast storage allocator.  It allocates blocks of a small
50  * number of different sizes, and keeps free lists of each size.  Blocks that
51  * don't exactly fit are passed up to the next larger size.  In this
52  * implementation, the available sizes are 2^n-4 (or 2^n-10) bytes long.
53  * This is designed for use in a virtual memory environment.
54  */
55 
56 #include <sys/types.h>
57 #if defined(DEBUG) || defined(RCHECK)
58 #include <sys/uio.h>
59 #endif
60 #if defined(RCHECK) || defined(MSTATS)
61 #include <stdio.h>
62 #endif
63 #include <stdlib.h>
64 #include <string.h>
65 #include <unistd.h>
66 #include "reentrant.h"
67 
68 
69 /*
70  * The overhead on a block is at least 4 bytes.  When free, this space
71  * contains a pointer to the next free block, and the bottom two bits must
72  * be zero.  When in use, the first byte is set to MAGIC, and the second
73  * byte is the size index.  The remaining bytes are for alignment.
74  * If range checking is enabled then a second word holds the size of the
75  * requested block, less 1, rounded up to a multiple of sizeof(RMAGIC).
76  * The order of elements is critical: ov_magic must overlay the low order
77  * bits of ov_next, and ov_magic can not be a valid ov_next bit pattern.
78  */
union	overhead {
	union	overhead *ov_next;	/* when free: link to next free block */
	struct {
		u_char	ovu_magic;	/* magic number (MAGIC while in use) */
		u_char	ovu_index;	/* bucket # the block belongs to */
#ifdef RCHECK
		u_short	ovu_rmagic;	/* range magic number */
		u_long	ovu_size;	/* actual block size */
#endif
	} ovu;
/* convenience accessors for the in-use header fields */
#define	ov_magic	ovu.ovu_magic
#define	ov_index	ovu.ovu_index
#define	ov_rmagic	ovu.ovu_rmagic
#define	ov_size		ovu.ovu_size
};

#define	MAGIC		0xef		/* magic # on accounting info */
#ifdef RCHECK
#define RMAGIC		0x5555		/* magic # on range info */
#endif

/* RSLOP: trailing bytes reserved for the range-check magic (0 if disabled) */
#ifdef RCHECK
#define	RSLOP		sizeof (u_short)
#else
#define	RSLOP		0
#endif
105 
/*
 * nextf[i] is the pointer to the next free block of size 2^(i+3).  The
 * smallest allocatable block is 8 bytes.  The overhead information
 * precedes the data area returned to the user.
 */
#define	NBUCKETS 30
static	union overhead *nextf[NBUCKETS];

static	long pagesz;			/* page size; 0 until first malloc() */
static	int pagebucket;			/* bucket whose block size equals pagesz */

#ifdef MSTATS
/*
 * nmalloc[i] is the difference between the number of mallocs and frees
 * for a given block size.
 */
static	u_int nmalloc[NBUCKETS];
#endif

#ifdef _REENT
/* serializes all access to nextf[] (and nmalloc[]) */
static	mutex_t malloc_mutex = MUTEX_INITIALIZER;
#endif

static void morecore __P((int));	/* refill a bucket's free list via sbrk */
static int findbucket __P((union overhead *, int));
#ifdef MSTATS
void mstats __P((const char *));
#endif
134 
#if defined(DEBUG) || defined(RCHECK)
/* ASSERT aborts the process (via botch) when its condition is false. */
#define	ASSERT(p)   if (!(p)) botch(__STRING(p))

static void botch __P((const char *));

/*
 * NOTE: since this may be called while malloc_mutex is locked, stdio must not
 *       be used in this function.
 */
static void
botch(s)
	const char *s;
{
	struct iovec iov[3];

	/* emit "\nassertion botched: <s>\n" with a single writev() call */
	iov[0].iov_base	= "\nassertion botched: ";
	iov[0].iov_len	= 20;	/* length of the literal above */
	iov[1].iov_base	= (void *)s;
	iov[1].iov_len	= strlen(s);
	iov[2].iov_base	= "\n";
	iov[2].iov_len	= 1;

	/*
	 * This place deserves a word of warning: a cancellation point will
	 * occur when executing writev(), and we might be still owning
	 * malloc_mutex.  At this point we need to disable cancellation
	 * until `after' abort() because i) establishing a cancellation handler
	 * might, depending on the implementation, result in another malloc()
	 * to be executed, and ii) it is really not desirable to let execution
	 * continue.  `Fix me.'
	 *
	 * Note that holding mutex_lock during abort() is safe.
	 */

	(void)writev(STDERR_FILENO, iov, 3);
	abort();
}
#else
#define	ASSERT(p)
#endif
175 
176 void *
177 malloc(nbytes)
178 	size_t nbytes;
179 {
180   	union overhead *op;
181 	int bucket;
182   	long n;
183 	unsigned amt;
184 
185 	mutex_lock(&malloc_mutex);
186 
187 	/*
188 	 * First time malloc is called, setup page size and
189 	 * align break pointer so all data will be page aligned.
190 	 */
191 	if (pagesz == 0) {
192 		pagesz = n = getpagesize();
193 		ASSERT(pagesz > 0);
194 		op = (union overhead *)(void *)sbrk(0);
195   		n = n - sizeof (*op) - ((long)op & (n - 1));
196 		if (n < 0)
197 			n += pagesz;
198 		if (n) {
199 			if (sbrk((int)n) == (void *)-1) {
200 				mutex_unlock(&malloc_mutex);
201 				return (NULL);
202 			}
203 		}
204 		bucket = 0;
205 		amt = 8;
206 		while (pagesz > amt) {
207 			amt <<= 1;
208 			bucket++;
209 		}
210 		pagebucket = bucket;
211 	}
212 	/*
213 	 * Convert amount of memory requested into closest block size
214 	 * stored in hash buckets which satisfies request.
215 	 * Account for space used per block for accounting.
216 	 */
217 	if (nbytes <= (n = pagesz - sizeof (*op) - RSLOP)) {
218 #ifndef RCHECK
219 		amt = 8;	/* size of first bucket */
220 		bucket = 0;
221 #else
222 		amt = 16;	/* size of first bucket */
223 		bucket = 1;
224 #endif
225 		n = -((long)sizeof (*op) + RSLOP);
226 	} else {
227 		amt = (unsigned)pagesz;
228 		bucket = pagebucket;
229 	}
230 	while (nbytes > amt + n) {
231 		amt <<= 1;
232 		if (amt == 0)
233 			return (NULL);
234 		bucket++;
235 	}
236 	/*
237 	 * If nothing in hash bucket right now,
238 	 * request more memory from the system.
239 	 */
240   	if ((op = nextf[bucket]) == NULL) {
241   		morecore(bucket);
242   		if ((op = nextf[bucket]) == NULL) {
243 			mutex_unlock(&malloc_mutex);
244   			return (NULL);
245 		}
246 	}
247 	/* remove from linked list */
248   	nextf[bucket] = op->ov_next;
249 	op->ov_magic = MAGIC;
250 	op->ov_index = bucket;
251 #ifdef MSTATS
252   	nmalloc[bucket]++;
253 #endif
254 	mutex_unlock(&malloc_mutex);
255 #ifdef RCHECK
256 	/*
257 	 * Record allocated size of block and
258 	 * bound space with magic numbers.
259 	 */
260 	op->ov_size = (nbytes + RSLOP - 1) & ~(RSLOP - 1);
261 	op->ov_rmagic = RMAGIC;
262   	*(u_short *)((caddr_t)(op + 1) + op->ov_size) = RMAGIC;
263 #endif
264   	return ((void *)(op + 1));
265 }
266 
267 /*
268  * Allocate more memory to the indicated bucket.
269  */
270 static void
271 morecore(bucket)
272 	int bucket;
273 {
274   	union overhead *op;
275 	long sz;		/* size of desired block */
276   	long amt;			/* amount to allocate */
277   	long nblks;			/* how many blocks we get */
278 
279 	/*
280 	 * sbrk_size <= 0 only for big, FLUFFY, requests (about
281 	 * 2^30 bytes on a VAX, I think) or for a negative arg.
282 	 */
283 	sz = 1 << (bucket + 3);
284 #ifdef DEBUG
285 	ASSERT(sz > 0);
286 #else
287 	if (sz <= 0)
288 		return;
289 #endif
290 	if (sz < pagesz) {
291 		amt = pagesz;
292   		nblks = amt / sz;
293 	} else {
294 		amt = sz + pagesz;
295 		nblks = 1;
296 	}
297 	op = (union overhead *)(void *)sbrk((int)amt);
298 	/* no more room! */
299   	if ((long)op == -1)
300   		return;
301 	/*
302 	 * Add new memory allocated to that on
303 	 * free list for this hash bucket.
304 	 */
305   	nextf[bucket] = op;
306   	while (--nblks > 0) {
307 		op->ov_next =
308 		    (union overhead *)(void *)((caddr_t)(void *)op+(size_t)sz);
309 		op = op->ov_next;
310   	}
311 }
312 
313 void
314 free(cp)
315 	void *cp;
316 {
317 	long size;
318 	union overhead *op;
319 
320   	if (cp == NULL)
321   		return;
322 	op = (union overhead *)(void *)((caddr_t)cp - sizeof (union overhead));
323 #ifdef DEBUG
324   	ASSERT(op->ov_magic == MAGIC);		/* make sure it was in use */
325 #else
326 	if (op->ov_magic != MAGIC)
327 		return;				/* sanity */
328 #endif
329 #ifdef RCHECK
330   	ASSERT(op->ov_rmagic == RMAGIC);
331 	ASSERT(*(u_short *)((caddr_t)(op + 1) + op->ov_size) == RMAGIC);
332 #endif
333   	size = op->ov_index;
334   	ASSERT(size < NBUCKETS);
335 	mutex_lock(&malloc_mutex);
336 	op->ov_next = nextf[(unsigned int)size];/* also clobbers ov_magic */
337   	nextf[(unsigned int)size] = op;
338 #ifdef MSTATS
339   	nmalloc[(size_t)size]--;
340 #endif
341 	mutex_unlock(&malloc_mutex);
342 }
343 
/*
 * When a program attempts "storage compaction" as mentioned in the
 * old malloc man page, it realloc's an already freed block.  Usually
 * this is the last block it freed; occasionally it might be farther
 * back.  We have to search all the free lists for the block in order
 * to determine its bucket: 1st we make one pass thru the lists
 * checking only the first block in each; if that fails we search
 * ``__realloc_srchlen'' blocks in each list for a match (the variable
 * is extern so the caller can modify it).  If that fails we just copy
 * however many bytes was given to realloc() and hope it's not huge.
 */
int __realloc_srchlen = 4;	/* 4 should be plenty, -1 =>'s whole list */
356 
/*
 * Resize the allocation cp to nbytes.  A NULL cp degenerates to
 * malloc(); nbytes == 0 frees cp and returns NULL.  Returns cp itself
 * when the existing block already accommodates the new size, otherwise
 * a freshly allocated block with the data copied over (NULL on failure).
 *
 * NOTE(review): when cp points at an already-freed block (the historic
 * "storage compaction" usage described above __realloc_srchlen), the
 * copy at the bottom deliberately reads from freed memory.
 */
void *
realloc(cp, nbytes)
	void *cp;
	size_t nbytes;
{
  	u_long onb;
	long i;
	union overhead *op;
	char *res;
	int was_alloced = 0;

  	if (cp == NULL)
  		return (malloc(nbytes));
	if (nbytes == 0) {
		free (cp);
		return (NULL);
	}
	op = (union overhead *)(void *)((caddr_t)cp - sizeof (union overhead));
	mutex_lock(&malloc_mutex);
	if (op->ov_magic == MAGIC) {
		/* normal case: header is intact, bucket is recorded there */
		was_alloced++;
		i = op->ov_index;
	} else {
		/*
		 * Already free, doing "compaction".
		 *
		 * Search for the old block of memory on the
		 * free list.  First, check the most common
		 * case (last element free'd), then (this failing)
		 * the last ``__realloc_srchlen'' items free'd.
		 * If all lookups fail, then assume the size of
		 * the memory block being realloc'd is the
		 * largest possible (so that all "nbytes" of new
		 * memory are copied into).  Note that this could cause
		 * a memory fault if the old area was tiny, and the moon
		 * is gibbous.  However, that is very unlikely.
		 */
		if ((i = findbucket(op, 1)) < 0 &&
		    (i = findbucket(op, __realloc_srchlen)) < 0)
			i = NBUCKETS;
	}
	/* onb = usable byte count of bucket i's blocks */
	onb = (u_long)1 << (u_long)(i + 3);
	if (onb < pagesz)
		onb -= sizeof (*op) + RSLOP;
	else
		onb += pagesz - sizeof (*op) - RSLOP;
	/* avoid the copy if same size block */
	if (was_alloced) {
		if (i) {
			/* i = usable byte count of the next-smaller bucket */
			i = (long)1 << (long)(i + 2);
			if (i < pagesz)
				i -= sizeof (*op) + RSLOP;
			else
				i += pagesz - sizeof (*op) - RSLOP;
		}
		/* keep the block if nbytes fits but a smaller bucket wouldn't */
		if (nbytes <= onb && nbytes > i) {
#ifdef RCHECK
			op->ov_size = (nbytes + RSLOP - 1) & ~(RSLOP - 1);
			*(u_short *)((caddr_t)(op + 1) + op->ov_size) = RMAGIC;
#endif
			mutex_unlock(&malloc_mutex);
			return (cp);

		}
#ifndef _REENT
		else
			free(cp);
#endif
	}
	mutex_unlock(&malloc_mutex);
	if ((res = malloc(nbytes)) == NULL) {
#ifdef _REENT
		free(cp);
#endif
		return (NULL);
	}
#ifndef _REENT
	if (cp != res)		/* common optimization if "compacting" */
		(void)memmove(res, cp, (size_t)((nbytes < onb) ? nbytes : onb));
#else
	/* reentrant variant: copy first, free the old block afterwards */
	(void)memmove(res, cp, (size_t)((nbytes < onb) ? nbytes : onb));
	free(cp);
#endif
  	return (res);
}
442 
443 /*
444  * Search ``srchlen'' elements of each free list for a block whose
445  * header starts at ``freep''.  If srchlen is -1 search the whole list.
446  * Return bucket number, or -1 if not found.
447  */
448 static int
449 findbucket(freep, srchlen)
450 	union overhead *freep;
451 	int srchlen;
452 {
453 	union overhead *p;
454 	int i, j;
455 
456 	for (i = 0; i < NBUCKETS; i++) {
457 		j = 0;
458 		for (p = nextf[i]; p && j != srchlen; p = p->ov_next) {
459 			if (p == freep)
460 				return (i);
461 			j++;
462 		}
463 	}
464 	return (-1);
465 }
466 
467 #ifdef MSTATS
468 /*
469  * mstats - print out statistics about malloc
470  *
471  * Prints two lines of numbers, one showing the length of the free list
472  * for each size category, the second showing the number of mallocs -
473  * frees for each size category.
474  */
475 void
476 mstats(s)
477 	char *s;
478 {
479   	int i, j;
480   	union overhead *p;
481   	int totfree = 0,
482   	totused = 0;
483 
484   	fprintf(stderr, "Memory allocation statistics %s\nfree:\t", s);
485   	for (i = 0; i < NBUCKETS; i++) {
486   		for (j = 0, p = nextf[i]; p; p = p->ov_next, j++)
487   			;
488   		fprintf(stderr, " %d", j);
489   		totfree += j * (1 << (i + 3));
490   	}
491   	fprintf(stderr, "\nused:\t");
492   	for (i = 0; i < NBUCKETS; i++) {
493   		fprintf(stderr, " %d", nmalloc[i]);
494   		totused += nmalloc[i] * (1 << (i + 3));
495   	}
496   	fprintf(stderr, "\n\tTotal in use: %d, total free: %d\n",
497 	    totused, totfree);
498 }
499 #endif
500