xref: /netbsd-src/lib/libc/stdlib/malloc.c (revision 8da02df781576414867ceac279115e0607ffe916)
1 /*	$NetBSD: malloc.c,v 1.24 1999/07/08 22:18:06 thorpej Exp $	*/
2 
3 /*
4  * ----------------------------------------------------------------------------
5  * "THE BEER-WARE LICENSE" (Revision 42):
6  * <phk@FreeBSD.ORG> wrote this file.  As long as you retain this notice you
7  * can do whatever you want with this stuff. If we meet some day, and you think
8  * this stuff is worth it, you can buy me a beer in return.   Poul-Henning Kamp
9  * ----------------------------------------------------------------------------
10  *
11  * From FreeBSD: malloc.c,v 1.43 1998/09/30 06:13:59 jb
12  *
13  */
14 
15 /*
16  * Defining MALLOC_EXTRA_SANITY will enable extra checks which are related
17  * to internal conditions and consistency in malloc.c. This has a
18  * noticeable runtime performance hit, and generally will not do you
19  * any good unless you fiddle with the internals of malloc or want
20  * to catch random pointer corruption as early as possible.
21  */
22 #ifndef MALLOC_EXTRA_SANITY
23 #undef MALLOC_EXTRA_SANITY
24 #endif
25 
26 /*
27  * What to use for Junk.  This is the byte value we use to fill with
28  * when the 'J' option is enabled.
29  */
30 #define SOME_JUNK	0xd0		/* as in "Duh" :-) */
31 
32 /*
33  * The basic parameters you can tweak.
34  *
35  * malloc_minsize	minimum size of an allocation in bytes.
36  *			If this is too small it's too much work
37  *			to manage them.  This is also the smallest
38  *			unit of alignment used for the storage
39  *			returned by malloc/realloc.
40  *
41  */
42 
43 #if defined(__FreeBSD__)
44 #   if defined(__i386__)
45 #       define malloc_minsize		16U
46 #   endif
47 #   if defined(__alpha__)
48 #       define malloc_minsize		16U
49 #   endif
50 #   if !defined(__NETBSD_SYSCALLS)
51 #       define HAS_UTRACE
52 #   endif
53     /*
54      * Make malloc/free/realloc thread-safe in libc for use with
55      * kernel threads.
56      */
57 #   include "libc_private.h"
58 #   include "spinlock.h"
59     static spinlock_t thread_lock	= _SPINLOCK_INITIALIZER;
60 #   define THREAD_LOCK()		if (__isthreaded) _SPINLOCK(&thread_lock);
61 #   define THREAD_UNLOCK()		if (__isthreaded) _SPINUNLOCK(&thread_lock);
62 #endif /* __FreeBSD__ */
63 
64 #if defined(__NetBSD__)
65 #   define malloc_minsize               16U
66 #endif /* __NetBSD__ */
67 
68 #if defined(__sparc__) && defined(sun)
69 #   define malloc_minsize		16U
70 #   define MAP_ANON			(0)
71     static int fdzero;
72 #   define MMAP_FD	fdzero
73 #   define INIT_MMAP() \
74 	{ if ((fdzero=open("/dev/zero", O_RDWR, 0000)) == -1) \
75 	    wrterror("open of /dev/zero"); }
76 #endif /* __sparc__ */
77 
78 /* Insert your combination here... */
79 #if defined(__FOOCPU__) && defined(__BAROS__)
80 #   define malloc_minsize		16U
81 #endif /* __FOOCPU__ && __BAROS__ */
82 
83 
84 /*
85  * No user serviceable parts behind this point.
86  */
87 #include <sys/types.h>
88 #include <sys/mman.h>
89 #include <errno.h>
90 #include <fcntl.h>
91 #include <stddef.h>
92 #include <stdio.h>
93 #include <stdlib.h>
94 #include <string.h>
95 #include <unistd.h>
96 
97 /*
98  * This structure describes a page worth of chunks.
99  */
100 
101 struct pginfo {
102     struct pginfo	*next;	/* next on the free list */
103     void		*page;	/* Pointer to the page */
104     u_short		size;	/* size of this page's chunks */
105     u_short		shift;	/* How far to shift for chunks of this size */
106     u_short		free;	/* How many free chunks */
107     u_short		total;	/* How many chunks in all */
108     u_int		bits[1]; /* Which chunks are free */
109 };
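/*
 * Worked example (illustrative; assumes a 4 KB page and 32-bit u_int):
 * a page carved into 16-byte chunks has shift = 4, size = 16 and
 * total = 4096 >> 4 = 256 chunks, so the bitmap needs 256 / MALLOC_BITS
 * = 8 words.  bits[] is declared with one element and the structure is
 * over-allocated (see malloc_make_chunks()) to make room for the rest.
 */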
110 
111 /*
112  * This structure describes a number of free pages.
113  */
114 
115 struct pgfree {
116     struct pgfree	*next;	/* next run of free pages */
117     struct pgfree	*prev;	/* prev run of free pages */
118     void		*page;	/* pointer to free pages */
119     void		*end;	/* pointer to end of free pages */
120     size_t		size;	/* number of bytes free */
121 };
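/*
 * These runs are kept on "free_list" below: a doubly linked list sorted
 * by address, into which free_pages() coalesces neighbouring runs.  Only
 * when the final run grows past malloc_cache bytes is the excess handed
 * back to the kernel by lowering the break.
 */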
122 
123 /*
124  * How many bits per u_int in the bitmap.
125  * Change only if not 8 bits/byte
126  */
127 #define	MALLOC_BITS	(8*sizeof(u_int))
128 
129 /*
130  * Magic values to put in the page_directory
131  */
132 #define MALLOC_NOT_MINE	((struct pginfo*) 0)
133 #define MALLOC_FREE 	((struct pginfo*) 1)
134 #define MALLOC_FIRST	((struct pginfo*) 2)
135 #define MALLOC_FOLLOW	((struct pginfo*) 3)
136 #define MALLOC_MAGIC	((struct pginfo*) 4)
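/*
 * A page_dir[] slot holds either one of the small sentinel values above
 * or a real struct pginfo pointer.  Since no valid pginfo can live at
 * addresses 0..4, a plain comparison such as ifree()'s
 * "info < MALLOC_MAGIC" tells the two cases apart: NOT_MINE marks pages
 * we never owned, FREE marks cached free pages, FIRST/FOLLOW mark the
 * first and subsequent pages of a multi-page allocation, and anything
 * >= MALLOC_MAGIC points at the pginfo describing a page of chunks.
 */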
137 
138 /*
139  * Page size related parameters, computed at run-time.
140  */
141 static size_t malloc_pagesize;
142 static size_t malloc_pageshift;
143 static size_t malloc_pagemask;
144 
145 #ifndef malloc_minsize
146 #define malloc_minsize			16U
147 #endif
148 
149 #ifndef malloc_maxsize
150 #define malloc_maxsize			((malloc_pagesize)>>1)
151 #endif
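/*
 * Requests up to malloc_maxsize (half a page) are served as chunks from
 * a shared page by malloc_bytes(); anything larger is rounded up to
 * whole pages by malloc_pages().  See imalloc() below.
 */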
152 
153 #define pageround(foo) (((foo) + (malloc_pagemask))&(~(malloc_pagemask)))
154 #define ptr2idx(foo) (((u_long)(foo) >> malloc_pageshift)-malloc_origo)
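/*
 * Worked example (illustrative; assumes 4 KB pages, i.e. pageshift 12,
 * pagemask 0xfff): pageround(0x1234) rounds up to 0x2000, and ptr2idx()
 * drops the offset within the page and rebases the page number so it can
 * be used directly as an index into page_dir[], e.g.
 * ptr2idx(0x20345678) == 0x20345 - malloc_origo.
 */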
155 
156 #ifndef THREAD_LOCK
157 #define THREAD_LOCK()
158 #endif
159 
160 #ifndef THREAD_UNLOCK
161 #define THREAD_UNLOCK()
162 #endif
163 
164 #ifndef MMAP_FD
165 #define MMAP_FD (-1)
166 #endif
167 
168 #ifndef INIT_MMAP
169 #define INIT_MMAP()
170 #endif
171 
172 #ifndef MADV_FREE
173 #define MADV_FREE MADV_DONTNEED
174 #endif
175 
176 /* Set when initialization has been done */
177 static unsigned malloc_started;
178 
179 /* Recursion flag for public interface. */
180 static int malloc_active;
181 
182 /* Number of free pages we cache */
183 static unsigned malloc_cache = 16;
184 
185 /* The offset from pagenumber to index into the page directory */
186 static u_long malloc_origo;
187 
188 /* The last index in the page directory we care about */
189 static u_long last_idx;
190 
191 /* Pointer to page directory. Allocated "as if with" malloc */
192 static struct	pginfo **page_dir;
193 
194 /* How many slots in the page directory */
195 static unsigned	malloc_ninfo;
196 
197 /* Free pages line up here */
198 static struct pgfree free_list;
199 
200 /* Abort(), user doesn't handle problems.  */
201 static int malloc_abort;
202 
203 /* Are we trying to die ?  */
204 static int suicide;
205 
206 /* always realloc ?  */
207 static int malloc_realloc;
208 
209 /* pass the kernel a hint on free pages ?  */
210 static int malloc_hint = 1;
211 
212 /* xmalloc behaviour ?  */
213 static int malloc_xmalloc;
214 
215 /* sysv behaviour for malloc(0) ?  */
216 static int malloc_sysv;
217 
218 /* zero fill ?  */
219 static int malloc_zero;
220 
221 /* junk fill ?  */
222 static int malloc_junk;
223 
224 #ifdef HAS_UTRACE
225 
226 /* utrace ?  */
227 static int malloc_utrace;
228 
229 struct ut { void *p; size_t s; void *r; };
230 
231 void utrace __P((struct ut *, int));
232 
233 #define UTRACE(a, b, c) \
234 	if (malloc_utrace) \
235 		{struct ut u; u.p=a; u.s = b; u.r=c; utrace(&u, sizeof u);}
236 #else /* !HAS_UTRACE */
237 #define UTRACE(a,b,c)
238 #endif /* HAS_UTRACE */
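/*
 * With the 'U' option (FreeBSD's HAS_UTRACE case), every call is logged
 * through utrace(2) as a {pointer, size, result} record: malloc logs
 * {0, size, r}, free logs {ptr, 0, 0} and realloc logs {ptr, size, r},
 * so a ktrace of the process yields a complete allocation trace.
 */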
239 
240 /* my last break. */
241 static void *malloc_brk;
242 
243 /* one location cache for free-list holders */
244 static struct pgfree *px;
245 
246 /* compile-time options */
247 char *malloc_options;
248 
249 /* Name of the current public function */
250 static char *malloc_func;
251 
252 /* Macro for mmap */
253 #define MMAP(size) \
254 	mmap(0, (size), PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, \
255 	    MMAP_FD, 0)
256 
257 /*
258  * Necessary function declarations
259  */
260 static int extend_pgdir(u_long idx);
261 static void *imalloc(size_t size);
262 static void ifree(void *ptr);
263 static void *irealloc(void *ptr, size_t size);
264 
265 extern char *__progname;
266 
267 static void
268 wrterror(char *p)
269 {
270     char *q = " error: ";
271     write(STDERR_FILENO, __progname, strlen(__progname));
272     write(STDERR_FILENO, malloc_func, strlen(malloc_func));
273     write(STDERR_FILENO, q, strlen(q));
274     write(STDERR_FILENO, p, strlen(p));
275     suicide = 1;
276     abort();
277 }
278 
279 static void
280 wrtwarning(char *p)
281 {
282     char *q = " warning: ";
283     if (malloc_abort)
284 	wrterror(p);
285     write(STDERR_FILENO, __progname, strlen(__progname));
286     write(STDERR_FILENO, malloc_func, strlen(malloc_func));
287     write(STDERR_FILENO, q, strlen(q));
288     write(STDERR_FILENO, p, strlen(p));
289 }
290 
291 
292 /*
293  * Allocate a number of pages from the OS
294  */
295 static void *
296 map_pages(int pages)
297 {
298     caddr_t result, tail;
299 
300     result = (caddr_t)pageround((u_long)sbrk(0));
301     tail = result + (pages << malloc_pageshift);
302 
303     if (brk(tail)) {
304 #ifdef MALLOC_EXTRA_SANITY
305 	wrterror("(ES): map_pages fails\n");
306 #endif /* MALLOC_EXTRA_SANITY */
307 	return 0;
308     }
309 
310     last_idx = ptr2idx(tail) - 1;
311     malloc_brk = tail;
312 
313     if ((last_idx+1) >= malloc_ninfo && !extend_pgdir(last_idx))
314 	return 0;
315 
316     return result;
317 }
318 
319 /*
320  * Extend page directory
321  */
322 static int
323 extend_pgdir(u_long idx)
324 {
325     struct  pginfo **new, **old;
326     int i, oldlen;
327 
328     /* Make it this many pages */
329     i = idx * sizeof *page_dir;
330     i /= malloc_pagesize;
331     i += 2;
332 
333     /* remember the old mapping size */
334     oldlen = malloc_ninfo * sizeof *page_dir;
335 
336     /*
337      * NOTE: we allocate new pages and copy the directory rather than tempt
338      * fate by trying to "grow" the region.. There is nothing to prevent
339      * us from accidentally re-mapping space that's been allocated by our caller
340      * via dlopen() or other mmap().
341      *
342      * The copy problem is not too bad, as there is 4K of page index per
343      * 4MB of malloc arena.
344      *
345      * We can totally avoid the copy if we open a file descriptor to associate
346      * the anon mappings with.  Then, when we remap the pages at the new
347      * address, the old pages will be "magically" remapped..  But this means
348      * keeping open a "secret" file descriptor.....
349      */
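    /*
     * Illustration of the cost (assuming 4 KB pages and 32-bit pointers):
     * one directory page holds 4096 / 4 = 1024 entries, and each entry
     * covers one data page, so copying 4 KB of index is enough for every
     * 4 MB of arena -- hence the "4K per 4MB" figure above.
     */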
350 
351     /* Get new pages */
352     new = (struct pginfo**) MMAP(i * malloc_pagesize);
353     if (new == (struct pginfo **)-1)
354 	return 0;
355 
356     /* Copy the old stuff */
357     memcpy(new, page_dir,
358 	    malloc_ninfo * sizeof *page_dir);
359 
360     /* register the new size */
361     malloc_ninfo = i * malloc_pagesize / sizeof *page_dir;
362 
363     /* swap the pointers */
364     old = page_dir;
365     page_dir = new;
366 
367     /* Now free the old stuff */
368     munmap(old, oldlen);
369     return 1;
370 }
371 
372 /*
373  * Initialize the world
374  */
375 static void
376 malloc_init (void)
377 {
378     char *p, b[64];
379     int i, j;
380     int errnosave;
381 
382     /*
383      * Compute page-size related variables.
384      */
385     malloc_pagesize = sysconf(_SC_PAGESIZE);
386     malloc_pagemask = malloc_pagesize - 1;
387     for (malloc_pageshift = 0;
388 	 (1UL << malloc_pageshift) != malloc_pagesize;
389 	 malloc_pageshift++)
390 	/* nothing */ ;
391 
392     INIT_MMAP();
393 
394 #ifdef MALLOC_EXTRA_SANITY
395     malloc_junk = 1;
396 #endif /* MALLOC_EXTRA_SANITY */
397 
398     for (i = 0; i < 3; i++) {
399 	if (i == 0) {
400 	    errnosave = errno;
401 	    j = readlink("/etc/malloc.conf", b, sizeof b - 1);
402 	    errno = errnosave;
403 	    if (j <= 0)
404 		continue;
405 	    b[j] = '\0';
406 	    p = b;
407 	} else if (i == 1) {
408 	    p = getenv("MALLOC_OPTIONS");
409 	} else {
410 	    p = malloc_options;
411 	}
412 	for (; p && *p; p++) {
413 	    switch (*p) {
414 		case '>': malloc_cache   <<= 1; break;
415 		case '<': malloc_cache   >>= 1; break;
416 		case 'a': malloc_abort   = 0; break;
417 		case 'A': malloc_abort   = 1; break;
418 		case 'h': malloc_hint    = 0; break;
419 		case 'H': malloc_hint    = 1; break;
420 		case 'r': malloc_realloc = 0; break;
421 		case 'R': malloc_realloc = 1; break;
422 		case 'j': malloc_junk    = 0; break;
423 		case 'J': malloc_junk    = 1; break;
424 #ifdef HAS_UTRACE
425 		case 'u': malloc_utrace  = 0; break;
426 		case 'U': malloc_utrace  = 1; break;
427 #endif
428 		case 'v': malloc_sysv    = 0; break;
429 		case 'V': malloc_sysv    = 1; break;
430 		case 'x': malloc_xmalloc = 0; break;
431 		case 'X': malloc_xmalloc = 1; break;
432 		case 'z': malloc_zero    = 0; break;
433 		case 'Z': malloc_zero    = 1; break;
434 		default:
435 		    j = malloc_abort;
436 		    malloc_abort = 0;
437 		    wrtwarning("unknown char in MALLOC_OPTIONS\n");
438 		    malloc_abort = j;
439 		    break;
440 	    }
441 	}
442     }
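    /*
     * Example (illustrative): running a program with MALLOC_OPTIONS=AJ
     * turns on abort-on-warning and junk filling, ">>" doubles the page
     * cache twice (16 -> 64 pages), and lower-case letters switch the
     * corresponding option off again.  The same flag string may be given
     * as the target of the /etc/malloc.conf symlink, which is read with
     * readlink() above.
     */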
443 
444     UTRACE(0, 0, 0);
445 
446     /*
447      * We want junk in the entire allocation, and zero only in the part
448      * the user asked for.
449      */
450     if (malloc_zero)
451 	malloc_junk=1;
452 
453     /*
454      * If we run with junk (or implicitly from above: zero), we want to
455      * force realloc() to get new storage, so we can DTRT with it.
456      */
457     if (malloc_junk)
458 	malloc_realloc=1;
459 
460     /* Allocate one page for the page directory */
461     page_dir = (struct pginfo **) MMAP(malloc_pagesize);
462 
463     if (page_dir == (struct pginfo **) -1)
464 	wrterror("mmap(2) failed, check limits.\n");
465 
466     /*
467      * We need a maximum of malloc_pageshift buckets; steal these from the
468      * front of the page directory.
469      */
470     malloc_origo = ((u_long)pageround((u_long)sbrk(0))) >> malloc_pageshift;
471     malloc_origo -= malloc_pageshift;
472 
473     malloc_ninfo = malloc_pagesize / sizeof *page_dir;
474 
475     /* Recalculate the cache size in bytes, and make sure it's nonzero */
476 
477     if (!malloc_cache)
478 	malloc_cache++;
479 
480     malloc_cache <<= malloc_pageshift;
481 
482     /*
483      * This is a nice hack from Kaleb Keithly (kaleb@x.org).
484      * We can sbrk(2) further back when we keep this on a low address.
485      */
486     px = (struct pgfree *) imalloc (sizeof *px);
487 
488     /* Been here, done that */
489     malloc_started++;
490 }
491 
492 /*
493  * Allocate a number of complete pages
494  */
495 static void *
496 malloc_pages(size_t size)
497 {
498     void *p, *delay_free = 0;
499     int i;
500     struct pgfree *pf;
501     u_long idx;
502 
503     size = pageround(size);
504 
505     p = 0;
506 
507     /* Look for free pages before asking for more */
508     for(pf = free_list.next; pf; pf = pf->next) {
509 
510 #ifdef MALLOC_EXTRA_SANITY
511 	if (pf->size & malloc_pagemask)
512 	    wrterror("(ES): junk length entry on free_list\n");
513 	if (!pf->size)
514 	    wrterror("(ES): zero length entry on free_list\n");
515 	if (pf->page == pf->end)
516 	    wrterror("(ES): zero entry on free_list\n");
517 	if (pf->page > pf->end)
518 	    wrterror("(ES): sick entry on free_list\n");
519 	if ((void*)pf->page >= (void*)sbrk(0))
520 	    wrterror("(ES): entry on free_list past brk\n");
521 	if (page_dir[ptr2idx(pf->page)] != MALLOC_FREE)
522 	    wrterror("(ES): non-free first page on free-list\n");
523 	if (page_dir[ptr2idx(pf->end)-1] != MALLOC_FREE)
524 	    wrterror("(ES): non-free last page on free-list\n");
525 #endif /* MALLOC_EXTRA_SANITY */
526 
527 	if (pf->size < size)
528 	    continue;
529 
530 	if (pf->size == size) {
531 	    p = pf->page;
532 	    if (pf->next)
533 		    pf->next->prev = pf->prev;
534 	    pf->prev->next = pf->next;
535 	    delay_free = pf;
536 	    break;
537 	}
538 
539 	p = pf->page;
540 	pf->page = (char *)pf->page + size;
541 	pf->size -= size;
542 	break;
543     }
544 
545 #ifdef MALLOC_EXTRA_SANITY
546     if (p && page_dir[ptr2idx(p)] != MALLOC_FREE)
547 	wrterror("(ES): allocated non-free page on free-list\n");
548 #endif /* MALLOC_EXTRA_SANITY */
549 
550     size >>= malloc_pageshift;
551 
552     /* Map new pages */
553     if (!p)
554 	p = map_pages(size);
555 
556     if (p) {
557 
558 	idx = ptr2idx(p);
559 	page_dir[idx] = MALLOC_FIRST;
560 	for (i=1;i<size;i++)
561 	    page_dir[idx+i] = MALLOC_FOLLOW;
562 
563 	if (malloc_junk)
564 	    memset(p, SOME_JUNK, size << malloc_pageshift);
565     }
566 
567     if (delay_free) {
568 	if (!px)
569 	    px = delay_free;
570 	else
571 	    ifree(delay_free);
572     }
573 
574     return p;
575 }
576 
577 /*
578  * Allocate a page of fragments
579  */
580 
581 static __inline__ int
582 malloc_make_chunks(int bits)
583 {
584     struct  pginfo *bp;
585     void *pp;
586     int i, k, l;
587 
588     /* Allocate a new bucket */
589     pp = malloc_pages(malloc_pagesize);
590     if (!pp)
591 	return 0;
592 
593     /* Find length of admin structure */
594     l = offsetof(struct pginfo, bits[0]);
595     l += sizeof bp->bits[0] *
596 	(((malloc_pagesize >> bits)+MALLOC_BITS-1) / MALLOC_BITS);
597 
598     /* Don't waste more than two chunks on this */
599     if ((1<<(bits)) <= l+l) {
600 	bp = (struct  pginfo *)pp;
601     } else {
602 	bp = (struct  pginfo *)imalloc(l);
603 	if (!bp) {
604 	    ifree(pp);
605 	    return 0;
606 	}
607     }
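    /*
     * Example (illustrative; 4 KB page, 32-bit pointers): for 32-byte
     * chunks (bits = 5) the admin structure is roughly 16 bytes of header
     * plus a 4-word bitmap for 128 chunks, so l is about 32 and it is
     * simply embedded in the page, consuming one chunk.  For 2 KB chunks
     * (bits = 11) l is only about 20 bytes, and burning a 2 KB chunk on
     * it would be wasteful, so it is imalloc()'ed separately.
     */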
608 
609     bp->size = (1<<bits);
610     bp->shift = bits;
611     bp->total = bp->free = malloc_pagesize >> bits;
612     bp->page = pp;
613 
614     /* set all valid bits in the bitmap */
615     k = bp->total;
616     i = 0;
617 
618     /* Do a bunch at a time */
619     for(;k-i >= MALLOC_BITS; i += MALLOC_BITS)
620 	bp->bits[i / MALLOC_BITS] = ~0;
621 
622     for(; i < k; i++)
623         bp->bits[i/MALLOC_BITS] |= 1<<(i%MALLOC_BITS);
624 
625     if (bp == bp->page) {
626 	/* Mark the ones we stole for ourselves */
627 	for(i=0;l > 0;i++) {
628 	    bp->bits[i/MALLOC_BITS] &= ~(1<<(i%MALLOC_BITS));
629 	    bp->free--;
630 	    bp->total--;
631 	    l -= (1 << bits);
632 	}
633     }
634 
635     /* MALLOC_LOCK */
636 
637     page_dir[ptr2idx(pp)] = bp;
638 
639     bp->next = page_dir[bits];
640     page_dir[bits] = bp;
641 
642     /* MALLOC_UNLOCK */
643 
644     return 1;
645 }
646 
647 /*
648  * Allocate a fragment
649  */
650 static void *
651 malloc_bytes(size_t size)
652 {
653     int i,j;
654     u_int u;
655     struct  pginfo *bp;
656     int k;
657     u_int *lp;
658 
659     /* Don't bother with anything less than this */
660     if (size < malloc_minsize)
661 	size = malloc_minsize;
662 
663     /* Find the right bucket */
664     j = 1;
665     i = size-1;
666     while (i >>= 1)
667 	j++;
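    /*
     * Example (illustrative): j ends up as the smallest exponent with
     * (1 << j) >= size.  A request for 100 bytes gives i = 99 and j = 7,
     * so the 128-byte bucket is used; a 16-byte request lands in the
     * j = 4 bucket.  Bucket heads live in the first malloc_pageshift
     * slots "stolen" from the front of page_dir[].
     */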
668 
669     /* If the bucket is empty, carve another page into chunks of this size */
670     if (!page_dir[j] && !malloc_make_chunks(j))
671 	return 0;
672 
673     bp = page_dir[j];
674 
675     /* Find first word of bitmap which isn't empty */
676     for (lp = bp->bits; !*lp; lp++)
677 	;
678 
679     /* Find that bit, and tweak it */
680     u = 1;
681     k = 0;
682     while (!(*lp & u)) {
683 	u += u;
684 	k++;
685     }
686     *lp ^= u;
687 
688     /* If there are no more free, remove from free-list */
689     if (!--bp->free) {
690 	page_dir[j] = bp->next;
691 	bp->next = 0;
692     }
693 
694     /* Adjust to the real offset of that chunk */
695     k += (lp-bp->bits)*MALLOC_BITS;
696     k <<= bp->shift;
697 
698     if (malloc_junk)
699 	memset((u_char*)bp->page + k, SOME_JUNK, bp->size);
700 
701     return (u_char *)bp->page + k;
702 }
703 
704 /*
705  * Allocate a piece of memory
706  */
707 static void *
708 imalloc(size_t size)
709 {
710     void *result;
711 
712     if (suicide)
713 	abort();
714 
715     if ((size + malloc_pagesize) < size)	/* Check for overflow */
716 	result = 0;
717     else if (size <= malloc_maxsize)
718 	result =  malloc_bytes(size);
719     else
720 	result =  malloc_pages(size);
721 
722     if (malloc_abort && !result)
723 	wrterror("allocation failed.\n");
724 
725     if (malloc_zero && result)
726 	memset(result, 0, size);
727 
728     return result;
729 }
730 
731 /*
732  * Change the size of an allocation.
733  */
734 static void *
735 irealloc(void *ptr, size_t size)
736 {
737     void *p;
738     u_long osize, idx;
739     struct pginfo **mp;
740     int i;
741 
742     if (suicide)
743 	abort();
744 
745     idx = ptr2idx(ptr);
746 
747     if (idx < malloc_pageshift) {
748 	wrtwarning("junk pointer, too low to make sense.\n");
749 	return 0;
750     }
751 
752     if (idx > last_idx) {
753 	wrtwarning("junk pointer, too high to make sense.\n");
754 	return 0;
755     }
756 
757     mp = &page_dir[idx];
758 
759     if (*mp == MALLOC_FIRST) {			/* Page allocation */
760 
761 	/* Check the pointer */
762 	if ((u_long)ptr & malloc_pagemask) {
763 	    wrtwarning("modified (page-) pointer.\n");
764 	    return 0;
765 	}
766 
767 	/* Find the size in bytes */
768 	for (osize = malloc_pagesize; *++mp == MALLOC_FOLLOW;)
769 	    osize += malloc_pagesize;
770 
771         if (!malloc_realloc && 			/* unless we have to, */
772 	  size <= osize && 			/* .. or are too small, */
773 	  size > (osize - malloc_pagesize)) {	/* .. or can free a page, */
774 	    return ptr;				/* don't do anything. */
775 	}
776 
777     } else if (*mp >= MALLOC_MAGIC) {		/* Chunk allocation */
778 
779 	/* Check the pointer for sane values */
780 	if (((u_long)ptr & ((*mp)->size-1))) {
781 	    wrtwarning("modified (chunk-) pointer.\n");
782 	    return 0;
783 	}
784 
785 	/* Find the chunk index in the page */
786 	i = ((u_long)ptr & malloc_pagemask) >> (*mp)->shift;
787 
788 	/* Verify that it isn't a free chunk already */
789         if ((*mp)->bits[i/MALLOC_BITS] & (1<<(i%MALLOC_BITS))) {
790 	    wrtwarning("chunk is already free.\n");
791 	    return 0;
792 	}
793 
794 	osize = (*mp)->size;
795 
796 	if (!malloc_realloc &&		/* Unless we have to, */
797 	  size < osize && 		/* ..or are too small, */
798 	  (size > osize/2 ||	 	/* ..or could use a smaller size, */
799 	  osize == malloc_minsize)) {	/* ..(if there is one) */
800 	    return ptr;			/* ..Don't do anything */
801 	}
802 
803     } else {
804 	wrtwarning("pointer to wrong page.\n");
805 	return 0;
806     }
807 
808     p = imalloc(size);
809 
810     if (p) {
811 	/* copy the lesser of the two sizes, and free the old one */
812 	if (!size || !osize)
813 	    ;
814 	else if (osize < size)
815 	    memcpy(p, ptr, osize);
816 	else
817 	    memcpy(p, ptr, size);
818 	ifree(ptr);
819     }
820     return p;
821 }
822 
823 /*
824  * Free a sequence of pages
825  */
826 
827 static __inline__ void
828 free_pages(void *ptr, int idx, struct pginfo *info)
829 {
830     int i;
831     struct pgfree *pf, *pt=0;
832     u_long l;
833     void *tail;
834 
835     if (info == MALLOC_FREE) {
836 	wrtwarning("page is already free.\n");
837 	return;
838     }
839 
840     if (info != MALLOC_FIRST) {
841 	wrtwarning("pointer to wrong page.\n");
842 	return;
843     }
844 
845     if ((u_long)ptr & malloc_pagemask) {
846 	wrtwarning("modified (page-) pointer.\n");
847 	return;
848     }
849 
850     /* Count how many pages and mark them free at the same time */
851     page_dir[idx] = MALLOC_FREE;
852     for (i = 1; page_dir[idx+i] == MALLOC_FOLLOW; i++)
853 	page_dir[idx + i] = MALLOC_FREE;
854 
855     l = i << malloc_pageshift;
856 
857     if (malloc_junk)
858 	memset(ptr, SOME_JUNK, l);
859 
860     if (malloc_hint)
861 	madvise(ptr, l, MADV_FREE);
862 
863     tail = (char *)ptr+l;
864 
865     /* add to free-list */
866     if (!px)
867 	px = imalloc(sizeof *pt);	/* This cannot fail... */
868     px->page = ptr;
869     px->end =  tail;
870     px->size = l;
871     if (!free_list.next) {
872 
873 	/* Nothing on free list, put this at head */
874 	px->next = free_list.next;
875 	px->prev = &free_list;
876 	free_list.next = px;
877 	pf = px;
878 	px = 0;
879 
880     } else {
881 
882 	/* Find the right spot, leave pf pointing to the modified entry. */
883 	tail = (char *)ptr+l;
884 
885 	for(pf = free_list.next; pf->end < ptr && pf->next; pf = pf->next)
886 	    ; /* Race ahead here */
887 
888 	if (pf->page > tail) {
889 	    /* Insert before entry */
890 	    px->next = pf;
891 	    px->prev = pf->prev;
892 	    pf->prev = px;
893 	    px->prev->next = px;
894 	    pf = px;
895 	    px = 0;
896 	} else if (pf->end == ptr ) {
897 	    /* Append to the previous entry */
898 	    pf->end = (char *)pf->end + l;
899 	    pf->size += l;
900 	    if (pf->next && pf->end == pf->next->page ) {
901 		/* And collapse the next too. */
902 		pt = pf->next;
903 		pf->end = pt->end;
904 		pf->size += pt->size;
905 		pf->next = pt->next;
906 		if (pf->next)
907 		    pf->next->prev = pf;
908 	    }
909 	} else if (pf->page == tail) {
910 	    /* Prepend to entry */
911 	    pf->size += l;
912 	    pf->page = ptr;
913 	} else if (!pf->next) {
914 	    /* Append at tail of chain */
915 	    px->next = 0;
916 	    px->prev = pf;
917 	    pf->next = px;
918 	    pf = px;
919 	    px = 0;
920 	} else {
921 	    wrterror("freelist is destroyed.\n");
922 	}
923     }
924 
925     /* Return something to OS ? */
926     if (!pf->next &&				/* If we're the last one, */
927       pf->size > malloc_cache &&		/* ..and the cache is full, */
928       pf->end == malloc_brk &&			/* ..and none behind us, */
929       malloc_brk == sbrk(0)) {			/* ..and it's OK to do... */
930 
931 	/*
932 	 * Keep the cache intact.  Notice that the '>' above guarantees that
933 	 * the pf will always have at least one page afterwards.
934 	 */
935 	pf->end = (char *)pf->page + malloc_cache;
936 	pf->size = malloc_cache;
937 
938 	brk(pf->end);
939 	malloc_brk = pf->end;
940 
941 	idx = ptr2idx(pf->end);
942 	last_idx = idx - 1;
943 
944 	for(i=idx;i <= last_idx;)
945 	    page_dir[i++] = MALLOC_NOT_MINE;
946 
947 	/* XXX: We could realloc/shrink the pagedir here I guess. */
948     }
949     if (pt)
950 	ifree(pt);
951 }
952 
953 /*
954  * Free a chunk, and possibly the page it's on, if the page becomes empty.
955  */
956 
957 static __inline__ void
958 free_bytes(void *ptr, int idx, struct pginfo *info)
959 {
960     int i;
961     struct pginfo **mp;
962     void *vp;
963 
964     /* Find the chunk number on the page */
965     i = ((u_long)ptr & malloc_pagemask) >> info->shift;
966 
967     if (((u_long)ptr & (info->size-1))) {
968 	wrtwarning("modified (chunk-) pointer.\n");
969 	return;
970     }
971 
972     if (info->bits[i/MALLOC_BITS] & (1<<(i%MALLOC_BITS))) {
973 	wrtwarning("chunk is already free.\n");
974 	return;
975     }
976 
977     if (malloc_junk)
978 	memset(ptr, SOME_JUNK, info->size);
979 
980     info->bits[i/MALLOC_BITS] |= 1<<(i%MALLOC_BITS);
981     info->free++;
982 
983     mp = page_dir + info->shift;
984 
985     if (info->free == 1) {
986 
987 	/* Page became non-full */
988 
989 	mp = page_dir + info->shift;
990 	/* Insert in address order */
991 	while (*mp && (*mp)->next && (*mp)->next->page < info->page)
992 	    mp = &(*mp)->next;
993 	info->next = *mp;
994 	*mp = info;
995 	return;
996     }
997 
998     if (info->free != info->total)
999 	return;
1000 
1001     /* Find & remove this page in the queue */
1002     while (*mp != info) {
1003 	mp = &((*mp)->next);
1004 #ifdef MALLOC_EXTRA_SANITY
1005 	if (!*mp)
1006 		wrterror("(ES): Not on queue\n");
1007 #endif /* MALLOC_EXTRA_SANITY */
1008     }
1009     *mp = info->next;
1010 
1011     /* Free the page & the info structure if need be */
1012     page_dir[ptr2idx(info->page)] = MALLOC_FIRST;
1013     vp = info->page;		/* Order is important ! */
1014     if(vp != (void*)info)
1015 	ifree(info);
1016     ifree(vp);
1017 }
1018 
1019 static void
1020 ifree(void *ptr)
1021 {
1022     struct pginfo *info;
1023     int idx;
1024 
1025     /* This is legal */
1026     if (!ptr)
1027 	return;
1028 
1029     if (!malloc_started) {
1030 	wrtwarning("malloc() has never been called.\n");
1031 	return;
1032     }
1033 
1034     /* If we're already sinking, don't make matters any worse. */
1035     if (suicide)
1036 	return;
1037 
1038     idx = ptr2idx(ptr);
1039 
1040     if (idx < malloc_pageshift) {
1041 	wrtwarning("junk pointer, too low to make sense.\n");
1042 	return;
1043     }
1044 
1045     if (idx > last_idx) {
1046 	wrtwarning("junk pointer, too high to make sense.\n");
1047 	return;
1048     }
1049 
1050     info = page_dir[idx];
1051 
1052     if (info < MALLOC_MAGIC)
1053         free_pages(ptr, idx, info);
1054     else
1055 	free_bytes(ptr, idx, info);
1056     return;
1057 }
1058 
1059 /*
1060  * These are the public exported interface routines.
1061  */
1062 
1063 
1064 void *
1065 malloc(size_t size)
1066 {
1067     register void *r;
1068 
1069     THREAD_LOCK();
1070     malloc_func = " in malloc():";
1071     if (malloc_active++) {
1072 	wrtwarning("recursive call.\n");
1073         malloc_active--;
	THREAD_UNLOCK();
1074 	return (0);
1075     }
1076     if (!malloc_started)
1077 	malloc_init();
1078     if (malloc_sysv && !size)
1079 	r = 0;
1080     else
1081 	r = imalloc(size);
1082     UTRACE(0, size, r);
1083     malloc_active--;
1084     THREAD_UNLOCK();
1085     if (r == NULL) {
1086 	if (malloc_xmalloc)
1087 	    wrterror("out of memory.\n");
1088 	errno = ENOMEM;
1089     }
1090     return (r);
1091 }
1092 
1093 void
1094 free(void *ptr)
1095 {
1096     THREAD_LOCK();
1097     malloc_func = " in free():";
1098     if (malloc_active++) {
1099 	wrtwarning("recursive call.\n");
1100 	malloc_active--;
	THREAD_UNLOCK();
1101 	return;
1102     } else {
1103 	ifree(ptr);
1104 	UTRACE(ptr, 0, 0);
1105     }
1106     malloc_active--;
1107     THREAD_UNLOCK();
1108     return;
1109 }
1110 
1111 void *
1112 realloc(void *ptr, size_t size)
1113 {
1114     register void *r;
1115 
1116     THREAD_LOCK();
1117     malloc_func = " in realloc():";
1118     if (malloc_active++) {
1119 	wrtwarning("recursive call.\n");
1120         malloc_active--;
	THREAD_UNLOCK();
1121 	return (0);
1122     }
1123     if (ptr && !malloc_started) {
1124 	wrtwarning("malloc() has never been called.\n");
1125 	ptr = 0;
1126     }
1127     if (!malloc_started)
1128 	malloc_init();
1129     if (malloc_sysv && !size) {
1130 	ifree(ptr);
1131 	r = 0;
1132     } else if (!ptr) {
1133 	r = imalloc(size);
1134     } else {
1135         r = irealloc(ptr, size);
1136     }
1137     UTRACE(ptr, size, r);
1138     malloc_active--;
1139     THREAD_UNLOCK();
1140     if (r == NULL) {
1141 	if (malloc_xmalloc)
1142 	    wrterror("out of memory.\n");
1143 	errno = ENOMEM;
1144     }
1145     return (r);
1146 }
1147