xref: /netbsd-src/lib/libc/stdlib/malloc.c (revision 481fca6e59249d8ffcf24fef7cfbe7b131bfb080)
1 /*	$NetBSD: malloc.c,v 1.33 2000/07/06 03:13:22 christos Exp $	*/
2 
3 /*
4  * ----------------------------------------------------------------------------
5  * "THE BEER-WARE LICENSE" (Revision 42):
6  * <phk@FreeBSD.ORG> wrote this file.  As long as you retain this notice you
7  * can do whatever you want with this stuff. If we meet some day, and you think
8  * this stuff is worth it, you can buy me a beer in return.   Poul-Henning Kamp
9  * ----------------------------------------------------------------------------
10  *
11  * From FreeBSD: malloc.c,v 1.43 1998/09/30 06:13:59 jb
12  *
13  */
14 
15 /*
16  * Defining MALLOC_EXTRA_SANITY will enable extra checks which are related
17  * to internal conditions and consistency in malloc.c. This has a
18  * noticeable runtime performance hit, and generally will not do you
19  * any good unless you fiddle with the internals of malloc or want
20  * to catch random pointer corruption as early as possible.
21  */
22 #ifndef MALLOC_EXTRA_SANITY
23 #undef MALLOC_EXTRA_SANITY
24 #endif
25 
26 /*
27  * What to use for Junk.  This is the byte value we use to fill with
28  * when the 'J' option is enabled.
29  */
30 #define SOME_JUNK	0xd0		/* as in "Duh" :-) */
31 
32 /*
33  * The basic parameters you can tweak.
34  *
35  * malloc_minsize	minimum size of an allocation in bytes.
36  *			If this is too small, the allocations become
37  *			too much work to manage.  This is also the smallest
38  *			unit of alignment used for the storage
39  *			returned by malloc/realloc.
40  *
41  */
42 
43 #if defined(__FreeBSD__)
44 #   if defined(__i386__)
45 #       define malloc_minsize		16U
46 #   endif
47 #   if defined(__alpha__)
48 #       define malloc_minsize		16U
49 #   endif
50 #   if !defined(__NETBSD_SYSCALLS)
51 #       define HAS_UTRACE
52 #   endif
53     /*
54      * Make malloc/free/realloc thread-safe in libc for use with
55      * kernel threads.
56      */
57 #   include "libc_private.h"
58 #   include "spinlock.h"
59     static spinlock_t thread_lock	= _SPINLOCK_INITIALIZER;
60 #   define THREAD_LOCK()		if (__isthreaded) _SPINLOCK(&thread_lock);
61 #   define THREAD_UNLOCK()		if (__isthreaded) _SPINUNLOCK(&thread_lock);
62 #endif /* __FreeBSD__ */
63 
64 #if defined(__NetBSD__)
65 #   define malloc_minsize               16U
66 #endif /* __NetBSD__ */
67 
68 #if defined(__sparc__) && defined(sun)
69 #   define malloc_minsize		16U
70 #   define MAP_ANON			(0)
71     static int fdzero;
72 #   define MMAP_FD	fdzero
73 #   define INIT_MMAP() \
74 	{ if ((fdzero=open("/dev/zero", O_RDWR, 0000)) == -1) \
75 	    wrterror("open of /dev/zero"); }
76 #endif /* __sparc__ */
77 
78 /* Insert your combination here... */
79 #if defined(__FOOCPU__) && defined(__BAROS__)
80 #   define malloc_minsize		16U
81 #endif /* __FOOCPU__ && __BAROS__ */
82 
83 
84 /*
85  * No user serviceable parts behind this point.
86  */
87 #include "namespace.h"
88 #include <sys/types.h>
89 #include <sys/mman.h>
90 #include <errno.h>
91 #include <fcntl.h>
92 #include <stddef.h>
93 #include <stdio.h>
94 #include <stdlib.h>
95 #include <string.h>
96 #include <unistd.h>
97 
98 /*
99  * This structure describes a page worth of chunks.
100  */
101 
102 struct pginfo {
103     struct pginfo	*next;	/* next on the free list */
104     void		*page;	/* Pointer to the page */
105     u_short		size;	/* size of this page's chunks */
106     u_short		shift;	/* How far to shift for chunks of this size */
107     u_short		free;	/* How many free chunks */
108     u_short		total;	/* How many chunks */
109     u_int		bits[1]; /* Which chunks are free */
110 };
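/*
 * Worked example, assuming a 4096-byte page and a 32-bit u_int (both are
 * platform-dependent): a page carved into 16-byte chunks holds
 * 4096/16 = 256 chunks, so the bitmap needs 256/32 = 8 u_ints.  The
 * structure is declared with bits[1] and over-allocated to the length
 * computed in malloc_make_chunks() below.
 */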
111 
112 /*
113  * This structure describes a number of free pages.
114  */
115 
116 struct pgfree {
117     struct pgfree	*next;	/* next run of free pages */
118     struct pgfree	*prev;	/* prev run of free pages */
119     void		*page;	/* pointer to free pages */
120     void		*end;	/* pointer to end of free pages */
121     size_t		size;	/* number of bytes free */
122 };
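/*
 * The list head is the static free_list below.  free_pages() keeps the
 * runs sorted by address and merges a newly freed run with adjacent
 * ones, so "size" always covers the whole contiguous stretch from
 * "page" to "end".
 */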
123 
124 /*
125  * How many bits per u_int in the bitmap.
126  * Change only if not 8 bits/byte
127  */
128 #define	MALLOC_BITS	(8*sizeof(u_int))
129 
130 /*
131  * Magic values to put in the page_directory
132  */
133 #define MALLOC_NOT_MINE	((struct pginfo*) 0)
134 #define MALLOC_FREE 	((struct pginfo*) 1)
135 #define MALLOC_FIRST	((struct pginfo*) 2)
136 #define MALLOC_FOLLOW	((struct pginfo*) 3)
137 #define MALLOC_MAGIC	((struct pginfo*) 4)
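/*
 * A page_dir[] entry is either one of the sentinels above or, from
 * MALLOC_MAGIC upwards, a real pointer to the page's struct pginfo.
 * For example, a three-page allocation is recorded as MALLOC_FIRST
 * followed by two MALLOC_FOLLOW entries, while a page of chunks is
 * recorded as the address of its pginfo.
 */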
138 
139 /*
140  * Page size related parameters, computed at run-time.
141  */
142 static size_t malloc_pagesize;
143 static size_t malloc_pageshift;
144 static size_t malloc_pagemask;
145 
146 #ifndef malloc_minsize
147 #define malloc_minsize			16U
148 #endif
149 
150 #ifndef malloc_maxsize
151 #define malloc_maxsize			((malloc_pagesize)>>1)
152 #endif
153 
154 #define pageround(foo) (((foo) + (malloc_pagemask))&(~(malloc_pagemask)))
155 #define ptr2idx(foo) (((size_t)(u_long)(foo) >> malloc_pageshift)-malloc_origo)
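/*
 * For illustration, assuming a 4096-byte page (pagemask 0xfff):
 * pageround(5000) = (5000 + 0xfff) & ~0xfff = 8192.  ptr2idx() maps an
 * address to its page_dir[] slot by taking the page number
 * (address >> malloc_pageshift) and subtracting malloc_origo.
 */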
156 
157 #ifndef THREAD_LOCK
158 #define THREAD_LOCK()
159 #endif
160 
161 #ifndef THREAD_UNLOCK
162 #define THREAD_UNLOCK()
163 #endif
164 
165 #ifndef MMAP_FD
166 #define MMAP_FD (-1)
167 #endif
168 
169 #ifndef INIT_MMAP
170 #define INIT_MMAP()
171 #endif
172 
173 #ifndef MADV_FREE
174 #define MADV_FREE MADV_DONTNEED
175 #endif
176 
177 /* Set when initialization has been done */
178 static unsigned malloc_started;
179 
180 /* Recursion flag for the public interface. */
181 static int malloc_active;
182 
183 /* Number of free pages we cache */
184 static unsigned malloc_cache = 16;
185 
186 /* The offset from page number to index into the page directory */
187 static size_t malloc_origo;
188 
189 /* The last index in the page directory we care about */
190 static size_t last_idx;
191 
192 /* Pointer to page directory. Allocated "as if with" malloc */
193 static struct	pginfo **page_dir;
194 
195 /* How many slots in the page directory */
196 static unsigned	malloc_ninfo;
197 
198 /* Free pages line up here */
199 static struct pgfree free_list;
200 
201 /* abort() on warnings; the user doesn't handle problems.  */
202 static int malloc_abort;
203 
204 /* Are we trying to die ?  */
205 static int suicide;
206 
207 /* always realloc ?  */
208 static int malloc_realloc;
209 
210 /* pass the kernel a hint on free pages ?  */
211 static int malloc_hint = 0;
212 
213 /* xmalloc behaviour ?  */
214 static int malloc_xmalloc;
215 
216 /* sysv behaviour for malloc(0) ?  */
217 static int malloc_sysv;
218 
219 /* zero fill ?  */
220 static int malloc_zero;
221 
222 /* junk fill ?  */
223 static int malloc_junk;
224 
225 #ifdef HAS_UTRACE
226 
227 /* utrace ?  */
228 static int malloc_utrace;
229 
230 struct ut { void *p; size_t s; void *r; };
231 
232 void utrace __P((struct ut *, int));
233 
234 #define UTRACE(a, b, c) \
235 	if (malloc_utrace) \
236 		{struct ut u; u.p=a; u.s = b; u.r=c; utrace(&u, sizeof u);}
237 #else /* !HAS_UTRACE */
238 #define UTRACE(a,b,c)
239 #endif /* HAS_UTRACE */
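/*
 * The utrace records emitted below follow a simple convention:
 * malloc() logs (0, size, result), free() logs (ptr, 0, 0),
 * realloc() logs (ptr, size, result) and malloc_init() logs (0, 0, 0),
 * so a trace consumer can tell the operations apart.
 */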
240 
241 /* The last break value we set with brk(2). */
242 static void *malloc_brk;
243 
244 /* one location cache for free-list holders */
245 static struct pgfree *px;
246 
247 /* Option string a program may set at compile time; parsed in malloc_init() */
248 char *malloc_options;
249 
250 /* Name of the current public function */
251 static char *malloc_func;
252 
253 /* Macro for mmap */
254 #define MMAP(size) \
255 	mmap(0, (size), PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, \
256 	    MMAP_FD, (off_t)0)
257 
258 /*
259  * Necessary function declarations
260  */
261 static int extend_pgdir(size_t idx);
262 static void *imalloc(size_t size);
263 static void ifree(void *ptr);
264 static void *irealloc(void *ptr, size_t size);
265 
266 extern char *__progname;
267 
268 static void
269 wrterror(char *p)
270 {
271     char *q = " error: ";
272     write(STDERR_FILENO, __progname, strlen(__progname));
273     write(STDERR_FILENO, malloc_func, strlen(malloc_func));
274     write(STDERR_FILENO, q, strlen(q));
275     write(STDERR_FILENO, p, strlen(p));
276     suicide = 1;
277     abort();
278 }
279 
280 static void
281 wrtwarning(char *p)
282 {
283     char *q = " warning: ";
284     if (malloc_abort)
285 	wrterror(p);
286     write(STDERR_FILENO, __progname, strlen(__progname));
287     write(STDERR_FILENO, malloc_func, strlen(malloc_func));
288     write(STDERR_FILENO, q, strlen(q));
289     write(STDERR_FILENO, p, strlen(p));
290 }
291 
292 
293 /*
294  * Allocate a number of pages from the OS
295  */
296 static void *
297 map_pages(size_t pages)
298 {
299     caddr_t result, tail;
300 
301     result = (caddr_t)pageround((size_t)(u_long)sbrk(0));
302     tail = result + (pages << malloc_pageshift);
303 
304     if (brk(tail)) {
305 #ifdef MALLOC_EXTRA_SANITY
306 	wrterror("(ES): map_pages fails\n");
307 #endif /* MALLOC_EXTRA_SANITY */
308 	return 0;
309     }
310     last_idx = ptr2idx(tail) - 1;
311     malloc_brk = tail;
312 
313     if ((last_idx+1) >= malloc_ninfo && !extend_pgdir(last_idx))
314 	return 0;
315 
316     return result;
317 }
318 
319 /*
320  * Extend page directory
321  */
322 static int
323 extend_pgdir(size_t idx)
324 {
325     struct  pginfo **new, **old;
326     size_t newlen, oldlen;
327 
328     /* Make it this many pages */
329     newlen = pageround(idx * sizeof *page_dir) + malloc_pagesize;
330 
331     /* remember the old mapping size */
332     oldlen = malloc_ninfo * sizeof *page_dir;
333 
334     /*
335      * NOTE: we allocate new pages and copy the directory rather than tempt
336      * fate by trying to "grow" the region.. There is nothing to prevent
337  * us from accidentally re-mapping space that's been allocated by our caller
338      * via dlopen() or other mmap().
339      *
340      * The copy problem is not too bad, as there is 4K of page index per
341      * 4MB of malloc arena.
342      *
343      * We can totally avoid the copy if we open a file descriptor to associate
344      * the anon mappings with.  Then, when we remap the pages at the new
345      * address, the old pages will be "magically" remapped..  But this means
346      * keeping open a "secret" file descriptor.....
347      */
348 
349     /* Get new pages */
350     new = (struct pginfo**) MMAP(newlen);
351     if (new == (struct pginfo **)-1)
352 	return 0;
353 
354     /* Copy the old stuff */
355     memcpy(new, page_dir, oldlen);
356 
357     /* register the new size */
358     malloc_ninfo = newlen / sizeof *page_dir;
359 
360     /* swap the pointers */
361     old = page_dir;
362     page_dir = new;
363 
364     /* Now free the old stuff */
365     munmap(old, oldlen);
366     return 1;
367 }
368 
369 /*
370  * Initialize the world
371  */
372 static void
373 malloc_init (void)
374 {
375     char *p, b[64];
376     int i, j;
377     int errnosave;
378 
379     /*
380      * Compute page-size related variables.
381      */
382     malloc_pagesize = (size_t)sysconf(_SC_PAGESIZE);
383     malloc_pagemask = malloc_pagesize - 1;
384     for (malloc_pageshift = 0;
385 	 (1UL << malloc_pageshift) != malloc_pagesize;
386 	 malloc_pageshift++)
387 	/* nothing */ ;
388 
389     INIT_MMAP();
390 
391 #ifdef MALLOC_EXTRA_SANITY
392     malloc_junk = 1;
393 #endif /* MALLOC_EXTRA_SANITY */
394 
395     for (i = 0; i < 3; i++) {
396 	if (i == 0) {
397 	    errnosave = errno;
398 	    j = readlink("/etc/malloc.conf", b, sizeof b - 1);
399 	    errno = errnosave;
400 	    if (j <= 0)
401 		continue;
402 	    b[j] = '\0';
403 	    p = b;
404 	} else if (i == 1) {
405 	    p = getenv("MALLOC_OPTIONS");
406 	} else {
407 	    p = malloc_options;
408 	}
409 	for (; p && *p; p++) {
410 	    switch (*p) {
411 		case '>': malloc_cache   <<= 1; break;
412 		case '<': malloc_cache   >>= 1; break;
413 		case 'a': malloc_abort   = 0; break;
414 		case 'A': malloc_abort   = 1; break;
415 		case 'h': malloc_hint    = 0; break;
416 		case 'H': malloc_hint    = 1; break;
417 		case 'r': malloc_realloc = 0; break;
418 		case 'R': malloc_realloc = 1; break;
419 		case 'j': malloc_junk    = 0; break;
420 		case 'J': malloc_junk    = 1; break;
421 #ifdef HAS_UTRACE
422 		case 'u': malloc_utrace  = 0; break;
423 		case 'U': malloc_utrace  = 1; break;
424 #endif
425 		case 'v': malloc_sysv    = 0; break;
426 		case 'V': malloc_sysv    = 1; break;
427 		case 'x': malloc_xmalloc = 0; break;
428 		case 'X': malloc_xmalloc = 1; break;
429 		case 'z': malloc_zero    = 0; break;
430 		case 'Z': malloc_zero    = 1; break;
431 		default:
432 		    j = malloc_abort;
433 		    malloc_abort = 0;
434 		    wrtwarning("unknown char in MALLOC_OPTIONS\n");
435 		    malloc_abort = j;
436 		    break;
437 	    }
438 	}
439     }
440 
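    /*
     * Illustrative ways of supplying these flags, using a hypothetical
     * option string "AJ": make /etc/malloc.conf a symlink whose target
     * is the string ("ln -s AJ /etc/malloc.conf"), set the environment
     * ("MALLOC_OPTIONS=AJ ./prog"), or have the program assign the
     * malloc_options global itself before the first allocation.
     */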
441     UTRACE(0, 0, 0);
442 
443     /*
444      * We want junk in the entire allocation, and zero only in the part
445      * the user asked for.
446      */
447     if (malloc_zero)
448 	malloc_junk=1;
449 
450     /*
451      * If we run with junk (or implicitly from above: zero), we want to
452      * force realloc() to get new storage, so we can DTRT with it.
453      */
454     if (malloc_junk)
455 	malloc_realloc=1;
456 
457     /* Allocate one page for the page directory */
458     page_dir = (struct pginfo **) MMAP(malloc_pagesize);
459 
460     if (page_dir == (struct pginfo **) -1)
461 	wrterror("mmap(2) failed, check limits.\n");
462 
463     /*
464      * We need a maximum of malloc_pageshift buckets; steal these from the
465      * front of the page directory.
466      */
467     malloc_origo = pageround((size_t)(u_long)sbrk(0)) >> malloc_pageshift;
468     malloc_origo -= malloc_pageshift;
469 
470     malloc_ninfo = malloc_pagesize / sizeof *page_dir;
471 
472     /* Recalculate the cache size in bytes, and make sure it's nonzero */
473 
474     if (!malloc_cache)
475 	malloc_cache++;
476 
477     malloc_cache <<= malloc_pageshift;
478 
479     /*
480      * This is a nice hack from Kaleb Keithley (kaleb@x.org):
481      * keeping this structure at a low address lets us sbrk(2) back further later.
482      */
483     px = (struct pgfree *) imalloc (sizeof *px);
484 
485     /* Been here, done that */
486     malloc_started++;
487 }
488 
489 /*
490  * Allocate a number of complete pages
491  */
492 static void *
493 malloc_pages(size_t size)
494 {
495     void *p, *delay_free = 0;
496     int i;
497     struct pgfree *pf;
498     size_t idx;
499 
500     size = pageround(size);
501 
502     p = 0;
503 
504     /* Look for free pages before asking for more */
505     for(pf = free_list.next; pf; pf = pf->next) {
506 
507 #ifdef MALLOC_EXTRA_SANITY
508 	if (pf->size & malloc_pagemask)
509 	    wrterror("(ES): junk length entry on free_list\n");
510 	if (!pf->size)
511 	    wrterror("(ES): zero length entry on free_list\n");
512 	if (pf->page == pf->end)
513 	    wrterror("(ES): zero entry on free_list\n");
514 	if (pf->page > pf->end)
515 	    wrterror("(ES): sick entry on free_list\n");
516 	if ((void*)pf->page >= (void*)sbrk(0))
517 	    wrterror("(ES): entry on free_list past brk\n");
518 	if (page_dir[ptr2idx(pf->page)] != MALLOC_FREE)
519 	    wrterror("(ES): non-free first page on free-list\n");
520 	if (page_dir[ptr2idx(pf->end)-1] != MALLOC_FREE)
521 	    wrterror("(ES): non-free last page on free-list\n");
522 #endif /* MALLOC_EXTRA_SANITY */
523 
524 	if (pf->size < size)
525 	    continue;
526 
527 	if (pf->size == size) {
528 	    p = pf->page;
529 	    if (pf->next)
530 		    pf->next->prev = pf->prev;
531 	    pf->prev->next = pf->next;
532 	    delay_free = pf;
533 	    break;
534 	}
535 
536 	p = pf->page;
537 	pf->page = (char *)pf->page + size;
538 	pf->size -= size;
539 	break;
540     }
541 
542 #ifdef MALLOC_EXTRA_SANITY
543     if (p && page_dir[ptr2idx(p)] != MALLOC_FREE)
544 	wrterror("(ES): allocated non-free page on free-list\n");
545 #endif /* MALLOC_EXTRA_SANITY */
546 
547     size >>= malloc_pageshift;
548 
549     /* Map new pages */
550     if (!p)
551 	p = map_pages(size);
552 
553     if (p) {
554 
555 	idx = ptr2idx(p);
556 	page_dir[idx] = MALLOC_FIRST;
557 	for (i=1;i<size;i++)
558 	    page_dir[idx+i] = MALLOC_FOLLOW;
559 
560 	if (malloc_junk)
561 	    memset(p, SOME_JUNK, size << malloc_pageshift);
562     }
563 
564     if (delay_free) {
565 	if (!px)
566 	    px = delay_free;
567 	else
568 	    ifree(delay_free);
569     }
570 
571     return p;
572 }
573 
574 /*
575  * Allocate a page of fragments
576  */
577 
578 static __inline__ int
579 malloc_make_chunks(int bits)
580 {
581     struct  pginfo *bp;
582     void *pp;
583     int i, k, l;
584 
585     /* Allocate a new bucket */
586     pp = malloc_pages(malloc_pagesize);
587     if (!pp)
588 	return 0;
589 
590     /* Find length of admin structure */
591     l = (int)offsetof(struct pginfo, bits[0]);
592     l += sizeof bp->bits[0] *
593 	(((malloc_pagesize >> bits)+MALLOC_BITS-1) / MALLOC_BITS);
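    /*
     * For example, with a (hypothetical) 4096-byte page, 32-bit u_int
     * and bits = 5: the page holds 128 32-byte chunks, the bitmap takes
     * 4 u_ints, and l is a few dozen bytes.  The 32-byte chunk size is
     * no more than twice that, so the test below keeps the pginfo
     * inside the page itself and marks the chunks it occupies as used.
     */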
594 
595     /* Don't waste more than two chunks on this */
596     if ((1<<(bits)) <= l+l) {
597 	bp = (struct  pginfo *)pp;
598     } else {
599 	bp = (struct  pginfo *)imalloc((size_t)l);
600 	if (!bp) {
601 	    ifree(pp);
602 	    return 0;
603 	}
604     }
605 
606     bp->size = (1<<bits);
607     bp->shift = bits;
608     bp->total = bp->free = malloc_pagesize >> bits;
609     bp->page = pp;
610 
611     /* set all valid bits in the bitmap */
612     k = bp->total;
613     i = 0;
614 
615     /* Do a bunch at a time */
616     for(;k-i >= MALLOC_BITS; i += MALLOC_BITS)
617 	bp->bits[i / MALLOC_BITS] = ~0U;
618 
619     for(; i < k; i++)
620         bp->bits[i/MALLOC_BITS] |= 1<<(i%MALLOC_BITS);
621 
622     if (bp == bp->page) {
623 	/* Mark the ones we stole for ourselves */
624 	for(i=0;l > 0;i++) {
625 	    bp->bits[i/MALLOC_BITS] &= ~(1<<(i%MALLOC_BITS));
626 	    bp->free--;
627 	    bp->total--;
628 	    l -= (1 << bits);
629 	}
630     }
631 
632     /* MALLOC_LOCK */
633 
634     page_dir[ptr2idx(pp)] = bp;
635 
636     bp->next = page_dir[bits];
637     page_dir[bits] = bp;
638 
639     /* MALLOC_UNLOCK */
640 
641     return 1;
642 }
643 
644 /*
645  * Allocate a fragment
646  */
647 static void *
648 malloc_bytes(size_t size)
649 {
650     size_t i;
651     int j;
652     u_int u;
653     struct  pginfo *bp;
654     int k;
655     u_int *lp;
656 
657     /* Don't bother with anything less than this */
658     if (size < malloc_minsize)
659 	size = malloc_minsize;
660 
661     /* Find the right bucket */
662     j = 1;
663     i = size-1;
664     while (i >>= 1)
665 	j++;
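    /*
     * Example: a request for 100 bytes gives i = 99; the loop
     * increments j six times before i reaches zero, so j ends up 7 and
     * the request is served from the 128-byte (1<<7) bucket.  Requests
     * below malloc_minsize were already rounded up above.
     */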
666 
667     /* If it's empty, make a page more of that size chunks */
668     if (!page_dir[j] && !malloc_make_chunks(j))
669 	return 0;
670 
671     bp = page_dir[j];
672 
673     /* Find first word of bitmap which isn't empty */
674     for (lp = bp->bits; !*lp; lp++)
675 	;
676 
677     /* Find that bit, and tweak it */
678     u = 1;
679     k = 0;
680     while (!(*lp & u)) {
681 	u += u;
682 	k++;
683     }
684     *lp ^= u;
685 
686     /* If there are no more free, remove from free-list */
687     if (!--bp->free) {
688 	page_dir[j] = bp->next;
689 	bp->next = 0;
690     }
691 
692     /* Adjust to the real offset of that chunk */
693     k += (lp-bp->bits)*MALLOC_BITS;
694     k <<= bp->shift;
695 
696     if (malloc_junk)
697 	memset((u_char*)bp->page + k, SOME_JUNK, (size_t)bp->size);
698 
699     return (u_char *)bp->page + k;
700 }
701 
702 /*
703  * Allocate a piece of memory
704  */
705 static void *
706 imalloc(size_t size)
707 {
708     void *result;
709 
710     if (suicide)
711 	abort();
712 
713     if ((size + malloc_pagesize) < size)	/* Check for overflow */
714 	result = 0;
715     else if (size <= malloc_maxsize)
716 	result =  malloc_bytes(size);
717     else
718 	result =  malloc_pages(size);
719 
720     if (malloc_abort && !result)
721 	wrterror("allocation failed.\n");
722 
723     if (malloc_zero && result)
724 	memset(result, 0, size);
725 
726     return result;
727 }
728 
729 /*
730  * Change the size of an allocation.
731  */
732 static void *
733 irealloc(void *ptr, size_t size)
734 {
735     void *p;
736     size_t osize, idx;
737     struct pginfo **mp;
738     size_t i;
739 
740     if (suicide)
741 	abort();
742 
743     idx = ptr2idx(ptr);
744 
745     if (idx < malloc_pageshift) {
746 	wrtwarning("junk pointer, too low to make sense.\n");
747 	return 0;
748     }
749 
750     if (idx > last_idx) {
751 	wrtwarning("junk pointer, too high to make sense.\n");
752 	return 0;
753     }
754 
755     mp = &page_dir[idx];
756 
757     if (*mp == MALLOC_FIRST) {			/* Page allocation */
758 
759 	/* Check the pointer */
760 	if ((size_t)(u_long)ptr & malloc_pagemask) {
761 	    wrtwarning("modified (page-) pointer.\n");
762 	    return 0;
763 	}
764 
765 	/* Find the size in bytes */
766 	for (osize = malloc_pagesize; *++mp == MALLOC_FOLLOW;)
767 	    osize += malloc_pagesize;
768 
769         if (!malloc_realloc && 			/* unless we have to, */
770 	  size <= osize && 			/* .. or are too small, */
771 	  size > (osize - malloc_pagesize)) {	/* .. or can free a page, */
772 	    return ptr;				/* don't do anything. */
773 	}
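	/*
	 * For example, with a (hypothetical) 4096-byte page and the
	 * default options, shrinking an 8192-byte allocation to 5000
	 * bytes stays in place, while shrinking it to 3000 bytes frees
	 * a whole page and therefore falls through to the copy below.
	 */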
774 
775     } else if (*mp >= MALLOC_MAGIC) {		/* Chunk allocation */
776 
777 	/* Check the pointer for sane values */
778 	if (((size_t)(u_long)ptr & ((*mp)->size-1))) {
779 	    wrtwarning("modified (chunk-) pointer.\n");
780 	    return 0;
781 	}
782 
783 	/* Find the chunk index in the page */
784 	i = ((size_t)(u_long)ptr & malloc_pagemask) >> (*mp)->shift;
785 
786 	/* Verify that it isn't a free chunk already */
787         if ((*mp)->bits[i/MALLOC_BITS] & (1<<(i%MALLOC_BITS))) {
788 	    wrtwarning("chunk is already free.\n");
789 	    return 0;
790 	}
791 
792 	osize = (*mp)->size;
793 
794 	if (!malloc_realloc &&		/* Unless we have to, */
795 	  size < osize && 		/* ..or are too small, */
796 	  (size > osize/2 ||	 	/* ..or could use a smaller size, */
797 	  osize == malloc_minsize)) {	/* ..(if there is one) */
798 	    return ptr;			/* ..Don't do anything */
799 	}
800 
801     } else {
802 	wrtwarning("pointer to wrong page.\n");
803 	return 0;
804     }
805 
806     p = imalloc(size);
807 
808     if (p) {
809 	/* copy the lesser of the two sizes, and free the old one */
810 	if (!size || !osize)
811 	    ;
812 	else if (osize < size)
813 	    memcpy(p, ptr, osize);
814 	else
815 	    memcpy(p, ptr, size);
816 	ifree(ptr);
817     }
818     return p;
819 }
820 
821 /*
822  * Free a sequence of pages
823  */
824 
825 static __inline__ void
826 free_pages(void *ptr, size_t idx, struct pginfo *info)
827 {
828     size_t i;
829     struct pgfree *pf, *pt=0;
830     size_t l;
831     void *tail;
832 
833     if (info == MALLOC_FREE) {
834 	wrtwarning("page is already free.\n");
835 	return;
836     }
837 
838     if (info != MALLOC_FIRST) {
839 	wrtwarning("pointer to wrong page.\n");
840 	return;
841     }
842 
843     if ((size_t)(u_long)ptr & malloc_pagemask) {
844 	wrtwarning("modified (page-) pointer.\n");
845 	return;
846     }
847 
848     /* Count how many pages and mark them free at the same time */
849     page_dir[idx] = MALLOC_FREE;
850     for (i = 1; page_dir[idx+i] == MALLOC_FOLLOW; i++)
851 	page_dir[idx + i] = MALLOC_FREE;
852 
853     l = i << malloc_pageshift;
854 
855     if (malloc_junk)
856 	memset(ptr, SOME_JUNK, l);
857 
858     if (malloc_hint)
859 	madvise(ptr, l, MADV_FREE);
860 
861     tail = (char *)ptr+l;
862 
863     /* add to free-list */
864     if (!px)
865 	px = imalloc(sizeof *pt);	/* Must not fail; the result is not checked. */
866     px->page = ptr;
867     px->end =  tail;
868     px->size = l;
869     if (!free_list.next) {
870 
871 	/* Nothing on free list, put this at head */
872 	px->next = free_list.next;
873 	px->prev = &free_list;
874 	free_list.next = px;
875 	pf = px;
876 	px = 0;
877 
878     } else {
879 
880 	/* Find the right spot, leave pf pointing to the modified entry. */
881 	tail = (char *)ptr+l;
882 
883 	for(pf = free_list.next; pf->end < ptr && pf->next; pf = pf->next)
884 	    ; /* Race ahead here */
885 
886 	if (pf->page > tail) {
887 	    /* Insert before entry */
888 	    px->next = pf;
889 	    px->prev = pf->prev;
890 	    pf->prev = px;
891 	    px->prev->next = px;
892 	    pf = px;
893 	    px = 0;
894 	} else if (pf->end == ptr ) {
895 	    /* Append to the previous entry */
896 	    pf->end = (char *)pf->end + l;
897 	    pf->size += l;
898 	    if (pf->next && pf->end == pf->next->page ) {
899 		/* And collapse the next too. */
900 		pt = pf->next;
901 		pf->end = pt->end;
902 		pf->size += pt->size;
903 		pf->next = pt->next;
904 		if (pf->next)
905 		    pf->next->prev = pf;
906 	    }
907 	} else if (pf->page == tail) {
908 	    /* Prepend to entry */
909 	    pf->size += l;
910 	    pf->page = ptr;
911 	} else if (!pf->next) {
912 	    /* Append at tail of chain */
913 	    px->next = 0;
914 	    px->prev = pf;
915 	    pf->next = px;
916 	    pf = px;
917 	    px = 0;
918 	} else {
919 	    wrterror("freelist is destroyed.\n");
920 	}
921     }
922 
923     /* Return something to OS ? */
924     if (!pf->next &&				/* If we're the last one, */
925       pf->size > malloc_cache &&		/* ..and the cache is full, */
926       pf->end == malloc_brk &&			/* ..and none behind us, */
927       malloc_brk == sbrk(0)) {			/* ..and it's OK to do... */
928 
929 	/*
930 	 * Keep the cache intact.  Notice that the '>' above guarantees that
931 	 * the pf will always have at least one page afterwards.
932 	 */
933 	pf->end = (char *)pf->page + malloc_cache;
934 	pf->size = malloc_cache;
935 
936 	brk(pf->end);
937 	malloc_brk = pf->end;
938 
939 	idx = ptr2idx(pf->end);
940 
941 	for(i=idx;i <= last_idx;)
942 	    page_dir[i++] = MALLOC_NOT_MINE;
943 	last_idx = idx - 1;
944 
945 	/* XXX: We could realloc/shrink the pagedir here I guess. */
946     }
947     if (pt)
948 	ifree(pt);
949 }
950 
951 /*
952  * Free a chunk, and possibly the page it's on, if the page becomes empty.
953  */
954 
955 static __inline__ void
956 free_bytes(void *ptr, size_t idx, struct pginfo *info)
957 {
958     size_t i;
959     struct pginfo **mp;
960     void *vp;
961 
962     /* Find the chunk number on the page */
963     i = ((size_t)(u_long)ptr & malloc_pagemask) >> info->shift;
964 
965     if (((size_t)(u_long)ptr & (info->size-1))) {
966 	wrtwarning("modified (chunk-) pointer.\n");
967 	return;
968     }
969 
970     if (info->bits[i/MALLOC_BITS] & (1<<(i%MALLOC_BITS))) {
971 	wrtwarning("chunk is already free.\n");
972 	return;
973     }
974 
975     if (malloc_junk)
976 	memset(ptr, SOME_JUNK, (size_t)info->size);
977 
978     info->bits[i/MALLOC_BITS] |= 1<<(i%MALLOC_BITS);
979     info->free++;
980 
981     mp = page_dir + info->shift;
982 
983     if (info->free == 1) {
984 
985 	/* Page became non-full */
986 
987 	mp = page_dir + info->shift;
988 	/* Insert in address order */
989 	while (*mp && (*mp)->next && (*mp)->next->page < info->page)
990 	    mp = &(*mp)->next;
991 	info->next = *mp;
992 	*mp = info;
993 	return;
994     }
995 
996     if (info->free != info->total)
997 	return;
998 
999     /* Find & remove this page in the queue */
1000     while (*mp != info) {
1001 	mp = &((*mp)->next);
1002 #ifdef MALLOC_EXTRA_SANITY
1003 	if (!*mp)
1004 		wrterror("(ES): Not on queue\n");
1005 #endif /* MALLOC_EXTRA_SANITY */
1006     }
1007     *mp = info->next;
1008 
1009     /* Free the page & the info structure if need be */
1010     page_dir[idx] = MALLOC_FIRST;
1011     vp = info->page;		/* Order is important ! */
1012     if(vp != (void*)info)
1013 	ifree(info);
1014     ifree(vp);
1015 }
1016 
1017 static void
1018 ifree(void *ptr)
1019 {
1020     struct pginfo *info;
1021     size_t idx;
1022 
1023     /* This is legal */
1024     if (!ptr)
1025 	return;
1026 
1027     if (!malloc_started) {
1028 	wrtwarning("malloc() has never been called.\n");
1029 	return;
1030     }
1031 
1032     /* If we're already sinking, don't make matters any worse. */
1033     if (suicide)
1034 	return;
1035 
1036     idx = ptr2idx(ptr);
1037 
1038     if (idx < malloc_pageshift) {
1039 	wrtwarning("junk pointer, too low to make sense.\n");
1040 	return;
1041     }
1042 
1043     if (idx > last_idx) {
1044 	wrtwarning("junk pointer, too high to make sense.\n");
1045 	return;
1046     }
1047 
1048     info = page_dir[idx];
1049 
1050     if (info < MALLOC_MAGIC)
1051         free_pages(ptr, idx, info);
1052     else
1053 	free_bytes(ptr, idx, info);
1054     return;
1055 }
1056 
1057 /*
1058  * These are the public exported interface routines.
1059  */
1060 
1061 
1062 void *
1063 malloc(size_t size)
1064 {
1065     register void *r;
1066 
1067     THREAD_LOCK();
1068     malloc_func = " in malloc():";
1069     if (malloc_active++) {
1070 	wrtwarning("recursive call.\n");
1071         malloc_active--;
1072 	return (0);
1073     }
1074     if (!malloc_started)
1075 	malloc_init();
1076     if (malloc_sysv && !size)
1077 	r = 0;
1078     else
1079 	r = imalloc(size);
1080     UTRACE(0, size, r);
1081     malloc_active--;
1082     THREAD_UNLOCK();
1083     if (r == NULL && (size != 0 || !malloc_sysv)) {
1084 	if (malloc_xmalloc)
1085 	    wrterror("out of memory.\n");
1086 	errno = ENOMEM;
1087     }
1088     return (r);
1089 }
1090 
1091 void
1092 free(void *ptr)
1093 {
1094     THREAD_LOCK();
1095     malloc_func = " in free():";
1096     if (malloc_active++) {
1097 	wrtwarning("recursive call.\n");
1098 	malloc_active--;
1099 	return;
1100     } else {
1101 	ifree(ptr);
1102 	UTRACE(ptr, 0, 0);
1103     }
1104     malloc_active--;
1105     THREAD_UNLOCK();
1106     return;
1107 }
1108 
1109 void *
1110 realloc(void *ptr, size_t size)
1111 {
1112     register void *r;
1113 
1114     THREAD_LOCK();
1115     malloc_func = " in realloc():";
1116     if (malloc_active++) {
1117 	wrtwarning("recursive call.\n");
1118         malloc_active--;
1119 	return (0);
1120     }
1121     if (ptr && !malloc_started) {
1122 	wrtwarning("malloc() has never been called.\n");
1123 	ptr = 0;
1124     }
1125     if (!malloc_started)
1126 	malloc_init();
1127     if (malloc_sysv && !size) {
1128 	ifree(ptr);
1129 	r = 0;
1130     } else if (!ptr) {
1131 	r = imalloc(size);
1132     } else {
1133         r = irealloc(ptr, size);
1134     }
1135     UTRACE(ptr, size, r);
1136     malloc_active--;
1137     THREAD_UNLOCK();
1138     if (r == NULL && (size != 0 || !malloc_sysv)) {
1139 	if (malloc_xmalloc)
1140 	    wrterror("out of memory.\n");
1141 	errno = ENOMEM;
1142     }
1143     return (r);
1144 }
1145