xref: /netbsd-src/lib/libc/stdlib/malloc.c (revision 5aefcfdc06931dd97e76246d2fe0302f7b3fe094)
1 /*	$NetBSD: malloc.c,v 1.34 2000/12/20 20:56:01 christos Exp $	*/
2 
3 /*
4  * ----------------------------------------------------------------------------
5  * "THE BEER-WARE LICENSE" (Revision 42):
6  * <phk@FreeBSD.ORG> wrote this file.  As long as you retain this notice you
7  * can do whatever you want with this stuff. If we meet some day, and you think
8  * this stuff is worth it, you can buy me a beer in return.   Poul-Henning Kamp
9  * ----------------------------------------------------------------------------
10  *
11  * From FreeBSD: malloc.c,v 1.43 1998/09/30 06:13:59 jb
12  *
13  */
14 
15 /*
16  * Defining MALLOC_EXTRA_SANITY will enable extra checks which are related
17  * to internal conditions and consistency in malloc.c. This has a
18  * noticeable runtime performance hit, and generally will not do you
19  * any good unless you fiddle with the internals of malloc or want
20  * to catch random pointer corruption as early as possible.
21  */
22 #ifndef MALLOC_EXTRA_SANITY
23 #undef MALLOC_EXTRA_SANITY
24 #endif
25 
26 /*
27  * What to use for Junk.  This is the byte value we use to fill with
28  * when the 'J' option is enabled.
29  */
30 #define SOME_JUNK	0xd0		/* as in "Duh" :-) */
31 
32 /*
33  * The basic parameters you can tweak.
34  *
35  * malloc_minsize	minimum size of an allocation in bytes.
36  *			If this is too small it's too much work
37  *			to manage them.  This is also the smallest
38  *			unit of alignment used for the storage
39  *			returned by malloc/realloc.
40  *
41  */
42 
43 #if defined(__FreeBSD__)
44 #   if defined(__i386__)
45 #       define malloc_minsize		16U
46 #   endif
47 #   if defined(__alpha__)
48 #       define malloc_minsize		16U
49 #   endif
50 #   if !defined(__NETBSD_SYSCALLS)
51 #       define HAS_UTRACE
52 #   endif
53     /*
54      * Make malloc/free/realloc thread-safe in libc for use with
55      * kernel threads.
56      */
57 #   include "libc_private.h"
58 #   include "spinlock.h"
59     static spinlock_t thread_lock	= _SPINLOCK_INITIALIZER;
60 #   define THREAD_LOCK()		if (__isthreaded) _SPINLOCK(&thread_lock);
61 #   define THREAD_UNLOCK()		if (__isthreaded) _SPINUNLOCK(&thread_lock);
62 #endif /* __FreeBSD__ */
63 
64 #if defined(__NetBSD__)
65 #   define malloc_minsize               16U
66 #endif /* __NetBSD__ */
67 
68 #if defined(__sparc__) && defined(sun)
69 #   define malloc_minsize		16U
70 #   define MAP_ANON			(0)
71     static int fdzero;
72 #   define MMAP_FD	fdzero
73 #   define INIT_MMAP() \
74 	{ if ((fdzero=open("/dev/zero", O_RDWR, 0000)) == -1) \
75 	    wrterror("open of /dev/zero"); }
76 #endif /* __sparc__ */
77 
78 /* Insert your combination here... */
79 #if defined(__FOOCPU__) && defined(__BAROS__)
80 #   define malloc_minsize		16U
81 #endif /* __FOOCPU__ && __BAROS__ */
82 
83 
84 /*
85  * No user serviceable parts behind this point.
86  */
87 #include "namespace.h"
88 #include <sys/types.h>
89 #include <sys/mman.h>
90 #include <errno.h>
91 #include <fcntl.h>
92 #include <stddef.h>
93 #include <stdio.h>
94 #include <stdlib.h>
95 #include <string.h>
96 #include <unistd.h>
97 
98 /*
99  * This structure describes a page worth of chunks.
100  */
101 
102 struct pginfo {
103     struct pginfo	*next;	/* next on the free list */
104     void		*page;	/* Pointer to the page */
105     u_short		size;	/* size of this page's chunks */
106     u_short		shift;	/* How far to shift for this size chunks */
107     u_short		free;	/* How many free chunks */
108     u_short		total;	/* How many chunk */
109     u_int		bits[1]; /* Which chunks are free */
110 };
111 
112 /*
113  * This structure describes a number of free pages.
114  */
115 
116 struct pgfree {
117     struct pgfree	*next;	/* next run of free pages */
118     struct pgfree	*prev;	/* prev run of free pages */
119     void		*page;	/* pointer to free pages */
120     void		*end;	/* pointer to end of free pages */
121     size_t		size;	/* number of bytes free */
122 };
123 
124 /*
125  * How many bits per u_int in the bitmap.
126  * Change only if not 8 bits/byte
127  */
128 #define	MALLOC_BITS	(8*sizeof(u_int))
129 
130 /*
131  * Magic values to put in the page_directory
132  */
133 #define MALLOC_NOT_MINE	((struct pginfo*) 0)
134 #define MALLOC_FREE 	((struct pginfo*) 1)
135 #define MALLOC_FIRST	((struct pginfo*) 2)
136 #define MALLOC_FOLLOW	((struct pginfo*) 3)
137 #define MALLOC_MAGIC	((struct pginfo*) 4)
138 
139 /*
140  * Page size related parameters, computed at run-time.
141  */
142 static size_t malloc_pagesize;
143 static size_t malloc_pageshift;
144 static size_t malloc_pagemask;
145 
146 #ifndef malloc_minsize
147 #define malloc_minsize			16U
148 #endif
149 
150 #ifndef malloc_maxsize
151 #define malloc_maxsize			((malloc_pagesize)>>1)
152 #endif
153 
154 #define pageround(foo) (((foo) + (malloc_pagemask))&(~(malloc_pagemask)))
155 #define ptr2idx(foo) (((size_t)(u_long)(foo) >> malloc_pageshift)-malloc_origo)
156 
157 #ifndef THREAD_LOCK
158 #define THREAD_LOCK()
159 #endif
160 
161 #ifndef THREAD_UNLOCK
162 #define THREAD_UNLOCK()
163 #endif
164 
165 #ifndef MMAP_FD
166 #define MMAP_FD (-1)
167 #endif
168 
169 #ifndef INIT_MMAP
170 #define INIT_MMAP()
171 #endif
172 
173 #ifndef MADV_FREE
174 #define MADV_FREE MADV_DONTNEED
175 #endif
176 
177 /* Set when initialization has been done */
178 static unsigned malloc_started;
179 
180 /* Recusion flag for public interface. */
181 static int malloc_active;
182 
183 /* Number of free pages we cache */
184 static unsigned malloc_cache = 16;
185 
186 /* The offset from pagenumber to index into the page directory */
187 static size_t malloc_origo;
188 
189 /* The last index in the page directory we care about */
190 static size_t last_idx;
191 
192 /* Pointer to page directory. Allocated "as if with" malloc */
193 static struct	pginfo **page_dir;
194 
195 /* How many slots in the page directory */
196 static unsigned	malloc_ninfo;
197 
198 /* Free pages line up here */
199 static struct pgfree free_list;
200 
201 /* Abort(), user doesn't handle problems.  */
202 static int malloc_abort;
203 
204 /* Are we trying to die ?  */
205 static int suicide;
206 
207 /* always realloc ?  */
208 static int malloc_realloc;
209 
210 /* pass the kernel a hint on free pages ?  */
211 static int malloc_hint = 0;
212 
213 /* xmalloc behaviour ?  */
214 static int malloc_xmalloc;
215 
216 /* sysv behaviour for malloc(0) ?  */
217 static int malloc_sysv;
218 
219 /* zero fill ?  */
220 static int malloc_zero;
221 
222 /* junk fill ?  */
223 static int malloc_junk;
224 
225 #ifdef HAS_UTRACE
226 
227 /* utrace ?  */
228 static int malloc_utrace;
229 
230 struct ut { void *p; size_t s; void *r; };
231 
232 void utrace __P((struct ut *, int));
233 
234 #define UTRACE(a, b, c) \
235 	if (malloc_utrace) \
236 		{struct ut u; u.p=a; u.s = b; u.r=c; utrace(&u, sizeof u);}
237 #else /* !HAS_UTRACE */
238 #define UTRACE(a,b,c)
239 #endif /* HAS_UTRACE */
240 
241 /* my last break. */
242 static void *malloc_brk;
243 
244 /* one location cache for free-list holders */
245 static struct pgfree *px;
246 
247 /* compile-time options */
248 char *malloc_options;
249 
250 /* Name of the current public function */
251 static char *malloc_func;
252 
253 /* Macro for mmap */
254 #define MMAP(size) \
255 	mmap(0, (size), PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, \
256 	    MMAP_FD, (off_t)0);
257 
258 /*
259  * Necessary function declarations
260  */
261 static int extend_pgdir(size_t idx);
262 static void *imalloc(size_t size);
263 static void ifree(void *ptr);
264 static void *irealloc(void *ptr, size_t size);
265 
266 extern char *__progname;
267 
268 static void
269 wrterror(char *p)
270 {
271     char *q = " error: ";
272     write(STDERR_FILENO, __progname, strlen(__progname));
273     write(STDERR_FILENO, malloc_func, strlen(malloc_func));
274     write(STDERR_FILENO, q, strlen(q));
275     write(STDERR_FILENO, p, strlen(p));
276     suicide = 1;
277     abort();
278 }
279 
280 static void
281 wrtwarning(char *p)
282 {
283     char *q = " warning: ";
284     if (malloc_abort)
285 	wrterror(p);
286     write(STDERR_FILENO, __progname, strlen(__progname));
287     write(STDERR_FILENO, malloc_func, strlen(malloc_func));
288     write(STDERR_FILENO, q, strlen(q));
289     write(STDERR_FILENO, p, strlen(p));
290 }
291 
292 
293 /*
294  * Allocate a number of pages from the OS
295  */
296 static void *
297 map_pages(size_t pages)
298 {
299     caddr_t result, tail;
300 
301     result = (caddr_t)pageround((size_t)(u_long)sbrk((intptr_t)0));
302     tail = result + (pages << malloc_pageshift);
303 
304     if (brk(tail)) {
305 #ifdef MALLOC_EXTRA_SANITY
306 	wrterror("(ES): map_pages fails\n");
307 #endif /* MALLOC_EXTRA_SANITY */
308 	return 0;
309     }
310     last_idx = ptr2idx(tail) - 1;
311     malloc_brk = tail;
312 
313     if ((last_idx+1) >= malloc_ninfo && !extend_pgdir(last_idx))
314 	return 0;;
315 
316     return result;
317 }
318 
319 /*
320  * Extend page directory
321  */
322 static int
323 extend_pgdir(size_t idx)
324 {
325     struct  pginfo **new, **old;
326     size_t newlen, oldlen;
327 
328     /* Make it this many pages */
329     newlen = pageround(idx * sizeof *page_dir) + malloc_pagesize;
330 
331     /* remember the old mapping size */
332     oldlen = malloc_ninfo * sizeof *page_dir;
333 
334     /*
335      * NOTE: we allocate new pages and copy the directory rather than tempt
336      * fate by trying to "grow" the region.. There is nothing to prevent
337      * us from accidently re-mapping space that's been allocated by our caller
338      * via dlopen() or other mmap().
339      *
340      * The copy problem is not too bad, as there is 4K of page index per
341      * 4MB of malloc arena.
342      *
343      * We can totally avoid the copy if we open a file descriptor to associate
344      * the anon mappings with.  Then, when we remap the pages at the new
345      * address, the old pages will be "magically" remapped..  But this means
346      * keeping open a "secret" file descriptor.....
347      */
348 
349     /* Get new pages */
350     new = (struct pginfo**) MMAP(newlen);
351     if (new == (struct pginfo **)-1)
352 	return 0;
353 
354     /* Copy the old stuff */
355     memcpy(new, page_dir, oldlen);
356 
357     /* register the new size */
358     malloc_ninfo = newlen / sizeof *page_dir;
359 
360     /* swap the pointers */
361     old = page_dir;
362     page_dir = new;
363 
364     /* Now free the old stuff */
365     munmap(old, oldlen);
366     return 1;
367 }
368 
369 /*
370  * Initialize the world
371  */
372 static void
373 malloc_init (void)
374 {
375     char *p, b[64];
376     int i, j;
377     int errnosave;
378 
379     /*
380      * Compute page-size related variables.
381      */
382     malloc_pagesize = (size_t)sysconf(_SC_PAGESIZE);
383     malloc_pagemask = malloc_pagesize - 1;
384     for (malloc_pageshift = 0;
385 	 (1UL << malloc_pageshift) != malloc_pagesize;
386 	 malloc_pageshift++)
387 	/* nothing */ ;
388 
389     INIT_MMAP();
390 
391 #ifdef MALLOC_EXTRA_SANITY
392     malloc_junk = 1;
393 #endif /* MALLOC_EXTRA_SANITY */
394 
395     for (i = 0; i < 3; i++) {
396 	if (i == 0) {
397 	    errnosave = errno;
398 	    j = readlink("/etc/malloc.conf", b, sizeof b - 1);
399 	    errno = errnosave;
400 	    if (j <= 0)
401 		continue;
402 	    b[j] = '\0';
403 	    p = b;
404 	} else if (i == 1) {
405 	    p = getenv("MALLOC_OPTIONS");
406 	} else {
407 	    p = malloc_options;
408 	}
409 	for (; p && *p; p++) {
410 	    switch (*p) {
411 		case '>': malloc_cache   <<= 1; break;
412 		case '<': malloc_cache   >>= 1; break;
413 		case 'a': malloc_abort   = 0; break;
414 		case 'A': malloc_abort   = 1; break;
415 		case 'h': malloc_hint    = 0; break;
416 		case 'H': malloc_hint    = 1; break;
417 		case 'r': malloc_realloc = 0; break;
418 		case 'R': malloc_realloc = 1; break;
419 		case 'j': malloc_junk    = 0; break;
420 		case 'J': malloc_junk    = 1; break;
421 #ifdef HAS_UTRACE
422 		case 'u': malloc_utrace  = 0; break;
423 		case 'U': malloc_utrace  = 1; break;
424 #endif
425 		case 'v': malloc_sysv    = 0; break;
426 		case 'V': malloc_sysv    = 1; break;
427 		case 'x': malloc_xmalloc = 0; break;
428 		case 'X': malloc_xmalloc = 1; break;
429 		case 'z': malloc_zero    = 0; break;
430 		case 'Z': malloc_zero    = 1; break;
431 		default:
432 		    j = malloc_abort;
433 		    malloc_abort = 0;
434 		    wrtwarning("unknown char in MALLOC_OPTIONS\n");
435 		    malloc_abort = j;
436 		    break;
437 	    }
438 	}
439     }
440 
441     UTRACE(0, 0, 0);
442 
443     /*
444      * We want junk in the entire allocation, and zero only in the part
445      * the user asked for.
446      */
447     if (malloc_zero)
448 	malloc_junk=1;
449 
450     /*
451      * If we run with junk (or implicitly from above: zero), we want to
452      * force realloc() to get new storage, so we can DTRT with it.
453      */
454     if (malloc_junk)
455 	malloc_realloc=1;
456 
457     /* Allocate one page for the page directory */
458     page_dir = (struct pginfo **) MMAP(malloc_pagesize);
459 
460     if (page_dir == (struct pginfo **) -1)
461 	wrterror("mmap(2) failed, check limits.\n");
462 
463     /*
464      * We need a maximum of malloc_pageshift buckets, steal these from the
465      * front of the page_directory;
466      */
467     malloc_origo = pageround((size_t)(u_long)sbrk((intptr_t)0))
468 	>> malloc_pageshift;
469     malloc_origo -= malloc_pageshift;
470 
471     malloc_ninfo = malloc_pagesize / sizeof *page_dir;
472 
473     /* Recalculate the cache size in bytes, and make sure it's nonzero */
474 
475     if (!malloc_cache)
476 	malloc_cache++;
477 
478     malloc_cache <<= malloc_pageshift;
479 
480     /*
481      * This is a nice hack from Kaleb Keithly (kaleb@x.org).
482      * We can sbrk(2) further back when we keep this on a low address.
483      */
484     px = (struct pgfree *) imalloc (sizeof *px);
485 
486     /* Been here, done that */
487     malloc_started++;
488 }
489 
490 /*
491  * Allocate a number of complete pages
492  */
493 static void *
494 malloc_pages(size_t size)
495 {
496     void *p, *delay_free = 0;
497     int i;
498     struct pgfree *pf;
499     size_t idx;
500 
501     size = pageround(size);
502 
503     p = 0;
504 
505     /* Look for free pages before asking for more */
506     for(pf = free_list.next; pf; pf = pf->next) {
507 
508 #ifdef MALLOC_EXTRA_SANITY
509 	if (pf->size & malloc_pagemask)
510 	    wrterror("(ES): junk length entry on free_list\n");
511 	if (!pf->size)
512 	    wrterror("(ES): zero length entry on free_list\n");
513 	if (pf->page == pf->end)
514 	    wrterror("(ES): zero entry on free_list\n");
515 	if (pf->page > pf->end)
516 	    wrterror("(ES): sick entry on free_list\n");
517 	if ((void*)pf->page >= (void*)sbrk(0))
518 	    wrterror("(ES): entry on free_list past brk\n");
519 	if (page_dir[ptr2idx(pf->page)] != MALLOC_FREE)
520 	    wrterror("(ES): non-free first page on free-list\n");
521 	if (page_dir[ptr2idx(pf->end)-1] != MALLOC_FREE)
522 	    wrterror("(ES): non-free last page on free-list\n");
523 #endif /* MALLOC_EXTRA_SANITY */
524 
525 	if (pf->size < size)
526 	    continue;
527 
528 	if (pf->size == size) {
529 	    p = pf->page;
530 	    if (pf->next)
531 		    pf->next->prev = pf->prev;
532 	    pf->prev->next = pf->next;
533 	    delay_free = pf;
534 	    break;
535 	}
536 
537 	p = pf->page;
538 	pf->page = (char *)pf->page + size;
539 	pf->size -= size;
540 	break;
541     }
542 
543 #ifdef MALLOC_EXTRA_SANITY
544     if (p && page_dir[ptr2idx(p)] != MALLOC_FREE)
545 	wrterror("(ES): allocated non-free page on free-list\n");
546 #endif /* MALLOC_EXTRA_SANITY */
547 
548     size >>= malloc_pageshift;
549 
550     /* Map new pages */
551     if (!p)
552 	p = map_pages(size);
553 
554     if (p) {
555 
556 	idx = ptr2idx(p);
557 	page_dir[idx] = MALLOC_FIRST;
558 	for (i=1;i<size;i++)
559 	    page_dir[idx+i] = MALLOC_FOLLOW;
560 
561 	if (malloc_junk)
562 	    memset(p, SOME_JUNK, size << malloc_pageshift);
563     }
564 
565     if (delay_free) {
566 	if (!px)
567 	    px = delay_free;
568 	else
569 	    ifree(delay_free);
570     }
571 
572     return p;
573 }
574 
575 /*
576  * Allocate a page of fragments
577  */
578 
579 static __inline__ int
580 malloc_make_chunks(int bits)
581 {
582     struct  pginfo *bp;
583     void *pp;
584     int i, k, l;
585 
586     /* Allocate a new bucket */
587     pp = malloc_pages(malloc_pagesize);
588     if (!pp)
589 	return 0;
590 
591     /* Find length of admin structure */
592     l = (int)offsetof(struct pginfo, bits[0]);
593     l += sizeof bp->bits[0] *
594 	(((malloc_pagesize >> bits)+MALLOC_BITS-1) / MALLOC_BITS);
595 
596     /* Don't waste more than two chunks on this */
597     if ((1<<(bits)) <= l+l) {
598 	bp = (struct  pginfo *)pp;
599     } else {
600 	bp = (struct  pginfo *)imalloc((size_t)l);
601 	if (!bp) {
602 	    ifree(pp);
603 	    return 0;
604 	}
605     }
606 
607     bp->size = (1<<bits);
608     bp->shift = bits;
609     bp->total = bp->free = malloc_pagesize >> bits;
610     bp->page = pp;
611 
612     /* set all valid bits in the bitmap */
613     k = bp->total;
614     i = 0;
615 
616     /* Do a bunch at a time */
617     for(;k-i >= MALLOC_BITS; i += MALLOC_BITS)
618 	bp->bits[i / MALLOC_BITS] = ~0U;
619 
620     for(; i < k; i++)
621         bp->bits[i/MALLOC_BITS] |= 1<<(i%MALLOC_BITS);
622 
623     if (bp == bp->page) {
624 	/* Mark the ones we stole for ourselves */
625 	for(i=0;l > 0;i++) {
626 	    bp->bits[i/MALLOC_BITS] &= ~(1<<(i%MALLOC_BITS));
627 	    bp->free--;
628 	    bp->total--;
629 	    l -= (1 << bits);
630 	}
631     }
632 
633     /* MALLOC_LOCK */
634 
635     page_dir[ptr2idx(pp)] = bp;
636 
637     bp->next = page_dir[bits];
638     page_dir[bits] = bp;
639 
640     /* MALLOC_UNLOCK */
641 
642     return 1;
643 }
644 
645 /*
646  * Allocate a fragment
647  */
648 static void *
649 malloc_bytes(size_t size)
650 {
651     size_t i;
652     int j;
653     u_int u;
654     struct  pginfo *bp;
655     int k;
656     u_int *lp;
657 
658     /* Don't bother with anything less than this */
659     if (size < malloc_minsize)
660 	size = malloc_minsize;
661 
662     /* Find the right bucket */
663     j = 1;
664     i = size-1;
665     while (i >>= 1)
666 	j++;
667 
668     /* If it's empty, make a page more of that size chunks */
669     if (!page_dir[j] && !malloc_make_chunks(j))
670 	return 0;
671 
672     bp = page_dir[j];
673 
674     /* Find first word of bitmap which isn't empty */
675     for (lp = bp->bits; !*lp; lp++)
676 	;
677 
678     /* Find that bit, and tweak it */
679     u = 1;
680     k = 0;
681     while (!(*lp & u)) {
682 	u += u;
683 	k++;
684     }
685     *lp ^= u;
686 
687     /* If there are no more free, remove from free-list */
688     if (!--bp->free) {
689 	page_dir[j] = bp->next;
690 	bp->next = 0;
691     }
692 
693     /* Adjust to the real offset of that chunk */
694     k += (lp-bp->bits)*MALLOC_BITS;
695     k <<= bp->shift;
696 
697     if (malloc_junk)
698 	memset((u_char*)bp->page + k, SOME_JUNK, (size_t)bp->size);
699 
700     return (u_char *)bp->page + k;
701 }
702 
703 /*
704  * Allocate a piece of memory
705  */
706 static void *
707 imalloc(size_t size)
708 {
709     void *result;
710 
711     if (suicide)
712 	abort();
713 
714     if ((size + malloc_pagesize) < size)	/* Check for overflow */
715 	result = 0;
716     else if (size <= malloc_maxsize)
717 	result =  malloc_bytes(size);
718     else
719 	result =  malloc_pages(size);
720 
721     if (malloc_abort && !result)
722 	wrterror("allocation failed.\n");
723 
724     if (malloc_zero && result)
725 	memset(result, 0, size);
726 
727     return result;
728 }
729 
730 /*
731  * Change the size of an allocation.
732  */
733 static void *
734 irealloc(void *ptr, size_t size)
735 {
736     void *p;
737     size_t osize, idx;
738     struct pginfo **mp;
739     size_t i;
740 
741     if (suicide)
742 	abort();
743 
744     idx = ptr2idx(ptr);
745 
746     if (idx < malloc_pageshift) {
747 	wrtwarning("junk pointer, too low to make sense.\n");
748 	return 0;
749     }
750 
751     if (idx > last_idx) {
752 	wrtwarning("junk pointer, too high to make sense.\n");
753 	return 0;
754     }
755 
756     mp = &page_dir[idx];
757 
758     if (*mp == MALLOC_FIRST) {			/* Page allocation */
759 
760 	/* Check the pointer */
761 	if ((size_t)(u_long)ptr & malloc_pagemask) {
762 	    wrtwarning("modified (page-) pointer.\n");
763 	    return 0;
764 	}
765 
766 	/* Find the size in bytes */
767 	for (osize = malloc_pagesize; *++mp == MALLOC_FOLLOW;)
768 	    osize += malloc_pagesize;
769 
770         if (!malloc_realloc && 			/* unless we have to, */
771 	  size <= osize && 			/* .. or are too small, */
772 	  size > (osize - malloc_pagesize)) {	/* .. or can free a page, */
773 	    return ptr;				/* don't do anything. */
774 	}
775 
776     } else if (*mp >= MALLOC_MAGIC) {		/* Chunk allocation */
777 
778 	/* Check the pointer for sane values */
779 	if (((size_t)(u_long)ptr & ((*mp)->size-1))) {
780 	    wrtwarning("modified (chunk-) pointer.\n");
781 	    return 0;
782 	}
783 
784 	/* Find the chunk index in the page */
785 	i = ((size_t)(u_long)ptr & malloc_pagemask) >> (*mp)->shift;
786 
787 	/* Verify that it isn't a free chunk already */
788         if ((*mp)->bits[i/MALLOC_BITS] & (1<<(i%MALLOC_BITS))) {
789 	    wrtwarning("chunk is already free.\n");
790 	    return 0;
791 	}
792 
793 	osize = (*mp)->size;
794 
795 	if (!malloc_realloc &&		/* Unless we have to, */
796 	  size < osize && 		/* ..or are too small, */
797 	  (size > osize/2 ||	 	/* ..or could use a smaller size, */
798 	  osize == malloc_minsize)) {	/* ..(if there is one) */
799 	    return ptr;			/* ..Don't do anything */
800 	}
801 
802     } else {
803 	wrtwarning("pointer to wrong page.\n");
804 	return 0;
805     }
806 
807     p = imalloc(size);
808 
809     if (p) {
810 	/* copy the lesser of the two sizes, and free the old one */
811 	if (!size || !osize)
812 	    ;
813 	else if (osize < size)
814 	    memcpy(p, ptr, osize);
815 	else
816 	    memcpy(p, ptr, size);
817 	ifree(ptr);
818     }
819     return p;
820 }
821 
822 /*
823  * Free a sequence of pages
824  */
825 
826 static __inline__ void
827 free_pages(void *ptr, size_t idx, struct pginfo *info)
828 {
829     size_t i;
830     struct pgfree *pf, *pt=0;
831     size_t l;
832     void *tail;
833 
834     if (info == MALLOC_FREE) {
835 	wrtwarning("page is already free.\n");
836 	return;
837     }
838 
839     if (info != MALLOC_FIRST) {
840 	wrtwarning("pointer to wrong page.\n");
841 	return;
842     }
843 
844     if ((size_t)(u_long)ptr & malloc_pagemask) {
845 	wrtwarning("modified (page-) pointer.\n");
846 	return;
847     }
848 
849     /* Count how many pages and mark them free at the same time */
850     page_dir[idx] = MALLOC_FREE;
851     for (i = 1; page_dir[idx+i] == MALLOC_FOLLOW; i++)
852 	page_dir[idx + i] = MALLOC_FREE;
853 
854     l = i << malloc_pageshift;
855 
856     if (malloc_junk)
857 	memset(ptr, SOME_JUNK, l);
858 
859     if (malloc_hint)
860 	madvise(ptr, l, MADV_FREE);
861 
862     tail = (char *)ptr+l;
863 
864     /* add to free-list */
865     if (!px)
866 	px = imalloc(sizeof *pt);	/* This cannot fail... */
867     px->page = ptr;
868     px->end =  tail;
869     px->size = l;
870     if (!free_list.next) {
871 
872 	/* Nothing on free list, put this at head */
873 	px->next = free_list.next;
874 	px->prev = &free_list;
875 	free_list.next = px;
876 	pf = px;
877 	px = 0;
878 
879     } else {
880 
881 	/* Find the right spot, leave pf pointing to the modified entry. */
882 	tail = (char *)ptr+l;
883 
884 	for(pf = free_list.next; pf->end < ptr && pf->next; pf = pf->next)
885 	    ; /* Race ahead here */
886 
887 	if (pf->page > tail) {
888 	    /* Insert before entry */
889 	    px->next = pf;
890 	    px->prev = pf->prev;
891 	    pf->prev = px;
892 	    px->prev->next = px;
893 	    pf = px;
894 	    px = 0;
895 	} else if (pf->end == ptr ) {
896 	    /* Append to the previous entry */
897 	    pf->end = (char *)pf->end + l;
898 	    pf->size += l;
899 	    if (pf->next && pf->end == pf->next->page ) {
900 		/* And collapse the next too. */
901 		pt = pf->next;
902 		pf->end = pt->end;
903 		pf->size += pt->size;
904 		pf->next = pt->next;
905 		if (pf->next)
906 		    pf->next->prev = pf;
907 	    }
908 	} else if (pf->page == tail) {
909 	    /* Prepend to entry */
910 	    pf->size += l;
911 	    pf->page = ptr;
912 	} else if (!pf->next) {
913 	    /* Append at tail of chain */
914 	    px->next = 0;
915 	    px->prev = pf;
916 	    pf->next = px;
917 	    pf = px;
918 	    px = 0;
919 	} else {
920 	    wrterror("freelist is destroyed.\n");
921 	}
922     }
923 
924     /* Return something to OS ? */
925     if (!pf->next &&				/* If we're the last one, */
926       pf->size > malloc_cache &&		/* ..and the cache is full, */
927       pf->end == malloc_brk &&			/* ..and none behind us, */
928       malloc_brk == sbrk((intptr_t)0)) {	/* ..and it's OK to do... */
929 
930 	/*
931 	 * Keep the cache intact.  Notice that the '>' above guarantees that
932 	 * the pf will always have at least one page afterwards.
933 	 */
934 	pf->end = (char *)pf->page + malloc_cache;
935 	pf->size = malloc_cache;
936 
937 	brk(pf->end);
938 	malloc_brk = pf->end;
939 
940 	idx = ptr2idx(pf->end);
941 	last_idx = idx - 1;
942 
943 	for(i=idx;i <= last_idx;)
944 	    page_dir[i++] = MALLOC_NOT_MINE;
945 
946 	/* XXX: We could realloc/shrink the pagedir here I guess. */
947     }
948     if (pt)
949 	ifree(pt);
950 }
951 
952 /*
953  * Free a chunk, and possibly the page it's on, if the page becomes empty.
954  */
955 
956 static __inline__ void
957 free_bytes(void *ptr, size_t idx, struct pginfo *info)
958 {
959     size_t i;
960     struct pginfo **mp;
961     void *vp;
962 
963     /* Find the chunk number on the page */
964     i = ((size_t)(u_long)ptr & malloc_pagemask) >> info->shift;
965 
966     if (((size_t)(u_long)ptr & (info->size-1))) {
967 	wrtwarning("modified (chunk-) pointer.\n");
968 	return;
969     }
970 
971     if (info->bits[i/MALLOC_BITS] & (1<<(i%MALLOC_BITS))) {
972 	wrtwarning("chunk is already free.\n");
973 	return;
974     }
975 
976     if (malloc_junk)
977 	memset(ptr, SOME_JUNK, (size_t)info->size);
978 
979     info->bits[i/MALLOC_BITS] |= 1<<(i%MALLOC_BITS);
980     info->free++;
981 
982     mp = page_dir + info->shift;
983 
984     if (info->free == 1) {
985 
986 	/* Page became non-full */
987 
988 	mp = page_dir + info->shift;
989 	/* Insert in address order */
990 	while (*mp && (*mp)->next && (*mp)->next->page < info->page)
991 	    mp = &(*mp)->next;
992 	info->next = *mp;
993 	*mp = info;
994 	return;
995     }
996 
997     if (info->free != info->total)
998 	return;
999 
1000     /* Find & remove this page in the queue */
1001     while (*mp != info) {
1002 	mp = &((*mp)->next);
1003 #ifdef MALLOC_EXTRA_SANITY
1004 	if (!*mp)
1005 		wrterror("(ES): Not on queue\n");
1006 #endif /* MALLOC_EXTRA_SANITY */
1007     }
1008     *mp = info->next;
1009 
1010     /* Free the page & the info structure if need be */
1011     page_dir[idx] = MALLOC_FIRST;
1012     vp = info->page;		/* Order is important ! */
1013     if(vp != (void*)info)
1014 	ifree(info);
1015     ifree(vp);
1016 }
1017 
1018 static void
1019 ifree(void *ptr)
1020 {
1021     struct pginfo *info;
1022     size_t idx;
1023 
1024     /* This is legal */
1025     if (!ptr)
1026 	return;
1027 
1028     if (!malloc_started) {
1029 	wrtwarning("malloc() has never been called.\n");
1030 	return;
1031     }
1032 
1033     /* If we're already sinking, don't make matters any worse. */
1034     if (suicide)
1035 	return;
1036 
1037     idx = ptr2idx(ptr);
1038 
1039     if (idx < malloc_pageshift) {
1040 	wrtwarning("junk pointer, too low to make sense.\n");
1041 	return;
1042     }
1043 
1044     if (idx > last_idx) {
1045 	wrtwarning("junk pointer, too high to make sense.\n");
1046 	return;
1047     }
1048 
1049     info = page_dir[idx];
1050 
1051     if (info < MALLOC_MAGIC)
1052         free_pages(ptr, idx, info);
1053     else
1054 	free_bytes(ptr, idx, info);
1055     return;
1056 }
1057 
1058 /*
1059  * These are the public exported interface routines.
1060  */
1061 
1062 
1063 void *
1064 malloc(size_t size)
1065 {
1066     register void *r;
1067 
1068     THREAD_LOCK();
1069     malloc_func = " in malloc():";
1070     if (malloc_active++) {
1071 	wrtwarning("recursive call.\n");
1072         malloc_active--;
1073 	return (0);
1074     }
1075     if (!malloc_started)
1076 	malloc_init();
1077     if (malloc_sysv && !size)
1078 	r = 0;
1079     else
1080 	r = imalloc(size);
1081     UTRACE(0, size, r);
1082     malloc_active--;
1083     THREAD_UNLOCK();
1084     if (r == NULL && (size != 0 || !malloc_sysv)) {
1085 	if (malloc_xmalloc)
1086 	    wrterror("out of memory.\n");
1087 	errno = ENOMEM;
1088     }
1089     return (r);
1090 }
1091 
1092 void
1093 free(void *ptr)
1094 {
1095     THREAD_LOCK();
1096     malloc_func = " in free():";
1097     if (malloc_active++) {
1098 	wrtwarning("recursive call.\n");
1099 	malloc_active--;
1100 	return;
1101     } else {
1102 	ifree(ptr);
1103 	UTRACE(ptr, 0, 0);
1104     }
1105     malloc_active--;
1106     THREAD_UNLOCK();
1107     return;
1108 }
1109 
1110 void *
1111 realloc(void *ptr, size_t size)
1112 {
1113     register void *r;
1114 
1115     THREAD_LOCK();
1116     malloc_func = " in realloc():";
1117     if (malloc_active++) {
1118 	wrtwarning("recursive call.\n");
1119         malloc_active--;
1120 	return (0);
1121     }
1122     if (ptr && !malloc_started) {
1123 	wrtwarning("malloc() has never been called.\n");
1124 	ptr = 0;
1125     }
1126     if (!malloc_started)
1127 	malloc_init();
1128     if (malloc_sysv && !size) {
1129 	ifree(ptr);
1130 	r = 0;
1131     } else if (!ptr) {
1132 	r = imalloc(size);
1133     } else {
1134         r = irealloc(ptr, size);
1135     }
1136     UTRACE(ptr, size, r);
1137     malloc_active--;
1138     THREAD_UNLOCK();
1139     if (r == NULL && (size != 0 || !malloc_sysv)) {
1140 	if (malloc_xmalloc)
1141 	    wrterror("out of memory.\n");
1142 	errno = ENOMEM;
1143     }
1144     return (r);
1145 }
1146