xref: /netbsd-src/lib/libc/stdlib/malloc.c (revision 6b25d775138e13c8d80d355923af2cf3729ef131)
1 /*	$NetBSD: malloc.c,v 1.36 2001/02/19 22:22:17 cgd Exp $	*/
2 
3 /*
4  * ----------------------------------------------------------------------------
5  * "THE BEER-WARE LICENSE" (Revision 42):
6  * <phk@FreeBSD.ORG> wrote this file.  As long as you retain this notice you
7  * can do whatever you want with this stuff. If we meet some day, and you think
8  * this stuff is worth it, you can buy me a beer in return.   Poul-Henning Kamp
9  * ----------------------------------------------------------------------------
10  *
11  * From FreeBSD: malloc.c,v 1.43 1998/09/30 06:13:59 jb
12  *
13  */
14 
15 /*
16  * Defining MALLOC_EXTRA_SANITY will enable extra checks which are related
17  * to internal conditions and consistency in malloc.c. This has a
18  * noticeable runtime performance hit, and generally will not do you
19  * any good unless you fiddle with the internals of malloc or want
20  * to catch random pointer corruption as early as possible.
21  */
22 #ifndef MALLOC_EXTRA_SANITY
23 #undef MALLOC_EXTRA_SANITY
24 #endif
25 
26 /*
27  * What to use for Junk.  This is the byte value we fill memory with
28  * when the 'J' option is enabled.
29  */
30 #define SOME_JUNK	0xd0		/* as in "Duh" :-) */
31 
32 /*
33  * The basic parameters you can tweak.
34  *
35  * malloc_minsize	minimum size of an allocation in bytes.
36  *			If this is too small it's too much work
37  *			to manage the allocations.  This is also the smallest
38  *			unit of alignment used for the storage
39  *			returned by malloc/realloc.
40  *
41  */
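/*
 * Illustrative sketch (not part of the allocator): with a malloc_minsize
 * of 16, malloc_bytes() below pads small requests to at least 16 bytes
 * and then rounds them up to the next power of two.  The variables here
 * are hypothetical and only demonstrate that rounding:
 */
#if 0
	size_t request = 5;		/* hypothetical request size */
	size_t chunk = 16;		/* never smaller than malloc_minsize */
	while (chunk < request)		/* round up to a power of two */
	    chunk <<= 1;
	/* request 5 -> 16-byte chunk, request 100 -> 128-byte chunk */
#endif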
42 
43 #if defined(__FreeBSD__)
44 #   if defined(__i386__)
45 #       define malloc_minsize		16U
46 #   endif
47 #   if defined(__alpha__)
48 #       define malloc_minsize		16U
49 #   endif
50 #   define HAS_UTRACE
51 #   define UTRACE_LABEL
52 
53 #include <sys/cdefs.h>
54 void utrace __P((struct ut *, int));
55 
56     /*
57      * Make malloc/free/realloc thread-safe in libc for use with
58      * kernel threads.
59      */
60 #   include "libc_private.h"
61 #   include "spinlock.h"
62     static spinlock_t thread_lock	= _SPINLOCK_INITIALIZER;
63 #   define THREAD_LOCK()		if (__isthreaded) _SPINLOCK(&thread_lock);
64 #   define THREAD_UNLOCK()		if (__isthreaded) _SPINUNLOCK(&thread_lock);
65 #endif /* __FreeBSD__ */
66 
67 #if defined(__NetBSD__)
68 #   define malloc_minsize               16U
69 #   define HAS_UTRACE
70 #   define UTRACE_LABEL "malloc",
71 #include <sys/cdefs.h>
72 #include <sys/types.h>
73 int utrace __P((const char *, void *, size_t));
74 #endif /* __NetBSD__ */
75 
76 #if defined(__sparc__) && defined(sun)
77 #   define malloc_minsize		16U
78 #   define MAP_ANON			(0)
79     static int fdzero;
80 #   define MMAP_FD	fdzero
81 #   define INIT_MMAP() \
82 	{ if ((fdzero=open("/dev/zero", O_RDWR, 0000)) == -1) \
83 	    wrterror("open of /dev/zero"); }
84 #endif /* __sparc__ */
85 
86 /* Insert your combination here... */
87 #if defined(__FOOCPU__) && defined(__BAROS__)
88 #   define malloc_minsize		16U
89 #endif /* __FOOCPU__ && __BAROS__ */
90 
91 
92 /*
93  * No user serviceable parts behind this point.
94  */
95 #include "namespace.h"
96 #include <sys/types.h>
97 #include <sys/mman.h>
98 #include <errno.h>
99 #include <fcntl.h>
100 #include <stddef.h>
101 #include <stdio.h>
102 #include <stdlib.h>
103 #include <string.h>
104 #include <unistd.h>
105 
106 /*
107  * This structure describes a page worth of chunks.
108  */
109 
110 struct pginfo {
111     struct pginfo	*next;	/* next on the free list */
112     void		*page;	/* Pointer to the page */
113     u_short		size;	/* size of this page's chunks */
114     u_short		shift;	/* How far to shift for chunks of this size */
115     u_short		free;	/* How many free chunks */
116     u_short		total;	/* How many chunks */
117     u_int		bits[1]; /* Which chunks are free */
118 };
119 
120 /*
121  * This structure describes a number of free pages.
122  */
123 
124 struct pgfree {
125     struct pgfree	*next;	/* next run of free pages */
126     struct pgfree	*prev;	/* prev run of free pages */
127     void		*page;	/* pointer to free pages */
128     void		*end;	/* pointer to end of free pages */
129     size_t		size;	/* number of bytes free */
130 };
131 
132 /*
133  * How many bits per u_int in the bitmap.
134  * Change only if not 8 bits/byte
135  */
136 #define	MALLOC_BITS	(8*sizeof(u_int))
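/*
 * Illustrative sketch of how the pginfo bitmap above is indexed; the real
 * lookups in malloc_bytes()/free_bytes() use the same arithmetic.  "bp"
 * and "i" are hypothetical here:
 */
#if 0
	struct pginfo *bp;		/* some chunk page */
	size_t i;			/* chunk number within that page */
	int is_free;

	is_free = (bp->bits[i / MALLOC_BITS] & (1U << (i % MALLOC_BITS))) != 0;
	/* e.g. with a 32-bit u_int, chunk 37 is bit 5 of bits[1] */
#endif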
137 
138 /*
139  * Magic values to put in the page_directory
140  */
141 #define MALLOC_NOT_MINE	((struct pginfo*) 0)
142 #define MALLOC_FREE 	((struct pginfo*) 1)
143 #define MALLOC_FIRST	((struct pginfo*) 2)
144 #define MALLOC_FOLLOW	((struct pginfo*) 3)
145 #define MALLOC_MAGIC	((struct pginfo*) 4)
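/*
 * Illustrative sketch: a page directory entry is either one of the small
 * magic values above or a pointer to a real struct pginfo.  ifree() below
 * tells them apart with a plain pointer comparison ("info" and "idx" are
 * hypothetical here):
 */
#if 0
	struct pginfo *info = page_dir[idx];

	if (info < MALLOC_MAGIC) {
	    /* MALLOC_NOT_MINE/FREE/FIRST/FOLLOW: whole-page bookkeeping */
	} else {
	    /* a chunk page: info points at its pginfo header */
	}
#endif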
146 
147 /*
148  * Page size related parameters, computed at run-time.
149  */
150 static size_t malloc_pagesize;
151 static size_t malloc_pageshift;
152 static size_t malloc_pagemask;
153 
154 #ifndef malloc_minsize
155 #define malloc_minsize			16U
156 #endif
157 
158 #ifndef malloc_maxsize
159 #define malloc_maxsize			((malloc_pagesize)>>1)
160 #endif
161 
162 #define pageround(foo) (((foo) + (malloc_pagemask))&(~(malloc_pagemask)))
163 #define ptr2idx(foo) (((size_t)(u_long)(foo) >> malloc_pageshift)-malloc_origo)
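/*
 * Worked example (assuming a 4096-byte page, so malloc_pagemask is 0xfff
 * and malloc_pageshift is 12):
 *
 *	pageround(4096) == 4096,  pageround(4097) == 8192
 *	ptr2idx(p)      == ((size_t)(u_long)p >> 12) - malloc_origo
 *
 * so consecutive pages map to consecutive page_dir[] slots.
 */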
164 
165 #ifndef THREAD_LOCK
166 #define THREAD_LOCK()
167 #endif
168 
169 #ifndef THREAD_UNLOCK
170 #define THREAD_UNLOCK()
171 #endif
172 
173 #ifndef MMAP_FD
174 #define MMAP_FD (-1)
175 #endif
176 
177 #ifndef INIT_MMAP
178 #define INIT_MMAP()
179 #endif
180 
181 #ifndef MADV_FREE
182 #define MADV_FREE MADV_DONTNEED
183 #endif
184 
185 /* Set when initialization has been done */
186 static unsigned malloc_started;
187 
188 /* Recursion flag for public interface. */
189 static int malloc_active;
190 
191 /* Number of free pages we cache */
192 static unsigned malloc_cache = 16;
193 
194 /* The offset from page number to index into the page directory */
195 static size_t malloc_origo;
196 
197 /* The last index in the page directory we care about */
198 static size_t last_idx;
199 
200 /* Pointer to page directory. Allocated "as if with" malloc */
201 static struct	pginfo **page_dir;
202 
203 /* How many slots in the page directory */
204 static unsigned	malloc_ninfo;
205 
206 /* Free pages line up here */
207 static struct pgfree free_list;
208 
209 /* Abort(), user doesn't handle problems.  */
210 static int malloc_abort;
211 
212 /* Are we trying to die ?  */
213 static int suicide;
214 
215 /* always realloc ?  */
216 static int malloc_realloc;
217 
218 /* pass the kernel a hint on free pages ?  */
219 static int malloc_hint = 0;
220 
221 /* xmalloc behaviour ?  */
222 static int malloc_xmalloc;
223 
224 /* sysv behaviour for malloc(0) ?  */
225 static int malloc_sysv;
226 
227 /* zero fill ?  */
228 static int malloc_zero;
229 
230 /* junk fill ?  */
231 static int malloc_junk;
232 
233 #ifdef HAS_UTRACE
234 
235 /* utrace ?  */
236 static int malloc_utrace;
237 
238 struct ut { void *p; size_t s; void *r; };
239 
240 #define UTRACE(a, b, c) \
241 	if (malloc_utrace) {			\
242 		struct ut u;			\
243 		u.p=a; u.s = b; u.r=c;		\
244 		utrace(UTRACE_LABEL (void *) &u, sizeof u);	\
245 	}
246 #else /* !HAS_UTRACE */
247 #define UTRACE(a,b,c)
248 #endif /* HAS_UTRACE */
249 
250 /* Our last known program break (the current top of the sbrk heap). */
251 static void *malloc_brk;
252 
253 /* one location cache for free-list holders */
254 static struct pgfree *px;
255 
256 /* compile-time options */
257 char *malloc_options;
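/*
 * Illustrative sketch: a program can bake in default options by pointing
 * malloc_options at an option string before its first allocation; the
 * characters are parsed by malloc_init() below ("AJ" is just an example):
 */
#if 0
	extern char *malloc_options;

	malloc_options = "AJ";	/* abort on failure, junk-fill memory */
#endif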
258 
259 /* Name of the current public function */
260 static char *malloc_func;
261 
262 /* Macro for mmap */
263 #define MMAP(size) \
264 	mmap(0, (size), PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, \
265 	    MMAP_FD, (off_t)0)
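/*
 * On failure mmap(2) returns MAP_FAILED, i.e. (void *)-1, which is why
 * the callers below compare the result against -1 before using it.
 */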
266 
267 /*
268  * Necessary function declarations
269  */
270 static int extend_pgdir(size_t idx);
271 static void *imalloc(size_t size);
272 static void ifree(void *ptr);
273 static void *irealloc(void *ptr, size_t size);
274 
275 static void
276 wrterror(char *p)
277 {
278     const char *progname = getprogname();
279     char *q = " error: ";
280     write(STDERR_FILENO, progname, strlen(progname));
281     write(STDERR_FILENO, malloc_func, strlen(malloc_func));
282     write(STDERR_FILENO, q, strlen(q));
283     write(STDERR_FILENO, p, strlen(p));
284     suicide = 1;
285     abort();
286 }
287 
288 static void
289 wrtwarning(char *p)
290 {
291     const char *progname = getprogname();
292     char *q = " warning: ";
293     if (malloc_abort)
294 	wrterror(p);
295     write(STDERR_FILENO, progname, strlen(progname));
296     write(STDERR_FILENO, malloc_func, strlen(malloc_func));
297     write(STDERR_FILENO, q, strlen(q));
298     write(STDERR_FILENO, p, strlen(p));
299 }
300 
301 
302 /*
303  * Allocate a number of pages from the OS
304  */
305 static void *
306 map_pages(size_t pages)
307 {
308     caddr_t result, tail;
309 
310     result = (caddr_t)pageround((size_t)(u_long)sbrk((intptr_t)0));
311     tail = result + (pages << malloc_pageshift);
312 
313     if (brk(tail)) {
314 #ifdef MALLOC_EXTRA_SANITY
315 	wrterror("(ES): map_pages fails\n");
316 #endif /* MALLOC_EXTRA_SANITY */
317 	return 0;
318     }
319     last_idx = ptr2idx(tail) - 1;
320     malloc_brk = tail;
321 
322     if ((last_idx+1) >= malloc_ninfo && !extend_pgdir(last_idx))
323 	return 0;
324 
325     return result;
326 }
327 
328 /*
329  * Extend page directory
330  */
331 static int
332 extend_pgdir(size_t idx)
333 {
334     struct  pginfo **new, **old;
335     size_t newlen, oldlen;
336 
337     /* New size of the page directory, in bytes (rounded to whole pages, plus one page of slack) */
338     newlen = pageround(idx * sizeof *page_dir) + malloc_pagesize;
339 
340     /* remember the old mapping size */
341     oldlen = malloc_ninfo * sizeof *page_dir;
342 
343     /*
344      * NOTE: we allocate new pages and copy the directory rather than tempt
345      * fate by trying to "grow" the region.. There is nothing to prevent
346      * us from accidentally re-mapping space that's been allocated by our caller
347      * via dlopen() or other mmap().
348      *
349      * The copy problem is not too bad, as there is 4K of page index per
350      * 4MB of malloc arena.
351      *
352      * We can totally avoid the copy if we open a file descriptor to associate
353      * the anon mappings with.  Then, when we remap the pages at the new
354      * address, the old pages will be "magically" remapped..  But this means
355      * keeping open a "secret" file descriptor.....
356      */
357 
358     /* Get new pages */
359     new = (struct pginfo**) MMAP(newlen);
360     if (new == (struct pginfo **)-1)
361 	return 0;
362 
363     /* Copy the old stuff */
364     memcpy(new, page_dir, oldlen);
365 
366     /* register the new size */
367     malloc_ninfo = newlen / sizeof *page_dir;
368 
369     /* swap the pointers */
370     old = page_dir;
371     page_dir = new;
372 
373     /* Now free the old stuff */
374     munmap(old, oldlen);
375     return 1;
376 }
377 
378 /*
379  * Initialize the world
380  */
381 static void
382 malloc_init (void)
383 {
384     char *p, b[64];
385     int i, j;
386     int errnosave;
387 
388     /*
389      * Compute page-size related variables.
390      */
391     malloc_pagesize = (size_t)sysconf(_SC_PAGESIZE);
392     malloc_pagemask = malloc_pagesize - 1;
393     for (malloc_pageshift = 0;
394 	 (1UL << malloc_pageshift) != malloc_pagesize;
395 	 malloc_pageshift++)
396 	/* nothing */ ;
397 
398     INIT_MMAP();
399 
400 #ifdef MALLOC_EXTRA_SANITY
401     malloc_junk = 1;
402 #endif /* MALLOC_EXTRA_SANITY */
403 
404     for (i = 0; i < 3; i++) {
405 	if (i == 0) {
406 	    errnosave = errno;
407 	    j = readlink("/etc/malloc.conf", b, sizeof b - 1);
408 	    errno = errnosave;
409 	    if (j <= 0)
410 		continue;
411 	    b[j] = '\0';
412 	    p = b;
413 	} else if (i == 1) {
414 	    p = getenv("MALLOC_OPTIONS");
415 	} else {
416 	    p = malloc_options;
417 	}
418 	for (; p && *p; p++) {
419 	    switch (*p) {
420 		case '>': malloc_cache   <<= 1; break;
421 		case '<': malloc_cache   >>= 1; break;
422 		case 'a': malloc_abort   = 0; break;
423 		case 'A': malloc_abort   = 1; break;
424 		case 'h': malloc_hint    = 0; break;
425 		case 'H': malloc_hint    = 1; break;
426 		case 'r': malloc_realloc = 0; break;
427 		case 'R': malloc_realloc = 1; break;
428 		case 'j': malloc_junk    = 0; break;
429 		case 'J': malloc_junk    = 1; break;
430 #ifdef HAS_UTRACE
431 		case 'u': malloc_utrace  = 0; break;
432 		case 'U': malloc_utrace  = 1; break;
433 #endif
434 		case 'v': malloc_sysv    = 0; break;
435 		case 'V': malloc_sysv    = 1; break;
436 		case 'x': malloc_xmalloc = 0; break;
437 		case 'X': malloc_xmalloc = 1; break;
438 		case 'z': malloc_zero    = 0; break;
439 		case 'Z': malloc_zero    = 1; break;
440 		default:
441 		    j = malloc_abort;
442 		    malloc_abort = 0;
443 		    wrtwarning("unknown char in MALLOC_OPTIONS\n");
444 		    malloc_abort = j;
445 		    break;
446 	    }
447 	}
448     }
449 
450     UTRACE(0, 0, 0);
451 
452     /*
453      * We want junk in the entire allocation, and zero only in the part
454      * the user asked for.
455      */
456     if (malloc_zero)
457 	malloc_junk=1;
458 
459     /*
460      * If we run with junk (or implicitly from above: zero), we want to
461      * force realloc() to get new storage, so we can DTRT with it.
462      */
463     if (malloc_junk)
464 	malloc_realloc=1;
465 
466     /* Allocate one page for the page directory */
467     page_dir = (struct pginfo **) MMAP(malloc_pagesize);
468 
469     if (page_dir == (struct pginfo **) -1)
470 	wrterror("mmap(2) failed, check limits.\n");
471 
472     /*
473      * We need a maximum of malloc_pageshift buckets, steal these from the
474      * front of the page directory.
475      */
476     malloc_origo = pageround((size_t)(u_long)sbrk((intptr_t)0))
477 	>> malloc_pageshift;
478     malloc_origo -= malloc_pageshift;
479 
480     malloc_ninfo = malloc_pagesize / sizeof *page_dir;
481 
482     /* Recalculate the cache size in bytes, and make sure it's nonzero */
483 
484     if (!malloc_cache)
485 	malloc_cache++;
486 
487     malloc_cache <<= malloc_pageshift;
488 
489     /*
490      * This is a nice hack from Kaleb Keithly (kaleb@x.org).
491      * We can sbrk(2) further back when we keep this on a low address.
492      */
493     px = (struct pgfree *) imalloc (sizeof *px);
494 
495     /* Been here, done that */
496     malloc_started++;
497 }
498 
499 /*
500  * Allocate a number of complete pages
501  */
502 static void *
503 malloc_pages(size_t size)
504 {
505     void *p, *delay_free = 0;
506     int i;
507     struct pgfree *pf;
508     size_t idx;
509 
510     size = pageround(size);
511 
512     p = 0;
513 
514     /* Look for free pages before asking for more */
515     for(pf = free_list.next; pf; pf = pf->next) {
516 
517 #ifdef MALLOC_EXTRA_SANITY
518 	if (pf->size & malloc_pagemask)
519 	    wrterror("(ES): junk length entry on free_list\n");
520 	if (!pf->size)
521 	    wrterror("(ES): zero length entry on free_list\n");
522 	if (pf->page == pf->end)
523 	    wrterror("(ES): zero entry on free_list\n");
524 	if (pf->page > pf->end)
525 	    wrterror("(ES): sick entry on free_list\n");
526 	if ((void*)pf->page >= (void*)sbrk(0))
527 	    wrterror("(ES): entry on free_list past brk\n");
528 	if (page_dir[ptr2idx(pf->page)] != MALLOC_FREE)
529 	    wrterror("(ES): non-free first page on free-list\n");
530 	if (page_dir[ptr2idx(pf->end)-1] != MALLOC_FREE)
531 	    wrterror("(ES): non-free last page on free-list\n");
532 #endif /* MALLOC_EXTRA_SANITY */
533 
534 	if (pf->size < size)
535 	    continue;
536 
537 	if (pf->size == size) {
538 	    p = pf->page;
539 	    if (pf->next)
540 		    pf->next->prev = pf->prev;
541 	    pf->prev->next = pf->next;
542 	    delay_free = pf;
543 	    break;
544 	}
545 
546 	p = pf->page;
547 	pf->page = (char *)pf->page + size;
548 	pf->size -= size;
549 	break;
550     }
551 
552 #ifdef MALLOC_EXTRA_SANITY
553     if (p && page_dir[ptr2idx(p)] != MALLOC_FREE)
554 	wrterror("(ES): allocated non-free page on free-list\n");
555 #endif /* MALLOC_EXTRA_SANITY */
556 
557     size >>= malloc_pageshift;
558 
559     /* Map new pages */
560     if (!p)
561 	p = map_pages(size);
562 
563     if (p) {
564 
565 	idx = ptr2idx(p);
566 	page_dir[idx] = MALLOC_FIRST;
567 	for (i=1;i<size;i++)
568 	    page_dir[idx+i] = MALLOC_FOLLOW;
569 
570 	if (malloc_junk)
571 	    memset(p, SOME_JUNK, size << malloc_pageshift);
572     }
573 
574     if (delay_free) {
575 	if (!px)
576 	    px = delay_free;
577 	else
578 	    ifree(delay_free);
579     }
580 
581     return p;
582 }
583 
584 /*
585  * Allocate a page of fragments
586  */
587 
588 static __inline__ int
589 malloc_make_chunks(int bits)
590 {
591     struct  pginfo *bp;
592     void *pp;
593     int i, k, l;
594 
595     /* Allocate a new bucket */
596     pp = malloc_pages(malloc_pagesize);
597     if (!pp)
598 	return 0;
599 
600     /* Find length of admin structure */
601     l = (int)offsetof(struct pginfo, bits[0]);
602     l += sizeof bp->bits[0] *
603 	(((malloc_pagesize >> bits)+MALLOC_BITS-1) / MALLOC_BITS);
604 
605     /* Don't waste more than two chunks on this */
606     if ((1<<(bits)) <= l+l) {
607 	bp = (struct  pginfo *)pp;
608     } else {
609 	bp = (struct  pginfo *)imalloc((size_t)l);
610 	if (!bp) {
611 	    ifree(pp);
612 	    return 0;
613 	}
614     }
615 
616     bp->size = (1<<bits);
617     bp->shift = bits;
618     bp->total = bp->free = malloc_pagesize >> bits;
619     bp->page = pp;
620 
621     /* set all valid bits in the bitmap */
622     k = bp->total;
623     i = 0;
624 
625     /* Do a bunch at a time */
626     for(;k-i >= MALLOC_BITS; i += MALLOC_BITS)
627 	bp->bits[i / MALLOC_BITS] = ~0U;
628 
629     for(; i < k; i++)
630         bp->bits[i/MALLOC_BITS] |= 1<<(i%MALLOC_BITS);
631 
632     if (bp == bp->page) {
633 	/* Mark the ones we stole for ourselves */
634 	for(i=0;l > 0;i++) {
635 	    bp->bits[i/MALLOC_BITS] &= ~(1<<(i%MALLOC_BITS));
636 	    bp->free--;
637 	    bp->total--;
638 	    l -= (1 << bits);
639 	}
640     }
641 
642     /* MALLOC_LOCK */
643 
644     page_dir[ptr2idx(pp)] = bp;
645 
646     bp->next = page_dir[bits];
647     page_dir[bits] = bp;
648 
649     /* MALLOC_UNLOCK */
650 
651     return 1;
652 }
653 
654 /*
655  * Allocate a fragment
656  */
657 static void *
658 malloc_bytes(size_t size)
659 {
660     size_t i;
661     int j;
662     u_int u;
663     struct  pginfo *bp;
664     int k;
665     u_int *lp;
666 
667     /* Don't bother with anything less than this */
668     if (size < malloc_minsize)
669 	size = malloc_minsize;
670 
671     /* Find the right bucket */
672     j = 1;
673     i = size-1;
674     while (i >>= 1)
675 	j++;
676 
677     /* If that bucket is empty, make a page more of chunks of that size */
678     if (!page_dir[j] && !malloc_make_chunks(j))
679 	return 0;
680 
681     bp = page_dir[j];
682 
683     /* Find first word of bitmap which isn't empty */
684     for (lp = bp->bits; !*lp; lp++)
685 	;
686 
687     /* Find that bit, and tweak it */
688     u = 1;
689     k = 0;
690     while (!(*lp & u)) {
691 	u += u;
692 	k++;
693     }
694     *lp ^= u;
695 
696     /* If there are no more free, remove from free-list */
697     if (!--bp->free) {
698 	page_dir[j] = bp->next;
699 	bp->next = 0;
700     }
701 
702     /* Adjust to the real offset of that chunk */
703     k += (lp-bp->bits)*MALLOC_BITS;
704     k <<= bp->shift;
705 
706     if (malloc_junk)
707 	memset((u_char*)bp->page + k, SOME_JUNK, (size_t)bp->size);
708 
709     return (u_char *)bp->page + k;
710 }
711 
712 /*
713  * Allocate a piece of memory
714  */
715 static void *
716 imalloc(size_t size)
717 {
718     void *result;
719 
720     if (suicide)
721 	abort();
722 
723     if ((size + malloc_pagesize) < size)	/* Check for overflow */
724 	result = 0;
725     else if (size <= malloc_maxsize)
726 	result =  malloc_bytes(size);
727     else
728 	result =  malloc_pages(size);
729 
730     if (malloc_abort && !result)
731 	wrterror("allocation failed.\n");
732 
733     if (malloc_zero && result)
734 	memset(result, 0, size);
735 
736     return result;
737 }
738 
739 /*
740  * Change the size of an allocation.
741  */
742 static void *
743 irealloc(void *ptr, size_t size)
744 {
745     void *p;
746     size_t osize, idx;
747     struct pginfo **mp;
748     size_t i;
749 
750     if (suicide)
751 	abort();
752 
753     idx = ptr2idx(ptr);
754 
755     if (idx < malloc_pageshift) {
756 	wrtwarning("junk pointer, too low to make sense.\n");
757 	return 0;
758     }
759 
760     if (idx > last_idx) {
761 	wrtwarning("junk pointer, too high to make sense.\n");
762 	return 0;
763     }
764 
765     mp = &page_dir[idx];
766 
767     if (*mp == MALLOC_FIRST) {			/* Page allocation */
768 
769 	/* Check the pointer */
770 	if ((size_t)(u_long)ptr & malloc_pagemask) {
771 	    wrtwarning("modified (page-) pointer.\n");
772 	    return 0;
773 	}
774 
775 	/* Find the size in bytes */
776 	for (osize = malloc_pagesize; *++mp == MALLOC_FOLLOW;)
777 	    osize += malloc_pagesize;
778 
779         if (!malloc_realloc && 			/* unless we have to, */
780 	  size <= osize && 			/* .. or are too small, */
781 	  size > (osize - malloc_pagesize)) {	/* .. or can free a page, */
782 	    return ptr;				/* don't do anything. */
783 	}
784 
785     } else if (*mp >= MALLOC_MAGIC) {		/* Chunk allocation */
786 
787 	/* Check the pointer for sane values */
788 	if (((size_t)(u_long)ptr & ((*mp)->size-1))) {
789 	    wrtwarning("modified (chunk-) pointer.\n");
790 	    return 0;
791 	}
792 
793 	/* Find the chunk index in the page */
794 	i = ((size_t)(u_long)ptr & malloc_pagemask) >> (*mp)->shift;
795 
796 	/* Verify that it isn't a free chunk already */
797         if ((*mp)->bits[i/MALLOC_BITS] & (1<<(i%MALLOC_BITS))) {
798 	    wrtwarning("chunk is already free.\n");
799 	    return 0;
800 	}
801 
802 	osize = (*mp)->size;
803 
804 	if (!malloc_realloc &&		/* Unless we have to, */
805 	  size < osize && 		/* ..or are too small, */
806 	  (size > osize/2 ||	 	/* ..or could use a smaller size, */
807 	  osize == malloc_minsize)) {	/* ..(if there is one) */
808 	    return ptr;			/* ..Don't do anything */
809 	}
810 
811     } else {
812 	wrtwarning("pointer to wrong page.\n");
813 	return 0;
814     }
815 
816     p = imalloc(size);
817 
818     if (p) {
819 	/* copy the lesser of the two sizes, and free the old one */
820 	if (!size || !osize)
821 	    ;
822 	else if (osize < size)
823 	    memcpy(p, ptr, osize);
824 	else
825 	    memcpy(p, ptr, size);
826 	ifree(ptr);
827     }
828     return p;
829 }
830 
831 /*
832  * Free a sequence of pages
833  */
834 
835 static __inline__ void
836 free_pages(void *ptr, size_t idx, struct pginfo *info)
837 {
838     size_t i;
839     struct pgfree *pf, *pt=0;
840     size_t l;
841     void *tail;
842 
843     if (info == MALLOC_FREE) {
844 	wrtwarning("page is already free.\n");
845 	return;
846     }
847 
848     if (info != MALLOC_FIRST) {
849 	wrtwarning("pointer to wrong page.\n");
850 	return;
851     }
852 
853     if ((size_t)(u_long)ptr & malloc_pagemask) {
854 	wrtwarning("modified (page-) pointer.\n");
855 	return;
856     }
857 
858     /* Count how many pages and mark them free at the same time */
859     page_dir[idx] = MALLOC_FREE;
860     for (i = 1; page_dir[idx+i] == MALLOC_FOLLOW; i++)
861 	page_dir[idx + i] = MALLOC_FREE;
862 
863     l = i << malloc_pageshift;
864 
865     if (malloc_junk)
866 	memset(ptr, SOME_JUNK, l);
867 
868     if (malloc_hint)
869 	madvise(ptr, l, MADV_FREE);
870 
871     tail = (char *)ptr+l;
872 
873     /* add to free-list */
874     if (!px)
875 	px = imalloc(sizeof *pt);	/* This cannot fail... */
876     px->page = ptr;
877     px->end =  tail;
878     px->size = l;
879     if (!free_list.next) {
880 
881 	/* Nothing on free list, put this at head */
882 	px->next = free_list.next;
883 	px->prev = &free_list;
884 	free_list.next = px;
885 	pf = px;
886 	px = 0;
887 
888     } else {
889 
890 	/* Find the right spot, leave pf pointing to the modified entry. */
891 	tail = (char *)ptr+l;
892 
893 	for(pf = free_list.next; pf->end < ptr && pf->next; pf = pf->next)
894 	    ; /* Race ahead here */
895 
896 	if (pf->page > tail) {
897 	    /* Insert before entry */
898 	    px->next = pf;
899 	    px->prev = pf->prev;
900 	    pf->prev = px;
901 	    px->prev->next = px;
902 	    pf = px;
903 	    px = 0;
904 	} else if (pf->end == ptr ) {
905 	    /* Append to the previous entry */
906 	    pf->end = (char *)pf->end + l;
907 	    pf->size += l;
908 	    if (pf->next && pf->end == pf->next->page ) {
909 		/* And collapse the next too. */
910 		pt = pf->next;
911 		pf->end = pt->end;
912 		pf->size += pt->size;
913 		pf->next = pt->next;
914 		if (pf->next)
915 		    pf->next->prev = pf;
916 	    }
917 	} else if (pf->page == tail) {
918 	    /* Prepend to entry */
919 	    pf->size += l;
920 	    pf->page = ptr;
921 	} else if (!pf->next) {
922 	    /* Append at tail of chain */
923 	    px->next = 0;
924 	    px->prev = pf;
925 	    pf->next = px;
926 	    pf = px;
927 	    px = 0;
928 	} else {
929 	    wrterror("freelist is destroyed.\n");
930 	}
931     }
932 
933     /* Return something to OS ? */
934     if (!pf->next &&				/* If we're the last one, */
935       pf->size > malloc_cache &&		/* ..and the cache is full, */
936       pf->end == malloc_brk &&			/* ..and none behind us, */
937       malloc_brk == sbrk((intptr_t)0)) {	/* ..and it's OK to do... */
938 
939 	/*
940 	 * Keep the cache intact.  Notice that the '>' above guarantees that
941 	 * the pf will always have at least one page afterwards.
942 	 */
943 	pf->end = (char *)pf->page + malloc_cache;
944 	pf->size = malloc_cache;
945 
946 	brk(pf->end);
947 	malloc_brk = pf->end;
948 
949 	idx = ptr2idx(pf->end);
950 
951 	for(i=idx;i <= last_idx;)
952 	    page_dir[i++] = MALLOC_NOT_MINE;
953 	last_idx = idx - 1;
954 
955 	/* XXX: We could realloc/shrink the pagedir here I guess. */
956     }
957     if (pt)
958 	ifree(pt);
959 }
960 
961 /*
962  * Free a chunk, and possibly the page it's on, if the page becomes empty.
963  */
964 
965 static __inline__ void
966 free_bytes(void *ptr, size_t idx, struct pginfo *info)
967 {
968     size_t i;
969     struct pginfo **mp;
970     void *vp;
971 
972     /* Find the chunk number on the page */
973     i = ((size_t)(u_long)ptr & malloc_pagemask) >> info->shift;
974 
975     if (((size_t)(u_long)ptr & (info->size-1))) {
976 	wrtwarning("modified (chunk-) pointer.\n");
977 	return;
978     }
979 
980     if (info->bits[i/MALLOC_BITS] & (1<<(i%MALLOC_BITS))) {
981 	wrtwarning("chunk is already free.\n");
982 	return;
983     }
984 
985     if (malloc_junk)
986 	memset(ptr, SOME_JUNK, (size_t)info->size);
987 
988     info->bits[i/MALLOC_BITS] |= 1<<(i%MALLOC_BITS);
989     info->free++;
990 
991     mp = page_dir + info->shift;
992 
993     if (info->free == 1) {
994 
995 	/* Page became non-full */
996 
997 	mp = page_dir + info->shift;
998 	/* Insert in address order */
999 	while (*mp && (*mp)->next && (*mp)->next->page < info->page)
1000 	    mp = &(*mp)->next;
1001 	info->next = *mp;
1002 	*mp = info;
1003 	return;
1004     }
1005 
1006     if (info->free != info->total)
1007 	return;
1008 
1009     /* Find & remove this page in the queue */
1010     while (*mp != info) {
1011 	mp = &((*mp)->next);
1012 #ifdef MALLOC_EXTRA_SANITY
1013 	if (!*mp)
1014 		wrterror("(ES): Not on queue\n");
1015 #endif /* MALLOC_EXTRA_SANITY */
1016     }
1017     *mp = info->next;
1018 
1019     /* Free the page & the info structure if need be */
1020     page_dir[idx] = MALLOC_FIRST;
1021     vp = info->page;		/* Order is important ! */
1022     if(vp != (void*)info)
1023 	ifree(info);
1024     ifree(vp);
1025 }
1026 
1027 static void
1028 ifree(void *ptr)
1029 {
1030     struct pginfo *info;
1031     size_t idx;
1032 
1033     /* This is legal */
1034     if (!ptr)
1035 	return;
1036 
1037     if (!malloc_started) {
1038 	wrtwarning("malloc() has never been called.\n");
1039 	return;
1040     }
1041 
1042     /* If we're already sinking, don't make matters any worse. */
1043     if (suicide)
1044 	return;
1045 
1046     idx = ptr2idx(ptr);
1047 
1048     if (idx < malloc_pageshift) {
1049 	wrtwarning("junk pointer, too low to make sense.\n");
1050 	return;
1051     }
1052 
1053     if (idx > last_idx) {
1054 	wrtwarning("junk pointer, too high to make sense.\n");
1055 	return;
1056     }
1057 
1058     info = page_dir[idx];
1059 
1060     if (info < MALLOC_MAGIC)
1061         free_pages(ptr, idx, info);
1062     else
1063 	free_bytes(ptr, idx, info);
1064     return;
1065 }
1066 
1067 /*
1068  * These are the public exported interface routines.
1069  */
1070 
1071 
1072 void *
1073 malloc(size_t size)
1074 {
1075     register void *r;
1076 
1077     THREAD_LOCK();
1078     malloc_func = " in malloc():";
1079     if (malloc_active++) {
1080 	wrtwarning("recursive call.\n");
1081         malloc_active--;
1082 	return (0);
1083     }
1084     if (!malloc_started)
1085 	malloc_init();
1086     if (malloc_sysv && !size)
1087 	r = 0;
1088     else
1089 	r = imalloc(size);
1090     UTRACE(0, size, r);
1091     malloc_active--;
1092     THREAD_UNLOCK();
1093     if (r == NULL && (size != 0 || !malloc_sysv)) {
1094 	if (malloc_xmalloc)
1095 	    wrterror("out of memory.\n");
1096 	errno = ENOMEM;
1097     }
1098     return (r);
1099 }
1100 
1101 void
1102 free(void *ptr)
1103 {
1104     THREAD_LOCK();
1105     malloc_func = " in free():";
1106     if (malloc_active++) {
1107 	wrtwarning("recursive call.\n");
1108 	malloc_active--;
1109 	return;
1110     } else {
1111 	ifree(ptr);
1112 	UTRACE(ptr, 0, 0);
1113     }
1114     malloc_active--;
1115     THREAD_UNLOCK();
1116     return;
1117 }
1118 
1119 void *
1120 realloc(void *ptr, size_t size)
1121 {
1122     register void *r;
1123 
1124     THREAD_LOCK();
1125     malloc_func = " in realloc():";
1126     if (malloc_active++) {
1127 	wrtwarning("recursive call.\n");
1128         malloc_active--;
1129 	return (0);
1130     }
1131     if (ptr && !malloc_started) {
1132 	wrtwarning("malloc() has never been called.\n");
1133 	ptr = 0;
1134     }
1135     if (!malloc_started)
1136 	malloc_init();
1137     if (malloc_sysv && !size) {
1138 	ifree(ptr);
1139 	r = 0;
1140     } else if (!ptr) {
1141 	r = imalloc(size);
1142     } else {
1143         r = irealloc(ptr, size);
1144     }
1145     UTRACE(ptr, size, r);
1146     malloc_active--;
1147     THREAD_UNLOCK();
1148     if (r == NULL && (size != 0 || !malloc_sysv)) {
1149 	if (malloc_xmalloc)
1150 	    wrterror("out of memory.\n");
1151 	errno = ENOMEM;
1152     }
1153     return (r);
1154 }
1155