xref: /minix3/lib/libc/stdlib/malloc.c (revision ebfedea0ce5bbe81e252ddf32d732e40fb633fae)
1 /*	$NetBSD: malloc.c,v 1.55 2012/12/30 21:23:20 dholland Exp $	*/
2 
3 /*
4  * ----------------------------------------------------------------------------
5  * "THE BEER-WARE LICENSE" (Revision 42):
6  * <phk@FreeBSD.ORG> wrote this file.  As long as you retain this notice you
7  * can do whatever you want with this stuff. If we meet some day, and you think
8  * this stuff is worth it, you can buy me a beer in return.   Poul-Henning Kamp
9  * ----------------------------------------------------------------------------
10  *
11  * From FreeBSD: malloc.c,v 1.91 2006/01/12 07:28:20 jasone
12  *
13  */
14 
15 #ifdef __minix
16 #include "extern.h"
17 #ifdef _LIBSYS
18 #include <minix/sysutil.h>
19 #include <machine/param.h>
20 #include <machine/vmparam.h>
21 #define MALLOC_NO_SYSCALLS
22 #define wrtwarning(w) printf("libminc malloc warning: %s\n", w)
23 #define wrterror(w) panic("libminc malloc error: %s\n", w)
24 #endif
25 #endif
26 
27 /*
28  * Defining MALLOC_EXTRA_SANITY will enable extra checks which are related
29  * to internal conditions and consistency in malloc.c. This has a
30  * noticeable runtime performance hit, and generally will not do you
31  * any good unless you fiddle with the internals of malloc or want
32  * to catch random pointer corruption as early as possible.
33  */
34 #ifndef MALLOC_EXTRA_SANITY
35 #undef MALLOC_EXTRA_SANITY
36 #endif
37 
38 /*
39  * What to use for Junk.  This is the byte value we use to fill with
40  * when the 'J' option is enabled.
41  */
42 #define SOME_JUNK	0xd0		/* as in "Duh" :-) */
43 
44 /*
45  * The basic parameters you can tweak.
46  *
47  * malloc_minsize	minimum size of an allocation in bytes.
48  *			If this is too small it's too much work
49  *			to manage them.  This is also the smallest
50  *			unit of alignment used for the storage
51  *			returned by malloc/realloc.
52  *
53  */
54 
55 #include "namespace.h"
56 #if defined(__FreeBSD__)
57 #   if defined(__i386__)
58 #       define malloc_minsize		16U
59 #   endif
60 #   if defined(__ia64__)
61 #	define malloc_pageshift		13U
62 #	define malloc_minsize		16U
63 #   endif
64 #   if defined(__alpha__)
65 #       define malloc_pageshift		13U
66 #       define malloc_minsize		16U
67 #   endif
68 #   if defined(__sparc64__)
69 #       define malloc_pageshift		13U
70 #       define malloc_minsize		16U
71 #   endif
72 #   if defined(__amd64__)
73 #       define malloc_pageshift		12U
74 #       define malloc_minsize		16U
75 #   endif
76 #   if defined(__arm__)
77 #       define malloc_pageshift         12U
78 #       define malloc_minsize           16U
79 #   endif
80 #   define HAS_UTRACE
81 #   define UTRACE_LABEL
82 
83 #include <sys/cdefs.h>
84 void utrace(struct ut *, int);
85 
86     /*
87      * Make malloc/free/realloc thread-safe in libc for use with
88      * kernel threads.
89      */
90 #   include "libc_private.h"
91 #   include "spinlock.h"
92     static spinlock_t thread_lock	= _SPINLOCK_INITIALIZER;
93 #   define _MALLOC_LOCK()		if (__isthreaded) _SPINLOCK(&thread_lock);
94 #   define _MALLOC_UNLOCK()		if (__isthreaded) _SPINUNLOCK(&thread_lock);
95 #endif /* __FreeBSD__ */
96 
97 /* #undef these things so that malloc uses the non-internal symbols.
98  * This is necessary for VM to be able to define its own versions, and
99  * use this malloc.
100  */
101 #undef mmap
102 #undef munmap
103 
104 #include <sys/types.h>
105 #if defined(__NetBSD__)
106 #   define malloc_minsize               16U
107 #   define HAS_UTRACE
108 #   define UTRACE_LABEL "malloc",
109 #include <sys/cdefs.h>
110 #include "extern.h"
111 #if defined(LIBC_SCCS) && !defined(lint)
112 __RCSID("$NetBSD: malloc.c,v 1.55 2012/12/30 21:23:20 dholland Exp $");
113 #endif /* LIBC_SCCS and not lint */
114 int utrace(const char *, void *, size_t);
115 
116 #include <reentrant.h>
117 extern int __isthreaded;
118 static mutex_t thread_lock = MUTEX_INITIALIZER;
119 #define _MALLOC_LOCK()	if (__isthreaded) mutex_lock(&thread_lock);
120 #define _MALLOC_UNLOCK()	if (__isthreaded) mutex_unlock(&thread_lock);
121 #endif /* __NetBSD__ */
122 
123 #if defined(__sparc__) && defined(sun)
124 #   define malloc_minsize		16U
125 #   define MAP_ANON			(0)
126     static int fdzero;
127 #   define MMAP_FD	fdzero
128 #   define INIT_MMAP() \
129 	{ if ((fdzero = open(_PATH_DEVZERO, O_RDWR, 0000)) == -1) \
130 	    wrterror("open of /dev/zero"); }
131 #endif /* __sparc__ */
132 
133 /* Insert your combination here... */
134 #if defined(__FOOCPU__) && defined(__BAROS__)
135 #   define malloc_minsize		16U
136 #endif /* __FOOCPU__ && __BAROS__ */
137 
138 #ifndef ZEROSIZEPTR
139 #define ZEROSIZEPTR	((void *)(uintptr_t)(1UL << (malloc_pageshift - 1)))
140 #endif
141 
142 /*
143  * No user serviceable parts behind this point.
144  */
145 #include <sys/types.h>
146 #include <sys/mman.h>
147 #include <errno.h>
148 #include <fcntl.h>
149 #include <paths.h>
150 #include <stddef.h>
151 #include <stdio.h>
152 #include <stdlib.h>
153 #include <string.h>
154 #include <unistd.h>
155 
156 /*
157  * This structure describes a page worth of chunks.
158  */
159 
/*
 * Describes one page that has been carved into equal-sized chunks.
 * A variable-length free bitmap follows the fixed fields; bits[1] is
 * the pre-C99 idiom for a flexible array member.
 */
struct pginfo {
    struct pginfo	*next;	/* next on the free list */
    void		*page;	/* Pointer to the page */
    u_short		size;	/* size of this page's chunks */
    u_short		shift;	/* How far to shift for this size chunks */
    u_short		free;	/* How many free chunks */
    u_short		total;	/* How many chunks in total */
    u_int		bits[1]; /* Which chunks are free (set bit = free) */
};
169 
170 /*
171  * This structure describes a number of free pages.
172  */
173 
/*
 * One node of the doubly-linked, address-ordered list of free page
 * runs (anchored at free_list; maintained by free_pages()).
 */
struct pgfree {
    struct pgfree	*next;	/* next run of free pages */
    struct pgfree	*prev;	/* prev run of free pages */
    void		*page;	/* pointer to free pages */
    void		*end;	/* pointer to end of free pages */
    size_t		size;	/* number of bytes free */
};
181 
182 /*
183  * How many bits per u_int in the bitmap.
184  * Change only if not 8 bits/byte
185  */
186 #define	MALLOC_BITS	((int)(8*sizeof(u_int)))
187 
188 /*
189  * Magic values to put in the page_directory
190  */
191 #define MALLOC_NOT_MINE	((struct pginfo*) 0)
192 #define MALLOC_FREE 	((struct pginfo*) 1)
193 #define MALLOC_FIRST	((struct pginfo*) 2)
194 #define MALLOC_FOLLOW	((struct pginfo*) 3)
195 #define MALLOC_MAGIC	((struct pginfo*) 4)
196 
197 /*
198  * Page size related parameters, computed at run-time.
199  */
200 static size_t malloc_pagesize;
201 static size_t malloc_pageshift;
202 static size_t malloc_pagemask;
203 
204 #ifndef malloc_minsize
205 #define malloc_minsize			16U
206 #endif
207 
208 #ifndef malloc_maxsize
209 #define malloc_maxsize			((malloc_pagesize)>>1)
210 #endif
211 
212 #define pageround(foo) (((foo) + (malloc_pagemask))&(~(malloc_pagemask)))
213 #define ptr2idx(foo) \
214     (((size_t)(uintptr_t)(foo) >> malloc_pageshift)-malloc_origo)
215 
216 #ifndef _MALLOC_LOCK
217 #define _MALLOC_LOCK()
218 #endif
219 
220 #ifndef _MALLOC_UNLOCK
221 #define _MALLOC_UNLOCK()
222 #endif
223 
224 #ifndef MMAP_FD
225 #define MMAP_FD (-1)
226 #endif
227 
228 #ifndef INIT_MMAP
229 #define INIT_MMAP()
230 #endif
231 
232 #if !defined(__minix)
233 #ifndef MADV_FREE
234 #define MADV_FREE MADV_DONTNEED
235 #endif
236 #else
237 #undef MADV_FREE
238 #endif /* !defined(__minix) */
239 
240 /* Number of free pages we cache */
241 static size_t malloc_cache = 16;
242 
243 /* The offset from pagenumber to index into the page directory */
244 static size_t malloc_origo;
245 
246 /* The last index in the page directory we care about */
247 static size_t last_idx;
248 
249 /* Pointer to page directory. Allocated "as if with" malloc */
250 static struct	pginfo **page_dir;
251 
252 /* How many slots in the page directory */
253 static size_t	malloc_ninfo;
254 
255 /* Free pages line up here */
256 static struct pgfree free_list;
257 
258 /* Abort(), user doesn't handle problems.  */
259 static int malloc_abort;
260 
261 /* Are we trying to die ?  */
262 static int suicide;
263 
264 /* always realloc ?  */
265 static int malloc_realloc;
266 
267 /* pass the kernel a hint on free pages ?  */
268 #if defined(MADV_FREE)
269 static int malloc_hint = 0;
270 #endif
271 
272 /* xmalloc behaviour ?  */
273 static int malloc_xmalloc;
274 
275 /* sysv behaviour for malloc(0) ?  */
276 static int malloc_sysv;
277 
278 /* zero fill ?  */
279 static int malloc_zero;
280 
281 /* junk fill ?  */
282 static int malloc_junk;
283 
284 #ifdef HAS_UTRACE
285 
286 /* utrace ?  */
287 static int malloc_utrace;
288 
289 struct ut { void *p; size_t s; void *r; };
290 
291 #define UTRACE(a, b, c) \
292 	if (malloc_utrace) {			\
293 		struct ut u;			\
294 		u.p=a; u.s = b; u.r=c;		\
295 		utrace(UTRACE_LABEL (void *) &u, sizeof u);	\
296 	}
297 #else /* !HAS_UTRACE */
298 #define UTRACE(a,b,c)
299 #endif /* HAS_UTRACE */
300 
301 /* my last break. */
302 static void *malloc_brk;
303 
304 /* one location cache for free-list holders */
305 static struct pgfree *px;
306 
307 /* compile-time options */
308 const char *_malloc_options;
309 
310 /* Name of the current public function */
311 static const char *malloc_func;
312 
/*
 * Macro for mmap: an anonymous, private, read/write mapping of `size`
 * bytes (MMAP_FD is -1 except on platforms that map /dev/zero; see
 * INIT_MMAP above).  Note: no trailing semicolon, so the macro expands
 * to an expression.  The original definition ended in ';', which made
 * every `x = MMAP(n);` call site expand to a double semicolon and
 * would break any use of MMAP inside an expression (e.g. an `if`
 * condition).
 */
#define MMAP(size) \
	mmap(NULL, (size), PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, \
	    MMAP_FD, (off_t)0)
317 
318 /*
319  * Necessary function declarations
320  */
321 static int extend_pgdir(size_t idx);
322 static void *imalloc(size_t size);
323 static void ifree(void *ptr);
324 static void *irealloc(void *ptr, size_t size);
325 
326 #ifndef MALLOC_NO_SYSCALLS
/*
 * Default malloc diagnostic sink: emit the four message fragments to
 * stderr, in order.  Uses write(2) rather than stdio so it is safe to
 * call from inside the allocator itself.  Write errors are ignored,
 * as there is nothing useful to do about them here.
 */
static void
wrtmessage(const char *p1, const char *p2, const char *p3, const char *p4)
{
    const char *frag[4];
    size_t n;

    frag[0] = p1;
    frag[1] = p2;
    frag[2] = p3;
    frag[3] = p4;

    for (n = 0; n < 4; n++)
	write(STDERR_FILENO, frag[n], strlen(frag[n]));
}
336 
337 void (*_malloc_message)(const char *p1, const char *p2, const char *p3,
338 	    const char *p4) = wrtmessage;
/*
 * Fatal allocator error: report through the message hook, then abort.
 * `suicide` is set first so that allocator entry points invoked during
 * abort-time cleanup bail out instead of recursing into broken state.
 */
static void
wrterror(const char *p)
{

    suicide = 1;
    _malloc_message(getprogname(), malloc_func, " error: ", p);
    abort();
}
347 
/*
 * Non-fatal allocator complaint.  Escalates to wrterror() (and thus
 * abort) when the 'A' option is set or when the process looks
 * security-sensitive, as described below.
 */
static void
wrtwarning(const char *p)
{

    /*
     * Sensitive processes, somewhat arbitrarily defined here as setuid,
     * setgid, root and wheel cannot afford to have malloc mistakes.
     */
    if (malloc_abort || issetugid() || getuid() == 0 || getgid() == 0)
	wrterror(p);
}
359 #endif
360 
361 /*
362  * Allocate a number of pages from the OS
363  */
/*
 * Allocate a number of pages from the OS by moving the break with
 * sbrk(2).  Returns a page-aligned pointer to `pages` pages, or NULL
 * on failure.  Also updates malloc_brk/last_idx and grows the page
 * directory when the new break moves past it.
 */
static void *
map_pages(size_t pages)
{
    caddr_t result, rresult, tail;
    intptr_t bytes = pages << malloc_pageshift;

    /* Catch overflow of the shift: sign flip, or wrap below `pages`. */
    if (bytes < 0 || (size_t)bytes < pages) {
	errno = ENOMEM;
	return NULL;
    }

    if ((result = sbrk(bytes)) == (void *)-1)
	return NULL;

    /*
     * Round to a page, in case sbrk(2) did not do this for us
     */
    rresult = (caddr_t)pageround((size_t)(uintptr_t)result);
    if (result < rresult) {
	/* make sure we have enough space to fit bytes */
	if (sbrk((intptr_t)(rresult - result)) == (void *) -1) {
	    /* we failed, put everything back */
	    if (brk(result)) {
		wrterror("brk(2) failed [internal error]\n");
	    }
	}
    }
    tail = rresult + (size_t)bytes;

    last_idx = ptr2idx(tail) - 1;
    malloc_brk = tail;

    /* If the region extends past the page directory, try to grow it. */
    if ((last_idx+1) >= malloc_ninfo && !extend_pgdir(last_idx)) {
	malloc_brk = result;
	last_idx = ptr2idx(malloc_brk) - 1;
	/* Put back break point since we failed. */
	if (brk(malloc_brk))
	    wrterror("brk(2) failed [internal error]\n");
	return 0;
    }

    return rresult;
}
407 
408 /*
409  * Extend page directory
410  */
/*
 * Extend page directory so it can describe page index `idx`.
 * Allocates a fresh, larger mapping, copies the old directory into it
 * and unmaps the old one.  Returns 1 on success, 0 (with errno set on
 * the overflow path) on failure.
 */
static int
extend_pgdir(size_t idx)
{
    struct  pginfo **new, **old;
    size_t newlen, oldlen;

    /* check for overflow */
    if ((((~(1UL << ((sizeof(size_t) * NBBY) - 1)) / sizeof(*page_dir)) + 1)
	+ (malloc_pagesize / sizeof *page_dir)) < idx) {
	errno = ENOMEM;
	return 0;
    }

    /* Make it this many pages */
    newlen = pageround(idx * sizeof *page_dir) + malloc_pagesize;

    /* remember the old mapping size */
    oldlen = malloc_ninfo * sizeof *page_dir;

    /*
     * NOTE: we allocate new pages and copy the directory rather than tempt
     * fate by trying to "grow" the region.. There is nothing to prevent
     * us from accidentally re-mapping space that's been allocated by our caller
     * via dlopen() or other mmap().
     *
     * The copy problem is not too bad, as there is 4K of page index per
     * 4MB of malloc arena.
     *
     * We can totally avoid the copy if we open a file descriptor to associate
     * the anon mappings with.  Then, when we remap the pages at the new
     * address, the old pages will be "magically" remapped..  But this means
     * keeping open a "secret" file descriptor.....
     */

    /* Get new pages */
    new = MMAP(newlen);
    if (new == MAP_FAILED)
	return 0;

    /* Copy the old stuff */
    memcpy(new, page_dir, oldlen);

    /* register the new size */
    malloc_ninfo = newlen / sizeof *page_dir;

    /* swap the pointers */
    old = page_dir;
    page_dir = new;

    /* Now free the old stuff */
    munmap(old, oldlen);
    return 1;
}
464 
465 /*
466  * Initialize the world
467  */
/*
 * Initialize the allocator: compute page-size parameters, parse the
 * malloc option flags, map the initial page directory and pre-allocate
 * the free-list holder cache (px).  Called once, lazily, from
 * pubrealloc().  Preserves the caller's errno.
 */
static void
malloc_init(void)
{
#ifndef MALLOC_NO_SYSCALLS
    const char *p;
    char b[64];
    size_t i;
    ssize_t j;
#endif
    int serrno = errno;
#ifndef MALLOC_NO_SYSCALLS

    /*
     * Compute page-size related variables.
     */
    malloc_pagesize = (size_t)sysconf(_SC_PAGESIZE);
#else
    malloc_pagesize = PAGE_SIZE;
#endif
    malloc_pagemask = malloc_pagesize - 1;
    /* Derive the shift from the (power-of-two) page size. */
    for (malloc_pageshift = 0;
	 (1UL << malloc_pageshift) != malloc_pagesize;
	 malloc_pageshift++)
	/* nothing */ ;

    INIT_MMAP();

#ifdef MALLOC_EXTRA_SANITY
    malloc_junk = 1;
#endif /* MALLOC_EXTRA_SANITY */

#ifndef MALLOC_NO_SYSCALLS
    /*
     * Option sources, in increasing priority: /etc/malloc.conf (a
     * symlink whose target encodes the flags), the MALLOC_OPTIONS
     * environment variable (skipped for set-id processes), and the
     * compiled-in _malloc_options string.
     */
    for (i = 0; i < 3; i++) {
	if (i == 0) {
	    j = readlink("/etc/malloc.conf", b, sizeof b - 1);
	    if (j == -1)
		continue;
	    b[j] = '\0';
	    p = b;
	} else if (i == 1 && issetugid() == 0) {
	    p = getenv("MALLOC_OPTIONS");
	} else if (i == 1) {
	    continue;
	} else {
	    p = _malloc_options;
	}
	/* Lower-case letters clear a flag, upper-case letters set it. */
	for (; p != NULL && *p != '\0'; p++) {
	    switch (*p) {
		case '>': malloc_cache   <<= 1; break;
		case '<': malloc_cache   >>= 1; break;
		case 'a': malloc_abort   = 0; break;
		case 'A': malloc_abort   = 1; break;
#if !defined(__minix)
		case 'h': malloc_hint    = 0; break;
		case 'H': malloc_hint    = 1; break;
#endif /* !defined(__minix) */
		case 'r': malloc_realloc = 0; break;
		case 'R': malloc_realloc = 1; break;
		case 'j': malloc_junk    = 0; break;
		case 'J': malloc_junk    = 1; break;
#ifdef HAS_UTRACE
		case 'u': malloc_utrace  = 0; break;
		case 'U': malloc_utrace  = 1; break;
#endif
		case 'v': malloc_sysv    = 0; break;
		case 'V': malloc_sysv    = 1; break;
		case 'x': malloc_xmalloc = 0; break;
		case 'X': malloc_xmalloc = 1; break;
		case 'z': malloc_zero    = 0; break;
		case 'Z': malloc_zero    = 1; break;
		default:
		    _malloc_message(getprogname(), malloc_func,
			 " warning: ", "unknown char in MALLOC_OPTIONS\n");
		    break;
	    }
	}
    }
#endif

    UTRACE(0, 0, 0);

    /*
     * We want junk in the entire allocation, and zero only in the part
     * the user asked for.
     */
    if (malloc_zero)
	malloc_junk = 1;

    /* Allocate one page for the page directory */
    page_dir = MMAP(malloc_pagesize);

    if (page_dir == MAP_FAILED)
	wrterror("mmap(2) failed, check limits.\n");

    /*
     * We need a maximum of malloc_pageshift buckets, steal these from the
     * front of the page_directory;
     */
    malloc_origo = pageround((size_t)(uintptr_t)sbrk((intptr_t)0))
	>> malloc_pageshift;
    malloc_origo -= malloc_pageshift;

    malloc_ninfo = malloc_pagesize / sizeof *page_dir;

    /* Recalculate the cache size in bytes, and make sure it's nonzero */

    if (!malloc_cache)
	malloc_cache++;

    malloc_cache <<= malloc_pageshift;

    /*
     * This is a nice hack from Kaleb Keithly (kaleb@x.org).
     * We can sbrk(2) further back when we keep this on a low address.
     */
    px = imalloc(sizeof *px);

    errno = serrno;
}
587 
588 /*
589  * Allocate a number of complete pages
590  */
/*
 * Allocate a number of complete pages: satisfy the request from the
 * free-page cache when possible, otherwise map fresh pages from the
 * OS.  Marks the pages MALLOC_FIRST/MALLOC_FOLLOW in the page
 * directory.  Returns NULL on failure.
 */
static void *
malloc_pages(size_t size)
{
    void *p, *delay_free = NULL;
    size_t i;
    struct pgfree *pf;
    size_t idx;

    /* Round the request up to whole pages, checking for wrap-around. */
    idx = pageround(size);
    if (idx < size) {
	errno = ENOMEM;
	return NULL;
    } else
	size = idx;

    p = NULL;

    /* Look for free pages before asking for more */
    for(pf = free_list.next; pf; pf = pf->next) {

#ifdef MALLOC_EXTRA_SANITY
	if (pf->size & malloc_pagemask)
	    wrterror("(ES): junk length entry on free_list.\n");
	if (!pf->size)
	    wrterror("(ES): zero length entry on free_list.\n");
	if (pf->page == pf->end)
	    wrterror("(ES): zero entry on free_list.\n");
	if (pf->page > pf->end)
	    wrterror("(ES): sick entry on free_list.\n");
	if ((void*)pf->page >= (void*)sbrk(0))
	    wrterror("(ES): entry on free_list past brk.\n");
	if (page_dir[ptr2idx(pf->page)] != MALLOC_FREE)
	    wrterror("(ES): non-free first page on free-list.\n");
	if (page_dir[ptr2idx(pf->end)-1] != MALLOC_FREE)
	    wrterror("(ES): non-free last page on free-list.\n");
#endif /* MALLOC_EXTRA_SANITY */

	if (pf->size < size)
	    continue;

	/* Exact fit: take the run and unlink its holder (freed below). */
	if (pf->size == size) {
	    p = pf->page;
	    if (pf->next != NULL)
		    pf->next->prev = pf->prev;
	    pf->prev->next = pf->next;
	    delay_free = pf;
	    break;
	}

	/* Larger run: carve our pages off the front and keep the rest. */
	p = pf->page;
	pf->page = (char *)pf->page + size;
	pf->size -= size;
	break;
    }

#ifdef MALLOC_EXTRA_SANITY
    if (p != NULL && page_dir[ptr2idx(p)] != MALLOC_FREE)
	wrterror("(ES): allocated non-free page on free-list.\n");
#endif /* MALLOC_EXTRA_SANITY */

    size >>= malloc_pageshift;

    /* Map new pages */
    if (p == NULL)
	p = map_pages(size);

    if (p != NULL) {

	idx = ptr2idx(p);
	page_dir[idx] = MALLOC_FIRST;
	for (i=1;i<size;i++)
	    page_dir[idx+i] = MALLOC_FOLLOW;

	if (malloc_junk)
	    memset(p, SOME_JUNK, size << malloc_pageshift);
    }

    /*
     * Recycle the unlinked holder into the one-entry px cache, or free
     * it.  Deferred until here so ifree() never runs mid-surgery.
     */
    if (delay_free) {
	if (px == NULL)
	    px = delay_free;
	else
	    ifree(delay_free);
    }

    return p;
}
677 
678 /*
679  * Allocate a page of fragments
680  */
681 
/*
 * Allocate a page of fragments: take one fresh page, split it into
 * 2^bits-byte chunks, build the pginfo bookkeeping (inside the page
 * itself when that wastes at most two chunks, otherwise allocated
 * separately) and put the page at the head of the size bucket.
 * Returns 1 on success, 0 on failure.
 */
static inline int
malloc_make_chunks(int bits)
{
    struct  pginfo *bp;
    void *pp;
    int i, k;
    long l;

    /* Allocate a new bucket */
    pp = malloc_pages(malloc_pagesize);
    if (pp == NULL)
	return 0;

    /* Find length of admin structure */
    l = (long)offsetof(struct pginfo, bits[0]);
    l += (long)sizeof bp->bits[0] *
	(((malloc_pagesize >> bits)+MALLOC_BITS-1) / MALLOC_BITS);

    /* Don't waste more than two chunks on this */
    if ((1<<(bits)) <= l+l) {
	bp = (struct  pginfo *)pp;
    } else {
	bp = imalloc((size_t)l);
	if (bp == NULL) {
	    ifree(pp);
	    return 0;
	}
    }

    bp->size = (1<<bits);
    bp->shift = bits;
    bp->total = bp->free = (u_short)(malloc_pagesize >> bits);
    bp->page = pp;

    /* set all valid bits in the bitmap */
    k = bp->total;
    i = 0;

    /* Do a bunch at a time */
    for(;k-i >= MALLOC_BITS; i += MALLOC_BITS)
	bp->bits[i / MALLOC_BITS] = ~0U;

    for(; i < k; i++)
        bp->bits[i/MALLOC_BITS] |= 1<<(i%MALLOC_BITS);

    /* If the admin structure lives in the page, its chunks aren't free. */
    if (bp == bp->page) {
	/* Mark the ones we stole for ourselves */
	for(i = 0; l > 0; i++) {
	    bp->bits[i / MALLOC_BITS] &= ~(1 << (i % MALLOC_BITS));
	    bp->free--;
	    bp->total--;
	    l -= (long)(1 << bits);
	}
    }

    /* MALLOC_LOCK */

    page_dir[ptr2idx(pp)] = bp;

    /* Push onto the bucket list; slot `bits` of page_dir is the head. */
    bp->next = page_dir[bits];
    page_dir[bits] = bp;

    /* MALLOC_UNLOCK */

    return 1;
}
748 
749 /*
750  * Allocate a fragment
751  */
/*
 * Allocate a fragment: find (or create) a page of chunks of the right
 * power-of-two size, claim the first free chunk in its bitmap and
 * return its address.  Returns NULL on failure.
 */
static void *
malloc_bytes(size_t size)
{
    size_t i;
    int j;
    u_int u;
    struct  pginfo *bp;
    size_t k;
    u_int *lp;

    /* Don't bother with anything less than this */
    if (size < malloc_minsize)
	size = malloc_minsize;


    /* Find the right bucket: j = ceil(log2(size)) for size > 1. */
    j = 1;
    i = size-1;
    while (i >>= 1)
	j++;

    /* If it's empty, make a page more of that size chunks */
    if (page_dir[j] == NULL && !malloc_make_chunks(j))
	return NULL;

    bp = page_dir[j];

    /* Find first word of bitmap which isn't empty */
    /* (a page on the bucket list always has at least one free chunk) */
    for (lp = bp->bits; !*lp; lp++)
	;

    /* Find that bit, and tweak it */
    u = 1;
    k = 0;
    while (!(*lp & u)) {
	u += u;
	k++;
    }
    *lp ^= u;	/* clear the bit: chunk is now allocated */

    /* If there are no more free, remove from free-list */
    if (!--bp->free) {
	page_dir[j] = bp->next;
	bp->next = NULL;
    }

    /* Adjust to the real offset of that chunk */
    k += (lp-bp->bits)*MALLOC_BITS;
    k <<= bp->shift;

    if (malloc_junk)
	memset((u_char*)bp->page + k, SOME_JUNK, (size_t)bp->size);

    return (u_char *)bp->page + k;
}
807 
808 /*
809  * Allocate a piece of memory
810  */
811 static void *
812 imalloc(size_t size)
813 {
814     void *result;
815 
816     if (suicide)
817 	abort();
818 
819     if ((size + malloc_pagesize) < size)	/* Check for overflow */
820 	result = NULL;
821     else if ((size + malloc_pagesize) >= (uintptr_t)page_dir)
822 	result = NULL;
823     else if (size <= malloc_maxsize)
824 	result = malloc_bytes(size);
825     else
826 	result = malloc_pages(size);
827 
828     if (malloc_abort && result == NULL)
829 	wrterror("allocation failed.\n");
830 
831     if (malloc_zero && result != NULL)
832 	memset(result, 0, size);
833 
834     return result;
835 }
836 
837 /*
838  * Change the size of an allocation.
839  */
/*
 * Internal realloc: resize an existing allocation.  Stays in place
 * when the old block is already a good fit (unless the 'R' option
 * forces a move); otherwise allocates, copies the smaller of the two
 * sizes and frees the old block.  Returns NULL (old block untouched)
 * on bad pointers or allocation failure.
 */
static void *
irealloc(void *ptr, size_t size)
{
    void *p;
    size_t osize, idx;
    struct pginfo **mp;
    size_t i;

    if (suicide)
	abort();

    idx = ptr2idx(ptr);

    /* Indices below malloc_pageshift are the chunk-bucket heads. */
    if (idx < malloc_pageshift) {
	wrtwarning("junk pointer, too low to make sense.\n");
	return 0;
    }

    if (idx > last_idx) {
	wrtwarning("junk pointer, too high to make sense.\n");
	return 0;
    }

    mp = &page_dir[idx];

    if (*mp == MALLOC_FIRST) {			/* Page allocation */

	/* Check the pointer */
	if ((size_t)(uintptr_t)ptr & malloc_pagemask) {
	    wrtwarning("modified (page-) pointer.\n");
	    return NULL;
	}

	/* Find the size in bytes */
	for (osize = malloc_pagesize; *++mp == MALLOC_FOLLOW;)
	    osize += malloc_pagesize;

        if (!malloc_realloc && 			/* unless we have to, */
	  size <= osize && 			/* .. or are too small, */
	  size > (osize - malloc_pagesize)) {	/* .. or can free a page, */
	    if (malloc_junk)
		memset((u_char *)ptr + size, SOME_JUNK, osize-size);
	    return ptr;				/* don't do anything. */
	}

    } else if (*mp >= MALLOC_MAGIC) {		/* Chunk allocation */

	/* Check the pointer for sane values */
	if (((size_t)(uintptr_t)ptr & ((*mp)->size-1))) {
	    wrtwarning("modified (chunk-) pointer.\n");
	    return NULL;
	}

	/* Find the chunk index in the page */
	i = ((size_t)(uintptr_t)ptr & malloc_pagemask) >> (*mp)->shift;

	/* Verify that it isn't a free chunk already */
        if ((*mp)->bits[i/MALLOC_BITS] & (1UL << (i % MALLOC_BITS))) {
	    wrtwarning("chunk is already free.\n");
	    return NULL;
	}

	osize = (*mp)->size;

	if (!malloc_realloc &&		/* Unless we have to, */
	  size <= osize && 		/* ..or are too small, */
	  (size > osize / 2 ||	 	/* ..or could use a smaller size, */
	  osize == malloc_minsize)) {	/* ..(if there is one) */
	    if (malloc_junk)
		memset((u_char *)ptr + size, SOME_JUNK, osize-size);
	    return ptr;			/* ..Don't do anything */
	}

    } else {
	wrtwarning("pointer to wrong page.\n");
	return NULL;
    }

    p = imalloc(size);

    if (p != NULL) {
	/* copy the lesser of the two sizes, and free the old one */
	if (!size || !osize)
	    ;
	else if (osize < size)
	    memcpy(p, ptr, osize);
	else
	    memcpy(p, ptr, size);
	ifree(ptr);
    }
    return p;
}
932 
933 /*
934  * Free a sequence of pages
935  */
936 
/*
 * Free a sequence of pages: mark the run MALLOC_FREE in the page
 * directory and insert it into the address-ordered free list,
 * coalescing with adjacent runs.  If the resulting tail run exceeds
 * the cache limit and sits at the current break, the excess is
 * returned to the OS with brk(2).
 */

static inline void
free_pages(void *ptr, size_t idx, struct pginfo *info)
{
    size_t i;
    struct pgfree *pf, *pt=NULL;
    size_t l;
    void *tail;

    if (info == MALLOC_FREE) {
	wrtwarning("page is already free.\n");
	return;
    }

    if (info != MALLOC_FIRST) {
	wrtwarning("pointer to wrong page.\n");
	return;
    }

    if ((size_t)(uintptr_t)ptr & malloc_pagemask) {
	wrtwarning("modified (page-) pointer.\n");
	return;
    }

    /* Count how many pages and mark them free at the same time */
    page_dir[idx] = MALLOC_FREE;
    for (i = 1; page_dir[idx+i] == MALLOC_FOLLOW; i++)
	page_dir[idx + i] = MALLOC_FREE;

    l = i << malloc_pageshift;

    if (malloc_junk)
	memset(ptr, SOME_JUNK, l);

#if !defined(__minix)
    if (malloc_hint)
	madvise(ptr, l, MADV_FREE);
#endif /* !defined(__minix) */

    tail = (char *)ptr+l;

    /* add to free-list */
    /* NOTE(review): imalloc can in fact return NULL here; the original
     * code assumes the pre-allocated px makes this "impossible". */
    if (px == NULL)
	px = imalloc(sizeof *px);	/* This cannot fail... */
    px->page = ptr;
    px->end =  tail;
    px->size = l;
    if (free_list.next == NULL) {

	/* Nothing on free list, put this at head */
	px->next = free_list.next;
	px->prev = &free_list;
	free_list.next = px;
	pf = px;
	px = NULL;

    } else {

	/* Find the right spot, leave pf pointing to the modified entry. */
	tail = (char *)ptr+l;

	for(pf = free_list.next; pf->end < ptr && pf->next != NULL;
	    pf = pf->next)
	    ; /* Race ahead here */

	if (pf->page > tail) {
	    /* Insert before entry */
	    px->next = pf;
	    px->prev = pf->prev;
	    pf->prev = px;
	    px->prev->next = px;
	    pf = px;
	    px = NULL;
	} else if (pf->end == ptr ) {
	    /* Append to the previous entry */
	    pf->end = (char *)pf->end + l;
	    pf->size += l;
	    if (pf->next != NULL && pf->end == pf->next->page ) {
		/* And collapse the next too. */
		pt = pf->next;
		pf->end = pt->end;
		pf->size += pt->size;
		pf->next = pt->next;
		if (pf->next != NULL)
		    pf->next->prev = pf;
	    }
	} else if (pf->page == tail) {
	    /* Prepend to entry */
	    pf->size += l;
	    pf->page = ptr;
	} else if (pf->next == NULL) {
	    /* Append at tail of chain */
	    px->next = NULL;
	    px->prev = pf;
	    pf->next = px;
	    pf = px;
	    px = NULL;
	} else {
	    wrterror("freelist is destroyed.\n");
	}
    }

    /* Return something to OS ? */
    if (pf->next == NULL &&			/* If we're the last one, */
      pf->size > malloc_cache &&		/* ..and the cache is full, */
      pf->end == malloc_brk &&			/* ..and none behind us, */
      malloc_brk == sbrk((intptr_t)0)) {	/* ..and it's OK to do... */

	/*
	 * Keep the cache intact.  Notice that the '>' above guarantees that
	 * the pf will always have at least one page afterwards.
	 */
	pf->end = (char *)pf->page + malloc_cache;
	pf->size = malloc_cache;

	brk(pf->end);
	malloc_brk = pf->end;

	idx = ptr2idx(pf->end);

	/* Pages past the new break are no longer ours. */
	for(i=idx;i <= last_idx;)
	    page_dir[i++] = MALLOC_NOT_MINE;

	last_idx = idx - 1;

	/* XXX: We could realloc/shrink the pagedir here I guess. */
    }
    /* Free the holder of a run that was coalesced away above. */
    if (pt != NULL)
	ifree(pt);
}
1066 
1067 /*
1068  * Free a chunk, and possibly the page it's on, if the page becomes empty.
1069  */
1070 
/*
 * Free a chunk, and possibly the page it's on, if the page becomes empty.
 * Re-links a page that goes from full to non-full into its size bucket
 * (in address order), and releases a fully-free page (plus its pginfo,
 * when allocated separately) back through ifree().
 */

static inline void
free_bytes(void *ptr, size_t idx, struct pginfo *info)
{
    size_t i;
    struct pginfo **mp;
    void *vp;

    /* Find the chunk number on the page */
    i = ((size_t)(uintptr_t)ptr & malloc_pagemask) >> info->shift;

    if (((size_t)(uintptr_t)ptr & (info->size-1))) {
	wrtwarning("modified (chunk-) pointer.\n");
	return;
    }

    if (info->bits[i/MALLOC_BITS] & (1UL << (i % MALLOC_BITS))) {
	wrtwarning("chunk is already free.\n");
	return;
    }

    if (malloc_junk)
	memset(ptr, SOME_JUNK, (size_t)info->size);

    /* Set the bit: chunk is free again. */
    info->bits[i/MALLOC_BITS] |= (u_int)(1UL << (i % MALLOC_BITS));
    info->free++;

    mp = page_dir + info->shift;

    if (info->free == 1) {

	/* Page became non-full */

	mp = page_dir + info->shift;
	/* Insert in address order */
	while (*mp && (*mp)->next && (*mp)->next->page < info->page)
	    mp = &(*mp)->next;
	info->next = *mp;
	*mp = info;
	return;
    }

    if (info->free != info->total)
	return;

    /* Find & remove this page in the queue */
    while (*mp != info) {
	mp = &((*mp)->next);
#ifdef MALLOC_EXTRA_SANITY
	if (!*mp)
		wrterror("(ES): Not on queue.\n");
#endif /* MALLOC_EXTRA_SANITY */
    }
    *mp = info->next;

    /* Free the page & the info structure if need be */
    page_dir[idx] = MALLOC_FIRST;
    vp = info->page;		/* Order is important ! */
    if(vp != (void*)info)
	ifree(info);
    ifree(vp);
}
1132 
1133 static void
1134 ifree(void *ptr)
1135 {
1136     struct pginfo *info;
1137     size_t idx;
1138 
1139     /* This is legal */
1140     if (ptr == NULL)
1141 	return;
1142 
1143     /* If we're already sinking, don't make matters any worse. */
1144     if (suicide)
1145 	return;
1146 
1147     idx = ptr2idx(ptr);
1148 
1149     if (idx < malloc_pageshift) {
1150 	wrtwarning("junk pointer, too low to make sense.\n");
1151 	return;
1152     }
1153 
1154     if (idx > last_idx) {
1155 	wrtwarning("junk pointer, too high to make sense.\n");
1156 	return;
1157     }
1158 
1159     info = page_dir[idx];
1160 
1161     if (info < MALLOC_MAGIC)
1162         free_pages(ptr, idx, info);
1163     else
1164 	free_bytes(ptr, idx, info);
1165     return;
1166 }
1167 
/*
 * Shared state for the public entry points below; both are read and
 * written only under _MALLOC_LOCK() in pubrealloc().
 */
static int malloc_active; /* Recursion flag for public interface. */
static unsigned malloc_started; /* Set when initialization has been done */
1170 
/*
 * Common implementation behind malloc(), realloc(), free() and calloc():
 * a realloc with locking, lazy initialization, recursion detection and
 * zero-size handling.  'func' is a human-readable tag (e.g. " in malloc():")
 * stashed in malloc_func for use by warning/error messages.
 */
static void *
pubrealloc(void *ptr, size_t size, const char *func)
{
    void *r;
    int err = 0;

    /*
     * If a thread is inside our code with a functional lock held, and then
     * catches a signal which calls us again, we would get a deadlock if the
     * lock is not of a recursive type.
     */
    _MALLOC_LOCK();
    malloc_func = func;
    if (malloc_active > 0) {
	if (malloc_active == 1) {
	    /* Warn only once; 2 means "already warned". */
	    wrtwarning("recursive call\n");
	    malloc_active = 2;
	}
        _MALLOC_UNLOCK();
	errno = EINVAL;
	return (NULL);
    }
    malloc_active = 1;

    /* Lazy one-time initialization of the allocator. */
    if (!malloc_started) {
        if (ptr != NULL) {
	    /* A non-NULL ptr cannot have come from us before init. */
	    wrtwarning("malloc() has never been called\n");
	    malloc_active = 0;
            _MALLOC_UNLOCK();
	    errno = EINVAL;
	    return (NULL);
	}
	malloc_init();
	malloc_started = 1;
    }

    /* The zero-size cookie maps back to "no allocation". */
    if (ptr == ZEROSIZEPTR)
	ptr = NULL;
    if (malloc_sysv && !size) {
	/* SysV semantics: size 0 frees and yields NULL. */
	if (ptr != NULL)
	    ifree(ptr);
	r = NULL;
    } else if (!size) {
	/* Default semantics: size 0 yields the distinguished cookie. */
	if (ptr != NULL)
	    ifree(ptr);
	r = ZEROSIZEPTR;
    } else if (ptr == NULL) {
	r = imalloc(size);
	err = (r == NULL);
    } else {
        r = irealloc(ptr, size);
	err = (r == NULL);
    }
    UTRACE(ptr, size, r);
    malloc_active = 0;
    _MALLOC_UNLOCK();
    /* Report failure only after the lock is dropped. */
    if (malloc_xmalloc && err)
	wrterror("out of memory\n");
    if (err)
	errno = ENOMEM;
    return (r);
}
1233 
1234 /*
1235  * These are the public exported interface routines.
1236  */
1237 
/*
 * Allocate 'size' bytes; equivalent to a realloc of a NULL pointer.
 */
void *
malloc(size_t size)
{
    void *p;

    p = pubrealloc(NULL, size, " in malloc():");
    return p;
}
1244 
1245 int
1246 posix_memalign(void **memptr, size_t alignment, size_t size)
1247 {
1248     int err;
1249     void *result;
1250 
1251     if (!malloc_started) {
1252 	    malloc_init();
1253 	    malloc_started = 1;
1254     }
1255     /* Make sure that alignment is a large enough power of 2. */
1256     if (((alignment - 1) & alignment) != 0 || alignment < sizeof(void *) ||
1257 	alignment > malloc_pagesize)
1258 	    return EINVAL;
1259 
1260     /*
1261      * (size | alignment) is enough to assure the requested alignment, since
1262      * the allocator always allocates power-of-two blocks.
1263      */
1264     err = errno; /* Protect errno against changes in pubrealloc(). */
1265     result = pubrealloc(NULL, (size | alignment), " in posix_memalign()");
1266     errno = err;
1267 
1268     if (result == NULL)
1269 	return ENOMEM;
1270 
1271     *memptr = result;
1272     return 0;
1273 }
1274 
/*
 * Allocate zero-initialized storage for an array of 'num' elements of
 * 'size' bytes each.  Returns NULL with errno = ENOMEM if num * size
 * overflows size_t or the allocation fails.
 */
void *
calloc(size_t num, size_t size)
{
    void *ret;
    size_t total;

    /*
     * Unsigned multiplication wraps on overflow, so the product is only
     * trustworthy if dividing it back by size recovers num.  Compute the
     * product once instead of three times as before.
     */
    total = num * size;
    if (size != 0 && total / size != num) {
	/* size_t overflow. */
	errno = ENOMEM;
	return (NULL);
    }

    ret = pubrealloc(NULL, total, " in calloc():");

    /* calloc() guarantees zero-filled memory. */
    if (ret != NULL)
	memset(ret, 0, total);

    return ret;
}
1293 
/*
 * Release storage previously obtained from this allocator; implemented
 * as a realloc to size 0.
 */
void
free(void *ptr)
{

    (void)pubrealloc(ptr, 0, " in free():");
}
1300 
/*
 * Resize the allocation at 'ptr' to 'size' bytes, preserving contents
 * up to the smaller of the old and new sizes.
 */
void *
realloc(void *ptr, size_t size)
{
    void *p;

    p = pubrealloc(ptr, size, " in realloc():");
    return p;
}
1307 
1308 /*
1309  * Begin library-private functions, used by threading libraries for protection
1310  * of malloc during fork().  These functions are only called if the program is
1311  * running in threaded mode, so there is no need to check whether the program
1312  * is threaded here.
1313  */
1314 
/*
 * Take the malloc lock before fork() so the child is not created with
 * the lock held by some other thread.
 */
void
_malloc_prefork(void)
{

	_MALLOC_LOCK();
}
1321 
/*
 * Release the malloc lock after fork(), in both parent and child.
 */
void
_malloc_postfork(void)
{

	_MALLOC_UNLOCK();
}
1328