xref: /minix3/lib/libc/stdlib/malloc.c (revision 9152e1c5a7225885973292842eaa6ab5121d1a38)
1 /*	$NetBSD: malloc.c,v 1.52 2008/02/03 22:56:53 christos Exp $	*/
2 
3 /*
4  * ----------------------------------------------------------------------------
5  * "THE BEER-WARE LICENSE" (Revision 42):
6  * <phk@FreeBSD.ORG> wrote this file.  As long as you retain this notice you
7  * can do whatever you want with this stuff. If we meet some day, and you think
8  * this stuff is worth it, you can buy me a beer in return.   Poul-Henning Kamp
9  * ----------------------------------------------------------------------------
10  *
11  * From FreeBSD: malloc.c,v 1.91 2006/01/12 07:28:20 jasone
12  *
13  */
14 
15 #ifdef __minix
16 #define mmap minix_mmap
17 #define munmap minix_munmap
18 #ifdef _LIBSYS
19 #include <minix/sysutil.h>
20 #define MALLOC_NO_SYSCALLS
21 #define wrtwarning(w) printf("libminc malloc warning: %s\n", w)
22 #define wrterror(w) panic("libminc malloc error: %s\n", w)
23 #endif
24 #endif
25 
26 /*
27  * Defining MALLOC_EXTRA_SANITY will enable extra checks which are related
28  * to internal conditions and consistency in malloc.c. This has a
29  * noticeable runtime performance hit, and generally will not do you
30  * any good unless you fiddle with the internals of malloc or want
31  * to catch random pointer corruption as early as possible.
32  */
33 #ifndef MALLOC_EXTRA_SANITY
34 #undef MALLOC_EXTRA_SANITY
35 #endif
36 
37 /*
38  * What to use for Junk.  This is the byte value we use to fill with
39  * when the 'J' option is enabled.
40  */
41 #define SOME_JUNK	0xd0		/* as in "Duh" :-) */
42 
43 /*
44  * The basic parameters you can tweak.
45  *
46  * malloc_minsize	minimum size of an allocation in bytes.
47  *			If this is too small, allocations become too much
48  *			work to manage.  This is also the smallest
49  *			unit of alignment used for the storage
50  *			returned by malloc/realloc.
51  *
52  */
53 
54 #include "namespace.h"
55 #if defined(__FreeBSD__)
56 #   if defined(__i386__)
57 #       define malloc_minsize		16U
58 #   endif
59 #   if defined(__ia64__)
60 #	define malloc_pageshift		13U
61 #	define malloc_minsize		16U
62 #   endif
63 #   if defined(__alpha__)
64 #       define malloc_pageshift		13U
65 #       define malloc_minsize		16U
66 #   endif
67 #   if defined(__sparc64__)
68 #       define malloc_pageshift		13U
69 #       define malloc_minsize		16U
70 #   endif
71 #   if defined(__amd64__)
72 #       define malloc_pageshift		12U
73 #       define malloc_minsize		16U
74 #   endif
75 #   if defined(__arm__)
76 #       define malloc_pageshift         12U
77 #       define malloc_minsize           16U
78 #   endif
79 #ifndef __minix
80 #   define HAS_UTRACE
81 #   define UTRACE_LABEL
82 #endif /* __minix */
83 
84 #include <sys/cdefs.h>
85 void utrace(struct ut *, int);
86 
87     /*
88      * Make malloc/free/realloc thread-safe in libc for use with
89      * kernel threads.
90      */
91 #   include "libc_private.h"
92 #   include "spinlock.h"
93     static spinlock_t thread_lock	= _SPINLOCK_INITIALIZER;
94 #   define _MALLOC_LOCK()		if (__isthreaded) _SPINLOCK(&thread_lock);
95 #   define _MALLOC_UNLOCK()		if (__isthreaded) _SPINUNLOCK(&thread_lock);
96 #endif /* __FreeBSD__ */
97 
98 #include <assert.h>
99 
100 #include <sys/types.h>
101 #if defined(__NetBSD__)
102 #   define malloc_minsize               16U
103 #   define HAS_UTRACE
104 #   define UTRACE_LABEL "malloc",
105 #include <sys/cdefs.h>
106 #include "extern.h"
107 #if defined(LIBC_SCCS) && !defined(lint)
108 __RCSID("$NetBSD: malloc.c,v 1.52 2008/02/03 22:56:53 christos Exp $");
109 #endif /* LIBC_SCCS and not lint */
110 int utrace(const char *, void *, size_t);
111 
112 #include <reentrant.h>
113 extern int __isthreaded;
114 static mutex_t thread_lock = MUTEX_INITIALIZER;
115 #define _MALLOC_LOCK()	if (__isthreaded) mutex_lock(&thread_lock);
116 #define _MALLOC_UNLOCK()	if (__isthreaded) mutex_unlock(&thread_lock);
117 #endif /* __NetBSD__ */
118 
119 #if defined(__sparc__) && defined(sun)
120 #   define malloc_minsize		16U
121 #   define MAP_ANON			(0)
122     static int fdzero;
123 #   define MMAP_FD	fdzero
124 #   define INIT_MMAP() \
125 	{ if ((fdzero = open(_PATH_DEVZERO, O_RDWR, 0000)) == -1) \
126 	    wrterror("open of /dev/zero"); }
127 #endif /* __sparc__ */
128 
129 /* Insert your combination here... */
130 #if defined(__FOOCPU__) && defined(__BAROS__)
131 #   define malloc_minsize		16U
132 #endif /* __FOOCPU__ && __BAROS__ */
133 
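/*
 * Non-NULL token handed out for zero-sized allocations when SysV
 * semantics ('V') are off.  pubrealloc() turns it back into NULL before
 * freeing or reallocating, so it is never looked up in the page directory.
 */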
134 #ifndef ZEROSIZEPTR
135 #define ZEROSIZEPTR	((void *)(uintptr_t)(1UL << (malloc_pageshift - 1)))
136 #endif
137 
138 /*
139  * No user serviceable parts behind this point.
140  */
141 #include <sys/types.h>
142 #include <sys/mman.h>
143 #include <errno.h>
144 #include <fcntl.h>
145 #include <paths.h>
146 #include <stddef.h>
147 #include <stdio.h>
148 #include <stdlib.h>
149 #include <string.h>
150 #include <unistd.h>
151 
152 /*
153  * This structure describes a page worth of chunks.
154  */
155 
156 struct pginfo {
157     struct pginfo	*next;	/* next on the free list */
158     void		*page;	/* Pointer to the page */
159     u_short		size;	/* size of this page's chunks */
160     u_short		shift;	/* How far to shift for this size chunks */
161     u_short		free;	/* How many free chunks */
162     u_short		total;	/* How many chunks */
163     u_int		bits[1]; /* Which chunks are free */
164 };
165 
166 /*
167  * This structure describes a number of free pages.
168  */
169 
170 struct pgfree {
171     struct pgfree	*next;	/* next run of free pages */
172     struct pgfree	*prev;	/* prev run of free pages */
173     void		*page;	/* pointer to free pages */
174     void		*end;	/* pointer to end of free pages */
175     size_t		size;	/* number of bytes free */
176 };
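/*
 * Runs of free pages are kept on a doubly linked list, in address order,
 * headed by the static free_list below; free_pages() coalesces a freed
 * run with adjacent entries and trims the break when the tail of the
 * list grows past malloc_cache.
 */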
177 
178 /*
179  * How many bits per u_int in the bitmap.
180  * Change only if not 8 bits/byte
181  */
182 #define	MALLOC_BITS	((int)(8*sizeof(u_int)))
183 
184 /*
185  * Magic values to put in the page_directory
186  */
187 #define MALLOC_NOT_MINE	((struct pginfo*) 0)
188 #define MALLOC_FREE 	((struct pginfo*) 1)
189 #define MALLOC_FIRST	((struct pginfo*) 2)
190 #define MALLOC_FOLLOW	((struct pginfo*) 3)
191 #define MALLOC_MAGIC	((struct pginfo*) 4)
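/*
 * Any page_dir[] entry numerically below MALLOC_MAGIC is one of the
 * sentinels above and describes a whole page; anything else is a real
 * struct pginfo pointer for a page that has been split into chunks.
 * ifree() uses this comparison to pick free_pages() or free_bytes().
 */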
192 
193 /*
194  * Page size related parameters, computed at run-time.
195  */
196 static size_t malloc_pagesize;
197 static size_t malloc_pageshift;
198 static size_t malloc_pagemask;
199 
200 #ifndef malloc_minsize
201 #define malloc_minsize			16U
202 #endif
203 
204 #ifndef malloc_maxsize
205 #define malloc_maxsize			((malloc_pagesize)>>1)
206 #endif
207 
208 #define pageround(foo) (((foo) + (malloc_pagemask))&(~(malloc_pagemask)))
209 #define ptr2idx(foo) \
210     (((size_t)(uintptr_t)(foo) >> malloc_pageshift)-malloc_origo)
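/*
 * pageround() rounds a byte count (or address) up to a page boundary;
 * ptr2idx() maps an address to its page_dir[] slot: the page number of
 * the address, relative to malloc_origo.
 */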
211 
212 #ifndef _MALLOC_LOCK
213 #define _MALLOC_LOCK()
214 #endif
215 
216 #ifndef _MALLOC_UNLOCK
217 #define _MALLOC_UNLOCK()
218 #endif
219 
220 #ifndef MMAP_FD
221 #define MMAP_FD (-1)
222 #endif
223 
224 #ifndef INIT_MMAP
225 #define INIT_MMAP()
226 #endif
227 
228 #ifndef __minix
229 #ifndef MADV_FREE
230 #define MADV_FREE MADV_DONTNEED
231 #endif
232 #endif /* !__minix */
233 
234 /* Number of free pages we cache */
235 static size_t malloc_cache = 16;
236 
237 /* The offset from page number to index into the page directory */
238 static size_t malloc_origo;
239 
240 /* The last index in the page directory we care about */
241 static size_t last_idx;
242 
243 /* Pointer to page directory. Allocated "as if with" malloc */
244 static struct	pginfo **page_dir;
245 
246 /* How many slots in the page directory */
247 static size_t	malloc_ninfo;
248 
249 /* Free pages line up here */
250 static struct pgfree free_list;
251 
252 /* Abort(), user doesn't handle problems.  */
253 static int malloc_abort;
254 
255 /* Are we trying to die ?  */
256 static int suicide;
257 
258 /* always realloc ?  */
259 static int malloc_realloc;
260 
261 /* pass the kernel a hint on free pages ?  */
262 #if defined(MADV_FREE)
263 static int malloc_hint = 0;
264 #endif
265 
266 /* xmalloc behaviour ?  */
267 static int malloc_xmalloc;
268 
269 /* sysv behaviour for malloc(0) ?  */
270 static int malloc_sysv;
271 
272 /* zero fill ?  */
273 static int malloc_zero;
274 
275 /* junk fill ?  */
276 static int malloc_junk;
277 
278 #ifdef HAS_UTRACE
279 
280 /* utrace ?  */
281 static int malloc_utrace;
282 
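/*
 * Record emitted via utrace() for each traced call: p is the old pointer,
 * s the requested size, r the result (see the UTRACE() macro below).
 */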
283 struct ut { void *p; size_t s; void *r; };
284 
285 #define UTRACE(a, b, c) \
286 	if (malloc_utrace) {			\
287 		struct ut u;			\
288 		u.p=a; u.s = b; u.r=c;		\
289 		utrace(UTRACE_LABEL (void *) &u, sizeof u);	\
290 	}
291 #else /* !HAS_UTRACE */
292 #define UTRACE(a,b,c)
293 #endif /* HAS_UTRACE */
294 
295 /* my last break. */
296 static void *malloc_brk;
297 
298 /* one location cache for free-list holders */
299 static struct pgfree *px;
300 
301 /* compile-time options */
302 const char *_malloc_options;
303 
304 /* Name of the current public function */
305 static const char *malloc_func;
306 
307 /* Macro for mmap */
308 #define MMAP(size) \
309 	mmap(NULL, (size), PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, \
310 	    MMAP_FD, (off_t)0);
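/*
 * MMAP() is only used for the page directory itself (in malloc_init() and
 * extend_pgdir()); the arena that allocations are carved from grows with
 * sbrk() in map_pages().
 */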
311 
312 /*
313  * Necessary function declarations
314  */
315 static int extend_pgdir(size_t idx);
316 static void *imalloc(size_t size);
317 static void ifree(void *ptr);
318 static void *irealloc(void *ptr, size_t size);
319 
320 #ifndef MALLOC_NO_SYSCALLS
321 static void
322 wrtmessage(const char *p1, const char *p2, const char *p3, const char *p4)
323 {
324 
325     write(STDERR_FILENO, p1, strlen(p1));
326     write(STDERR_FILENO, p2, strlen(p2));
327     write(STDERR_FILENO, p3, strlen(p3));
328     write(STDERR_FILENO, p4, strlen(p4));
329 }
330 
331 void (*_malloc_message)(const char *p1, const char *p2, const char *p3,
332 	    const char *p4) = wrtmessage;
333 static void
334 wrterror(const char *p)
335 {
336 
337     suicide = 1;
338     _malloc_message(getprogname(), malloc_func, " error: ", p);
339     abort();
340 }
341 
342 static void
343 wrtwarning(const char *p)
344 {
345 
346     /*
347      * Sensitive processes, somewhat arbitrarily defined here as setuid,
348      * setgid, root and wheel, cannot afford to have malloc mistakes.
349      */
350     if (malloc_abort || issetugid() || getuid() == 0 || getgid() == 0)
351 	wrterror(p);
352 }
353 #endif
354 
355 /*
356  * Allocate a number of pages from the OS
357  */
358 static void *
359 map_pages(size_t pages)
360 {
361     caddr_t result, rresult, tail;
362     intptr_t bytes = pages << malloc_pageshift;
363 
364     if (bytes < 0 || (size_t)bytes < pages) {
365 	errno = ENOMEM;
366 	return NULL;
367     }
368 
369     if ((result = sbrk(bytes)) == (void *)-1)
370 	return NULL;
371 
372     /*
373      * Round to a page, in case sbrk(2) did not do this for us
374      */
375     rresult = (caddr_t)pageround((size_t)(uintptr_t)result);
376     if (result < rresult) {
377 	/* make sure we have enough space to fit bytes */
378 	if (sbrk((intptr_t)(rresult - result)) == (void *) -1) {
379 	    /* we failed, put everything back */
380 	    if (brk(result)) {
381 		wrterror("brk(2) failed [internal error]\n");
382 	    }
383 	}
384     }
385     tail = rresult + (size_t)bytes;
386 
387     last_idx = ptr2idx(tail) - 1;
388     malloc_brk = tail;
389 
390     if ((last_idx+1) >= malloc_ninfo && !extend_pgdir(last_idx)) {
391 	malloc_brk = result;
392 	last_idx = ptr2idx(malloc_brk) - 1;
393 	/* Put back break point since we failed. */
394 	if (brk(malloc_brk))
395 	    wrterror("brk(2) failed [internal error]\n");
396 	return 0;
397     }
398 
399     return rresult;
400 }
401 
402 /*
403  * Extend page directory
404  */
405 static int
406 extend_pgdir(size_t idx)
407 {
408     struct  pginfo **new, **old;
409     size_t newlen, oldlen;
410 
411     /* check for overflow */
412     if ((((~(1UL << ((sizeof(size_t) * NBBY) - 1)) / sizeof(*page_dir)) + 1)
413 	+ (malloc_pagesize / sizeof *page_dir)) < idx) {
414 	errno = ENOMEM;
415 	return 0;
416     }
417 
418     /* New directory size in bytes, rounded up to whole pages, plus one spare page */
419     newlen = pageround(idx * sizeof *page_dir) + malloc_pagesize;
420 
421     /* remember the old mapping size */
422     oldlen = malloc_ninfo * sizeof *page_dir;
423 
424     /*
425      * NOTE: we allocate new pages and copy the directory rather than tempt
426      * fate by trying to "grow" the region.. There is nothing to prevent
427      * us from accidentally re-mapping space that's been allocated by our caller
428      * via dlopen() or other mmap().
429      *
430      * The copy problem is not too bad, as there is 4K of page index per
431      * 4MB of malloc arena.
432      *
433      * We can totally avoid the copy if we open a file descriptor to associate
434      * the anon mappings with.  Then, when we remap the pages at the new
435      * address, the old pages will be "magically" remapped..  But this means
436      * keeping open a "secret" file descriptor.....
437      */
438 
439     /* Get new pages */
440     new = MMAP(newlen);
441     if (new == MAP_FAILED)
442 	return 0;
443 
444     /* Copy the old stuff */
445     memcpy(new, page_dir, oldlen);
446 
447     /* register the new size */
448     malloc_ninfo = newlen / sizeof *page_dir;
449 
450     /* swap the pointers */
451     old = page_dir;
452     page_dir = new;
453 
454     /* Now free the old stuff */
455     munmap(old, oldlen);
456     return 1;
457 }
458 
459 /*
460  * Initialize the world
461  */
462 static void
463 malloc_init(void)
464 {
465     int save_errno = errno;
466 #ifndef MALLOC_NO_SYSCALLS
467     const char *p;
468     char b[64];
469     size_t i;
470     ssize_t j;
471 
472     /*
473      * Compute page-size related variables.
474      */
475     malloc_pagesize = (size_t)sysconf(_SC_PAGESIZE);
476 #else
477     malloc_pagesize = PAGE_SIZE;
478 #endif
479     malloc_pagemask = malloc_pagesize - 1;
480     for (malloc_pageshift = 0;
481 	 (1UL << malloc_pageshift) != malloc_pagesize;
482 	 malloc_pageshift++)
483 	/* nothing */ ;
484 
485     INIT_MMAP();
486 
487 #ifdef MALLOC_EXTRA_SANITY
488     malloc_junk = 1;
489 #endif /* MALLOC_EXTRA_SANITY */
490 
491 #ifndef MALLOC_NO_SYSCALLS
492     for (i = 0; i < 3; i++) {
493 	if (i == 0) {
494 	    j = readlink("/etc/malloc.conf", b, sizeof b - 1);
495 	    if (j <= 0)
496 		continue;
497 	    b[j] = '\0';
498 	    p = b;
499 	} else if (i == 1 && issetugid() == 0) {
500 	    p = getenv("MALLOC_OPTIONS");
501 	} else if (i == 1) {
502 	    continue;
503 	} else {
504 	    p = _malloc_options;
505 	}
506 	for (; p != NULL && *p != '\0'; p++) {
507 	    switch (*p) {
508 		case '>': malloc_cache   <<= 1; break;
509 		case '<': malloc_cache   >>= 1; break;
510 		case 'a': malloc_abort   = 0; break;
511 		case 'A': malloc_abort   = 1; break;
512 #ifndef __minix
513 		case 'h': malloc_hint    = 0; break;
514 		case 'H': malloc_hint    = 1; break;
515 #endif /* !__minix */
516 		case 'r': malloc_realloc = 0; break;
517 		case 'R': malloc_realloc = 1; break;
518 		case 'j': malloc_junk    = 0; break;
519 		case 'J': malloc_junk    = 1; break;
520 #ifdef HAS_UTRACE
521 		case 'u': malloc_utrace  = 0; break;
522 		case 'U': malloc_utrace  = 1; break;
523 #endif
524 		case 'v': malloc_sysv    = 0; break;
525 		case 'V': malloc_sysv    = 1; break;
526 		case 'x': malloc_xmalloc = 0; break;
527 		case 'X': malloc_xmalloc = 1; break;
528 		case 'z': malloc_zero    = 0; break;
529 		case 'Z': malloc_zero    = 1; break;
530 		default:
531 		    _malloc_message(getprogname(), malloc_func,
532 			 " warning: ", "unknown char in MALLOC_OPTIONS\n");
533 		    break;
534 	    }
535 	}
536     }
537 #endif
538 
539     UTRACE(0, 0, 0);
540 
541     /*
542      * We want junk in the entire allocation, and zero only in the part
543      * the user asked for.
544      */
545     if (malloc_zero)
546 	malloc_junk = 1;
547 
548     /* Allocate one page for the page directory */
549     page_dir = MMAP(malloc_pagesize);
550 
551     if (page_dir == MAP_FAILED)
552 	wrterror("mmap(2) failed, check limits.\n");
553 
554     /*
555      * We need a maximum of malloc_pageshift buckets; steal these from the
556      * front of the page_directory.
557      */
558     malloc_origo = pageround((size_t)(uintptr_t)sbrk((intptr_t)0))
559 	>> malloc_pageshift;
560     malloc_origo -= malloc_pageshift;
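    /*
     * The subtraction above biases every index by malloc_pageshift, so
     * real pages never map to slots 0..malloc_pageshift-1; those slots
     * are the per-size bucket heads used as page_dir[bits] by
     * malloc_make_chunks() and malloc_bytes().
     */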
561 
562     malloc_ninfo = malloc_pagesize / sizeof *page_dir;
563 
564     /* Recalculate the cache size in bytes, and make sure it's nonzero */
565 
566     if (!malloc_cache)
567 	malloc_cache++;
568 
569     malloc_cache <<= malloc_pageshift;
570 
571     /*
572      * This is a nice hack from Kaleb Keithly (kaleb@x.org).
573      * We can sbrk(2) further back when we keep this on a low address.
574      */
575     px = imalloc(sizeof *px);
576 
577     errno = save_errno;
578 }
579 
580 /*
581  * Allocate a number of complete pages
582  */
583 static void *
584 malloc_pages(size_t size)
585 {
586     void *p, *delay_free = NULL;
587     size_t i;
588     struct pgfree *pf;
589     size_t idx;
590 
591     idx = pageround(size);
592     if (idx < size) {
593 	errno = ENOMEM;
594 	return NULL;
595     } else
596 	size = idx;
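    /*
     * idx is reused here: for now it holds the request rounded up to whole
     * pages (the check above catches overflow); later it holds the
     * page-directory index of the chosen pages.
     */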
597 
598     p = NULL;
599 
600     /* Look for free pages before asking for more */
601     for(pf = free_list.next; pf; pf = pf->next) {
602 
603 #ifdef MALLOC_EXTRA_SANITY
604 	if (pf->size & malloc_pagemask)
605 	    wrterror("(ES): junk length entry on free_list.\n");
606 	if (!pf->size)
607 	    wrterror("(ES): zero length entry on free_list.\n");
608 	if (pf->page == pf->end)
609 	    wrterror("(ES): zero entry on free_list.\n");
610 	if (pf->page > pf->end)
611 	    wrterror("(ES): sick entry on free_list.\n");
612 	if ((void*)pf->page >= (void*)sbrk(0))
613 	    wrterror("(ES): entry on free_list past brk.\n");
614 	if (page_dir[ptr2idx(pf->page)] != MALLOC_FREE)
615 	    wrterror("(ES): non-free first page on free-list.\n");
616 	if (page_dir[ptr2idx(pf->end)-1] != MALLOC_FREE)
617 	    wrterror("(ES): non-free last page on free-list.\n");
618 #endif /* MALLOC_EXTRA_SANITY */
619 
620 	if (pf->size < size)
621 	    continue;
622 
623 	if (pf->size == size) {
624 	    p = pf->page;
625 	    if (pf->next != NULL)
626 		    pf->next->prev = pf->prev;
627 	    pf->prev->next = pf->next;
628 	    delay_free = pf;
629 	    break;
630 	}
631 
632 	p = pf->page;
633 	pf->page = (char *)pf->page + size;
634 	pf->size -= size;
635 	break;
636     }
637 
638 #ifdef MALLOC_EXTRA_SANITY
639     if (p != NULL && page_dir[ptr2idx(p)] != MALLOC_FREE)
640 	wrterror("(ES): allocated non-free page on free-list.\n");
641 #endif /* MALLOC_EXTRA_SANITY */
642 
643     size >>= malloc_pageshift;
644 
645     /* Map new pages */
646     if (p == NULL)
647 	p = map_pages(size);
648 
649     if (p != NULL) {
650 
651 	idx = ptr2idx(p);
652 	page_dir[idx] = MALLOC_FIRST;
653 	for (i=1;i<size;i++)
654 	    page_dir[idx+i] = MALLOC_FOLLOW;
655 
656 	if (malloc_junk)
657 	    memset(p, SOME_JUNK, size << malloc_pageshift);
658     }
659 
660     if (delay_free) {
661 	if (px == NULL)
662 	    px = delay_free;
663 	else
664 	    ifree(delay_free);
665     }
666 
667     return p;
668 }
669 
670 /*
671  * Allocate a page of fragments
672  */
673 
674 static inline int
675 malloc_make_chunks(int bits)
676 {
677     struct  pginfo *bp;
678     void *pp;
679     int i, k;
680     long l;
681 
682     /* Allocate a new bucket */
683     pp = malloc_pages(malloc_pagesize);
684     if (pp == NULL)
685 	return 0;
686 
687     /* Find length of admin structure */
688     l = (long)offsetof(struct pginfo, bits[0]);
689     l += (long)sizeof bp->bits[0] *
690 	(((malloc_pagesize >> bits)+MALLOC_BITS-1) / MALLOC_BITS);
691 
692     /* Don't waste more than two chunks on this */
693     if ((1<<(bits)) <= l+l) {
694 	bp = (struct  pginfo *)pp;
695     } else {
696 	bp = imalloc((size_t)l);
697 	if (bp == NULL) {
698 	    ifree(pp);
699 	    return 0;
700 	}
701     }
702 
703     bp->size = (1<<bits);
704     bp->shift = bits;
705     bp->total = bp->free = (u_short)(malloc_pagesize >> bits);
706     bp->page = pp;
707 
708     /* set all valid bits in the bitmap */
709     k = bp->total;
710     i = 0;
711 
712     /* Do a bunch at a time */
713     for(;k-i >= MALLOC_BITS; i += MALLOC_BITS)
714 	bp->bits[i / MALLOC_BITS] = ~0U;
715 
716     for(; i < k; i++)
717         bp->bits[i/MALLOC_BITS] |= 1<<(i%MALLOC_BITS);
718 
719     if (bp == bp->page) {
720 	/* Mark the ones we stole for ourselves */
721 	for(i = 0; l > 0; i++) {
722 	    bp->bits[i / MALLOC_BITS] &= ~(1 << (i % MALLOC_BITS));
723 	    bp->free--;
724 	    bp->total--;
725 	    l -= (long)(1 << bits);
726 	}
727     }
728 
729     /* MALLOC_LOCK */
730 
731     page_dir[ptr2idx(pp)] = bp;
732 
733     bp->next = page_dir[bits];
734     page_dir[bits] = bp;
735 
736     /* MALLOC_UNLOCK */
737 
738     return 1;
739 }
740 
741 /*
742  * Allocate a fragment
743  */
744 static void *
745 malloc_bytes(size_t size)
746 {
747     size_t i;
748     int j;
749     u_int u;
750     struct  pginfo *bp;
751     size_t k;
752     u_int *lp;
753 
754     /* Don't bother with anything less than this */
755     if (size < malloc_minsize)
756 	size = malloc_minsize;
757 
758 
759     /* Find the right bucket */
760     j = 1;
761     i = size-1;
762     while (i >>= 1)
763 	j++;
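    /*
     * 2^j is now the smallest power of two >= size: a 24-byte request,
     * for example, ends with j == 5 and is served from the 32-byte bucket.
     */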
764 
765     /* If the bucket is empty, carve a new page into chunks of that size */
766     if (page_dir[j] == NULL && !malloc_make_chunks(j))
767 	return NULL;
768 
769     bp = page_dir[j];
770 
771     /* Find first word of bitmap which isn't empty */
772     for (lp = bp->bits; !*lp; lp++)
773 	;
774 
775     /* Find that bit, and tweak it */
776     u = 1;
777     k = 0;
778     while (!(*lp & u)) {
779 	u += u;
780 	k++;
781     }
782     *lp ^= u;
783 
784     /* If no free chunks remain, remove this page from the bucket's free list */
785     if (!--bp->free) {
786 	page_dir[j] = bp->next;
787 	bp->next = NULL;
788     }
789 
790     /* Adjust to the real offset of that chunk */
791     k += (lp-bp->bits)*MALLOC_BITS;
792     k <<= bp->shift;
793 
794     if (malloc_junk)
795 	memset((u_char*)bp->page + k, SOME_JUNK, (size_t)bp->size);
796 
797     return (u_char *)bp->page + k;
798 }
799 
800 /*
801  * Allocate a piece of memory
802  */
803 static void *
804 imalloc(size_t size)
805 {
806     void *result;
807 
808     if (suicide)
809 	abort();
810 
811     if ((size + malloc_pagesize) < size)	/* Check for overflow */
812 	result = NULL;
813     else if ((size + malloc_pagesize) >= (uintptr_t)page_dir)
814 	result = NULL;
815     else if (size <= malloc_maxsize)
816 	result = malloc_bytes(size);
817     else
818 	result = malloc_pages(size);
819 
820     if (malloc_abort && result == NULL)
821 	wrterror("allocation failed.\n");
822 
823     if (malloc_zero && result != NULL)
824 	memset(result, 0, size);
825 
826     return result;
827 }
828 
829 /*
830  * Change the size of an allocation.
831  */
832 static void *
833 irealloc(void *ptr, size_t size)
834 {
835     void *p;
836     size_t osize, idx;
837     struct pginfo **mp;
838     size_t i;
839 
840     if (suicide)
841 	abort();
842 
843     idx = ptr2idx(ptr);
844 
845     if (idx < malloc_pageshift) {
846 	wrtwarning("junk pointer, too low to make sense.\n");
847 	return 0;
848     }
849 
850     if (idx > last_idx) {
851 	wrtwarning("junk pointer, too high to make sense.\n");
852 	return 0;
853     }
854 
855     mp = &page_dir[idx];
856 
857     if (*mp == MALLOC_FIRST) {			/* Page allocation */
858 
859 	/* Check the pointer */
860 	if ((size_t)(uintptr_t)ptr & malloc_pagemask) {
861 	    wrtwarning("modified (page-) pointer.\n");
862 	    return NULL;
863 	}
864 
865 	/* Find the size in bytes */
866 	for (osize = malloc_pagesize; *++mp == MALLOC_FOLLOW;)
867 	    osize += malloc_pagesize;
868 
869         if (!malloc_realloc && 			/* unless we have to, */
870 	  size <= osize && 			/* .. or are too small, */
871 	  size > (osize - malloc_pagesize)) {	/* .. or can free a page, */
872 	    if (malloc_junk)
873 		memset((u_char *)ptr + size, SOME_JUNK, osize-size);
874 	    return ptr;				/* don't do anything. */
875 	}
876 
877     } else if (*mp >= MALLOC_MAGIC) {		/* Chunk allocation */
878 
879 	/* Check the pointer for sane values */
880 	if (((size_t)(uintptr_t)ptr & ((*mp)->size-1))) {
881 	    wrtwarning("modified (chunk-) pointer.\n");
882 	    return NULL;
883 	}
884 
885 	/* Find the chunk index in the page */
886 	i = ((size_t)(uintptr_t)ptr & malloc_pagemask) >> (*mp)->shift;
887 
888 	/* Verify that it isn't a free chunk already */
889         if ((*mp)->bits[i/MALLOC_BITS] & (1UL << (i % MALLOC_BITS))) {
890 	    wrtwarning("chunk is already free.\n");
891 	    return NULL;
892 	}
893 
894 	osize = (*mp)->size;
895 
896 	if (!malloc_realloc &&		/* Unless we have to, */
897 	  size <= osize && 		/* ..or are too small, */
898 	  (size > osize / 2 ||	 	/* ..or could use a smaller size, */
899 	  osize == malloc_minsize)) {	/* ..(if there is one) */
900 	    if (malloc_junk)
901 		memset((u_char *)ptr + size, SOME_JUNK, osize-size);
902 	    return ptr;			/* ..Don't do anything */
903 	}
904 
905     } else {
906 	wrtwarning("pointer to wrong page.\n");
907 	return NULL;
908     }
909 
910     p = imalloc(size);
911 
912     if (p != NULL) {
913 	/* copy the lesser of the two sizes, and free the old one */
914 	if (!size || !osize)
915 	    ;
916 	else if (osize < size)
917 	    memcpy(p, ptr, osize);
918 	else
919 	    memcpy(p, ptr, size);
920 	ifree(ptr);
921     }
922     return p;
923 }
924 
925 /*
926  * Free a sequence of pages
927  */
928 
929 static inline void
930 free_pages(void *ptr, size_t idx, struct pginfo *info)
931 {
932     size_t i;
933     struct pgfree *pf, *pt=NULL;
934     size_t l;
935     void *tail;
936 
937     if (info == MALLOC_FREE) {
938 	wrtwarning("page is already free.\n");
939 	return;
940     }
941 
942     if (info != MALLOC_FIRST) {
943 	wrtwarning("pointer to wrong page.\n");
944 	return;
945     }
946 
947     if ((size_t)(uintptr_t)ptr & malloc_pagemask) {
948 	wrtwarning("modified (page-) pointer.\n");
949 	return;
950     }
951 
952     /* Count how many pages and mark them free at the same time */
953     page_dir[idx] = MALLOC_FREE;
954     for (i = 1; page_dir[idx+i] == MALLOC_FOLLOW; i++)
955 	page_dir[idx + i] = MALLOC_FREE;
956 
957     l = i << malloc_pageshift;
958 
959     if (malloc_junk)
960 	memset(ptr, SOME_JUNK, l);
961 
962 #ifndef __minix
963     if (malloc_hint)
964 	madvise(ptr, l, MADV_FREE);
965 #endif /* !__minix */
966 
967     tail = (char *)ptr+l;
968 
969     /* add to free-list */
970     if (px == NULL)
971 	px = imalloc(sizeof *px);	/* This cannot fail... */
972     px->page = ptr;
973     px->end =  tail;
974     px->size = l;
975     if (free_list.next == NULL) {
976 
977 	/* Nothing on free list, put this at head */
978 	px->next = free_list.next;
979 	px->prev = &free_list;
980 	free_list.next = px;
981 	pf = px;
982 	px = NULL;
983 
984     } else {
985 
986 	/* Find the right spot, leave pf pointing to the modified entry. */
987 	tail = (char *)ptr+l;
988 
989 	for(pf = free_list.next; pf->end < ptr && pf->next != NULL;
990 	    pf = pf->next)
991 	    ; /* Race ahead here */
992 
993 	if (pf->page > tail) {
994 	    /* Insert before entry */
995 	    px->next = pf;
996 	    px->prev = pf->prev;
997 	    pf->prev = px;
998 	    px->prev->next = px;
999 	    pf = px;
1000 	    px = NULL;
1001 	} else if (pf->end == ptr ) {
1002 	    /* Append to the previous entry */
1003 	    pf->end = (char *)pf->end + l;
1004 	    pf->size += l;
1005 	    if (pf->next != NULL && pf->end == pf->next->page ) {
1006 		/* And collapse the next too. */
1007 		pt = pf->next;
1008 		pf->end = pt->end;
1009 		pf->size += pt->size;
1010 		pf->next = pt->next;
1011 		if (pf->next != NULL)
1012 		    pf->next->prev = pf;
1013 	    }
1014 	} else if (pf->page == tail) {
1015 	    /* Prepend to entry */
1016 	    pf->size += l;
1017 	    pf->page = ptr;
1018 	} else if (pf->next == NULL) {
1019 	    /* Append at tail of chain */
1020 	    px->next = NULL;
1021 	    px->prev = pf;
1022 	    pf->next = px;
1023 	    pf = px;
1024 	    px = NULL;
1025 	} else {
1026 	    wrterror("freelist is destroyed.\n");
1027 	}
1028     }
1029 
1030     /* Return something to OS ? */
1031     if (pf->next == NULL &&			/* If we're the last one, */
1032       pf->size > malloc_cache &&		/* ..and the cache is full, */
1033       pf->end == malloc_brk &&			/* ..and none behind us, */
1034       malloc_brk == sbrk((intptr_t)0)) {	/* ..and it's OK to do... */
1035 	int r;
1036 	/*
1037 	 * Keep the cache intact.  Notice that the '>' above guarantees that
1038 	 * the pf will always have at least one page afterwards.
1039 	 */
1040 	pf->end = (char *)pf->page + malloc_cache;
1041 	pf->size = malloc_cache;
1042 
1043 	r = brk(pf->end);
1044 	assert(r >= 0);
1045 	malloc_brk = pf->end;
1046 
1047 	idx = ptr2idx(pf->end);
1048 
1049 	for(i=idx;i <= last_idx;)
1050 	    page_dir[i++] = MALLOC_NOT_MINE;
1051 
1052 	last_idx = idx - 1;
1053 
1054 	/* XXX: We could realloc/shrink the pagedir here I guess. */
1055     }
1056     if (pt != NULL)
1057 	ifree(pt);
1058 }
1059 
1060 /*
1061  * Free a chunk, and possibly the page it's on, if the page becomes empty.
1062  */
1063 
1064 static inline void
1065 free_bytes(void *ptr, size_t idx, struct pginfo *info)
1066 {
1067     size_t i;
1068     struct pginfo **mp;
1069     void *vp;
1070 
1071     /* Find the chunk number on the page */
1072     i = ((size_t)(uintptr_t)ptr & malloc_pagemask) >> info->shift;
1073 
1074     if (((size_t)(uintptr_t)ptr & (info->size-1))) {
1075 	wrtwarning("modified (chunk-) pointer.\n");
1076 	return;
1077     }
1078 
1079     if (info->bits[i/MALLOC_BITS] & (1UL << (i % MALLOC_BITS))) {
1080 	wrtwarning("chunk is already free.\n");
1081 	return;
1082     }
1083 
1084     if (malloc_junk)
1085 	memset(ptr, SOME_JUNK, (size_t)info->size);
1086 
1087     info->bits[i/MALLOC_BITS] |= (u_int)(1UL << (i % MALLOC_BITS));
1088     info->free++;
1089 
1090     mp = page_dir + info->shift;
1091 
1092     if (info->free == 1) {
1093 
1094 	/* Page became non-full */
1095 
1096 	mp = page_dir + info->shift;
1097 	/* Insert in address order */
1098 	while (*mp && (*mp)->next && (*mp)->next->page < info->page)
1099 	    mp = &(*mp)->next;
1100 	info->next = *mp;
1101 	*mp = info;
1102 	return;
1103     }
1104 
1105     if (info->free != info->total)
1106 	return;
1107 
1108     /* Find & remove this page in the queue */
1109     while (*mp != info) {
1110 	mp = &((*mp)->next);
1111 #ifdef MALLOC_EXTRA_SANITY
1112 	if (!*mp)
1113 		wrterror("(ES): Not on queue.\n");
1114 #endif /* MALLOC_EXTRA_SANITY */
1115     }
1116     *mp = info->next;
1117 
1118     /* Free the page & the info structure if need be */
1119     page_dir[idx] = MALLOC_FIRST;
1120     vp = info->page;		/* Order is important ! */
1121     if(vp != (void*)info)
1122 	ifree(info);
1123     ifree(vp);
1124 }
1125 
1126 static void
1127 ifree(void *ptr)
1128 {
1129     struct pginfo *info;
1130     size_t idx;
1131 
1132     /* This is legal */
1133     if (ptr == NULL)
1134 	return;
1135 
1136     /* If we're already sinking, don't make matters any worse. */
1137     if (suicide)
1138 	return;
1139 
1140     idx = ptr2idx(ptr);
1141 
1142     if (idx < malloc_pageshift) {
1143 	wrtwarning("junk pointer, too low to make sense.\n");
1144 	return;
1145     }
1146 
1147     if (idx > last_idx) {
1148 	wrtwarning("junk pointer, too high to make sense.\n");
1149 	return;
1150     }
1151 
1152     info = page_dir[idx];
1153 
1154     if (info < MALLOC_MAGIC)
1155         free_pages(ptr, idx, info);
1156     else
1157 	free_bytes(ptr, idx, info);
1158     return;
1159 }
1160 
1161 static int malloc_active; /* Recursion flag for public interface. */
1162 static unsigned malloc_started; /* Set when initialization has been done */
1163 
1164 static void *
1165 pubrealloc(void *ptr, size_t size, const char *func)
1166 {
1167     void *r;
1168     int err = 0;
1169 
1170     /*
1171      * If a thread is inside our code with a functional lock held, and then
1172      * catches a signal which calls us again, we would get a deadlock if the
1173      * lock is not of a recursive type.
1174      */
1175     _MALLOC_LOCK();
1176     malloc_func = func;
1177     if (malloc_active > 0) {
1178 	if (malloc_active == 1) {
1179 	    wrtwarning("recursive call\n");
1180 	    malloc_active = 2;
1181 	}
1182         _MALLOC_UNLOCK();
1183 	errno = EINVAL;
1184 	return (NULL);
1185     }
1186     malloc_active = 1;
1187 
1188     if (!malloc_started) {
1189         if (ptr != NULL) {
1190 	    wrtwarning("malloc() has never been called\n");
1191 	    malloc_active = 0;
1192             _MALLOC_UNLOCK();
1193 	    errno = EINVAL;
1194 	    return (NULL);
1195 	}
1196 	malloc_init();
1197 	malloc_started = 1;
1198     }
1199 
1200     if (ptr == ZEROSIZEPTR)
1201 	ptr = NULL;
1202     if (malloc_sysv && !size) {
1203 	if (ptr != NULL)
1204 	    ifree(ptr);
1205 	r = NULL;
1206     } else if (!size) {
1207 	if (ptr != NULL)
1208 	    ifree(ptr);
1209 	r = ZEROSIZEPTR;
1210     } else if (ptr == NULL) {
1211 	r = imalloc(size);
1212 	err = (r == NULL);
1213     } else {
1214         r = irealloc(ptr, size);
1215 	err = (r == NULL);
1216     }
1217     UTRACE(ptr, size, r);
1218     malloc_active = 0;
1219     _MALLOC_UNLOCK();
1220     if (malloc_xmalloc && err)
1221 	wrterror("out of memory\n");
1222     if (err)
1223 	errno = ENOMEM;
1224     return (r);
1225 }
1226 
1227 /*
1228  * These are the public exported interface routines.
1229  */
1230 
1231 void *
1232 malloc(size_t size)
1233 {
1234 
1235     return pubrealloc(NULL, size, " in malloc():");
1236 }
1237 
1238 int
1239 posix_memalign(void **memptr, size_t alignment, size_t size)
1240 {
1241     int err;
1242     void *result;
1243 
1244     if (!malloc_started) {
1245 	    malloc_init();
1246 	    malloc_started = 1;
1247     }
1248     /* Make sure that alignment is a large enough power of 2. */
1249     if (((alignment - 1) & alignment) != 0 || alignment < sizeof(void *) ||
1250 	alignment > malloc_pagesize)
1251 	    return EINVAL;
1252 
1253     /*
1254      * (size | alignment) is enough to assure the requested alignment, since
1255      * the allocator always allocates power-of-two blocks.
1256      */
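    /*
     * For example, size 24 with alignment 16 stays 24 (24 | 16 == 24) and
     * is served from the 32-byte bucket; chunks sit at multiples of their
     * own power-of-two size within a page, so the result is 32-byte
     * aligned, which satisfies the request.
     */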
1257     err = errno; /* Protect errno against changes in pubrealloc(). */
1258     result = pubrealloc(NULL, (size | alignment), " in posix_memalign()");
1259     errno = err;
1260 
1261     if (result == NULL)
1262 	return ENOMEM;
1263 
1264     *memptr = result;
1265     return 0;
1266 }
1267 
1268 void *
1269 calloc(size_t num, size_t size)
1270 {
1271     void *ret;
1272 
1273     if (size != 0 && (num * size) / size != num) {
1274 	/* size_t overflow. */
1275 	errno = ENOMEM;
1276 	return (NULL);
1277     }
1278 
1279     ret = pubrealloc(NULL, num * size, " in calloc():");
1280 
1281     if (ret != NULL)
1282 	memset(ret, 0, num * size);
1283 
1284     return ret;
1285 }
1286 
1287 void
1288 free(void *ptr)
1289 {
1290 
1291     pubrealloc(ptr, 0, " in free():");
1292 }
1293 
1294 void *
1295 realloc(void *ptr, size_t size)
1296 {
1297 
1298     return pubrealloc(ptr, size, " in realloc():");
1299 }
1300 
1301 /*
1302  * Begin library-private functions, used by threading libraries for protection
1303  * of malloc during fork().  These functions are only called if the program is
1304  * running in threaded mode, so there is no need to check whether the program
1305  * is threaded here.
1306  */
1307 
1308 void
1309 _malloc_prefork(void)
1310 {
1311 
1312 	_MALLOC_LOCK();
1313 }
1314 
1315 void
1316 _malloc_postfork(void)
1317 {
1318 
1319 	_MALLOC_UNLOCK();
1320 }
1321