xref: /minix3/lib/libc/stdlib/malloc.c (revision b86af8be0be1a62e182faf3677a637d21b24e289)
1 /*	$NetBSD: malloc.c,v 1.54 2011/05/18 01:59:39 christos Exp $	*/
2 
3 /*
4  * ----------------------------------------------------------------------------
5  * "THE BEER-WARE LICENSE" (Revision 42):
6  * <phk@FreeBSD.ORG> wrote this file.  As long as you retain this notice you
7  * can do whatever you want with this stuff. If we meet some day, and you think
8  * this stuff is worth it, you can buy me a beer in return.   Poul-Henning Kamp
9  * ----------------------------------------------------------------------------
10  *
11  * From FreeBSD: malloc.c,v 1.91 2006/01/12 07:28:20 jasone
12  *
13  */
14 
15 #ifdef __minix
16 #define mmap minix_mmap
17 #define munmap minix_munmap
18 #ifdef _LIBSYS
19 #include <minix/sysutil.h>
20 #define MALLOC_NO_SYSCALLS
21 #define wrtwarning(w) printf("libminc malloc warning: %s\n", w)
22 #define wrterror(w) panic("libminc malloc error: %s\n", w)
23 #endif
24 #endif
25 
26 /*
27  * Defining MALLOC_EXTRA_SANITY will enable extra checks which are related
28  * to internal conditions and consistency in malloc.c. This has a
29  * noticeable runtime performance hit, and generally will not do you
30  * any good unless you fiddle with the internals of malloc or want
31  * to catch random pointer corruption as early as possible.
32  */
33 #ifndef MALLOC_EXTRA_SANITY
34 #undef MALLOC_EXTRA_SANITY
35 #endif
36 
37 /*
38  * What to use for Junk.  This is the byte value we use to fill with
39  * when the 'J' option is enabled.
40  */
41 #define SOME_JUNK	0xd0		/* as in "Duh" :-) */
42 
43 /*
44  * The basic parameters you can tweak.
45  *
46  * malloc_minsize	minimum size of an allocation in bytes.
47  *			If this is too small it's too much work
48  *			to manage them.  This is also the smallest
49  *			unit of alignment used for the storage
50  *			returned by malloc/realloc.
51  *
52  */
53 
54 #include "namespace.h"
55 #if defined(__FreeBSD__)
56 #   if defined(__i386__)
57 #       define malloc_minsize		16U
58 #   endif
59 #   if defined(__ia64__)
60 #	define malloc_pageshift		13U
61 #	define malloc_minsize		16U
62 #   endif
63 #   if defined(__alpha__)
64 #       define malloc_pageshift		13U
65 #       define malloc_minsize		16U
66 #   endif
67 #   if defined(__sparc64__)
68 #       define malloc_pageshift		13U
69 #       define malloc_minsize		16U
70 #   endif
71 #   if defined(__amd64__)
72 #       define malloc_pageshift		12U
73 #       define malloc_minsize		16U
74 #   endif
75 #   if defined(__arm__)
76 #       define malloc_pageshift         12U
77 #       define malloc_minsize           16U
78 #   endif
79 #   define HAS_UTRACE
80 #   define UTRACE_LABEL
81 
82 #include <sys/cdefs.h>
83 void utrace(struct ut *, int);
84 
85     /*
86      * Make malloc/free/realloc thread-safe in libc for use with
87      * kernel threads.
88      */
89 #   include "libc_private.h"
90 #   include "spinlock.h"
91     static spinlock_t thread_lock	= _SPINLOCK_INITIALIZER;
92 #   define _MALLOC_LOCK()		if (__isthreaded) _SPINLOCK(&thread_lock);
93 #   define _MALLOC_UNLOCK()		if (__isthreaded) _SPINUNLOCK(&thread_lock);
94 #endif /* __FreeBSD__ */
95 
96 /* #undef these things so that malloc uses the non-internal symbols.
97  * This is necessary for VM to be able to define its own versions, and
98  * use this malloc.
99  */
100 #undef minix_mmap
101 #undef minix_munmap
102 
103 #include <sys/types.h>
104 #if defined(__NetBSD__)
105 #   define malloc_minsize               16U
106 #   define HAS_UTRACE
107 #   define UTRACE_LABEL "malloc",
108 #include <sys/cdefs.h>
109 #include "extern.h"
110 #if defined(LIBC_SCCS) && !defined(lint)
111 __RCSID("$NetBSD: malloc.c,v 1.54 2011/05/18 01:59:39 christos Exp $");
112 #endif /* LIBC_SCCS and not lint */
113 int utrace(const char *, void *, size_t);
114 
115 #include <reentrant.h>
116 extern int __isthreaded;
117 static mutex_t thread_lock = MUTEX_INITIALIZER;
118 #define _MALLOC_LOCK()	if (__isthreaded) mutex_lock(&thread_lock);
119 #define _MALLOC_UNLOCK()	if (__isthreaded) mutex_unlock(&thread_lock);
120 #endif /* __NetBSD__ */
121 
122 #if defined(__sparc__) && defined(sun)
123 #   define malloc_minsize		16U
124 #   define MAP_ANON			(0)
125     static int fdzero;
126 #   define MMAP_FD	fdzero
127 #   define INIT_MMAP() \
128 	{ if ((fdzero = open(_PATH_DEVZERO, O_RDWR, 0000)) == -1) \
129 	    wrterror("open of /dev/zero"); }
130 #endif /* __sparc__ */
131 
132 /* Insert your combination here... */
133 #if defined(__FOOCPU__) && defined(__BAROS__)
134 #   define malloc_minsize		16U
135 #endif /* __FOOCPU__ && __BAROS__ */
136 
137 #ifndef ZEROSIZEPTR
138 #define ZEROSIZEPTR	((void *)(uintptr_t)(1UL << (malloc_pageshift - 1)))
139 #endif
140 
141 /*
142  * No user serviceable parts behind this point.
143  */
144 #include <sys/types.h>
145 #include <sys/mman.h>
146 #include <errno.h>
147 #include <fcntl.h>
148 #include <paths.h>
149 #include <stddef.h>
150 #include <stdio.h>
151 #include <stdlib.h>
152 #include <string.h>
153 #include <unistd.h>
154 
155 /*
156  * This structure describes a page worth of chunks.
157  */
158 
/*
 * Per-page bookkeeping for a page carved into equal-size chunks.
 * Lives either inside the page itself (when the admin area costs no
 * more than two chunks) or in a separate imalloc()ed block.  One bit
 * per chunk in bits[]; a SET bit means the chunk is free.
 */
struct pginfo {
    struct pginfo	*next;	/* next page holding chunks of this size */
    void		*page;	/* Pointer to the page */
    u_short		size;	/* size of this page's chunks */
    u_short		shift;	/* How far to shift for this size chunks */
    u_short		free;	/* How many free chunks */
    u_short		total;	/* How many chunks in total */
    u_int		bits[1]; /* Which chunks are free (trailing array, really longer) */
};
168 
169 /*
170  * This structure describes a number of free pages.
171  */
172 
/*
 * One node in the doubly-linked, address-ordered list of free page
 * runs (anchored at free_list).
 */
struct pgfree {
    struct pgfree	*next;	/* next run of free pages */
    struct pgfree	*prev;	/* prev run of free pages */
    void		*page;	/* pointer to free pages */
    void		*end;	/* pointer to end of free pages */
    size_t		size;	/* number of bytes free */
};
180 
181 /*
182  * How many bits per u_int in the bitmap.
183  * Change only if not 8 bits/byte
184  */
185 #define	MALLOC_BITS	((int)(8*sizeof(u_int)))
186 
187 /*
188  * Magic values to put in the page_directory
189  */
190 #define MALLOC_NOT_MINE	((struct pginfo*) 0)
191 #define MALLOC_FREE 	((struct pginfo*) 1)
192 #define MALLOC_FIRST	((struct pginfo*) 2)
193 #define MALLOC_FOLLOW	((struct pginfo*) 3)
194 #define MALLOC_MAGIC	((struct pginfo*) 4)
195 
196 /*
197  * Page size related parameters, computed at run-time.
198  */
199 static size_t malloc_pagesize;
200 static size_t malloc_pageshift;
201 static size_t malloc_pagemask;
202 
203 #ifndef malloc_minsize
204 #define malloc_minsize			16U
205 #endif
206 
207 #ifndef malloc_maxsize
208 #define malloc_maxsize			((malloc_pagesize)>>1)
209 #endif
210 
211 #define pageround(foo) (((foo) + (malloc_pagemask))&(~(malloc_pagemask)))
212 #define ptr2idx(foo) \
213     (((size_t)(uintptr_t)(foo) >> malloc_pageshift)-malloc_origo)
214 
215 #ifndef _MALLOC_LOCK
216 #define _MALLOC_LOCK()
217 #endif
218 
219 #ifndef _MALLOC_UNLOCK
220 #define _MALLOC_UNLOCK()
221 #endif
222 
223 #ifndef MMAP_FD
224 #define MMAP_FD (-1)
225 #endif
226 
227 #ifndef INIT_MMAP
228 #define INIT_MMAP()
229 #endif
230 
231 #ifndef __minix
232 #ifndef MADV_FREE
233 #define MADV_FREE MADV_DONTNEED
234 #endif
235 #endif /* !__minix */
236 
237 /* Number of free pages we cache */
238 static size_t malloc_cache = 16;
239 
240 /* The offset from pagenumber to index into the page directory */
241 static size_t malloc_origo;
242 
243 /* The last index in the page directory we care about */
244 static size_t last_idx;
245 
246 /* Pointer to page directory. Allocated "as if with" malloc */
247 static struct	pginfo **page_dir;
248 
249 /* How many slots in the page directory */
250 static size_t	malloc_ninfo;
251 
252 /* Free pages line up here */
253 static struct pgfree free_list;
254 
255 /* Abort(), user doesn't handle problems.  */
256 static int malloc_abort;
257 
258 /* Are we trying to die ?  */
259 static int suicide;
260 
261 /* always realloc ?  */
262 static int malloc_realloc;
263 
264 /* pass the kernel a hint on free pages ?  */
265 #if defined(MADV_FREE)
266 static int malloc_hint = 0;
267 #endif
268 
269 /* xmalloc behaviour ?  */
270 static int malloc_xmalloc;
271 
272 /* sysv behaviour for malloc(0) ?  */
273 static int malloc_sysv;
274 
275 /* zero fill ?  */
276 static int malloc_zero;
277 
278 /* junk fill ?  */
279 static int malloc_junk;
280 
281 #ifdef HAS_UTRACE
282 
283 /* utrace ?  */
284 static int malloc_utrace;
285 
286 struct ut { void *p; size_t s; void *r; };
287 
288 #define UTRACE(a, b, c) \
289 	if (malloc_utrace) {			\
290 		struct ut u;			\
291 		u.p=a; u.s = b; u.r=c;		\
292 		utrace(UTRACE_LABEL (void *) &u, sizeof u);	\
293 	}
294 #else /* !HAS_UTRACE */
295 #define UTRACE(a,b,c)
296 #endif /* HAS_UTRACE */
297 
298 /* my last break. */
299 static void *malloc_brk;
300 
301 /* one location cache for free-list holders */
302 static struct pgfree *px;
303 
304 /* compile-time options */
305 const char *_malloc_options;
306 
307 /* Name of the current public function */
308 static const char *malloc_func;
309 
310 /* Macro for mmap */
311 #define MMAP(size) \
312 	mmap(NULL, (size), PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, \
313 	    MMAP_FD, (off_t)0);
314 
315 /*
316  * Necessary function declarations
317  */
318 static int extend_pgdir(size_t idx);
319 static void *imalloc(size_t size);
320 static void ifree(void *ptr);
321 static void *irealloc(void *ptr, size_t size);
322 
323 #ifndef MALLOC_NO_SYSCALLS
/*
 * Emit a diagnostic as four consecutive raw writes to stderr.
 * stdio is deliberately avoided here since it may itself allocate.
 */
static void
wrtmessage(const char *p1, const char *p2, const char *p3, const char *p4)
{
    const char *part[4];
    int n;

    part[0] = p1;
    part[1] = p2;
    part[2] = p3;
    part[3] = p4;
    for (n = 0; n < 4; n++)
	write(STDERR_FILENO, part[n], strlen(part[n]));
}
333 
/* Diagnostic output hook; defaults to raw writes, overridable by the user. */
void (*_malloc_message)(const char *p1, const char *p2, const char *p3,
	    const char *p4) = wrtmessage;
/*
 * Fatal malloc error: set the suicide flag first (so re-entrant calls
 * bail out instead of making things worse), report, then abort.
 */
static void
wrterror(const char *p)
{

    suicide = 1;
    _malloc_message(getprogname(), malloc_func, " error: ", p);
    abort();
}
344 
345 static void
346 wrtwarning(const char *p)
347 {
348 
349     /*
350      * Sensitive processes, somewhat arbitrarily defined here as setuid,
351      * setgid, root and wheel cannot afford to have malloc mistakes.
352      */
353     if (malloc_abort || issetugid() || getuid() == 0 || getgid() == 0)
354 	wrterror(p);
355 }
356 #endif
357 
358 /*
359  * Allocate a number of pages from the OS
360  */
361 static void *
362 map_pages(size_t pages)
363 {
364     caddr_t result, rresult, tail;
365     intptr_t bytes = pages << malloc_pageshift;
366 
367     if (bytes < 0 || (size_t)bytes < pages) {
368 	errno = ENOMEM;
369 	return NULL;
370     }
371 
372     if ((result = sbrk(bytes)) == (void *)-1)
373 	return NULL;
374 
375     /*
376      * Round to a page, in case sbrk(2) did not do this for us
377      */
378     rresult = (caddr_t)pageround((size_t)(uintptr_t)result);
379     if (result < rresult) {
380 	/* make sure we have enough space to fit bytes */
381 	if (sbrk((intptr_t)(rresult - result)) == (void *) -1) {
382 	    /* we failed, put everything back */
383 	    if (brk(result)) {
384 		wrterror("brk(2) failed [internal error]\n");
385 	    }
386 	}
387     }
388     tail = rresult + (size_t)bytes;
389 
390     last_idx = ptr2idx(tail) - 1;
391     malloc_brk = tail;
392 
393     if ((last_idx+1) >= malloc_ninfo && !extend_pgdir(last_idx)) {
394 	malloc_brk = result;
395 	last_idx = ptr2idx(malloc_brk) - 1;
396 	/* Put back break point since we failed. */
397 	if (brk(malloc_brk))
398 	    wrterror("brk(2) failed [internal error]\n");
399 	return 0;
400     }
401 
402     return rresult;
403 }
404 
405 /*
406  * Extend page directory
407  */
/*
 * Extend the page directory so it can hold index 'idx'.
 * Maps a fresh, larger region, copies the old directory over, swaps
 * the pointer, and unmaps the old mapping.  Returns 1 on success,
 * 0 on failure with errno set.
 */
static int
extend_pgdir(size_t idx)
{
    struct  pginfo **new, **old;
    size_t newlen, oldlen;

    /*
     * check for overflow: reject an idx so large that computing the
     * new mapping length below would wrap size_t.
     */
    if ((((~(1UL << ((sizeof(size_t) * NBBY) - 1)) / sizeof(*page_dir)) + 1)
	+ (malloc_pagesize / sizeof *page_dir)) < idx) {
	errno = ENOMEM;
	return 0;
    }

    /* Make it this many pages */
    newlen = pageround(idx * sizeof *page_dir) + malloc_pagesize;

    /* remember the old mapping size */
    oldlen = malloc_ninfo * sizeof *page_dir;

    /*
     * NOTE: we allocate new pages and copy the directory rather than tempt
     * fate by trying to "grow" the region.. There is nothing to prevent
     * us from accidentally re-mapping space that's been allocated by our caller
     * via dlopen() or other mmap().
     *
     * The copy problem is not too bad, as there is 4K of page index per
     * 4MB of malloc arena.
     *
     * We can totally avoid the copy if we open a file descriptor to associate
     * the anon mappings with.  Then, when we remap the pages at the new
     * address, the old pages will be "magically" remapped..  But this means
     * keeping open a "secret" file descriptor.....
     */

    /* Get new pages */
    new = MMAP(newlen);
    if (new == MAP_FAILED)
	return 0;

    /* Copy the old stuff */
    memcpy(new, page_dir, oldlen);

    /* register the new size */
    malloc_ninfo = newlen / sizeof *page_dir;

    /* swap the pointers */
    old = page_dir;
    page_dir = new;

    /* Now free the old stuff */
    munmap(old, oldlen);
    return 1;
}
461 
462 /*
463  * Initialize the world
464  */
/*
 * Initialize the world: compute page-size parameters, parse option
 * flags (from /etc/malloc.conf, the environment, and the compiled-in
 * _malloc_options, in that order -- later sources override earlier
 * ones), and map the initial page directory.  Preserves errno.
 */
static void
malloc_init(void)
{
#ifndef MALLOC_NO_SYSCALLS
    const char *p;
    char b[64];
    size_t i;
    ssize_t j;
#endif
    int serrno = errno;	/* saved; restored before returning */
#ifndef MALLOC_NO_SYSCALLS

    /*
     * Compute page-size related variables.
     */
    malloc_pagesize = (size_t)sysconf(_SC_PAGESIZE);
#else
    malloc_pagesize = PAGE_SIZE;
#endif
    malloc_pagemask = malloc_pagesize - 1;
    /* Page size is a power of two; derive its log2. */
    for (malloc_pageshift = 0;
	 (1UL << malloc_pageshift) != malloc_pagesize;
	 malloc_pageshift++)
	/* nothing */ ;

    INIT_MMAP();

#ifdef MALLOC_EXTRA_SANITY
    malloc_junk = 1;
#endif /* MALLOC_EXTRA_SANITY */

#ifndef MALLOC_NO_SYSCALLS
    for (i = 0; i < 3; i++) {
	if (i == 0) {
	    /* Pass 0: /etc/malloc.conf is a symlink whose target text holds the flags. */
	    j = readlink("/etc/malloc.conf", b, sizeof b - 1);
	    if (j == -1)
		continue;
	    b[j] = '\0';
	    p = b;
	} else if (i == 1 && issetugid() == 0) {
	    /* Pass 1: environment, ignored for set[ug]id processes. */
	    p = getenv("MALLOC_OPTIONS");
	} else if (i == 1) {
	    continue;
	} else {
	    /* Pass 2: compiled-in options take final precedence. */
	    p = _malloc_options;
	}
	for (; p != NULL && *p != '\0'; p++) {
	    switch (*p) {
		case '>': malloc_cache   <<= 1; break;
		case '<': malloc_cache   >>= 1; break;
		case 'a': malloc_abort   = 0; break;
		case 'A': malloc_abort   = 1; break;
#ifndef __minix
		case 'h': malloc_hint    = 0; break;
		case 'H': malloc_hint    = 1; break;
#endif /* !__minix */
		case 'r': malloc_realloc = 0; break;
		case 'R': malloc_realloc = 1; break;
		case 'j': malloc_junk    = 0; break;
		case 'J': malloc_junk    = 1; break;
#ifdef HAS_UTRACE
		case 'u': malloc_utrace  = 0; break;
		case 'U': malloc_utrace  = 1; break;
#endif
		case 'v': malloc_sysv    = 0; break;
		case 'V': malloc_sysv    = 1; break;
		case 'x': malloc_xmalloc = 0; break;
		case 'X': malloc_xmalloc = 1; break;
		case 'z': malloc_zero    = 0; break;
		case 'Z': malloc_zero    = 1; break;
		default:
		    _malloc_message(getprogname(), malloc_func,
			 " warning: ", "unknown char in MALLOC_OPTIONS\n");
		    break;
	    }
	}
    }
#endif

    UTRACE(0, 0, 0);

    /*
     * We want junk in the entire allocation, and zero only in the part
     * the user asked for.
     */
    if (malloc_zero)
	malloc_junk = 1;

    /* Allocate one page for the page directory */
    page_dir = MMAP(malloc_pagesize);

    if (page_dir == MAP_FAILED)
	wrterror("mmap(2) failed, check limits.\n");

    /*
     * We need a maximum of malloc_pageshift buckets, steal these from the
     * front of the page_directory.
     */
    malloc_origo = pageround((size_t)(uintptr_t)sbrk((intptr_t)0))
	>> malloc_pageshift;
    malloc_origo -= malloc_pageshift;

    malloc_ninfo = malloc_pagesize / sizeof *page_dir;

    /* Recalculate the cache size in bytes, and make sure it's nonzero */

    if (!malloc_cache)
	malloc_cache++;

    malloc_cache <<= malloc_pageshift;

    /*
     * This is a nice hack from Kaleb Keithly (kaleb@x.org).
     * We can sbrk(2) further back when we keep this on a low address.
     */
    px = imalloc(sizeof *px);

    errno = serrno;
}
584 
585 /*
586  * Allocate a number of complete pages
587  */
/*
 * Allocate a number of complete pages: round the request up to whole
 * pages, satisfy it from the free-page list when possible (first fit),
 * otherwise extend the break via map_pages().  Marks the pages
 * MALLOC_FIRST/MALLOC_FOLLOW in the page directory.
 * Returns NULL on failure.
 */
static void *
malloc_pages(size_t size)
{
    void *p, *delay_free = NULL;
    size_t i;
    struct pgfree *pf;
    size_t idx;

    idx = pageround(size);
    if (idx < size) {
	/* rounding wrapped around -- request too large */
	errno = ENOMEM;
	return NULL;
    } else
	size = idx;

    p = NULL;

    /* Look for free pages before asking for more */
    for(pf = free_list.next; pf; pf = pf->next) {

#ifdef MALLOC_EXTRA_SANITY
	if (pf->size & malloc_pagemask)
	    wrterror("(ES): junk length entry on free_list.\n");
	if (!pf->size)
	    wrterror("(ES): zero length entry on free_list.\n");
	if (pf->page == pf->end)
	    wrterror("(ES): zero entry on free_list.\n");
	if (pf->page > pf->end)
	    wrterror("(ES): sick entry on free_list.\n");
	if ((void*)pf->page >= (void*)sbrk(0))
	    wrterror("(ES): entry on free_list past brk.\n");
	if (page_dir[ptr2idx(pf->page)] != MALLOC_FREE)
	    wrterror("(ES): non-free first page on free-list.\n");
	if (page_dir[ptr2idx(pf->end)-1] != MALLOC_FREE)
	    wrterror("(ES): non-free last page on free-list.\n");
#endif /* MALLOC_EXTRA_SANITY */

	if (pf->size < size)
	    continue;

	if (pf->size == size) {
	    /* Exact fit: unlink the whole entry; free its holder later. */
	    p = pf->page;
	    if (pf->next != NULL)
		    pf->next->prev = pf->prev;
	    pf->prev->next = pf->next;
	    delay_free = pf;
	    break;
	}

	/* Larger run: carve our pages off the front of it. */
	p = pf->page;
	pf->page = (char *)pf->page + size;
	pf->size -= size;
	break;
    }

#ifdef MALLOC_EXTRA_SANITY
    if (p != NULL && page_dir[ptr2idx(p)] != MALLOC_FREE)
	wrterror("(ES): allocated non-free page on free-list.\n");
#endif /* MALLOC_EXTRA_SANITY */

    size >>= malloc_pageshift;

    /* Map new pages */
    if (p == NULL)
	p = map_pages(size);

    if (p != NULL) {

	idx = ptr2idx(p);
	page_dir[idx] = MALLOC_FIRST;
	for (i=1;i<size;i++)
	    page_dir[idx+i] = MALLOC_FOLLOW;

	if (malloc_junk)
	    memset(p, SOME_JUNK, size << malloc_pageshift);
    }

    if (delay_free) {
	/* Recycle the unlinked holder into the one-slot cache, or free it. */
	if (px == NULL)
	    px = delay_free;
	else
	    ifree(delay_free);
    }

    return p;
}
674 
675 /*
676  * Allocate a page of fragments
677  */
678 
/*
 * Allocate a page of fragments: grab a fresh page, build a pginfo
 * bitmap describing chunks of size 1<<bits, and hook it onto the
 * bucket list page_dir[bits] (the first malloc_pageshift slots of the
 * page directory double as the bucket heads).  Returns 1 on success,
 * 0 on failure.
 */
static inline int
malloc_make_chunks(int bits)
{
    struct  pginfo *bp;
    void *pp;
    int i, k;
    long l;

    /* Allocate a new bucket */
    pp = malloc_pages(malloc_pagesize);
    if (pp == NULL)
	return 0;

    /* Find length of admin structure */
    l = (long)offsetof(struct pginfo, bits[0]);
    l += (long)sizeof bp->bits[0] *
	(((malloc_pagesize >> bits)+MALLOC_BITS-1) / MALLOC_BITS);

    /* Don't waste more than two chunks on this */
    if ((1<<(bits)) <= l+l) {
	/* Chunks are big enough: keep the admin structure in the page itself. */
	bp = (struct  pginfo *)pp;
    } else {
	bp = imalloc((size_t)l);
	if (bp == NULL) {
	    ifree(pp);
	    return 0;
	}
    }

    bp->size = (1<<bits);
    bp->shift = bits;
    bp->total = bp->free = (u_short)(malloc_pagesize >> bits);
    bp->page = pp;

    /* set all valid bits in the bitmap */
    k = bp->total;
    i = 0;

    /* Do a bunch at a time */
    for(;k-i >= MALLOC_BITS; i += MALLOC_BITS)
	bp->bits[i / MALLOC_BITS] = ~0U;

    for(; i < k; i++)
        bp->bits[i/MALLOC_BITS] |= 1<<(i%MALLOC_BITS);

    if (bp == bp->page) {
	/* Mark the ones we stole for ourselves (the in-page admin area). */
	for(i = 0; l > 0; i++) {
	    bp->bits[i / MALLOC_BITS] &= ~(1 << (i % MALLOC_BITS));
	    bp->free--;
	    bp->total--;
	    l -= (long)(1 << bits);
	}
    }

    /* MALLOC_LOCK */

    page_dir[ptr2idx(pp)] = bp;

    bp->next = page_dir[bits];
    page_dir[bits] = bp;

    /* MALLOC_UNLOCK */

    return 1;
}
745 
746 /*
747  * Allocate a fragment
748  */
/*
 * Allocate a fragment: round the request up to a power of two no
 * smaller than malloc_minsize, take the first free chunk from the
 * bucket's first page (making a new page of chunks if the bucket is
 * empty), and clear its bit in the bitmap.  Returns NULL on failure.
 */
static void *
malloc_bytes(size_t size)
{
    size_t i;
    int j;
    u_int u;
    struct  pginfo *bp;
    size_t k;
    u_int *lp;

    /* Don't bother with anything less than this */
    if (size < malloc_minsize)
	size = malloc_minsize;


    /* Find the right bucket: j = ceil(log2(size)) */
    j = 1;
    i = size-1;
    while (i >>= 1)
	j++;

    /* If it's empty, make a page more of that size chunks */
    if (page_dir[j] == NULL && !malloc_make_chunks(j))
	return NULL;

    bp = page_dir[j];

    /* Find first word of bitmap which isn't empty */
    for (lp = bp->bits; !*lp; lp++)
	;

    /* Find that bit, and tweak it */
    u = 1;
    k = 0;
    while (!(*lp & u)) {
	u += u;
	k++;
    }
    *lp ^= u;

    /* If there are no more free, remove from free-list */
    if (!--bp->free) {
	page_dir[j] = bp->next;
	bp->next = NULL;
    }

    /* Adjust to the real offset of that chunk */
    k += (lp-bp->bits)*MALLOC_BITS;
    k <<= bp->shift;

    if (malloc_junk)
	memset((u_char*)bp->page + k, SOME_JUNK, (size_t)bp->size);

    return (u_char *)bp->page + k;
}
804 
805 /*
806  * Allocate a piece of memory
807  */
808 static void *
809 imalloc(size_t size)
810 {
811     void *result;
812 
813     if (suicide)
814 	abort();
815 
816     if ((size + malloc_pagesize) < size)	/* Check for overflow */
817 	result = NULL;
818     else if ((size + malloc_pagesize) >= (uintptr_t)page_dir)
819 	result = NULL;
820     else if (size <= malloc_maxsize)
821 	result = malloc_bytes(size);
822     else
823 	result = malloc_pages(size);
824 
825     if (malloc_abort && result == NULL)
826 	wrterror("allocation failed.\n");
827 
828     if (malloc_zero && result != NULL)
829 	memset(result, 0, size);
830 
831     return result;
832 }
833 
834 /*
835  * Change the size of an allocation.
836  */
/*
 * Change the size of an allocation.  Validates the pointer against the
 * page directory; when the existing allocation already fits (and the
 * 'R' option is off) the pointer is returned unchanged, otherwise a
 * new block is allocated, the data copied, and the old block freed.
 * Returns NULL (warning issued) on a bad pointer or failed allocation.
 */
static void *
irealloc(void *ptr, size_t size)
{
    void *p;
    size_t osize, idx;
    struct pginfo **mp;
    size_t i;

    if (suicide)
	abort();

    idx = ptr2idx(ptr);

    if (idx < malloc_pageshift) {
	wrtwarning("junk pointer, too low to make sense.\n");
	return 0;
    }

    if (idx > last_idx) {
	wrtwarning("junk pointer, too high to make sense.\n");
	return 0;
    }

    mp = &page_dir[idx];

    if (*mp == MALLOC_FIRST) {			/* Page allocation */

	/* Check the pointer */
	if ((size_t)(uintptr_t)ptr & malloc_pagemask) {
	    wrtwarning("modified (page-) pointer.\n");
	    return NULL;
	}

	/* Find the size in bytes by counting the MALLOC_FOLLOW pages */
	for (osize = malloc_pagesize; *++mp == MALLOC_FOLLOW;)
	    osize += malloc_pagesize;

        if (!malloc_realloc && 			/* unless we have to, */
	  size <= osize && 			/* .. or are too small, */
	  size > (osize - malloc_pagesize)) {	/* .. or can free a page, */
	    if (malloc_junk)
		memset((u_char *)ptr + size, SOME_JUNK, osize-size);
	    return ptr;				/* don't do anything. */
	}

    } else if (*mp >= MALLOC_MAGIC) {		/* Chunk allocation */

	/* Check the pointer for sane values: must be chunk-aligned */
	if (((size_t)(uintptr_t)ptr & ((*mp)->size-1))) {
	    wrtwarning("modified (chunk-) pointer.\n");
	    return NULL;
	}

	/* Find the chunk index in the page */
	i = ((size_t)(uintptr_t)ptr & malloc_pagemask) >> (*mp)->shift;

	/* Verify that it isn't a free chunk already */
        if ((*mp)->bits[i/MALLOC_BITS] & (1UL << (i % MALLOC_BITS))) {
	    wrtwarning("chunk is already free.\n");
	    return NULL;
	}

	osize = (*mp)->size;

	if (!malloc_realloc &&		/* Unless we have to, */
	  size <= osize && 		/* ..or are too small, */
	  (size > osize / 2 ||	 	/* ..or could use a smaller size, */
	  osize == malloc_minsize)) {	/* ..(if there is one) */
	    if (malloc_junk)
		memset((u_char *)ptr + size, SOME_JUNK, osize-size);
	    return ptr;			/* ..Don't do anything */
	}

    } else {
	wrtwarning("pointer to wrong page.\n");
	return NULL;
    }

    p = imalloc(size);

    if (p != NULL) {
	/* copy the lesser of the two sizes, and free the old one */
	if (!size || !osize)
	    ;
	else if (osize < size)
	    memcpy(p, ptr, osize);
	else
	    memcpy(p, ptr, size);
	ifree(ptr);
    }
    return p;
}
929 
930 /*
931  * Free a sequence of pages
932  */
933 
/*
 * Free a sequence of pages: mark them MALLOC_FREE in the page
 * directory, insert/coalesce the run into the address-ordered free
 * list, and give memory back to the OS (shrink the break) when the
 * tail of the free list exceeds the cache size and ends at the break.
 */
static inline void
free_pages(void *ptr, size_t idx, struct pginfo *info)
{
    size_t i;
    struct pgfree *pf, *pt=NULL;
    size_t l;
    void *tail;

    if (info == MALLOC_FREE) {
	wrtwarning("page is already free.\n");
	return;
    }

    if (info != MALLOC_FIRST) {
	wrtwarning("pointer to wrong page.\n");
	return;
    }

    if ((size_t)(uintptr_t)ptr & malloc_pagemask) {
	wrtwarning("modified (page-) pointer.\n");
	return;
    }

    /* Count how many pages and mark them free at the same time */
    page_dir[idx] = MALLOC_FREE;
    for (i = 1; page_dir[idx+i] == MALLOC_FOLLOW; i++)
	page_dir[idx + i] = MALLOC_FREE;

    l = i << malloc_pageshift;

    if (malloc_junk)
	memset(ptr, SOME_JUNK, l);

#ifndef __minix
    if (malloc_hint)
	madvise(ptr, l, MADV_FREE);
#endif /* !__minix */

    tail = (char *)ptr+l;

    /* add to free-list */
    if (px == NULL)
	px = imalloc(sizeof *px);	/* This cannot fail... */
    px->page = ptr;
    px->end =  tail;
    px->size = l;
    if (free_list.next == NULL) {

	/* Nothing on free list, put this at head */
	px->next = free_list.next;
	px->prev = &free_list;
	free_list.next = px;
	pf = px;
	px = NULL;

    } else {

	/* Find the right spot, leave pf pointing to the modified entry. */
	tail = (char *)ptr+l;

	for(pf = free_list.next; pf->end < ptr && pf->next != NULL;
	    pf = pf->next)
	    ; /* Race ahead here */

	if (pf->page > tail) {
	    /* Insert before entry */
	    px->next = pf;
	    px->prev = pf->prev;
	    pf->prev = px;
	    px->prev->next = px;
	    pf = px;
	    px = NULL;
	} else if (pf->end == ptr ) {
	    /* Append to the previous entry */
	    pf->end = (char *)pf->end + l;
	    pf->size += l;
	    if (pf->next != NULL && pf->end == pf->next->page ) {
		/* And collapse the next too. */
		pt = pf->next;
		pf->end = pt->end;
		pf->size += pt->size;
		pf->next = pt->next;
		if (pf->next != NULL)
		    pf->next->prev = pf;
	    }
	} else if (pf->page == tail) {
	    /* Prepend to entry */
	    pf->size += l;
	    pf->page = ptr;
	} else if (pf->next == NULL) {
	    /* Append at tail of chain */
	    px->next = NULL;
	    px->prev = pf;
	    pf->next = px;
	    pf = px;
	    px = NULL;
	} else {
	    wrterror("freelist is destroyed.\n");
	}
    }

    /* Return something to OS ? */
    if (pf->next == NULL &&			/* If we're the last one, */
      pf->size > malloc_cache &&		/* ..and the cache is full, */
      pf->end == malloc_brk &&			/* ..and none behind us, */
      malloc_brk == sbrk((intptr_t)0)) {	/* ..and it's OK to do... */

	/*
	 * Keep the cache intact.  Notice that the '>' above guarantees that
	 * the pf will always have at least one page afterwards.
	 */
	pf->end = (char *)pf->page + malloc_cache;
	pf->size = malloc_cache;

	brk(pf->end);
	malloc_brk = pf->end;

	idx = ptr2idx(pf->end);

	for(i=idx;i <= last_idx;)
	    page_dir[i++] = MALLOC_NOT_MINE;

	last_idx = idx - 1;

	/* XXX: We could realloc/shrink the pagedir here I guess. */
    }
    /* A fully-collapsed neighbour's holder is freed last, outside the list walk. */
    if (pt != NULL)
	ifree(pt);
}
1063 
1064 /*
1065  * Free a chunk, and possibly the page it's on, if the page becomes empty.
1066  */
1067 
/*
 * Free a chunk: set its bit in the page's bitmap, and when the page
 * becomes entirely free, unlink its pginfo from the bucket list and
 * release the page (and the pginfo, if it lives outside the page).
 */
static inline void
free_bytes(void *ptr, size_t idx, struct pginfo *info)
{
    size_t i;
    struct pginfo **mp;
    void *vp;

    /* Find the chunk number on the page */
    i = ((size_t)(uintptr_t)ptr & malloc_pagemask) >> info->shift;

    if (((size_t)(uintptr_t)ptr & (info->size-1))) {
	wrtwarning("modified (chunk-) pointer.\n");
	return;
    }

    if (info->bits[i/MALLOC_BITS] & (1UL << (i % MALLOC_BITS))) {
	wrtwarning("chunk is already free.\n");
	return;
    }

    if (malloc_junk)
	memset(ptr, SOME_JUNK, (size_t)info->size);

    info->bits[i/MALLOC_BITS] |= (u_int)(1UL << (i % MALLOC_BITS));
    info->free++;

    /* Bucket head for this chunk size (recomputed again below; harmless). */
    mp = page_dir + info->shift;

    if (info->free == 1) {

	/* Page became non-full */

	mp = page_dir + info->shift;
	/* Insert in address order */
	while (*mp && (*mp)->next && (*mp)->next->page < info->page)
	    mp = &(*mp)->next;
	info->next = *mp;
	*mp = info;
	return;
    }

    if (info->free != info->total)
	return;

    /* Find & remove this page in the queue */
    while (*mp != info) {
	mp = &((*mp)->next);
#ifdef MALLOC_EXTRA_SANITY
	if (!*mp)
		wrterror("(ES): Not on queue.\n");
#endif /* MALLOC_EXTRA_SANITY */
    }
    *mp = info->next;

    /* Free the page & the info structure if need be */
    page_dir[idx] = MALLOC_FIRST;
    vp = info->page;		/* Order is important ! */
    if(vp != (void*)info)
	ifree(info);
    ifree(vp);
}
1129 
1130 static void
1131 ifree(void *ptr)
1132 {
1133     struct pginfo *info;
1134     size_t idx;
1135 
1136     /* This is legal */
1137     if (ptr == NULL)
1138 	return;
1139 
1140     /* If we're already sinking, don't make matters any worse. */
1141     if (suicide)
1142 	return;
1143 
1144     idx = ptr2idx(ptr);
1145 
1146     if (idx < malloc_pageshift) {
1147 	wrtwarning("junk pointer, too low to make sense.\n");
1148 	return;
1149     }
1150 
1151     if (idx > last_idx) {
1152 	wrtwarning("junk pointer, too high to make sense.\n");
1153 	return;
1154     }
1155 
1156     info = page_dir[idx];
1157 
1158     if (info < MALLOC_MAGIC)
1159         free_pages(ptr, idx, info);
1160     else
1161 	free_bytes(ptr, idx, info);
1162     return;
1163 }
1164 
static int malloc_active; /* Recursion flag for public interface. */
static unsigned malloc_started; /* Set when initialization has been done */
1167 
/*
 * Common entry point behind all public allocator routines
 * (malloc/calloc/realloc/free/posix_memalign).  Serializes access with
 * _MALLOC_LOCK, detects re-entry (e.g. from a signal handler), performs
 * lazy initialization, and dispatches to imalloc/irealloc/ifree.
 * On failure returns NULL with errno set (EINVAL for misuse, ENOMEM for
 * exhaustion); `func' is a label used in diagnostics via malloc_func.
 */
static void *
pubrealloc(void *ptr, size_t size, const char *func)
{
    void *r;
    int err = 0;

    /*
     * If a thread is inside our code with a functional lock held, and then
     * catches a signal which calls us again, we would get a deadlock if the
     * lock is not of a recursive type.
     */
    _MALLOC_LOCK();
    malloc_func = func;
    if (malloc_active > 0) {
	/* Re-entered: warn once (malloc_active goes 1 -> 2), then fail. */
	if (malloc_active == 1) {
	    wrtwarning("recursive call\n");
	    malloc_active = 2;
	}
        _MALLOC_UNLOCK();
	errno = EINVAL;
	return (NULL);
    }
    malloc_active = 1;

    if (!malloc_started) {
	/* A non-NULL ptr cannot have come from us before first init. */
        if (ptr != NULL) {
	    wrtwarning("malloc() has never been called\n");
	    malloc_active = 0;
            _MALLOC_UNLOCK();
	    errno = EINVAL;
	    return (NULL);
	}
	malloc_init();
	malloc_started = 1;
    }

    /* Normalize the zero-size sentinel back to NULL before dispatching. */
    if (ptr == ZEROSIZEPTR)
	ptr = NULL;
    if (malloc_sysv && !size) {
	/* SysV semantics: size 0 frees and returns NULL. */
	if (ptr != NULL)
	    ifree(ptr);
	r = NULL;
    } else if (!size) {
	/* Default semantics: size 0 frees and returns a unique sentinel. */
	if (ptr != NULL)
	    ifree(ptr);
	r = ZEROSIZEPTR;
    } else if (ptr == NULL) {
	r = imalloc(size);
	err = (r == NULL);
    } else {
        r = irealloc(ptr, size);
	err = (r == NULL);
    }
    UTRACE(ptr, size, r);
    malloc_active = 0;
    _MALLOC_UNLOCK();
    if (malloc_xmalloc && err)
	wrterror("out of memory\n");
    if (err)
	errno = ENOMEM;
    return (r);
}
1230 
1231 /*
1232  * These are the public exported interface routines.
1233  */
1234 
void *
malloc(size_t size)
{
    /* Every public allocation request funnels through pubrealloc(). */
    return pubrealloc(NULL, size, " in malloc():");
}
1241 
1242 int
1243 posix_memalign(void **memptr, size_t alignment, size_t size)
1244 {
1245     int err;
1246     void *result;
1247 
1248     if (!malloc_started) {
1249 	    malloc_init();
1250 	    malloc_started = 1;
1251     }
1252     /* Make sure that alignment is a large enough power of 2. */
1253     if (((alignment - 1) & alignment) != 0 || alignment < sizeof(void *) ||
1254 	alignment > malloc_pagesize)
1255 	    return EINVAL;
1256 
1257     /*
1258      * (size | alignment) is enough to assure the requested alignment, since
1259      * the allocator always allocates power-of-two blocks.
1260      */
1261     err = errno; /* Protect errno against changes in pubrealloc(). */
1262     result = pubrealloc(NULL, (size | alignment), " in posix_memalign()");
1263     errno = err;
1264 
1265     if (result == NULL)
1266 	return ENOMEM;
1267 
1268     *memptr = result;
1269     return 0;
1270 }
1271 
void *
calloc(size_t num, size_t size)
{
    void *p;

    /*
     * Refuse requests whose total byte count wraps around size_t;
     * the division undoes the (well-defined) unsigned multiplication
     * only when no overflow occurred.
     */
    if (size != 0 && (num * size) / size != num) {
	errno = ENOMEM;
	return (NULL);
    }

    p = pubrealloc(NULL, num * size, " in calloc():");

    /* calloc() contract: the returned memory is zero-filled. */
    if (p != NULL)
	memset(p, 0, num * size);

    return p;
}
1290 
void
free(void *ptr)
{
    /* A zero-sized request releases the block inside pubrealloc(). */
    (void)pubrealloc(ptr, 0, " in free():");
}
1297 
void *
realloc(void *ptr, size_t size)
{
    /* pubrealloc() handles locking, lazy init and all edge cases. */
    return pubrealloc(ptr, size, " in realloc():");
}
1304 
1305 /*
1306  * Begin library-private functions, used by threading libraries for protection
1307  * of malloc during fork().  These functions are only called if the program is
1308  * running in threaded mode, so there is no need to check whether the program
1309  * is threaded here.
1310  */
1311 
void
_malloc_prefork(void)
{
	/* Hold the allocator lock across fork() so no thread forks
	 * mid-allocation, leaving the arena consistent in the child. */
	_MALLOC_LOCK();
}
1318 
void
_malloc_postfork(void)
{
	/* Release the lock taken by _malloc_prefork() once fork() is done. */
	_MALLOC_UNLOCK();
}
1325