xref: /minix3/lib/libc/stdlib/malloc.c (revision 0a6a1f1d05b60e214de2f05a7310ddd1f0e590e7)
1 /*	$NetBSD: malloc.c,v 1.56 2014/09/18 13:58:20 christos Exp $	*/
2 
3 /*
4  * ----------------------------------------------------------------------------
5  * "THE BEER-WARE LICENSE" (Revision 42):
6  * <phk@FreeBSD.ORG> wrote this file.  As long as you retain this notice you
7  * can do whatever you want with this stuff. If we meet some day, and you think
8  * this stuff is worth it, you can buy me a beer in return.   Poul-Henning Kamp
9  * ----------------------------------------------------------------------------
10  *
11  * From FreeBSD: malloc.c,v 1.91 2006/01/12 07:28:20 jasone
12  *
13  */
14 
15 #if defined(__minix)
16 #include "extern.h"
17 #ifdef _LIBSYS
18 #include <minix/sysutil.h>
19 #include <machine/param.h>
20 #include <machine/vmparam.h>
21 #define MALLOC_NO_SYSCALLS
22 #define wrtwarning(w) printf("libminc malloc warning: %s\n", w)
23 #define wrterror(w) panic("libminc malloc error: %s\n", w)
24 #endif
25 #endif /* defined(__minix) */
26 
27 /*
28  * Defining MALLOC_EXTRA_SANITY will enable extra checks which are related
29  * to internal conditions and consistency in malloc.c. This has a
30  * noticeable runtime performance hit, and generally will not do you
31  * any good unless you fiddle with the internals of malloc or want
32  * to catch random pointer corruption as early as possible.
33  */
34 #ifndef MALLOC_EXTRA_SANITY
35 #undef MALLOC_EXTRA_SANITY
36 #endif
37 
38 /*
39  * What to use for Junk.  This is the byte value we use to fill with
40  * when the 'J' option is enabled.
41  */
42 #define SOME_JUNK	0xd0		/* as in "Duh" :-) */
43 
44 /*
45  * The basic parameters you can tweak.
46  *
47  * malloc_minsize	minimum size of an allocation in bytes.
48  *			If this is too small it's too much work
49  *			to manage them.  This is also the smallest
50  *			unit of alignment used for the storage
51  *			returned by malloc/realloc.
52  *
53  */
54 
55 #include "namespace.h"
56 #if defined(__FreeBSD__)
57 #   if defined(__i386__)
58 #       define malloc_minsize		16U
59 #   endif
60 #   if defined(__ia64__)
61 #	define malloc_pageshift		13U
62 #	define malloc_minsize		16U
63 #   endif
64 #   if defined(__alpha__)
65 #       define malloc_pageshift		13U
66 #       define malloc_minsize		16U
67 #   endif
68 #   if defined(__sparc64__)
69 #       define malloc_pageshift		13U
70 #       define malloc_minsize		16U
71 #   endif
72 #   if defined(__amd64__)
73 #       define malloc_pageshift		12U
74 #       define malloc_minsize		16U
75 #   endif
76 #   if defined(__arm__)
77 #       define malloc_pageshift         12U
78 #       define malloc_minsize           16U
79 #   endif
80 #   define HAS_UTRACE
81 #   define UTRACE_LABEL
82 
83 #include <sys/cdefs.h>
84 void utrace(struct ut *, int);
85 
86     /*
87      * Make malloc/free/realloc thread-safe in libc for use with
88      * kernel threads.
89      */
90 #   include "libc_private.h"
91 #   include "spinlock.h"
92     static spinlock_t thread_lock	= _SPINLOCK_INITIALIZER;
93 #   define _MALLOC_LOCK()		if (__isthreaded) _SPINLOCK(&thread_lock);
94 #   define _MALLOC_UNLOCK()		if (__isthreaded) _SPINUNLOCK(&thread_lock);
95 #endif /* __FreeBSD__ */
96 
97 #if defined(__minix)
98 /* #undef these things so that malloc uses the non-internal symbols.
99  * This is necessary for VM to be able to define its own versions, and
100  * use this malloc.
101  */
102 #undef mmap
103 #undef munmap
104 #endif /* defined(__minix) */
105 
106 #include <sys/types.h>
107 #if defined(__NetBSD__)
108 #   define malloc_minsize               16U
109 #   define HAS_UTRACE
110 #   define UTRACE_LABEL "malloc",
111 #include <sys/cdefs.h>
112 #include "extern.h"
113 #if defined(LIBC_SCCS) && !defined(lint)
114 __RCSID("$NetBSD: malloc.c,v 1.56 2014/09/18 13:58:20 christos Exp $");
115 #endif /* LIBC_SCCS and not lint */
116 int utrace(const char *, void *, size_t);
117 
118 #include <reentrant.h>
119 extern int __isthreaded;
120 static mutex_t thread_lock = MUTEX_INITIALIZER;
121 #define _MALLOC_LOCK()	if (__isthreaded) mutex_lock(&thread_lock);
122 #define _MALLOC_UNLOCK()	if (__isthreaded) mutex_unlock(&thread_lock);
123 #endif /* __NetBSD__ */
124 
125 #if defined(__sparc__) && defined(sun)
126 #   define malloc_minsize		16U
127 #   define MAP_ANON			(0)
128     static int fdzero;
129 #   define MMAP_FD	fdzero
130 #   define INIT_MMAP() \
131 	{ if ((fdzero = open(_PATH_DEVZERO, O_RDWR | O_CLOEXEC, 0000)) == -1) \
132 	    wrterror("open of /dev/zero"); }
133 #endif /* __sparc__ */
134 
135 /* Insert your combination here... */
136 #if defined(__FOOCPU__) && defined(__BAROS__)
137 #   define malloc_minsize		16U
138 #endif /* __FOOCPU__ && __BAROS__ */
139 
140 #ifndef ZEROSIZEPTR
141 #define ZEROSIZEPTR	((void *)(uintptr_t)(1UL << (malloc_pageshift - 1)))
142 #endif
143 
144 /*
145  * No user serviceable parts behind this point.
146  */
147 #include <sys/types.h>
148 #include <sys/mman.h>
149 #include <errno.h>
150 #include <fcntl.h>
151 #include <paths.h>
152 #include <stddef.h>
153 #include <stdio.h>
154 #include <stdlib.h>
155 #include <string.h>
156 #include <unistd.h>
157 
158 /*
159  * This structure describes a page worth of chunks.
160  */
161 
162 struct pginfo {
163     struct pginfo	*next;	/* next on the free list */
164     void		*page;	/* Pointer to the page */
165     u_short		size;	/* size of this page's chunks */
166     u_short		shift;	/* How far to shift for this size chunks */
167     u_short		free;	/* How many free chunks */
168     u_short		total;	/* How many chunk */
169     u_int		bits[1]; /* Which chunks are free */
170 };
171 
172 /*
173  * This structure describes a number of free pages.
174  */
175 
176 struct pgfree {
177     struct pgfree	*next;	/* next run of free pages */
178     struct pgfree	*prev;	/* prev run of free pages */
179     void		*page;	/* pointer to free pages */
180     void		*end;	/* pointer to end of free pages */
181     size_t		size;	/* number of bytes free */
182 };
183 
184 /*
185  * How many bits per u_int in the bitmap.
186  * Change only if not 8 bits/byte
187  */
188 #define	MALLOC_BITS	((int)(8*sizeof(u_int)))
189 
190 /*
191  * Magic values to put in the page_directory
192  */
193 #define MALLOC_NOT_MINE	((struct pginfo*) 0)
194 #define MALLOC_FREE 	((struct pginfo*) 1)
195 #define MALLOC_FIRST	((struct pginfo*) 2)
196 #define MALLOC_FOLLOW	((struct pginfo*) 3)
197 #define MALLOC_MAGIC	((struct pginfo*) 4)
198 
199 /*
200  * Page size related parameters, computed at run-time.
201  */
202 static size_t malloc_pagesize;
203 static size_t malloc_pageshift;
204 static size_t malloc_pagemask;
205 
206 #ifndef malloc_minsize
207 #define malloc_minsize			16U
208 #endif
209 
210 #ifndef malloc_maxsize
211 #define malloc_maxsize			((malloc_pagesize)>>1)
212 #endif
213 
214 #define pageround(foo) (((foo) + (malloc_pagemask))&(~(malloc_pagemask)))
215 #define ptr2idx(foo) \
216     (((size_t)(uintptr_t)(foo) >> malloc_pageshift)-malloc_origo)
217 
218 #ifndef _MALLOC_LOCK
219 #define _MALLOC_LOCK()
220 #endif
221 
222 #ifndef _MALLOC_UNLOCK
223 #define _MALLOC_UNLOCK()
224 #endif
225 
226 #ifndef MMAP_FD
227 #define MMAP_FD (-1)
228 #endif
229 
230 #ifndef INIT_MMAP
231 #define INIT_MMAP()
232 #endif
233 
234 #if !defined(__minix)
235 #ifndef MADV_FREE
236 #define MADV_FREE MADV_DONTNEED
237 #endif
238 #else
239 #undef MADV_FREE
240 #endif /* !defined(__minix) */
241 
242 /* Number of free pages we cache */
243 static size_t malloc_cache = 16;
244 
245 /* The offset from pagenumber to index into the page directory */
246 static size_t malloc_origo;
247 
248 /* The last index in the page directory we care about */
249 static size_t last_idx;
250 
251 /* Pointer to page directory. Allocated "as if with" malloc */
252 static struct	pginfo **page_dir;
253 
254 /* How many slots in the page directory */
255 static size_t	malloc_ninfo;
256 
257 /* Free pages line up here */
258 static struct pgfree free_list;
259 
260 /* Abort(), user doesn't handle problems.  */
261 static int malloc_abort;
262 
263 /* Are we trying to die ?  */
264 static int suicide;
265 
266 /* always realloc ?  */
267 static int malloc_realloc;
268 
269 /* pass the kernel a hint on free pages ?  */
270 #if defined(MADV_FREE)
271 static int malloc_hint = 0;
272 #endif
273 
274 /* xmalloc behaviour ?  */
275 static int malloc_xmalloc;
276 
277 /* sysv behaviour for malloc(0) ?  */
278 static int malloc_sysv;
279 
280 /* zero fill ?  */
281 static int malloc_zero;
282 
283 /* junk fill ?  */
284 static int malloc_junk;
285 
286 #ifdef HAS_UTRACE
287 
288 /* utrace ?  */
289 static int malloc_utrace;
290 
291 struct ut { void *p; size_t s; void *r; };
292 
293 #define UTRACE(a, b, c) \
294 	if (malloc_utrace) {			\
295 		struct ut u;			\
296 		u.p=a; u.s = b; u.r=c;		\
297 		utrace(UTRACE_LABEL (void *) &u, sizeof u);	\
298 	}
299 #else /* !HAS_UTRACE */
300 #define UTRACE(a,b,c)
301 #endif /* HAS_UTRACE */
302 
303 /* my last break. */
304 static void *malloc_brk;
305 
306 /* one location cache for free-list holders */
307 static struct pgfree *px;
308 
309 /* compile-time options */
310 const char *_malloc_options;
311 
312 /* Name of the current public function */
313 static const char *malloc_func;
314 
315 /* Macro for mmap */
316 #define MMAP(size) \
317 	mmap(NULL, (size), PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, \
318 	    MMAP_FD, (off_t)0);
319 
320 /*
321  * Necessary function declarations
322  */
323 static int extend_pgdir(size_t idx);
324 static void *imalloc(size_t size);
325 static void ifree(void *ptr);
326 static void *irealloc(void *ptr, size_t size);
327 
328 #ifndef MALLOC_NO_SYSCALLS
329 static void
wrtmessage(const char * p1,const char * p2,const char * p3,const char * p4)330 wrtmessage(const char *p1, const char *p2, const char *p3, const char *p4)
331 {
332 
333     write(STDERR_FILENO, p1, strlen(p1));
334     write(STDERR_FILENO, p2, strlen(p2));
335     write(STDERR_FILENO, p3, strlen(p3));
336     write(STDERR_FILENO, p4, strlen(p4));
337 }
338 
339 void (*_malloc_message)(const char *p1, const char *p2, const char *p3,
340 	    const char *p4) = wrtmessage;
341 static void
wrterror(const char * p)342 wrterror(const char *p)
343 {
344 
345     suicide = 1;
346     _malloc_message(getprogname(), malloc_func, " error: ", p);
347     abort();
348 }
349 
350 static void
wrtwarning(const char * p)351 wrtwarning(const char *p)
352 {
353 
354     /*
355      * Sensitive processes, somewhat arbitrarily defined here as setuid,
356      * setgid, root and wheel cannot afford to have malloc mistakes.
357      */
358     if (malloc_abort || issetugid() || getuid() == 0 || getgid() == 0)
359 	wrterror(p);
360 }
361 #endif
362 
363 /*
364  * Allocate a number of pages from the OS
365  */
366 static void *
map_pages(size_t pages)367 map_pages(size_t pages)
368 {
369     caddr_t result, rresult, tail;
370     intptr_t bytes = pages << malloc_pageshift;
371 
372     if (bytes < 0 || (size_t)bytes < pages) {
373 	errno = ENOMEM;
374 	return NULL;
375     }
376 
377     if ((result = sbrk(bytes)) == (void *)-1)
378 	return NULL;
379 
380     /*
381      * Round to a page, in case sbrk(2) did not do this for us
382      */
383     rresult = (caddr_t)pageround((size_t)(uintptr_t)result);
384     if (result < rresult) {
385 	/* make sure we have enough space to fit bytes */
386 	if (sbrk((intptr_t)(rresult - result)) == (void *) -1) {
387 	    /* we failed, put everything back */
388 	    if (brk(result)) {
389 		wrterror("brk(2) failed [internal error]\n");
390 	    }
391 	}
392     }
393     tail = rresult + (size_t)bytes;
394 
395     last_idx = ptr2idx(tail) - 1;
396     malloc_brk = tail;
397 
398     if ((last_idx+1) >= malloc_ninfo && !extend_pgdir(last_idx)) {
399 	malloc_brk = result;
400 	last_idx = ptr2idx(malloc_brk) - 1;
401 	/* Put back break point since we failed. */
402 	if (brk(malloc_brk))
403 	    wrterror("brk(2) failed [internal error]\n");
404 	return 0;
405     }
406 
407     return rresult;
408 }
409 
410 /*
411  * Extend page directory
412  */
413 static int
extend_pgdir(size_t idx)414 extend_pgdir(size_t idx)
415 {
416     struct  pginfo **new, **old;
417     size_t newlen, oldlen;
418 
419     /* check for overflow */
420     if ((((~(1UL << ((sizeof(size_t) * NBBY) - 1)) / sizeof(*page_dir)) + 1)
421 	+ (malloc_pagesize / sizeof *page_dir)) < idx) {
422 	errno = ENOMEM;
423 	return 0;
424     }
425 
426     /* Make it this many pages */
427     newlen = pageround(idx * sizeof *page_dir) + malloc_pagesize;
428 
429     /* remember the old mapping size */
430     oldlen = malloc_ninfo * sizeof *page_dir;
431 
432     /*
433      * NOTE: we allocate new pages and copy the directory rather than tempt
434      * fate by trying to "grow" the region.. There is nothing to prevent
435      * us from accidentally re-mapping space that's been allocated by our caller
436      * via dlopen() or other mmap().
437      *
438      * The copy problem is not too bad, as there is 4K of page index per
439      * 4MB of malloc arena.
440      *
441      * We can totally avoid the copy if we open a file descriptor to associate
442      * the anon mappings with.  Then, when we remap the pages at the new
443      * address, the old pages will be "magically" remapped..  But this means
444      * keeping open a "secret" file descriptor.....
445      */
446 
447     /* Get new pages */
448     new = MMAP(newlen);
449     if (new == MAP_FAILED)
450 	return 0;
451 
452     /* Copy the old stuff */
453     memcpy(new, page_dir, oldlen);
454 
455     /* register the new size */
456     malloc_ninfo = newlen / sizeof *page_dir;
457 
458     /* swap the pointers */
459     old = page_dir;
460     page_dir = new;
461 
462     /* Now free the old stuff */
463     munmap(old, oldlen);
464     return 1;
465 }
466 
467 /*
468  * Initialize the world
469  */
470 static void
malloc_init(void)471 malloc_init(void)
472 {
473 #ifndef MALLOC_NO_SYSCALLS
474     const char *p;
475     char b[64];
476     size_t i;
477     ssize_t j;
478 #endif
479     int serrno = errno;
480 #ifndef MALLOC_NO_SYSCALLS
481 
482     /*
483      * Compute page-size related variables.
484      */
485     malloc_pagesize = (size_t)sysconf(_SC_PAGESIZE);
486 #else
487     malloc_pagesize = PAGE_SIZE;
488 #endif
489     malloc_pagemask = malloc_pagesize - 1;
490     for (malloc_pageshift = 0;
491 	 (1UL << malloc_pageshift) != malloc_pagesize;
492 	 malloc_pageshift++)
493 	/* nothing */ ;
494 
495     INIT_MMAP();
496 
497 #ifdef MALLOC_EXTRA_SANITY
498     malloc_junk = 1;
499 #endif /* MALLOC_EXTRA_SANITY */
500 
501 #ifndef MALLOC_NO_SYSCALLS
502     for (i = 0; i < 3; i++) {
503 	if (i == 0) {
504 	    j = readlink("/etc/malloc.conf", b, sizeof b - 1);
505 	    if (j == -1)
506 		continue;
507 	    b[j] = '\0';
508 	    p = b;
509 	} else if (i == 1 && issetugid() == 0) {
510 	    p = getenv("MALLOC_OPTIONS");
511 	} else if (i == 1) {
512 	    continue;
513 	} else {
514 	    p = _malloc_options;
515 	}
516 	for (; p != NULL && *p != '\0'; p++) {
517 	    switch (*p) {
518 		case '>': malloc_cache   <<= 1; break;
519 		case '<': malloc_cache   >>= 1; break;
520 		case 'a': malloc_abort   = 0; break;
521 		case 'A': malloc_abort   = 1; break;
522 #if !defined(__minix)
523 		case 'h': malloc_hint    = 0; break;
524 		case 'H': malloc_hint    = 1; break;
525 #endif /* !defined(__minix) */
526 		case 'r': malloc_realloc = 0; break;
527 		case 'R': malloc_realloc = 1; break;
528 		case 'j': malloc_junk    = 0; break;
529 		case 'J': malloc_junk    = 1; break;
530 #ifdef HAS_UTRACE
531 		case 'u': malloc_utrace  = 0; break;
532 		case 'U': malloc_utrace  = 1; break;
533 #endif
534 		case 'v': malloc_sysv    = 0; break;
535 		case 'V': malloc_sysv    = 1; break;
536 		case 'x': malloc_xmalloc = 0; break;
537 		case 'X': malloc_xmalloc = 1; break;
538 		case 'z': malloc_zero    = 0; break;
539 		case 'Z': malloc_zero    = 1; break;
540 		default:
541 		    _malloc_message(getprogname(), malloc_func,
542 			 " warning: ", "unknown char in MALLOC_OPTIONS\n");
543 		    break;
544 	    }
545 	}
546     }
547 #endif
548 
549     UTRACE(0, 0, 0);
550 
551     /*
552      * We want junk in the entire allocation, and zero only in the part
553      * the user asked for.
554      */
555     if (malloc_zero)
556 	malloc_junk = 1;
557 
558     /* Allocate one page for the page directory */
559     page_dir = MMAP(malloc_pagesize);
560 
561     if (page_dir == MAP_FAILED)
562 	wrterror("mmap(2) failed, check limits.\n");
563 
564     /*
565      * We need a maximum of malloc_pageshift buckets, steal these from the
566      * front of the page_directory;
567      */
568     malloc_origo = pageround((size_t)(uintptr_t)sbrk((intptr_t)0))
569 	>> malloc_pageshift;
570     malloc_origo -= malloc_pageshift;
571 
572     malloc_ninfo = malloc_pagesize / sizeof *page_dir;
573 
574     /* Recalculate the cache size in bytes, and make sure it's nonzero */
575 
576     if (!malloc_cache)
577 	malloc_cache++;
578 
579     malloc_cache <<= malloc_pageshift;
580 
581     /*
582      * This is a nice hack from Kaleb Keithly (kaleb@x.org).
583      * We can sbrk(2) further back when we keep this on a low address.
584      */
585     px = imalloc(sizeof *px);
586 
587     errno = serrno;
588 }
589 
590 /*
591  * Allocate a number of complete pages
592  */
593 static void *
malloc_pages(size_t size)594 malloc_pages(size_t size)
595 {
596     void *p, *delay_free = NULL;
597     size_t i;
598     struct pgfree *pf;
599     size_t idx;
600 
601     idx = pageround(size);
602     if (idx < size) {
603 	errno = ENOMEM;
604 	return NULL;
605     } else
606 	size = idx;
607 
608     p = NULL;
609 
610     /* Look for free pages before asking for more */
611     for(pf = free_list.next; pf; pf = pf->next) {
612 
613 #ifdef MALLOC_EXTRA_SANITY
614 	if (pf->size & malloc_pagemask)
615 	    wrterror("(ES): junk length entry on free_list.\n");
616 	if (!pf->size)
617 	    wrterror("(ES): zero length entry on free_list.\n");
618 	if (pf->page == pf->end)
619 	    wrterror("(ES): zero entry on free_list.\n");
620 	if (pf->page > pf->end)
621 	    wrterror("(ES): sick entry on free_list.\n");
622 	if ((void*)pf->page >= (void*)sbrk(0))
623 	    wrterror("(ES): entry on free_list past brk.\n");
624 	if (page_dir[ptr2idx(pf->page)] != MALLOC_FREE)
625 	    wrterror("(ES): non-free first page on free-list.\n");
626 	if (page_dir[ptr2idx(pf->end)-1] != MALLOC_FREE)
627 	    wrterror("(ES): non-free last page on free-list.\n");
628 #endif /* MALLOC_EXTRA_SANITY */
629 
630 	if (pf->size < size)
631 	    continue;
632 
633 	if (pf->size == size) {
634 	    p = pf->page;
635 	    if (pf->next != NULL)
636 		    pf->next->prev = pf->prev;
637 	    pf->prev->next = pf->next;
638 	    delay_free = pf;
639 	    break;
640 	}
641 
642 	p = pf->page;
643 	pf->page = (char *)pf->page + size;
644 	pf->size -= size;
645 	break;
646     }
647 
648 #ifdef MALLOC_EXTRA_SANITY
649     if (p != NULL && page_dir[ptr2idx(p)] != MALLOC_FREE)
650 	wrterror("(ES): allocated non-free page on free-list.\n");
651 #endif /* MALLOC_EXTRA_SANITY */
652 
653     size >>= malloc_pageshift;
654 
655     /* Map new pages */
656     if (p == NULL)
657 	p = map_pages(size);
658 
659     if (p != NULL) {
660 
661 	idx = ptr2idx(p);
662 	page_dir[idx] = MALLOC_FIRST;
663 	for (i=1;i<size;i++)
664 	    page_dir[idx+i] = MALLOC_FOLLOW;
665 
666 	if (malloc_junk)
667 	    memset(p, SOME_JUNK, size << malloc_pageshift);
668     }
669 
670     if (delay_free) {
671 	if (px == NULL)
672 	    px = delay_free;
673 	else
674 	    ifree(delay_free);
675     }
676 
677     return p;
678 }
679 
680 /*
681  * Allocate a page of fragments
682  */
683 
684 static inline int
malloc_make_chunks(int bits)685 malloc_make_chunks(int bits)
686 {
687     struct  pginfo *bp;
688     void *pp;
689     int i, k;
690     long l;
691 
692     /* Allocate a new bucket */
693     pp = malloc_pages(malloc_pagesize);
694     if (pp == NULL)
695 	return 0;
696 
697     /* Find length of admin structure */
698     l = (long)offsetof(struct pginfo, bits[0]);
699     l += (long)sizeof bp->bits[0] *
700 	(((malloc_pagesize >> bits)+MALLOC_BITS-1) / MALLOC_BITS);
701 
702     /* Don't waste more than two chunks on this */
703     if ((1<<(bits)) <= l+l) {
704 	bp = (struct  pginfo *)pp;
705     } else {
706 	bp = imalloc((size_t)l);
707 	if (bp == NULL) {
708 	    ifree(pp);
709 	    return 0;
710 	}
711     }
712 
713     bp->size = (1<<bits);
714     bp->shift = bits;
715     bp->total = bp->free = (u_short)(malloc_pagesize >> bits);
716     bp->page = pp;
717 
718     /* set all valid bits in the bitmap */
719     k = bp->total;
720     i = 0;
721 
722     /* Do a bunch at a time */
723     for(;k-i >= MALLOC_BITS; i += MALLOC_BITS)
724 	bp->bits[i / MALLOC_BITS] = ~0U;
725 
726     for(; i < k; i++)
727         bp->bits[i/MALLOC_BITS] |= 1<<(i%MALLOC_BITS);
728 
729     if (bp == bp->page) {
730 	/* Mark the ones we stole for ourselves */
731 	for(i = 0; l > 0; i++) {
732 	    bp->bits[i / MALLOC_BITS] &= ~(1 << (i % MALLOC_BITS));
733 	    bp->free--;
734 	    bp->total--;
735 	    l -= (long)(1 << bits);
736 	}
737     }
738 
739     /* MALLOC_LOCK */
740 
741     page_dir[ptr2idx(pp)] = bp;
742 
743     bp->next = page_dir[bits];
744     page_dir[bits] = bp;
745 
746     /* MALLOC_UNLOCK */
747 
748     return 1;
749 }
750 
751 /*
752  * Allocate a fragment
753  */
754 static void *
malloc_bytes(size_t size)755 malloc_bytes(size_t size)
756 {
757     size_t i;
758     int j;
759     u_int u;
760     struct  pginfo *bp;
761     size_t k;
762     u_int *lp;
763 
764     /* Don't bother with anything less than this */
765     if (size < malloc_minsize)
766 	size = malloc_minsize;
767 
768 
769     /* Find the right bucket */
770     j = 1;
771     i = size-1;
772     while (i >>= 1)
773 	j++;
774 
775     /* If it's empty, make a page more of that size chunks */
776     if (page_dir[j] == NULL && !malloc_make_chunks(j))
777 	return NULL;
778 
779     bp = page_dir[j];
780 
781     /* Find first word of bitmap which isn't empty */
782     for (lp = bp->bits; !*lp; lp++)
783 	;
784 
785     /* Find that bit, and tweak it */
786     u = 1;
787     k = 0;
788     while (!(*lp & u)) {
789 	u += u;
790 	k++;
791     }
792     *lp ^= u;
793 
794     /* If there are no more free, remove from free-list */
795     if (!--bp->free) {
796 	page_dir[j] = bp->next;
797 	bp->next = NULL;
798     }
799 
800     /* Adjust to the real offset of that chunk */
801     k += (lp-bp->bits)*MALLOC_BITS;
802     k <<= bp->shift;
803 
804     if (malloc_junk)
805 	memset((u_char*)bp->page + k, SOME_JUNK, (size_t)bp->size);
806 
807     return (u_char *)bp->page + k;
808 }
809 
810 /*
811  * Allocate a piece of memory
812  */
813 static void *
imalloc(size_t size)814 imalloc(size_t size)
815 {
816     void *result;
817 
818     if (suicide)
819 	abort();
820 
821     if ((size + malloc_pagesize) < size)	/* Check for overflow */
822 	result = NULL;
823 #ifndef __minix
824     else if ((size + malloc_pagesize) >= (uintptr_t)page_dir)
825 	result = NULL;
826 #endif /* !__minix */
827     else if (size <= malloc_maxsize)
828 	result = malloc_bytes(size);
829     else
830 	result = malloc_pages(size);
831 
832     if (malloc_abort && result == NULL)
833 	wrterror("allocation failed.\n");
834 
835     if (malloc_zero && result != NULL)
836 	memset(result, 0, size);
837 
838     return result;
839 }
840 
841 /*
842  * Change the size of an allocation.
843  */
844 static void *
irealloc(void * ptr,size_t size)845 irealloc(void *ptr, size_t size)
846 {
847     void *p;
848     size_t osize, idx;
849     struct pginfo **mp;
850     size_t i;
851 
852     if (suicide)
853 	abort();
854 
855     idx = ptr2idx(ptr);
856 
857     if (idx < malloc_pageshift) {
858 	wrtwarning("junk pointer, too low to make sense.\n");
859 	return 0;
860     }
861 
862     if (idx > last_idx) {
863 	wrtwarning("junk pointer, too high to make sense.\n");
864 	return 0;
865     }
866 
867     mp = &page_dir[idx];
868 
869     if (*mp == MALLOC_FIRST) {			/* Page allocation */
870 
871 	/* Check the pointer */
872 	if ((size_t)(uintptr_t)ptr & malloc_pagemask) {
873 	    wrtwarning("modified (page-) pointer.\n");
874 	    return NULL;
875 	}
876 
877 	/* Find the size in bytes */
878 	for (osize = malloc_pagesize; *++mp == MALLOC_FOLLOW;)
879 	    osize += malloc_pagesize;
880 
881         if (!malloc_realloc && 			/* unless we have to, */
882 	  size <= osize && 			/* .. or are too small, */
883 	  size > (osize - malloc_pagesize)) {	/* .. or can free a page, */
884 	    if (malloc_junk)
885 		memset((u_char *)ptr + size, SOME_JUNK, osize-size);
886 	    return ptr;				/* don't do anything. */
887 	}
888 
889     } else if (*mp >= MALLOC_MAGIC) {		/* Chunk allocation */
890 
891 	/* Check the pointer for sane values */
892 	if (((size_t)(uintptr_t)ptr & ((*mp)->size-1))) {
893 	    wrtwarning("modified (chunk-) pointer.\n");
894 	    return NULL;
895 	}
896 
897 	/* Find the chunk index in the page */
898 	i = ((size_t)(uintptr_t)ptr & malloc_pagemask) >> (*mp)->shift;
899 
900 	/* Verify that it isn't a free chunk already */
901         if ((*mp)->bits[i/MALLOC_BITS] & (1UL << (i % MALLOC_BITS))) {
902 	    wrtwarning("chunk is already free.\n");
903 	    return NULL;
904 	}
905 
906 	osize = (*mp)->size;
907 
908 	if (!malloc_realloc &&		/* Unless we have to, */
909 	  size <= osize && 		/* ..or are too small, */
910 	  (size > osize / 2 ||	 	/* ..or could use a smaller size, */
911 	  osize == malloc_minsize)) {	/* ..(if there is one) */
912 	    if (malloc_junk)
913 		memset((u_char *)ptr + size, SOME_JUNK, osize-size);
914 	    return ptr;			/* ..Don't do anything */
915 	}
916 
917     } else {
918 	wrtwarning("pointer to wrong page.\n");
919 	return NULL;
920     }
921 
922     p = imalloc(size);
923 
924     if (p != NULL) {
925 	/* copy the lesser of the two sizes, and free the old one */
926 	if (!size || !osize)
927 	    ;
928 	else if (osize < size)
929 	    memcpy(p, ptr, osize);
930 	else
931 	    memcpy(p, ptr, size);
932 	ifree(ptr);
933     }
934     return p;
935 }
936 
937 /*
938  * Free a sequence of pages
939  */
940 
941 static inline void
free_pages(void * ptr,size_t idx,struct pginfo * info)942 free_pages(void *ptr, size_t idx, struct pginfo *info)
943 {
944     size_t i;
945     struct pgfree *pf, *pt=NULL;
946     size_t l;
947     void *tail;
948 
949     if (info == MALLOC_FREE) {
950 	wrtwarning("page is already free.\n");
951 	return;
952     }
953 
954     if (info != MALLOC_FIRST) {
955 	wrtwarning("pointer to wrong page.\n");
956 	return;
957     }
958 
959     if ((size_t)(uintptr_t)ptr & malloc_pagemask) {
960 	wrtwarning("modified (page-) pointer.\n");
961 	return;
962     }
963 
964     /* Count how many pages and mark them free at the same time */
965     page_dir[idx] = MALLOC_FREE;
966     for (i = 1; page_dir[idx+i] == MALLOC_FOLLOW; i++)
967 	page_dir[idx + i] = MALLOC_FREE;
968 
969     l = i << malloc_pageshift;
970 
971     if (malloc_junk)
972 	memset(ptr, SOME_JUNK, l);
973 
974 #if !defined(__minix)
975     if (malloc_hint)
976 	madvise(ptr, l, MADV_FREE);
977 #endif /* !defined(__minix) */
978 
979     tail = (char *)ptr+l;
980 
981     /* add to free-list */
982     if (px == NULL)
983 	px = imalloc(sizeof *px);	/* This cannot fail... */
984     px->page = ptr;
985     px->end =  tail;
986     px->size = l;
987     if (free_list.next == NULL) {
988 
989 	/* Nothing on free list, put this at head */
990 	px->next = free_list.next;
991 	px->prev = &free_list;
992 	free_list.next = px;
993 	pf = px;
994 	px = NULL;
995 
996     } else {
997 
998 	/* Find the right spot, leave pf pointing to the modified entry. */
999 	tail = (char *)ptr+l;
1000 
1001 	for(pf = free_list.next; pf->end < ptr && pf->next != NULL;
1002 	    pf = pf->next)
1003 	    ; /* Race ahead here */
1004 
1005 	if (pf->page > tail) {
1006 	    /* Insert before entry */
1007 	    px->next = pf;
1008 	    px->prev = pf->prev;
1009 	    pf->prev = px;
1010 	    px->prev->next = px;
1011 	    pf = px;
1012 	    px = NULL;
1013 	} else if (pf->end == ptr ) {
1014 	    /* Append to the previous entry */
1015 	    pf->end = (char *)pf->end + l;
1016 	    pf->size += l;
1017 	    if (pf->next != NULL && pf->end == pf->next->page ) {
1018 		/* And collapse the next too. */
1019 		pt = pf->next;
1020 		pf->end = pt->end;
1021 		pf->size += pt->size;
1022 		pf->next = pt->next;
1023 		if (pf->next != NULL)
1024 		    pf->next->prev = pf;
1025 	    }
1026 	} else if (pf->page == tail) {
1027 	    /* Prepend to entry */
1028 	    pf->size += l;
1029 	    pf->page = ptr;
1030 	} else if (pf->next == NULL) {
1031 	    /* Append at tail of chain */
1032 	    px->next = NULL;
1033 	    px->prev = pf;
1034 	    pf->next = px;
1035 	    pf = px;
1036 	    px = NULL;
1037 	} else {
1038 	    wrterror("freelist is destroyed.\n");
1039 	}
1040     }
1041 
1042     /* Return something to OS ? */
1043     if (pf->next == NULL &&			/* If we're the last one, */
1044       pf->size > malloc_cache &&		/* ..and the cache is full, */
1045       pf->end == malloc_brk &&			/* ..and none behind us, */
1046       malloc_brk == sbrk((intptr_t)0)) {	/* ..and it's OK to do... */
1047 
1048 	/*
1049 	 * Keep the cache intact.  Notice that the '>' above guarantees that
1050 	 * the pf will always have at least one page afterwards.
1051 	 */
1052 	pf->end = (char *)pf->page + malloc_cache;
1053 	pf->size = malloc_cache;
1054 
1055 	brk(pf->end);
1056 	malloc_brk = pf->end;
1057 
1058 	idx = ptr2idx(pf->end);
1059 
1060 	for(i=idx;i <= last_idx;)
1061 	    page_dir[i++] = MALLOC_NOT_MINE;
1062 
1063 	last_idx = idx - 1;
1064 
1065 	/* XXX: We could realloc/shrink the pagedir here I guess. */
1066     }
1067     if (pt != NULL)
1068 	ifree(pt);
1069 }
1070 
1071 /*
1072  * Free a chunk, and possibly the page it's on, if the page becomes empty.
1073  */
1074 
1075 static inline void
free_bytes(void * ptr,size_t idx,struct pginfo * info)1076 free_bytes(void *ptr, size_t idx, struct pginfo *info)
1077 {
1078     size_t i;
1079     struct pginfo **mp;
1080     void *vp;
1081 
1082     /* Find the chunk number on the page */
1083     i = ((size_t)(uintptr_t)ptr & malloc_pagemask) >> info->shift;
1084 
1085     if (((size_t)(uintptr_t)ptr & (info->size-1))) {
1086 	wrtwarning("modified (chunk-) pointer.\n");
1087 	return;
1088     }
1089 
1090     if (info->bits[i/MALLOC_BITS] & (1UL << (i % MALLOC_BITS))) {
1091 	wrtwarning("chunk is already free.\n");
1092 	return;
1093     }
1094 
1095     if (malloc_junk)
1096 	memset(ptr, SOME_JUNK, (size_t)info->size);
1097 
1098     info->bits[i/MALLOC_BITS] |= (u_int)(1UL << (i % MALLOC_BITS));
1099     info->free++;
1100 
1101     mp = page_dir + info->shift;
1102 
1103     if (info->free == 1) {
1104 
1105 	/* Page became non-full */
1106 
1107 	mp = page_dir + info->shift;
1108 	/* Insert in address order */
1109 	while (*mp && (*mp)->next && (*mp)->next->page < info->page)
1110 	    mp = &(*mp)->next;
1111 	info->next = *mp;
1112 	*mp = info;
1113 	return;
1114     }
1115 
1116     if (info->free != info->total)
1117 	return;
1118 
1119     /* Find & remove this page in the queue */
1120     while (*mp != info) {
1121 	mp = &((*mp)->next);
1122 #ifdef MALLOC_EXTRA_SANITY
1123 	if (!*mp)
1124 		wrterror("(ES): Not on queue.\n");
1125 #endif /* MALLOC_EXTRA_SANITY */
1126     }
1127     *mp = info->next;
1128 
1129     /* Free the page & the info structure if need be */
1130     page_dir[idx] = MALLOC_FIRST;
1131     vp = info->page;		/* Order is important ! */
1132     if(vp != (void*)info)
1133 	ifree(info);
1134     ifree(vp);
1135 }
1136 
1137 static void
ifree(void * ptr)1138 ifree(void *ptr)
1139 {
1140     struct pginfo *info;
1141     size_t idx;
1142 
1143     /* This is legal */
1144     if (ptr == NULL)
1145 	return;
1146 
1147     /* If we're already sinking, don't make matters any worse. */
1148     if (suicide)
1149 	return;
1150 
1151     idx = ptr2idx(ptr);
1152 
1153     if (idx < malloc_pageshift) {
1154 	wrtwarning("junk pointer, too low to make sense.\n");
1155 	return;
1156     }
1157 
1158     if (idx > last_idx) {
1159 	wrtwarning("junk pointer, too high to make sense.\n");
1160 	return;
1161     }
1162 
1163     info = page_dir[idx];
1164 
1165     if (info < MALLOC_MAGIC)
1166         free_pages(ptr, idx, info);
1167     else
1168 	free_bytes(ptr, idx, info);
1169     return;
1170 }
1171 
static int malloc_active; /* Recursion flag for public interface; 2 after the warning has been issued. */
static unsigned malloc_started; /* Set when initialization has been done. */
1174 
/*
 * Common entry point for malloc(), calloc(), realloc(), free() and
 * posix_memalign(): takes the lock, guards against recursion, lazily
 * initializes the allocator, then dispatches on (ptr, size).
 */
static void *
pubrealloc(void *ptr, size_t size, const char *func)
{
    void *r;
    int err = 0;

    /*
     * If a thread is inside our code with a functional lock held, and then
     * catches a signal which calls us again, we would get a deadlock if the
     * lock is not of a recursive type.
     */
    _MALLOC_LOCK();
    malloc_func = func;
    if (malloc_active > 0) {
	/* Recursive entry (e.g. from a signal handler): warn once, fail. */
	if (malloc_active == 1) {
	    wrtwarning("recursive call\n");
	    malloc_active = 2;
	}
        _MALLOC_UNLOCK();
	errno = EINVAL;
	return (NULL);
    }
    malloc_active = 1;

    if (!malloc_started) {
	/* A non-NULL pointer before any allocation cannot be ours. */
        if (ptr != NULL) {
	    wrtwarning("malloc() has never been called\n");
	    malloc_active = 0;
            _MALLOC_UNLOCK();
	    errno = EINVAL;
	    return (NULL);
	}
	malloc_init();
	malloc_started = 1;
    }

    /* ZEROSIZEPTR is the dummy non-NULL token handed out for size 0. */
    if (ptr == ZEROSIZEPTR)
	ptr = NULL;
    if (malloc_sysv && !size) {
	/* SysV semantics ('V' option): size 0 frees and returns NULL. */
	if (ptr != NULL)
	    ifree(ptr);
	r = NULL;
    } else if (!size) {
	/* Default: size 0 frees and returns the ZEROSIZEPTR token. */
	if (ptr != NULL)
	    ifree(ptr);
	r = ZEROSIZEPTR;
    } else if (ptr == NULL) {
	/* Plain allocation. */
	r = imalloc(size);
	err = (r == NULL);
    } else {
	/* Resize an existing allocation. */
        r = irealloc(ptr, size);
	err = (r == NULL);
    }
    /* malloc_active must be cleared before the lock is dropped. */
    UTRACE(ptr, size, r);
    malloc_active = 0;
    _MALLOC_UNLOCK();
    if (malloc_xmalloc && err)
	wrterror("out of memory\n");
    if (err)
	errno = ENOMEM;
    return (r);
}
1237 
1238 /*
1239  * These are the public exported interface routines.
1240  */
1241 
void *
malloc(size_t size)
{

    /* All locking, initialization and error handling is in pubrealloc(). */
    return pubrealloc(NULL, size, " in malloc():");
}
1248 
1249 int
posix_memalign(void ** memptr,size_t alignment,size_t size)1250 posix_memalign(void **memptr, size_t alignment, size_t size)
1251 {
1252     int err;
1253     void *result;
1254 
1255     if (!malloc_started) {
1256 	    malloc_init();
1257 	    malloc_started = 1;
1258     }
1259     /* Make sure that alignment is a large enough power of 2. */
1260     if (((alignment - 1) & alignment) != 0 || alignment < sizeof(void *) ||
1261 	alignment > malloc_pagesize)
1262 	    return EINVAL;
1263 
1264     /*
1265      * (size | alignment) is enough to assure the requested alignment, since
1266      * the allocator always allocates power-of-two blocks.
1267      */
1268     err = errno; /* Protect errno against changes in pubrealloc(). */
1269     result = pubrealloc(NULL, (size | alignment), " in posix_memalign()");
1270     errno = err;
1271 
1272     if (result == NULL)
1273 	return ENOMEM;
1274 
1275     *memptr = result;
1276     return 0;
1277 }
1278 
void *
calloc(size_t num, size_t size)
{
    size_t nbytes;
    void *p;

    nbytes = num * size;
    /*
     * Detect size_t overflow in the multiplication: for non-zero size,
     * dividing the (wrapped) product back must recover num.
     */
    if (size != 0 && nbytes / size != num) {
	errno = ENOMEM;
	return (NULL);
    }

    p = pubrealloc(NULL, nbytes, " in calloc():");

    /* calloc() guarantees zero-filled memory. */
    if (p != NULL)
	memset(p, 0, nbytes);

    return p;
}
1297 
void
free(void *ptr)
{

    /* Freeing is pubrealloc() with size 0; NULL is handled there too. */
    pubrealloc(ptr, 0, " in free():");
}
1304 
void *
realloc(void *ptr, size_t size)
{

    /* realloc() is the general case of pubrealloc(): pointer plus size. */
    return pubrealloc(ptr, size, " in realloc():");
}
1311 
1312 /*
1313  * Begin library-private functions, used by threading libraries for protection
1314  * of malloc during fork().  These functions are only called if the program is
1315  * running in threaded mode, so there is no need to check whether the program
1316  * is threaded here.
1317  */
1318 
void
_malloc_prefork(void)
{

	/*
	 * Hold the allocator lock across fork() so the child does not
	 * inherit a heap whose lock is held by a thread that no longer
	 * exists (see pthread_atfork()-style usage noted above).
	 */
	_MALLOC_LOCK();
}
1325 
void
_malloc_postfork(void)
{

	/* Release the lock taken by _malloc_prefork() once fork() is done. */
	_MALLOC_UNLOCK();
}
1332