xref: /openbsd-src/lib/libc/stdlib/malloc.c (revision 48950c12d106c85f315112191a0228d7b83b9510)
1 /*	$OpenBSD: malloc.c,v 1.149 2012/12/22 07:32:17 otto Exp $	*/
2 /*
3  * Copyright (c) 2008 Otto Moerbeek <otto@drijf.net>
4  *
5  * Permission to use, copy, modify, and distribute this software for any
6  * purpose with or without fee is hereby granted, provided that the above
7  * copyright notice and this permission notice appear in all copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16  */
17 
18 /*
19  * Parts of this code, mainly the sub-page-sized chunk management code, are
20  * derived from the malloc implementation with the following license:
21  */
22 /*
23  * ----------------------------------------------------------------------------
24  * "THE BEER-WARE LICENSE" (Revision 42):
25  * <phk@FreeBSD.ORG> wrote this file.  As long as you retain this notice you
26  * can do whatever you want with this stuff. If we meet some day, and you think
27  * this stuff is worth it, you can buy me a beer in return.  Poul-Henning Kamp
28  * ----------------------------------------------------------------------------
29  */
30 
31 /* #define MALLOC_STATS */
32 
33 #include <sys/types.h>
34 #include <sys/param.h>
35 #include <sys/queue.h>
36 #include <sys/mman.h>
37 #include <sys/uio.h>
38 #include <errno.h>
39 #include <stdint.h>
40 #include <stdlib.h>
41 #include <string.h>
42 #include <stdio.h>
43 #include <unistd.h>
44 
45 #ifdef MALLOC_STATS
46 #include <sys/tree.h>
47 #include <fcntl.h>
48 #endif
49 
50 #include "thread_private.h"
51 
52 #if defined(__sparc__) && !defined(__sparcv9__)
53 #define MALLOC_PAGESHIFT	(13U)
54 #elif defined(__mips64__)
55 #define MALLOC_PAGESHIFT	(14U)
56 #else
57 #define MALLOC_PAGESHIFT	(PAGE_SHIFT)
58 #endif
59 
60 #define MALLOC_MINSHIFT		4
61 #define MALLOC_MAXSHIFT		(MALLOC_PAGESHIFT - 1)
62 #define MALLOC_PAGESIZE		(1UL << MALLOC_PAGESHIFT)
63 #define MALLOC_MINSIZE		(1UL << MALLOC_MINSHIFT)
64 #define MALLOC_PAGEMASK		(MALLOC_PAGESIZE - 1)
65 #define MASK_POINTER(p)		((void *)(((uintptr_t)(p)) & ~MALLOC_PAGEMASK))
66 
67 #define MALLOC_MAXCHUNK		(1 << MALLOC_MAXSHIFT)
68 #define MALLOC_MAXCACHE		256
69 #define MALLOC_DELAYED_CHUNKS	15	/* max of getrnibble() */
70 #define MALLOC_INITIAL_REGIONS	512
71 #define MALLOC_DEFAULT_CACHE	64
72 
73 /*
74  * When the P option is active, allocations between half a page and a
75  * whole page in size are moved towards the end of the page, subject to
76  * alignment constraints. This is the extra headroom we allow. Set to
77  * zero to be the most strict.
78  */
79 #define MALLOC_LEEWAY		0
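/*
 * Illustration (assuming 4 KB pages and no guard page): a 3000-byte
 * allocation is placed at offset (4096 - MALLOC_LEEWAY - 3000) & ~15,
 * i.e. at 1088 when MALLOC_LEEWAY is 0, leaving only 8 bytes of slack
 * before the end of the page.
 */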
80 
81 #define PAGEROUND(x)  (((x) + (MALLOC_PAGEMASK)) & ~MALLOC_PAGEMASK)
82 
83 /*
84  * What to use for Junk.  This is the byte value we use to fill with
85  * when the 'J' option is enabled. Use SOME_JUNK right after alloc,
86  * and SOME_FREEJUNK right before free.
87  */
88 #define SOME_JUNK		0xd0	/* as in "Duh" :-) */
89 #define SOME_FREEJUNK		0xdf
90 
91 #define MMAP(sz)	mmap(NULL, (size_t)(sz), PROT_READ | PROT_WRITE, \
92     MAP_ANON | MAP_PRIVATE, -1, (off_t) 0)
93 
94 #define MMAPA(a,sz)	mmap((a), (size_t)(sz), PROT_READ | PROT_WRITE, \
95     MAP_ANON | MAP_PRIVATE, -1, (off_t) 0)
96 
97 #define MQUERY(a, sz)	mquery((a), (size_t)(sz), PROT_READ | PROT_WRITE, \
98     MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, (off_t)0)
99 
100 struct region_info {
101 	void *p;		/* page; low bits used to mark chunks */
102 	uintptr_t size;		/* size for pages, or chunk_info pointer */
103 #ifdef MALLOC_STATS
104 	void *f;		/* where allocated from */
105 #endif
106 };
107 
108 LIST_HEAD(chunk_head, chunk_info);
109 
110 struct dir_info {
111 	u_int32_t canary1;
112 	struct region_info *r;		/* region slots */
113 	size_t regions_total;		/* number of region slots */
114 	size_t regions_free;		/* number of free slots */
115 					/* lists of free chunk info structs */
116 	struct chunk_head chunk_info_list[MALLOC_MAXSHIFT + 1];
117 					/* lists of chunks with free slots */
118 	struct chunk_head chunk_dir[MALLOC_MAXSHIFT + 1];
119 	size_t free_regions_size;	/* free pages cached */
120 					/* free pages cache */
121 	struct region_info free_regions[MALLOC_MAXCACHE];
122 					/* delayed free chunk slots */
123 	void *delayed_chunks[MALLOC_DELAYED_CHUNKS + 1];
124 	u_short chunk_start;
125 #ifdef MALLOC_STATS
126 	size_t inserts;
127 	size_t insert_collisions;
128 	size_t finds;
129 	size_t find_collisions;
130 	size_t deletes;
131 	size_t delete_moves;
132 	size_t cheap_realloc_tries;
133 	size_t cheap_reallocs;
134 #define STATS_INC(x) ((x)++)
135 #define STATS_ZERO(x) ((x) = 0)
136 #define STATS_SETF(x,y) ((x)->f = (y))
137 #else
138 #define STATS_INC(x)	/* nothing */
139 #define STATS_ZERO(x)	/* nothing */
140 #define STATS_SETF(x,y)	/* nothing */
141 #endif /* MALLOC_STATS */
142 	u_int32_t canary2;
143 };
144 #define DIR_INFO_RSZ	((sizeof(struct dir_info) + MALLOC_PAGEMASK) & \
145 			~MALLOC_PAGEMASK)
146 
147 /*
148  * This structure describes a page worth of chunks.
149  *
150  * How many bits per u_short in the bitmap
151  */
152 #define MALLOC_BITS		(NBBY * sizeof(u_short))
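/*
 * With 16-bit u_shorts MALLOC_BITS is 16.  A page of MALLOC_MINSIZE
 * chunks needs the most bitmap words: e.g. a 4 KB page of 16-byte
 * chunks has 256 chunks and therefore 16 words, which is why bits[]
 * below is over-allocated in alloc_chunk_info().
 */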
153 struct chunk_info {
154 	LIST_ENTRY(chunk_info) entries;
155 	void *page;			/* pointer to the page */
156 	u_int32_t canary;
157 	u_short size;			/* size of this page's chunks */
158 	u_short shift;			/* how far to shift for this size */
159 	u_short free;			/* how many free chunks */
160 	u_short total;			/* how many chunks */
161 					/* which chunks are free */
162 	u_short bits[1];
163 };
164 
165 struct malloc_readonly {
166 	struct dir_info *g_pool;	/* Main bookkeeping information */
167 	int	malloc_abort;		/* abort() on error */
168 	int	malloc_freenow;		/* Free quickly - disable delayed chunk randomisation */
169 	int	malloc_freeunmap;	/* mprotect free pages PROT_NONE? */
170 	int	malloc_hint;		/* call madvise on free pages?  */
171 	int	malloc_junk;		/* junk fill? */
172 	int	malloc_move;		/* move allocations to end of page? */
173 	int	malloc_realloc;		/* always realloc? */
174 	int	malloc_xmalloc;		/* xmalloc behaviour? */
175 	int	malloc_zero;		/* zero fill? */
176 	size_t	malloc_guard;		/* use guard pages after allocations? */
177 	u_int	malloc_cache;		/* free pages we cache */
178 #ifdef MALLOC_STATS
179 	int	malloc_stats;		/* dump statistics at end */
180 #endif
181 	u_int32_t malloc_canary;	/* Matched against ones in g_pool */
182 };
183 
184 /* This object is mapped PROT_READ after initialisation to prevent tampering */
185 static union {
186 	struct malloc_readonly mopts;
187 	u_char _pad[MALLOC_PAGESIZE];
188 } malloc_readonly __attribute__((aligned(MALLOC_PAGESIZE)));
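/*
 * The pad member sizes the union to MALLOC_PAGESIZE and the aligned
 * attribute puts it on that boundary, so the single mprotect(PROT_READ)
 * in omalloc_init() covers this object and nothing else.
 */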
189 #define mopts	malloc_readonly.mopts
190 #define g_pool	mopts.g_pool
191 
192 char		*malloc_options;	/* compile-time options */
193 
194 static char	*malloc_func;		/* current function */
195 static int	malloc_active;		/* status of malloc */
196 
197 static size_t	malloc_guarded;		/* bytes used for guards */
198 static size_t	malloc_used;		/* bytes allocated */
199 
200 static size_t rnibblesused;		/* random nibbles used */
201 static u_char rbytes[512];		/* random bytes */
202 static u_char getrnibble(void);
203 
204 extern char	*__progname;
205 
206 #ifdef MALLOC_STATS
207 void malloc_dump(int);
208 static void malloc_exit(void);
209 #define CALLER	__builtin_return_address(0)
210 #else
211 #define CALLER	NULL
212 #endif
213 
214 /* low bits of r->p determine the size: 0 means >= page size with r->size
215  * holding the real size; 1 means malloc(0); n > 1 means 1 << (n-1) byte chunks
216  */
217 #define REALSIZE(sz, r) 					\
218 	(sz) = (uintptr_t)(r)->p & MALLOC_PAGEMASK,		\
219 	(sz) = ((sz) == 0 ? (r)->size : ((sz) == 1 ? 0 : (1 << ((sz)-1))))
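/* Example: stored low bits of 5 decode to chunks of 1 << 4 == 16 bytes. */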
220 
221 static inline size_t
222 hash(void *p)
223 {
224 	size_t sum;
225 	union {
226 		uintptr_t p;
227 		unsigned short a[sizeof(void *) / sizeof(short)];
228 	} u;
229 	u.p = (uintptr_t)p >> MALLOC_PAGESHIFT;
230 	sum = u.a[0];
231 	sum = (sum << 7) - sum + u.a[1];
232 #ifdef __LP64__
233 	sum = (sum << 7) - sum + u.a[2];
234 	sum = (sum << 7) - sum + u.a[3];
235 #endif
236 	return sum;
237 }
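/*
 * This folds the page number ((uintptr_t)p >> MALLOC_PAGESHIFT) into the
 * table index 16 bits at a time; (sum << 7) - sum is sum * 127, i.e. a
 * simple multiplicative hash.
 */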
238 
239 static void
240 wrterror(char *msg, void *p)
241 {
242 	char		*q = " error: ";
243 	struct iovec	iov[6];
244 	char		buf[20];
245 	int		saved_errno = errno;
246 
247 	iov[0].iov_base = __progname;
248 	iov[0].iov_len = strlen(__progname);
249 	iov[1].iov_base = malloc_func;
250 	iov[1].iov_len = strlen(malloc_func);
251 	iov[2].iov_base = q;
252 	iov[2].iov_len = strlen(q);
253 	iov[3].iov_base = msg;
254 	iov[3].iov_len = strlen(msg);
255 	iov[4].iov_base = buf;
256 	if (p == NULL)
257 		iov[4].iov_len = 0;
258 	else {
259 		snprintf(buf, sizeof(buf), " %p", p);
260 		iov[4].iov_len = strlen(buf);
261 	}
262 	iov[5].iov_base = "\n";
263 	iov[5].iov_len = 1;
264 	writev(STDERR_FILENO, iov, 6);
265 
266 #ifdef MALLOC_STATS
267 	if (mopts.malloc_stats)
268 		malloc_dump(STDERR_FILENO);
269 #endif /* MALLOC_STATS */
270 
271 	errno = saved_errno;
272 	if (mopts.malloc_abort)
273 		abort();
274 }
275 
276 static void
277 rbytes_init(void)
278 {
279 	arc4random_buf(rbytes, sizeof(rbytes));
280 	rnibblesused = 0;
281 }
282 
283 static inline u_char
284 getrnibble(void)
285 {
286 	u_char x;
287 
288 	if (rnibblesused >= 2 * sizeof(rbytes))
289 		rbytes_init();
290 	x = rbytes[rnibblesused++ / 2];
291 	return (rnibblesused & 1 ? x & 0xf : x >> 4);
292 }
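/*
 * Each byte of rbytes supplies two nibbles, so the 512-byte pool yields
 * 1024 values in the range 0..15 before rbytes_init() refills it.
 */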
293 
294 /*
295  * Cache maintenance. We keep at most malloc_cache pages cached.
296  * If the cache is becoming full, unmap pages in the cache for real,
297  * and then add the region to the cache.
298  * As opposed to the regular region data structure, the sizes in the
299  * cache are in MALLOC_PAGESIZE units.
300  */
301 static void
302 unmap(struct dir_info *d, void *p, size_t sz)
303 {
304 	size_t psz = sz >> MALLOC_PAGESHIFT;
305 	size_t rsz, tounmap;
306 	struct region_info *r;
307 	u_int i, offset;
308 
309 	if (sz != PAGEROUND(sz)) {
310 		wrterror("munmap round", NULL);
311 		return;
312 	}
313 
314 	if (psz > mopts.malloc_cache) {
315 		if (munmap(p, sz))
316 			wrterror("munmap", p);
317 		malloc_used -= sz;
318 		return;
319 	}
320 	tounmap = 0;
321 	rsz = mopts.malloc_cache - d->free_regions_size;
322 	if (psz > rsz)
323 		tounmap = psz - rsz;
324 	offset = getrnibble() + (getrnibble() << 4);
325 	for (i = 0; tounmap > 0 && i < mopts.malloc_cache; i++) {
326 		r = &d->free_regions[(i + offset) & (mopts.malloc_cache - 1)];
327 		if (r->p != NULL) {
328 			rsz = r->size << MALLOC_PAGESHIFT;
329 			if (munmap(r->p, rsz))
330 				wrterror("munmap", r->p);
331 			r->p = NULL;
332 			if (tounmap > r->size)
333 				tounmap -= r->size;
334 			else
335 				tounmap = 0;
336 			d->free_regions_size -= r->size;
337 			r->size = 0;
338 			malloc_used -= rsz;
339 		}
340 	}
341 	if (tounmap > 0)
342 		wrterror("malloc cache underflow", NULL);
343 	for (i = 0; i < mopts.malloc_cache; i++) {
344 		r = &d->free_regions[(i + offset) & (mopts.malloc_cache - 1)];
345 		if (r->p == NULL) {
346 			if (mopts.malloc_hint)
347 				madvise(p, sz, MADV_FREE);
348 			if (mopts.malloc_freeunmap)
349 				mprotect(p, sz, PROT_NONE);
350 			r->p = p;
351 			r->size = psz;
352 			d->free_regions_size += psz;
353 			break;
354 		}
355 	}
356 	if (i == mopts.malloc_cache)
357 		wrterror("malloc free slot lost", NULL);
358 	if (d->free_regions_size > mopts.malloc_cache)
359 		wrterror("malloc cache overflow", NULL);
360 }
361 
362 static void
363 zapcacheregion(struct dir_info *d, void *p, size_t len)
364 {
365 	u_int i;
366 	struct region_info *r;
367 	size_t rsz;
368 
369 	for (i = 0; i < mopts.malloc_cache; i++) {
370 		r = &d->free_regions[i];
371 		if (r->p >= p && r->p <= (void *)((char *)p + len)) {
372 			rsz = r->size << MALLOC_PAGESHIFT;
373 			if (munmap(r->p, rsz))
374 				wrterror("munmap", r->p);
375 			r->p = NULL;
376 			d->free_regions_size -= r->size;
377 			r->size = 0;
378 			malloc_used -= rsz;
379 		}
380 	}
381 }
382 
383 static void *
384 map(struct dir_info *d, size_t sz, int zero_fill)
385 {
386 	size_t psz = sz >> MALLOC_PAGESHIFT;
387 	struct region_info *r, *big = NULL;
388 	u_int i, offset;
389 	void *p;
390 
391 	if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)(uintptr_t)d) ||
392 	    d->canary1 != ~d->canary2)
393 		wrterror("internal struct corrupt", NULL);
394 	if (sz != PAGEROUND(sz)) {
395 		wrterror("map round", NULL);
396 		return MAP_FAILED;
397 	}
398 	if (psz > d->free_regions_size) {
399 		p = MMAP(sz);
400 		if (p != MAP_FAILED)
401 			malloc_used += sz;
402 		/* zero fill not needed */
403 		return p;
404 	}
405 	offset = getrnibble() + (getrnibble() << 4);
406 	for (i = 0; i < mopts.malloc_cache; i++) {
407 		r = &d->free_regions[(i + offset) & (mopts.malloc_cache - 1)];
408 		if (r->p != NULL) {
409 			if (r->size == psz) {
410 				p = r->p;
411 				if (mopts.malloc_freeunmap)
412 					mprotect(p, sz, PROT_READ | PROT_WRITE);
413 				if (mopts.malloc_hint)
414 					madvise(p, sz, MADV_NORMAL);
415 				r->p = NULL;
416 				r->size = 0;
417 				d->free_regions_size -= psz;
418 				if (zero_fill)
419 					memset(p, 0, sz);
420 				else if (mopts.malloc_junk &&
421 				    mopts.malloc_freeunmap)
422 					memset(p, SOME_FREEJUNK, sz);
423 				return p;
424 			} else if (r->size > psz)
425 				big = r;
426 		}
427 	}
428 	if (big != NULL) {
429 		r = big;
430 		p = (char *)r->p + ((r->size - psz) << MALLOC_PAGESHIFT);
431 		if (mopts.malloc_freeunmap)
432 			mprotect(p, sz, PROT_READ | PROT_WRITE);
433 		if (mopts.malloc_hint)
434 			madvise(p, sz, MADV_NORMAL);
435 		r->size -= psz;
436 		d->free_regions_size -= psz;
437 		if (zero_fill)
438 			memset(p, 0, sz);
439 		else if (mopts.malloc_junk && mopts.malloc_freeunmap)
440 			memset(p, SOME_FREEJUNK, sz);
441 		return p;
442 	}
443 	p = MMAP(sz);
444 	if (p != MAP_FAILED)
445 		malloc_used += sz;
446 	if (d->free_regions_size > mopts.malloc_cache)
447 		wrterror("malloc cache", NULL);
448 	/* zero fill not needed */
449 	return p;
450 }
451 
452 /*
453  * Initialize a dir_info, which should have been cleared by caller
454  */
455 static int
456 omalloc_init(struct dir_info **dp)
457 {
458 	char *p, b[64];
459 	int i, j;
460 	size_t d_avail, regioninfo_size;
461 	struct dir_info *d;
462 
463 	rbytes_init();
464 
465 	/*
466 	 * Default options
467 	 */
468 	mopts.malloc_abort = 1;
469 	mopts.malloc_move = 1;
470 	mopts.malloc_cache = MALLOC_DEFAULT_CACHE;
471 
472 	for (i = 0; i < 3; i++) {
473 		switch (i) {
474 		case 0:
475 			j = readlink("/etc/malloc.conf", b, sizeof b - 1);
476 			if (j <= 0)
477 				continue;
478 			b[j] = '\0';
479 			p = b;
480 			break;
481 		case 1:
482 			if (issetugid() == 0)
483 				p = getenv("MALLOC_OPTIONS");
484 			else
485 				continue;
486 			break;
487 		case 2:
488 			p = malloc_options;
489 			break;
490 		default:
491 			p = NULL;
492 		}
493 
494 		for (; p != NULL && *p != '\0'; p++) {
495 			switch (*p) {
496 			case '>':
497 				mopts.malloc_cache <<= 1;
498 				if (mopts.malloc_cache > MALLOC_MAXCACHE)
499 					mopts.malloc_cache = MALLOC_MAXCACHE;
500 				break;
501 			case '<':
502 				mopts.malloc_cache >>= 1;
503 				break;
504 			case 'a':
505 				mopts.malloc_abort = 0;
506 				break;
507 			case 'A':
508 				mopts.malloc_abort = 1;
509 				break;
510 #ifdef MALLOC_STATS
511 			case 'd':
512 				mopts.malloc_stats = 0;
513 				break;
514 			case 'D':
515 				mopts.malloc_stats = 1;
516 				break;
517 #endif /* MALLOC_STATS */
518 			case 'f':
519 				mopts.malloc_freenow = 0;
520 				mopts.malloc_freeunmap = 0;
521 				break;
522 			case 'F':
523 				mopts.malloc_freenow = 1;
524 				mopts.malloc_freeunmap = 1;
525 				break;
526 			case 'g':
527 				mopts.malloc_guard = 0;
528 				break;
529 			case 'G':
530 				mopts.malloc_guard = MALLOC_PAGESIZE;
531 				break;
532 			case 'h':
533 				mopts.malloc_hint = 0;
534 				break;
535 			case 'H':
536 				mopts.malloc_hint = 1;
537 				break;
538 			case 'j':
539 				mopts.malloc_junk = 0;
540 				break;
541 			case 'J':
542 				mopts.malloc_junk = 1;
543 				break;
544 			case 'n':
545 			case 'N':
546 				break;
547 			case 'p':
548 				mopts.malloc_move = 0;
549 				break;
550 			case 'P':
551 				mopts.malloc_move = 1;
552 				break;
553 			case 'r':
554 				mopts.malloc_realloc = 0;
555 				break;
556 			case 'R':
557 				mopts.malloc_realloc = 1;
558 				break;
559 			case 's':
560 				mopts.malloc_freeunmap = mopts.malloc_junk = 0;
561 				mopts.malloc_guard = 0;
562 				mopts.malloc_cache = MALLOC_DEFAULT_CACHE;
563 				break;
564 			case 'S':
565 				mopts.malloc_freeunmap = mopts.malloc_junk = 1;
566 				mopts.malloc_guard = MALLOC_PAGESIZE;
567 				mopts.malloc_cache = 0;
568 				break;
569 			case 'u':
570 				mopts.malloc_freeunmap = 0;
571 				break;
572 			case 'U':
573 				mopts.malloc_freeunmap = 1;
574 				break;
575 			case 'x':
576 				mopts.malloc_xmalloc = 0;
577 				break;
578 			case 'X':
579 				mopts.malloc_xmalloc = 1;
580 				break;
581 			case 'z':
582 				mopts.malloc_zero = 0;
583 				break;
584 			case 'Z':
585 				mopts.malloc_zero = 1;
586 				break;
587 			default: {
588 				static const char q[] = "malloc() warning: "
589 				    "unknown char in MALLOC_OPTIONS\n";
590 				write(STDERR_FILENO, q, sizeof(q) - 1);
591 				break;
592 			}
593 			}
594 		}
595 	}
596 
597 	/*
598 	 * We want junk in the entire allocation, and zero only in the part
599 	 * the user asked for.
600 	 */
601 	if (mopts.malloc_zero)
602 		mopts.malloc_junk = 1;
603 
604 #ifdef MALLOC_STATS
605 	if (mopts.malloc_stats && (atexit(malloc_exit) == -1)) {
606 		static const char q[] = "malloc() warning: atexit(2) failed."
607 		    " Will not be able to dump stats on exit\n";
608 		write(STDERR_FILENO, q, sizeof(q) - 1);
609 	}
610 #endif /* MALLOC_STATS */
611 
612 	while ((mopts.malloc_canary = arc4random()) == 0)
613 		;
614 
615 	/*
616 	 * Allocate dir_info with a guard page on either side. Also
617 	 * randomise offset inside the page at which the dir_info
618 	 * lies (subject to alignment by 1 << MALLOC_MINSHIFT)
619 	 */
620 	if ((p = MMAP(DIR_INFO_RSZ + (MALLOC_PAGESIZE * 2))) == MAP_FAILED)
621 		return -1;
622 	mprotect(p, MALLOC_PAGESIZE, PROT_NONE);
623 	mprotect(p + MALLOC_PAGESIZE + DIR_INFO_RSZ,
624 	    MALLOC_PAGESIZE, PROT_NONE);
625 	d_avail = (DIR_INFO_RSZ - sizeof(*d)) >> MALLOC_MINSHIFT;
626 	d = (struct dir_info *)(p + MALLOC_PAGESIZE +
627 	    (arc4random_uniform(d_avail) << MALLOC_MINSHIFT));
628 
629 	d->regions_free = d->regions_total = MALLOC_INITIAL_REGIONS;
630 	regioninfo_size = d->regions_total * sizeof(struct region_info);
631 	d->r = MMAP(regioninfo_size);
632 	if (d->r == MAP_FAILED) {
633 		wrterror("malloc init mmap failed", NULL);
634 		d->regions_total = 0;
635 		return 1;
636 	}
637 	for (i = 0; i <= MALLOC_MAXSHIFT; i++) {
638 		LIST_INIT(&d->chunk_info_list[i]);
639 		LIST_INIT(&d->chunk_dir[i]);
640 	}
641 	malloc_used += regioninfo_size;
642 	d->canary1 = mopts.malloc_canary ^ (u_int32_t)(uintptr_t)d;
643 	d->canary2 = ~d->canary1;
644 
645 	*dp = d;
646 
647 	/*
648 	 * Options have been set and will never be reset.
649 	 * Prevent further tampering with them.
650 	 */
651 	if (((uintptr_t)&malloc_readonly & MALLOC_PAGEMASK) == 0)
652 		mprotect(&malloc_readonly, sizeof(malloc_readonly), PROT_READ);
653 
654 	return 0;
655 }
656 
657 static int
658 omalloc_grow(struct dir_info *d)
659 {
660 	size_t newtotal;
661 	size_t newsize;
662 	size_t mask;
663 	size_t i;
664 	struct region_info *p;
665 
666 	if (d->regions_total > SIZE_MAX / sizeof(struct region_info) / 2 )
667 		return 1;
668 
669 	newtotal = d->regions_total * 2;
670 	newsize = newtotal * sizeof(struct region_info);
671 	mask = newtotal - 1;
672 
673 	p = MMAP(newsize);
674 	if (p == MAP_FAILED)
675 		return 1;
676 
677 	malloc_used += newsize;
678 	memset(p, 0, newsize);
679 	STATS_ZERO(d->inserts);
680 	STATS_ZERO(d->insert_collisions);
681 	for (i = 0; i < d->regions_total; i++) {
682 		void *q = d->r[i].p;
683 		if (q != NULL) {
684 			size_t index = hash(q) & mask;
685 			STATS_INC(d->inserts);
686 			while (p[index].p != NULL) {
687 				index = (index - 1) & mask;
688 				STATS_INC(d->insert_collisions);
689 			}
690 			p[index] = d->r[i];
691 		}
692 	}
693 	/* keep pages containing meta info from ending up in the cache */
694 	if (munmap(d->r, d->regions_total * sizeof(struct region_info)))
695 		wrterror("munmap", d->r);
696 	else
697 		malloc_used -= d->regions_total * sizeof(struct region_info);
698 	d->regions_free = d->regions_free + d->regions_total;
699 	d->regions_total = newtotal;
700 	d->r = p;
701 	return 0;
702 }
703 
704 static struct chunk_info *
705 alloc_chunk_info(struct dir_info *d, int bits)
706 {
707 	struct chunk_info *p;
708 	size_t size, count;
709 
710 	if (bits == 0)
711 		count = MALLOC_PAGESIZE / MALLOC_MINSIZE;
712 	else
713 		count = MALLOC_PAGESIZE >> bits;
714 
715 	size = howmany(count, MALLOC_BITS);
716 	size = sizeof(struct chunk_info) + (size - 1) * sizeof(u_short);
717 	size = ALIGN(size);
718 
719 	if (LIST_EMPTY(&d->chunk_info_list[bits])) {
720 		void *q;
721 		int i;
722 
723 		q = MMAP(MALLOC_PAGESIZE);
724 		if (q == MAP_FAILED)
725 			return NULL;
726 		malloc_used += MALLOC_PAGESIZE;
727 		count = MALLOC_PAGESIZE / size;
728 		for (i = 0; i < count; i++, q += size)
729 			LIST_INSERT_HEAD(&d->chunk_info_list[bits],
730 			    (struct chunk_info *)q, entries);
731 	}
732 	p = LIST_FIRST(&d->chunk_info_list[bits]);
733 	LIST_REMOVE(p, entries);
734 	memset(p, 0, size);
735 	p->canary = d->canary1;
736 	return p;
737 }
738 
739 
740 /*
741  * The hashtable uses the assumption that p is never NULL. This holds since
742  * non-MAP_FIXED mappings with hint 0 start at BRKSIZ.
743  */
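/*
 * Regions live in an open-addressed hash table: collisions probe
 * downwards (index = (index - 1) & mask), and omalloc_grow() doubles
 * the table once fewer than a quarter of the slots are free.
 */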
744 static int
745 insert(struct dir_info *d, void *p, size_t sz, void *f)
746 {
747 	size_t index;
748 	size_t mask;
749 	void *q;
750 
751 	if (d->regions_free * 4 < d->regions_total) {
752 		if (omalloc_grow(d))
753 			return 1;
754 	}
755 	mask = d->regions_total - 1;
756 	index = hash(p) & mask;
757 	q = d->r[index].p;
758 	STATS_INC(d->inserts);
759 	while (q != NULL) {
760 		index = (index - 1) & mask;
761 		q = d->r[index].p;
762 		STATS_INC(d->insert_collisions);
763 	}
764 	d->r[index].p = p;
765 	d->r[index].size = sz;
766 #ifdef MALLOC_STATS
767 	d->r[index].f = f;
768 #endif
769 	d->regions_free--;
770 	return 0;
771 }
772 
773 static struct region_info *
774 find(struct dir_info *d, void *p)
775 {
776 	size_t index;
777 	size_t mask = d->regions_total - 1;
778 	void *q, *r;
779 
780 	if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)(uintptr_t)d) ||
781 	    d->canary1 != ~d->canary2)
782 		wrterror("internal struct corrupt", NULL);
783 	p = MASK_POINTER(p);
784 	index = hash(p) & mask;
785 	r = d->r[index].p;
786 	q = MASK_POINTER(r);
787 	STATS_INC(d->finds);
788 	while (q != p && r != NULL) {
789 		index = (index - 1) & mask;
790 		r = d->r[index].p;
791 		q = MASK_POINTER(r);
792 		STATS_INC(d->find_collisions);
793 	}
794 	return (q == p && r != NULL) ? &d->r[index] : NULL;
795 }
796 
797 static void
798 delete(struct dir_info *d, struct region_info *ri)
799 {
800 	/* algorithm R, Knuth Vol III section 6.4 */
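	/*
	 * After clearing a slot, scan back along the probe sequence and
	 * relocate any entry whose lookup would otherwise stop early at
	 * the newly created hole.
	 */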
801 	size_t mask = d->regions_total - 1;
802 	size_t i, j, r;
803 
804 	if (d->regions_total & (d->regions_total - 1))
805 		wrterror("regions_total not 2^x", NULL);
806 	d->regions_free++;
807 	STATS_INC(g_pool->deletes);
808 
809 	i = ri - d->r;
810 	for (;;) {
811 		d->r[i].p = NULL;
812 		d->r[i].size = 0;
813 		j = i;
814 		for (;;) {
815 			i = (i - 1) & mask;
816 			if (d->r[i].p == NULL)
817 				return;
818 			r = hash(d->r[i].p) & mask;
819 			if ((i <= r && r < j) || (r < j && j < i) ||
820 			    (j < i && i <= r))
821 				continue;
822 			d->r[j] = d->r[i];
823 			STATS_INC(g_pool->delete_moves);
824 			break;
825 		}
826 
827 	}
828 }
829 
830 /*
831  * Allocate a page of chunks
832  */
833 static struct chunk_info *
834 omalloc_make_chunks(struct dir_info *d, int bits)
835 {
836 	struct chunk_info *bp;
837 	void		*pp;
838 	int		i, k;
839 
840 	/* Allocate a new bucket */
841 	pp = map(d, MALLOC_PAGESIZE, 0);
842 	if (pp == MAP_FAILED)
843 		return NULL;
844 
845 	bp = alloc_chunk_info(d, bits);
846 	if (bp == NULL) {
847 		unmap(d, pp, MALLOC_PAGESIZE);
848 		return NULL;
849 	}
850 
851 	/* memory protect the page allocated in the malloc(0) case */
852 	if (bits == 0) {
853 		bp->size = 0;
854 		bp->shift = 1;
855 		i = MALLOC_MINSIZE - 1;
856 		while (i >>= 1)
857 			bp->shift++;
858 		bp->total = bp->free = MALLOC_PAGESIZE >> bp->shift;
859 		bp->page = pp;
860 
861 		k = mprotect(pp, MALLOC_PAGESIZE, PROT_NONE);
862 		if (k < 0) {
863 			unmap(d, pp, MALLOC_PAGESIZE);
864 			LIST_INSERT_HEAD(&d->chunk_info_list[0], bp, entries);
865 			return NULL;
866 		}
867 	} else {
868 		bp->size = 1U << bits;
869 		bp->shift = bits;
870 		bp->total = bp->free = MALLOC_PAGESIZE >> bits;
871 		bp->page = pp;
872 	}
873 
874 	/* set all valid bits in the bitmap */
875 	k = bp->total;
876 	i = 0;
877 
878 	/* Do a bunch at a time */
879 	for (; (k - i) >= MALLOC_BITS; i += MALLOC_BITS)
880 		bp->bits[i / MALLOC_BITS] = (u_short)~0U;
881 
882 	for (; i < k; i++)
883 		bp->bits[i / MALLOC_BITS] |= (u_short)1U << (i % MALLOC_BITS);
884 
885 	LIST_INSERT_HEAD(&d->chunk_dir[bits], bp, entries);
886 
887 	bits++;
888 	if ((uintptr_t)pp & bits)
889 		wrterror("pp & bits", pp);
890 
891 	insert(d, (void *)((uintptr_t)pp | bits), (uintptr_t)bp, NULL);
892 	return bp;
893 }
894 
895 
896 /*
897  * Allocate a chunk
898  */
899 static void *
900 malloc_bytes(struct dir_info *d, size_t size, void *f)
901 {
902 	int		i, j;
903 	size_t		k;
904 	u_short		u, *lp;
905 	struct chunk_info *bp;
906 
907 	if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)(uintptr_t)d) ||
908 	    d->canary1 != ~d->canary2)
909 		wrterror("internal struct corrupt", NULL);
910 	/* Don't bother with anything less than this */
911 	/* unless we have a malloc(0) request */
912 	if (size != 0 && size < MALLOC_MINSIZE)
913 		size = MALLOC_MINSIZE;
914 
915 	/* Find the right bucket */
916 	if (size == 0)
917 		j = 0;
918 	else {
919 		j = MALLOC_MINSHIFT;
920 		i = (size - 1) >> (MALLOC_MINSHIFT - 1);
921 		while (i >>= 1)
922 			j++;
923 	}
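	/*
	 * For non-zero sizes 1 << j is now the chunk size, the smallest
	 * power of two >= size (size was already bumped to at least
	 * MALLOC_MINSIZE); e.g. any size in 17..32 maps to j == 5.
	 */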
924 
925 	/* If it's empty, allocate another page of chunks of that size */
926 	if (LIST_EMPTY(&d->chunk_dir[j])) {
927 		bp = omalloc_make_chunks(d, j);
928 		if (bp == NULL)
929 			return NULL;
930 	} else
931 		bp = LIST_FIRST(&d->chunk_dir[j]);
932 
933 	if (bp->canary != d->canary1)
934 		wrterror("chunk info corrupted", NULL);
935 
936 	i = d->chunk_start;
937 	if (bp->free > 1)
938 		i += getrnibble();
939 	if (i >= bp->total)
940 		i &= bp->total - 1;
941 	for (;;) {
942 		for (;;) {
943 			lp = &bp->bits[i / MALLOC_BITS];
944 			if (!*lp) {
945 				i += MALLOC_BITS;
946 				i &= ~(MALLOC_BITS - 1);
947 				if (i >= bp->total)
948 					i = 0;
949 			} else
950 				break;
951 		}
952 		k = i % MALLOC_BITS;
953 		u = 1 << k;
954 		if (*lp & u)
955 			break;
956 		if (++i >= bp->total)
957 			i = 0;
958 	}
959 	d->chunk_start += i + 1;
960 #ifdef MALLOC_STATS
961 	if (i == 0) {
962 		struct region_info *r = find(d, bp->page);
963 		r->f = f;
964 	}
965 #endif
966 
967 	*lp ^= u;
968 
969 	/* If there are no more free chunks, remove from the free-list */
970 	if (!--bp->free)
971 		LIST_REMOVE(bp, entries);
972 
973 	/* Adjust to the real offset of that chunk */
974 	k += (lp - bp->bits) * MALLOC_BITS;
975 	k <<= bp->shift;
976 
977 	if (mopts.malloc_junk && bp->size > 0)
978 		memset((char *)bp->page + k, SOME_JUNK, bp->size);
979 	return ((char *)bp->page + k);
980 }
981 
982 
983 /*
984  * Free a chunk, and possibly the page it's on, if the page becomes empty.
985  */
986 static void
987 free_bytes(struct dir_info *d, struct region_info *r, void *ptr)
988 {
989 	struct chunk_head *mp;
990 	struct chunk_info *info;
991 	int i;
992 
993 	info = (struct chunk_info *)r->size;
994 	if (info->canary != d->canary1)
995 		wrterror("chunk info corrupted", NULL);
996 
997 	/* Find the chunk number on the page */
998 	i = ((uintptr_t)ptr & MALLOC_PAGEMASK) >> info->shift;
999 
1000 	if ((uintptr_t)ptr & ((1U << (info->shift)) - 1)) {
1001 		wrterror("modified chunk-pointer", ptr);
1002 		return;
1003 	}
1004 	if (info->bits[i / MALLOC_BITS] & (1U << (i % MALLOC_BITS))) {
1005 		wrterror("chunk is already free", ptr);
1006 		return;
1007 	}
1008 
1009 	info->bits[i / MALLOC_BITS] |= 1U << (i % MALLOC_BITS);
1010 	info->free++;
1011 
1012 	if (info->size != 0)
1013 		mp = d->chunk_dir + info->shift;
1014 	else
1015 		mp = d->chunk_dir;
1016 
1017 	if (info->free == 1) {
1018 		/* Page became non-full */
1019 		LIST_INSERT_HEAD(mp, info, entries);
1020 		return;
1021 	}
1022 	if (info->free != info->total)
1023 		return;
1024 
1025 	LIST_REMOVE(info, entries);
1026 
1027 	if (info->size == 0 && !mopts.malloc_freeunmap)
1028 		mprotect(info->page, MALLOC_PAGESIZE, PROT_READ | PROT_WRITE);
1029 	unmap(d, info->page, MALLOC_PAGESIZE);
1030 
1031 	delete(d, r);
1032 	if (info->size != 0)
1033 		mp = &d->chunk_info_list[info->shift];
1034 	else
1035 		mp = &d->chunk_info_list[0];
1036 	LIST_INSERT_HEAD(mp, info, entries);
1037 }
1038 
1039 
1040 
1041 static void *
1042 omalloc(size_t sz, int zero_fill, void *f)
1043 {
1044 	void *p;
1045 	size_t psz;
1046 
1047 	if (sz > MALLOC_MAXCHUNK) {
1048 		if (sz >= SIZE_MAX - mopts.malloc_guard - MALLOC_PAGESIZE) {
1049 			errno = ENOMEM;
1050 			return NULL;
1051 		}
1052 		sz += mopts.malloc_guard;
1053 		psz = PAGEROUND(sz);
1054 		p = map(g_pool, psz, zero_fill);
1055 		if (p == MAP_FAILED) {
1056 			errno = ENOMEM;
1057 			return NULL;
1058 		}
1059 		if (insert(g_pool, p, sz, f)) {
1060 			unmap(g_pool, p, psz);
1061 			errno = ENOMEM;
1062 			return NULL;
1063 		}
1064 		if (mopts.malloc_guard) {
1065 			if (mprotect((char *)p + psz - mopts.malloc_guard,
1066 			    mopts.malloc_guard, PROT_NONE))
1067 				wrterror("mprotect", NULL);
1068 			malloc_guarded += mopts.malloc_guard;
1069 		}
1070 
1071 		if (mopts.malloc_move &&
1072 		    sz - mopts.malloc_guard < MALLOC_PAGESIZE -
1073 		    MALLOC_LEEWAY) {
1074 			/* fill whole allocation */
1075 			if (mopts.malloc_junk)
1076 				memset(p, SOME_JUNK, psz - mopts.malloc_guard);
1077 			/* shift towards the end */
1078 			p = ((char *)p) + ((MALLOC_PAGESIZE - MALLOC_LEEWAY -
1079 			    (sz - mopts.malloc_guard)) & ~(MALLOC_MINSIZE-1));
1080 			/* zero the user part if needed; the junk fill above overwrote it */
1081 			if (zero_fill && mopts.malloc_junk)
1082 				memset(p, 0, sz - mopts.malloc_guard);
1083 		} else {
1084 			if (mopts.malloc_junk) {
1085 				if (zero_fill)
1086 					memset((char *)p + sz - mopts.malloc_guard,
1087 					    SOME_JUNK, psz - sz);
1088 				else
1089 					memset(p, SOME_JUNK,
1090 					    psz - mopts.malloc_guard);
1091 			}
1092 		}
1093 
1094 	} else {
1095 		/* takes care of SOME_JUNK */
1096 		p = malloc_bytes(g_pool, sz, f);
1097 		if (zero_fill && p != NULL && sz > 0)
1098 			memset(p, 0, sz);
1099 	}
1100 
1101 	return p;
1102 }
1103 
1104 /*
1105  * Common function for handling recursion.  Only
1106  * print the error message once, to avoid making the problem
1107  * potentially worse.
1108  */
1109 static void
1110 malloc_recurse(void)
1111 {
1112 	static int noprint;
1113 
1114 	if (noprint == 0) {
1115 		noprint = 1;
1116 		wrterror("recursive call", NULL);
1117 	}
1118 	malloc_active--;
1119 	_MALLOC_UNLOCK();
1120 	errno = EDEADLK;
1121 }
1122 
1123 static int
1124 malloc_init(void)
1125 {
1126 	if (omalloc_init(&g_pool)) {
1127 		_MALLOC_UNLOCK();
1128 		if (mopts.malloc_xmalloc)
1129 			wrterror("out of memory", NULL);
1130 		errno = ENOMEM;
1131 		return -1;
1132 	}
1133 	return 0;
1134 }
1135 
1136 void *
1137 malloc(size_t size)
1138 {
1139 	void *r;
1140 	int saved_errno = errno;
1141 
1142 	_MALLOC_LOCK();
1143 	malloc_func = " in malloc():";
1144 	if (g_pool == NULL) {
1145 		if (malloc_init() != 0)
1146 			return NULL;
1147 	}
1148 	if (malloc_active++) {
1149 		malloc_recurse();
1150 		return NULL;
1151 	}
1152 	r = omalloc(size, mopts.malloc_zero, CALLER);
1153 	malloc_active--;
1154 	_MALLOC_UNLOCK();
1155 	if (r == NULL && mopts.malloc_xmalloc) {
1156 		wrterror("out of memory", NULL);
1157 		errno = ENOMEM;
1158 	}
1159 	if (r != NULL)
1160 		errno = saved_errno;
1161 	return r;
1162 }
1163 
1164 static void
1165 ofree(void *p)
1166 {
1167 	struct region_info *r;
1168 	size_t sz;
1169 
1170 	r = find(g_pool, p);
1171 	if (r == NULL) {
1172 		wrterror("bogus pointer (double free?)", p);
1173 		return;
1174 	}
1175 	REALSIZE(sz, r);
1176 	if (sz > MALLOC_MAXCHUNK) {
1177 		if (sz - mopts.malloc_guard >= MALLOC_PAGESIZE -
1178 		    MALLOC_LEEWAY) {
1179 			if (r->p != p) {
1180 				wrterror("bogus pointer", p);
1181 				return;
1182 			}
1183 		} else {
1184 #if notyetbecause_of_realloc
1185 			/* shifted towards the end */
1186 			if (p != ((char *)r->p) + ((MALLOC_PAGESIZE -
1187 			    MALLOC_MINSIZE - sz - mopts.malloc_guard) &
1188 			    ~(MALLOC_MINSIZE-1))) {
1189 			}
1190 #endif
1191 			p = r->p;
1192 		}
1193 		if (mopts.malloc_guard) {
1194 			if (sz < mopts.malloc_guard)
1195 				wrterror("guard size", NULL);
1196 			if (!mopts.malloc_freeunmap) {
1197 				if (mprotect((char *)p + PAGEROUND(sz) -
1198 				    mopts.malloc_guard, mopts.malloc_guard,
1199 				    PROT_READ | PROT_WRITE))
1200 					wrterror("mprotect", NULL);
1201 			}
1202 			malloc_guarded -= mopts.malloc_guard;
1203 		}
1204 		if (mopts.malloc_junk && !mopts.malloc_freeunmap)
1205 			memset(p, SOME_FREEJUNK,
1206 			    PAGEROUND(sz) - mopts.malloc_guard);
1207 		unmap(g_pool, p, PAGEROUND(sz));
1208 		delete(g_pool, r);
1209 	} else {
1210 		void *tmp;
1211 		int i;
1212 
1213 		if (mopts.malloc_junk && sz > 0)
1214 			memset(p, SOME_FREEJUNK, sz);
1215 		if (!mopts.malloc_freenow) {
1216 			i = getrnibble();
1217 			tmp = p;
1218 			p = g_pool->delayed_chunks[i];
1219 			g_pool->delayed_chunks[i] = tmp;
1220 		}
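		/*
		 * The freed chunk was swapped into a random delayed_chunks
		 * slot above; whatever previously occupied that slot (if
		 * anything) is what actually gets freed now, delaying reuse.
		 */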
1221 		if (p != NULL) {
1222 			r = find(g_pool, p);
1223 			if (r == NULL) {
1224 				wrterror("bogus pointer (double free?)", p);
1225 				return;
1226 			}
1227 			free_bytes(g_pool, r, p);
1228 		}
1229 	}
1230 }
1231 
1232 void
1233 free(void *ptr)
1234 {
1235 	int saved_errno = errno;
1236 
1237 	/* This is legal. */
1238 	if (ptr == NULL)
1239 		return;
1240 
1241 	_MALLOC_LOCK();
1242 	malloc_func = " in free():";
1243 	if (g_pool == NULL) {
1244 		_MALLOC_UNLOCK();
1245 		wrterror("free() called before allocation", NULL);
1246 		return;
1247 	}
1248 	if (malloc_active++) {
1249 		malloc_recurse();
1250 		return;
1251 	}
1252 	ofree(ptr);
1253 	malloc_active--;
1254 	_MALLOC_UNLOCK();
1255 	errno = saved_errno;
1256 }
1257 
1258 
1259 static void *
1260 orealloc(void *p, size_t newsz, void *f)
1261 {
1262 	struct region_info *r;
1263 	size_t oldsz, goldsz, gnewsz;
1264 	void *q;
1265 
1266 	if (p == NULL)
1267 		return omalloc(newsz, 0, f);
1268 
1269 	r = find(g_pool, p);
1270 	if (r == NULL) {
1271 		wrterror("bogus pointer (double free?)", p);
1272 		return NULL;
1273 	}
1274 	if (newsz >= SIZE_MAX - mopts.malloc_guard - MALLOC_PAGESIZE) {
1275 		errno = ENOMEM;
1276 		return NULL;
1277 	}
1278 
1279 	REALSIZE(oldsz, r);
1280 	goldsz = oldsz;
1281 	if (oldsz > MALLOC_MAXCHUNK) {
1282 		if (oldsz < mopts.malloc_guard)
1283 			wrterror("guard size", NULL);
1284 		oldsz -= mopts.malloc_guard;
1285 	}
1286 
1287 	gnewsz = newsz;
1288 	if (gnewsz > MALLOC_MAXCHUNK)
1289 		gnewsz += mopts.malloc_guard;
1290 
1291 	if (newsz > MALLOC_MAXCHUNK && oldsz > MALLOC_MAXCHUNK && p == r->p &&
1292 	    !mopts.malloc_realloc) {
1293 		size_t roldsz = PAGEROUND(goldsz);
1294 		size_t rnewsz = PAGEROUND(gnewsz);
1295 
1296 		if (rnewsz > roldsz) {
1297 			if (!mopts.malloc_guard) {
1298 				void *hint = (char *)p + roldsz;
1299 				size_t needed = rnewsz - roldsz;
1300 
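				/*
				 * Cheap realloc: evict any cached free pages
				 * sitting after the allocation, ask mquery(2)
				 * whether the pages there are unused, and
				 * only then map them, growing the region in
				 * place.
				 */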
1301 				STATS_INC(g_pool->cheap_realloc_tries);
1302 				zapcacheregion(g_pool, hint, needed);
1303 				q = MQUERY(hint, needed);
1304 				if (q == hint)
1305 					q = MMAPA(hint, needed);
1306 				else
1307 					q = MAP_FAILED;
1308 				if (q == hint) {
1309 					malloc_used += needed;
1310 					if (mopts.malloc_junk)
1311 						memset(q, SOME_JUNK, needed);
1312 					r->size = newsz;
1313 					STATS_SETF(r, f);
1314 					STATS_INC(g_pool->cheap_reallocs);
1315 					return p;
1316 				} else if (q != MAP_FAILED) {
1317 					if (munmap(q, needed))
1318 						wrterror("munmap", q);
1319 				}
1320 			}
1321 		} else if (rnewsz < roldsz) {
1322 			if (mopts.malloc_guard) {
1323 				if (mprotect((char *)p + roldsz -
1324 				    mopts.malloc_guard, mopts.malloc_guard,
1325 				    PROT_READ | PROT_WRITE))
1326 					wrterror("mprotect", NULL);
1327 				if (mprotect((char *)p + rnewsz -
1328 				    mopts.malloc_guard, mopts.malloc_guard,
1329 				    PROT_NONE))
1330 					wrterror("mprotect", NULL);
1331 			}
1332 			unmap(g_pool, (char *)p + rnewsz, roldsz - rnewsz);
1333 			r->size = gnewsz;
1334 			STATS_SETF(r, f);
1335 			return p;
1336 		} else {
1337 			if (newsz > oldsz && mopts.malloc_junk)
1338 				memset((char *)p + newsz, SOME_JUNK,
1339 				    rnewsz - mopts.malloc_guard - newsz);
1340 			r->size = gnewsz;
1341 			STATS_SETF(r, f);
1342 			return p;
1343 		}
1344 	}
1345 	if (newsz <= oldsz && newsz > oldsz / 2 && !mopts.malloc_realloc) {
1346 		if (mopts.malloc_junk && newsz > 0)
1347 			memset((char *)p + newsz, SOME_JUNK, oldsz - newsz);
1348 		STATS_SETF(r, f);
1349 		return p;
1350 	} else if (newsz != oldsz || mopts.malloc_realloc) {
1351 		q = omalloc(newsz, 0, f);
1352 		if (q == NULL)
1353 			return NULL;
1354 		if (newsz != 0 && oldsz != 0)
1355 			memcpy(q, p, oldsz < newsz ? oldsz : newsz);
1356 		ofree(p);
1357 		return q;
1358 	} else {
1359 		STATS_SETF(r, f);
1360 		return p;
1361 	}
1362 }
1363 
1364 void *
1365 realloc(void *ptr, size_t size)
1366 {
1367 	void *r;
1368 	int saved_errno = errno;
1369 
1370 	_MALLOC_LOCK();
1371 	malloc_func = " in realloc():";
1372 	if (g_pool == NULL) {
1373 		if (malloc_init() != 0)
1374 			return NULL;
1375 	}
1376 	if (malloc_active++) {
1377 		malloc_recurse();
1378 		return NULL;
1379 	}
1380 	r = orealloc(ptr, size, CALLER);
1381 
1382 	malloc_active--;
1383 	_MALLOC_UNLOCK();
1384 	if (r == NULL && mopts.malloc_xmalloc) {
1385 		wrterror("out of memory", NULL);
1386 		errno = ENOMEM;
1387 	}
1388 	if (r != NULL)
1389 		errno = saved_errno;
1390 	return r;
1391 }
1392 
1393 
1394 #define MUL_NO_OVERFLOW	(1UL << (sizeof(size_t) * 4))
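/*
 * MUL_NO_OVERFLOW is 2^(sizeof(size_t) * 4), the square root of SIZE_MAX
 * plus one: nmemb * size can only overflow if at least one factor is at
 * least this large, so calloc() skips the division in the common case.
 */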
1395 
1396 void *
1397 calloc(size_t nmemb, size_t size)
1398 {
1399 	void *r;
1400 	int saved_errno = errno;
1401 
1402 	_MALLOC_LOCK();
1403 	malloc_func = " in calloc():";
1404 	if (g_pool == NULL) {
1405 		if (malloc_init() != 0)
1406 			return NULL;
1407 	}
1408 	if ((nmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
1409 	    nmemb > 0 && SIZE_MAX / nmemb < size) {
1410 		_MALLOC_UNLOCK();
1411 		if (mopts.malloc_xmalloc)
1412 			wrterror("out of memory", NULL);
1413 		errno = ENOMEM;
1414 		return NULL;
1415 	}
1416 
1417 	if (malloc_active++) {
1418 		malloc_recurse();
1419 		return NULL;
1420 	}
1421 
1422 	size *= nmemb;
1423 	r = omalloc(size, 1, CALLER);
1424 
1425 	malloc_active--;
1426 	_MALLOC_UNLOCK();
1427 	if (r == NULL && mopts.malloc_xmalloc) {
1428 		wrterror("out of memory", NULL);
1429 		errno = ENOMEM;
1430 	}
1431 	if (r != NULL)
1432 		errno = saved_errno;
1433 	return r;
1434 }
1435 
1436 static void *
1437 mapalign(struct dir_info *d, size_t alignment, size_t sz, int zero_fill)
1438 {
1439 	void *p, *q;
1440 
1441 	if (alignment < MALLOC_PAGESIZE || ((alignment - 1) & alignment) != 0) {
1442 		wrterror("mapalign bad alignment", NULL);
1443 		return MAP_FAILED;
1444 	}
1445 	if (sz != PAGEROUND(sz)) {
1446 		wrterror("mapalign round", NULL);
1447 		return MAP_FAILED;
1448 	}
1449 
1450 	/* Allocate sz + alignment bytes of memory, which must include a
1451 	 * subrange of sz bytes that is properly aligned.  Unmap the
1452 	 * other bytes, and then return that subrange.
1453 	 */
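	/* For example, alignment 64 KB and sz 16 KB map 80 KB, round the
	 * start up to the next 64 KB boundary and unmap the leading and
	 * trailing slack, which together total 64 KB.
	 */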
1454 
1455 	/* We need sz + alignment to fit into a size_t. */
1456 	if (alignment > SIZE_MAX - sz)
1457 		return MAP_FAILED;
1458 
1459 	p = map(d, sz + alignment, zero_fill);
1460 	if (p == MAP_FAILED)
1461 		return MAP_FAILED;
1462 	q = (void *)(((uintptr_t)p + alignment - 1) & ~(alignment - 1));
1463 	if (q != p) {
1464 		if (munmap(p, q - p))
1465 			wrterror("munmap", p);
1466 	}
1467 	if (munmap(q + sz, alignment - (q - p)))
1468 		wrterror("munmap", q + sz);
1469 	malloc_used -= alignment;
1470 
1471 	return q;
1472 }
1473 
1474 static void *
1475 omemalign(size_t alignment, size_t sz, int zero_fill, void *f)
1476 {
1477 	size_t psz;
1478 	void *p;
1479 
1480 	if (alignment <= MALLOC_PAGESIZE) {
1481 		/*
1482 		 * max(sz, alignment) is enough to ensure the requested alignment,
1483 		 * since the allocator always allocates power-of-two blocks.
1484 		 */
1485 		if (sz < alignment)
1486 			sz = alignment;
1487 		return omalloc(sz, zero_fill, f);
1488 	}
1489 
1490 	if (sz >= SIZE_MAX - mopts.malloc_guard - MALLOC_PAGESIZE) {
1491 		errno = ENOMEM;
1492 		return NULL;
1493 	}
1494 
1495 	sz += mopts.malloc_guard;
1496 	psz = PAGEROUND(sz);
1497 
1498 	p = mapalign(g_pool, alignment, psz, zero_fill);
1499 	if (p == NULL) {
1500 		errno = ENOMEM;
1501 		return NULL;
1502 	}
1503 
1504 	if (insert(g_pool, p, sz, f)) {
1505 		unmap(g_pool, p, psz);
1506 		errno = ENOMEM;
1507 		return NULL;
1508 	}
1509 
1510 	if (mopts.malloc_guard) {
1511 		if (mprotect((char *)p + psz - mopts.malloc_guard,
1512 		    mopts.malloc_guard, PROT_NONE))
1513 			wrterror("mprotect", NULL);
1514 		malloc_guarded += mopts.malloc_guard;
1515 	}
1516 
1517 	if (mopts.malloc_junk) {
1518 		if (zero_fill)
1519 			memset((char *)p + sz - mopts.malloc_guard,
1520 			    SOME_JUNK, psz - sz);
1521 		else
1522 			memset(p, SOME_JUNK, psz - mopts.malloc_guard);
1523 	}
1524 
1525 	return p;
1526 }
1527 
1528 int
1529 posix_memalign(void **memptr, size_t alignment, size_t size)
1530 {
1531 	int res, saved_errno = errno;
1532 	void *r;
1533 
1534 	/* Make sure that alignment is a large enough power of 2. */
1535 	if (((alignment - 1) & alignment) != 0 || alignment < sizeof(void *))
1536 		return EINVAL;
1537 
1538 	_MALLOC_LOCK();
1539 	malloc_func = " in posix_memalign():";
1540 	if (g_pool == NULL) {
1541 		if (malloc_init() != 0)
1542 			goto err;
1543 	}
1544 	if (malloc_active++) {
1545 		malloc_recurse();
1546 		goto err;
1547 	}
1548 	r = omemalign(alignment, size, mopts.malloc_zero, CALLER);
1549 	malloc_active--;
1550 	_MALLOC_UNLOCK();
1551 	if (r == NULL) {
1552 		if (mopts.malloc_xmalloc) {
1553 			wrterror("out of memory", NULL);
1554 			errno = ENOMEM;
1555 		}
1556 		goto err;
1557 	}
1558 	errno = saved_errno;
1559 	*memptr = r;
1560 	return 0;
1561 
1562 err:
1563 	res = errno;
1564 	errno = saved_errno;
1565 	return res;
1566 }
1567 
1568 #ifdef MALLOC_STATS
1569 
1570 struct malloc_leak {
1571 	void (*f)();
1572 	size_t total_size;
1573 	int count;
1574 };
1575 
1576 struct leaknode {
1577 	RB_ENTRY(leaknode) entry;
1578 	struct malloc_leak d;
1579 };
1580 
1581 static int
1582 leakcmp(struct leaknode *e1, struct leaknode *e2)
1583 {
1584 	return e1->d.f < e2->d.f ? -1 : e1->d.f > e2->d.f;
1585 }
1586 
1587 static RB_HEAD(leaktree, leaknode) leakhead;
1588 RB_GENERATE_STATIC(leaktree, leaknode, entry, leakcmp)
1589 
1590 static void
1591 putleakinfo(void *f, size_t sz, int cnt)
1592 {
1593 	struct leaknode key, *p;
1594 	static struct leaknode *page;
1595 	static int used;
1596 
1597 	if (cnt == 0)
1598 		return;
1599 
1600 	key.d.f = f;
1601 	p = RB_FIND(leaktree, &leakhead, &key);
1602 	if (p == NULL) {
1603 		if (page == NULL ||
1604 		    used >= MALLOC_PAGESIZE / sizeof(struct leaknode)) {
1605 			page = MMAP(MALLOC_PAGESIZE);
1606 			if (page == MAP_FAILED)
1607 				return;
1608 			used = 0;
1609 		}
1610 		p = &page[used++];
1611 		p->d.f = f;
1612 		p->d.total_size = sz * cnt;
1613 		p->d.count = cnt;
1614 		RB_INSERT(leaktree, &leakhead, p);
1615 	} else {
1616 		p->d.total_size += sz * cnt;
1617 		p->d.count += cnt;
1618 	}
1619 }
1620 
1621 static struct malloc_leak *malloc_leaks;
1622 
1623 static void
1624 dump_leaks(int fd)
1625 {
1626 	struct leaknode *p;
1627 	char buf[64];
1628 	int i = 0;
1629 
1630 	snprintf(buf, sizeof(buf), "Leak report\n");
1631 	write(fd, buf, strlen(buf));
1632 	snprintf(buf, sizeof(buf), "           f     sum      #    avg\n");
1633 	write(fd, buf, strlen(buf));
1634 	/* XXX only one page of summary */
1635 	if (malloc_leaks == NULL)
1636 		malloc_leaks = MMAP(MALLOC_PAGESIZE);
1637 	if (malloc_leaks != MAP_FAILED)
1638 		memset(malloc_leaks, 0, MALLOC_PAGESIZE);
1639 	RB_FOREACH(p, leaktree, &leakhead) {
1640 		snprintf(buf, sizeof(buf), "%12p %7zu %6u %6zu\n", p->d.f,
1641 		    p->d.total_size, p->d.count, p->d.total_size / p->d.count);
1642 		write(fd, buf, strlen(buf));
1643 		if (malloc_leaks == MAP_FAILED ||
1644 		    i >= MALLOC_PAGESIZE / sizeof(struct malloc_leak))
1645 			continue;
1646 		malloc_leaks[i].f = p->d.f;
1647 		malloc_leaks[i].total_size = p->d.total_size;
1648 		malloc_leaks[i].count = p->d.count;
1649 		i++;
1650 	}
1651 }
1652 
1653 static void
1654 dump_chunk(int fd, struct chunk_info *p, void *f, int fromfreelist)
1655 {
1656 	char buf[64];
1657 
1658 	while (p != NULL) {
1659 		snprintf(buf, sizeof(buf), "chunk %12p %12p %4d %d/%d\n",
1660 		    p->page, ((p->bits[0] & 1) ? NULL : f),
1661 		    p->size, p->free, p->total);
1662 		write(fd, buf, strlen(buf));
1663 		if (!fromfreelist) {
1664 			if (p->bits[0] & 1)
1665 				putleakinfo(NULL, p->size, p->total - p->free);
1666 			else {
1667 				putleakinfo(f, p->size, 1);
1668 				putleakinfo(NULL, p->size,
1669 				    p->total - p->free - 1);
1670 			}
1671 			break;
1672 		}
1673 		p = LIST_NEXT(p, entries);
1674 		if (p != NULL) {
1675 			snprintf(buf, sizeof(buf), "        ");
1676 			write(fd, buf, strlen(buf));
1677 		}
1678 	}
1679 }
1680 
1681 static void
1682 dump_free_chunk_info(int fd, struct dir_info *d)
1683 {
1684 	char buf[64];
1685 	int i, count;
1686 
1687 	snprintf(buf, sizeof(buf), "Free chunk structs:\n");
1688 	write(fd, buf, strlen(buf));
1689 	for (i = 0; i <= MALLOC_MAXSHIFT; i++) {
1690 		struct chunk_info *p;
1691 
1692 		count = 0;
1693 		LIST_FOREACH(p, &d->chunk_info_list[i], entries)
1694 			count++;
1695 		p = LIST_FIRST(&d->chunk_dir[i]);
1696 		if (p == NULL && count == 0)
1697 			continue;
1698 		snprintf(buf, sizeof(buf), "%2d) %3d ", i, count);
1699 		write(fd, buf, strlen(buf));
1700 		if (p != NULL)
1701 			dump_chunk(fd, p, NULL, 1);
1702 		else
1703 			write(fd, "\n", 1);
1704 	}
1705 
1706 }
1707 
1708 static void
1709 dump_free_page_info(int fd, struct dir_info *d)
1710 {
1711 	char buf[64];
1712 	int i;
1713 
1714 	snprintf(buf, sizeof(buf), "Free pages cached: %zu\n",
1715 	    d->free_regions_size);
1716 	write(fd, buf, strlen(buf));
1717 	for (i = 0; i < mopts.malloc_cache; i++) {
1718 		if (d->free_regions[i].p != NULL) {
1719 			snprintf(buf, sizeof(buf), "%2d) ", i);
1720 			write(fd, buf, strlen(buf));
1721 			snprintf(buf, sizeof(buf), "free at %p: %zu\n",
1722 			    d->free_regions[i].p, d->free_regions[i].size);
1723 			write(fd, buf, strlen(buf));
1724 		}
1725 	}
1726 }
1727 
1728 static void
1729 malloc_dump1(int fd, struct dir_info *d)
1730 {
1731 	char buf[64];
1732 	size_t i, realsize;
1733 
1734 	snprintf(buf, sizeof(buf), "Malloc dir of %s at %p\n", __progname, d);
1735 	write(fd, buf, strlen(buf));
1736 	if (d == NULL)
1737 		return;
1738 	snprintf(buf, sizeof(buf), "Region slots free %zu/%zu\n",
1739 		d->regions_free, d->regions_total);
1740 	write(fd, buf, strlen(buf));
1741 	snprintf(buf, sizeof(buf), "Finds %zu/%zu\n", d->finds,
1742 	    d->find_collisions);
1743 	write(fd, buf, strlen(buf));
1744 	snprintf(buf, sizeof(buf), "Inserts %zu/%zu\n", d->inserts,
1745 	    d->insert_collisions);
1746 	write(fd, buf, strlen(buf));
1747 	snprintf(buf, sizeof(buf), "Deletes %zu/%zu\n", d->deletes,
1748 	     d->delete_moves);
1749 	write(fd, buf, strlen(buf));
1750 	snprintf(buf, sizeof(buf), "Cheap reallocs %zu/%zu\n",
1751 	    d->cheap_reallocs, d->cheap_realloc_tries);
1752 	write(fd, buf, strlen(buf));
1753 	dump_free_chunk_info(fd, d);
1754 	dump_free_page_info(fd, d);
1755 	snprintf(buf, sizeof(buf),
1756 	    "slot)  hash d  type         page            f size [free/n]\n");
1757 	write(fd, buf, strlen(buf));
1758 	for (i = 0; i < d->regions_total; i++) {
1759 		if (d->r[i].p != NULL) {
1760 			size_t h = hash(d->r[i].p) &
1761 			    (d->regions_total - 1);
1762 			snprintf(buf, sizeof(buf), "%4zx) #%4zx %zd ",
1763 			    i, h, h - i);
1764 			write(fd, buf, strlen(buf));
1765 			REALSIZE(realsize, &d->r[i]);
1766 			if (realsize > MALLOC_MAXCHUNK) {
1767 				putleakinfo(d->r[i].f, realsize, 1);
1768 				snprintf(buf, sizeof(buf),
1769 				    "pages %12p %12p %zu\n", d->r[i].p,
1770 				    d->r[i].f, realsize);
1771 				write(fd, buf, strlen(buf));
1772 			} else
1773 				dump_chunk(fd,
1774 				    (struct chunk_info *)d->r[i].size,
1775 				    d->r[i].f, 0);
1776 		}
1777 	}
1778 	snprintf(buf, sizeof(buf), "In use %zu\n", malloc_used);
1779 	write(fd, buf, strlen(buf));
1780 	snprintf(buf, sizeof(buf), "Guarded %zu\n", malloc_guarded);
1781 	write(fd, buf, strlen(buf));
1782 	dump_leaks(fd);
1783 	write(fd, "\n", 1);
1784 }
1785 
1786 void
1787 malloc_dump(int fd)
1788 {
1789 	int i;
1790 	void *p;
1791 	struct region_info *r;
1792 	int saved_errno = errno;
1793 
1794 	for (i = 0; i <= MALLOC_DELAYED_CHUNKS; i++) {
1795 		p = g_pool->delayed_chunks[i];
1796 		if (p == NULL)
1797 			continue;
1798 		r = find(g_pool, p);
1799 		if (r == NULL)
1800 			wrterror("bogus pointer in malloc_dump", p);
1801 		free_bytes(g_pool, r, p);
1802 		g_pool->delayed_chunks[i] = NULL;
1803 	}
1804 	/* XXX leak when run multiple times */
1805 	RB_INIT(&leakhead);
1806 	malloc_dump1(fd, g_pool);
1807 	errno = saved_errno;
1808 }
1809 
1810 static void
1811 malloc_exit(void)
1812 {
1813 	static const char q[] = "malloc() warning: Couldn't dump stats\n";
1814 	int save_errno = errno, fd;
1815 
1816 	fd = open("malloc.out", O_RDWR|O_APPEND);
1817 	if (fd != -1) {
1818 		malloc_dump(fd);
1819 		close(fd);
1820 	} else
1821 		write(STDERR_FILENO, q, sizeof(q) - 1);
1822 	errno = save_errno;
1823 }
1824 
1825 #endif /* MALLOC_STATS */
1826