/*	$OpenBSD: malloc.c,v 1.211 2016/11/04 09:11:20 otto Exp $	*/
/*
 * Copyright (c) 2008, 2010, 2011, 2016 Otto Moerbeek <otto@drijf.net>
 * Copyright (c) 2012 Matthew Dempsky <matthew@openbsd.org>
 * Copyright (c) 2008 Damien Miller <djm@openbsd.org>
 * Copyright (c) 2000 Poul-Henning Kamp <phk@FreeBSD.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * If we meet some day, and you think this stuff is worth it, you
 * can buy me a beer in return. Poul-Henning Kamp
 */

/* #define MALLOC_STATS */

#include <sys/types.h>
#include <sys/param.h>	/* PAGE_SHIFT ALIGN */
#include <sys/queue.h>
#include <sys/mman.h>
#include <sys/uio.h>
#include <errno.h>
#include <stdarg.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <unistd.h>

#ifdef MALLOC_STATS
#include <sys/tree.h>
#include <fcntl.h>
#endif

#include "thread_private.h"
#include <tib.h>

#if defined(__mips64__)
#define MALLOC_PAGESHIFT	(14U)
#else
#define MALLOC_PAGESHIFT	(PAGE_SHIFT)
#endif

#define MALLOC_MINSHIFT		4
#define MALLOC_MAXSHIFT		(MALLOC_PAGESHIFT - 1)
#define MALLOC_PAGESIZE		(1UL << MALLOC_PAGESHIFT)
#define MALLOC_MINSIZE		(1UL << MALLOC_MINSHIFT)
#define MALLOC_PAGEMASK		(MALLOC_PAGESIZE - 1)
#define MASK_POINTER(p)		((void *)(((uintptr_t)(p)) & ~MALLOC_PAGEMASK))

#define MALLOC_MAXCHUNK		(1 << MALLOC_MAXSHIFT)
#define MALLOC_MAXCACHE		256
#define MALLOC_DELAYED_CHUNK_MASK	15
#define MALLOC_INITIAL_REGIONS	512
#define MALLOC_DEFAULT_CACHE	64
#define MALLOC_CHUNK_LISTS	4
#define CHUNK_CHECK_LENGTH	32

/*
 * We move allocations between half a page and a whole page towards the end,
 * subject to alignment constraints. This is the extra headroom we allow.
 * Set to zero to be the most strict.
 */
#define MALLOC_LEEWAY		0

#define PAGEROUND(x)  (((x) + (MALLOC_PAGEMASK)) & ~MALLOC_PAGEMASK)
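
/*
 * Example (assuming 4K pages): PAGEROUND(1) == 4096,
 * PAGEROUND(4096) == 4096 and PAGEROUND(4097) == 8192; adding
 * MALLOC_PAGEMASK and masking with its complement rounds any size up
 * to the next multiple of MALLOC_PAGESIZE.
 */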

/*
 * What to use for junk: the byte value we fill memory with when the
 * 'J' option is enabled.  Use SOME_JUNK right after alloc, and
 * SOME_FREEJUNK right before free.
 */
#define SOME_JUNK		0xdb	/* deadbeef */
#define SOME_FREEJUNK		0xdf	/* dead, free */

#define MMAP(sz)	mmap(NULL, (sz), PROT_READ | PROT_WRITE, \
    MAP_ANON | MAP_PRIVATE, -1, 0)

#define MMAPA(a,sz)	mmap((a), (sz), PROT_READ | PROT_WRITE, \
    MAP_ANON | MAP_PRIVATE, -1, 0)

#define MQUERY(a, sz)	mquery((a), (sz), PROT_READ | PROT_WRITE, \
    MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0)

struct region_info {
	void *p;		/* page; low bits used to mark chunks */
	uintptr_t size;		/* size for pages, or chunk_info pointer */
#ifdef MALLOC_STATS
	void *f;		/* where allocated from */
#endif
};

LIST_HEAD(chunk_head, chunk_info);

struct dir_info {
	u_int32_t canary1;
	int active;			/* status of malloc */
	struct region_info *r;		/* region slots */
	size_t regions_total;		/* number of region slots */
	size_t regions_free;		/* number of free slots */
					/* lists of free chunk info structs */
	struct chunk_head chunk_info_list[MALLOC_MAXSHIFT + 1];
					/* lists of chunks with free slots */
	struct chunk_head chunk_dir[MALLOC_MAXSHIFT + 1][MALLOC_CHUNK_LISTS];
	size_t free_regions_size;	/* free pages cached */
					/* free pages cache */
	struct region_info free_regions[MALLOC_MAXCACHE];
					/* delayed free chunk slots */
	void *delayed_chunks[MALLOC_DELAYED_CHUNK_MASK + 1];
	size_t rbytesused;		/* random bytes used */
	char *func;			/* current function */
	int mutex;
	u_char rbytes[32];		/* random bytes */
	u_short chunk_start;
#ifdef MALLOC_STATS
	size_t inserts;
	size_t insert_collisions;
	size_t finds;
	size_t find_collisions;
	size_t deletes;
	size_t delete_moves;
	size_t cheap_realloc_tries;
	size_t cheap_reallocs;
	size_t malloc_used;		/* bytes allocated */
	size_t malloc_guarded;		/* bytes used for guards */
#define STATS_ADD(x,y)	((x) += (y))
#define STATS_SUB(x,y)	((x) -= (y))
#define STATS_INC(x)	((x)++)
#define STATS_ZERO(x)	((x) = 0)
#define STATS_SETF(x,y)	((x)->f = (y))
#else
#define STATS_ADD(x,y)	/* nothing */
#define STATS_SUB(x,y)	/* nothing */
#define STATS_INC(x)	/* nothing */
#define STATS_ZERO(x)	/* nothing */
#define STATS_SETF(x,y)	/* nothing */
#endif /* MALLOC_STATS */
	u_int32_t canary2;
};
#define DIR_INFO_RSZ	((sizeof(struct dir_info) + MALLOC_PAGEMASK) & \
			~MALLOC_PAGEMASK)

/* How many bits per u_short in the bitmap */
#define MALLOC_BITS		(NBBY * sizeof(u_short))

/*
 * This structure describes a page worth of chunks.
 */
struct chunk_info {
	LIST_ENTRY(chunk_info) entries;
	void *page;			/* pointer to the page */
	u_int32_t canary;
	u_short size;			/* size of this page's chunks */
	u_short shift;			/* how far to shift for this size */
	u_short free;			/* how many free chunks */
	u_short total;			/* how many chunks */
	u_short offset;			/* requested size table offset */
					/* which chunks are free */
	u_short bits[1];
};

struct malloc_readonly {
	struct dir_info *malloc_pool[_MALLOC_MUTEXES];	/* Main bookkeeping information */
	int	malloc_mt;		/* multi-threaded mode? */
	int	malloc_freenow;		/* Free quickly - disable chunk rnd */
	int	malloc_freeunmap;	/* mprotect free pages PROT_NONE? */
	int	malloc_junk;		/* junk fill? */
	int	malloc_realloc;		/* always realloc? */
	int	malloc_xmalloc;		/* xmalloc behaviour? */
	int	chunk_canaries;		/* use canaries after chunks? */
	u_int	malloc_cache;		/* free pages we cache */
	size_t	malloc_guard;		/* use guard pages after allocations? */
#ifdef MALLOC_STATS
	int	malloc_stats;		/* dump statistics at end */
#endif
	u_int32_t malloc_canary;	/* Matched against ones in malloc_pool */
};

/* This object is mapped PROT_READ after initialisation to prevent tampering */
static union {
	struct malloc_readonly mopts;
	u_char _pad[MALLOC_PAGESIZE];
} malloc_readonly __attribute__((aligned(MALLOC_PAGESIZE)));
#define mopts	malloc_readonly.mopts

char		*malloc_options;	/* compile-time options */

static u_char getrbyte(struct dir_info *d);
static __dead void wrterror(struct dir_info *d, char *msg, ...)
    __attribute__((__format__ (printf, 2, 3)));

#ifdef MALLOC_STATS
void malloc_dump(int, int, struct dir_info *);
PROTO_NORMAL(malloc_dump);
static void malloc_exit(void);
#define CALLER	__builtin_return_address(0)
#else
#define CALLER	NULL
#endif

/*
 * low bits of r->p determine size: 0 means >= page size and r->size
 * holds the real size, 1 means malloc(0), otherwise the low bits are
 * the chunk shift count plus one
 */
#define REALSIZE(sz, r)						\
	(sz) = (uintptr_t)(r)->p & MALLOC_PAGEMASK,		\
	(sz) = ((sz) == 0 ? (r)->size : ((sz) == 1 ? 0 : (1 << ((sz)-1))))
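
/*
 * Example of the encoding (assuming 4K pages): low bits 0 mean a
 * page-sized region whose length is in r->size; low bits 1 mark a
 * malloc(0) chunk; low bits 5 decode to chunks of 1 << (5 - 1) == 16
 * bytes.
 */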

static inline void
_MALLOC_LEAVE(struct dir_info *d)
{
	if (mopts.malloc_mt) {
		d->active--;
		_MALLOC_UNLOCK(d->mutex);
	}
}

static inline void
_MALLOC_ENTER(struct dir_info *d)
{
	if (mopts.malloc_mt) {
		_MALLOC_LOCK(d->mutex);
		d->active++;
	}
}

static inline size_t
hash(void *p)
{
	size_t sum;
	uintptr_t u;

	u = (uintptr_t)p >> MALLOC_PAGESHIFT;
	sum = u;
	sum = (sum << 7) - sum + (u >> 16);
#ifdef __LP64__
	sum = (sum << 7) - sum + (u >> 32);
	sum = (sum << 7) - sum + (u >> 48);
#endif
	return sum;
}
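
/*
 * Note: (sum << 7) - sum is just 127 * sum, so the function above is
 * a small multiplicative hash, folding the page number into the sum
 * one 16-bit halfword at a time: sum = 127 * sum + next halfword.
 */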

static inline
struct dir_info *getpool(void)
{
	if (!mopts.malloc_mt)
		return mopts.malloc_pool[0];
	else
		return mopts.malloc_pool[TIB_GET()->tib_tid &
		    (_MALLOC_MUTEXES - 1)];
}
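
/*
 * In multi-threaded mode a thread picks its pool by thread id;
 * _MALLOC_MUTEXES is a power of two, so the mask keeps the low bits
 * of tib_tid, spreading threads across the available pools.
 */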

static __dead void
wrterror(struct dir_info *d, char *msg, ...)
{
	struct iovec	iov[3];
	char		pidbuf[80];
	char		buf[80];
	int		saved_errno = errno;
	va_list		ap;

	iov[0].iov_base = pidbuf;
	snprintf(pidbuf, sizeof(pidbuf), "%s(%d) in %s(): ", __progname,
	    getpid(), (d != NULL && d->func) ? d->func : "unknown");
	iov[0].iov_len = strlen(pidbuf);
	iov[1].iov_base = buf;
	va_start(ap, msg);
	vsnprintf(buf, sizeof(buf), msg, ap);
	va_end(ap);
	iov[1].iov_len = strlen(buf);
	iov[2].iov_base = "\n";
	iov[2].iov_len = 1;
	writev(STDERR_FILENO, iov, 3);

#ifdef MALLOC_STATS
	if (mopts.malloc_stats) {
		int i;

		for (i = 0; i < _MALLOC_MUTEXES; i++)
			malloc_dump(STDERR_FILENO, i, mopts.malloc_pool[i]);
	}
#endif /* MALLOC_STATS */

	errno = saved_errno;

	abort();
}

static void
rbytes_init(struct dir_info *d)
{
	arc4random_buf(d->rbytes, sizeof(d->rbytes));
	/* add 1 to account for using d->rbytes[0] */
	d->rbytesused = 1 + d->rbytes[0] % (sizeof(d->rbytes) / 2);
}

static inline u_char
getrbyte(struct dir_info *d)
{
	u_char x;

	if (d->rbytesused >= sizeof(d->rbytes))
		rbytes_init(d);
	x = d->rbytes[d->rbytesused++];
	return x;
}

/*
 * Cache maintenance.  We keep at most malloc_cache pages cached.
 * If the cache is becoming full, unmap pages in the cache for real,
 * and then add the region to the cache.
 * As opposed to the regular region data structure, the sizes in the
 * cache are in MALLOC_PAGESIZE units.
 */
static void
unmap(struct dir_info *d, void *p, size_t sz)
{
	size_t psz = sz >> MALLOC_PAGESHIFT;
	size_t rsz, tounmap;
	struct region_info *r;
	u_int i, offset;

	if (sz != PAGEROUND(sz))
		wrterror(d, "munmap round");

	if (psz > mopts.malloc_cache) {
		i = munmap(p, sz);
		if (i)
			wrterror(d, "munmap %p", p);
		STATS_SUB(d->malloc_used, sz);
		return;
	}
	tounmap = 0;
	rsz = mopts.malloc_cache - d->free_regions_size;
	if (psz > rsz)
		tounmap = psz - rsz;
	offset = getrbyte(d);
	for (i = 0; tounmap > 0 && i < mopts.malloc_cache; i++) {
		r = &d->free_regions[(i + offset) & (mopts.malloc_cache - 1)];
		if (r->p != NULL) {
			rsz = r->size << MALLOC_PAGESHIFT;
			if (munmap(r->p, rsz))
				wrterror(d, "munmap %p", r->p);
			r->p = NULL;
			if (tounmap > r->size)
				tounmap -= r->size;
			else
				tounmap = 0;
			d->free_regions_size -= r->size;
			r->size = 0;
			STATS_SUB(d->malloc_used, rsz);
		}
	}
	if (tounmap > 0)
		wrterror(d, "malloc cache underflow");
	for (i = 0; i < mopts.malloc_cache; i++) {
		r = &d->free_regions[(i + offset) & (mopts.malloc_cache - 1)];
		if (r->p == NULL) {
			if (mopts.malloc_junk && !mopts.malloc_freeunmap) {
				size_t amt = mopts.malloc_junk == 1 ?
				    MALLOC_MAXCHUNK : sz;
				memset(p, SOME_FREEJUNK, amt);
			}
			if (mopts.malloc_freeunmap)
				mprotect(p, sz, PROT_NONE);
			r->p = p;
			r->size = psz;
			d->free_regions_size += psz;
			break;
		}
	}
	if (i == mopts.malloc_cache)
		wrterror(d, "malloc free slot lost");
	if (d->free_regions_size > mopts.malloc_cache)
		wrterror(d, "malloc cache overflow");
}

static void
zapcacheregion(struct dir_info *d, void *p, size_t len)
{
	u_int i;
	struct region_info *r;
	size_t rsz;

	for (i = 0; i < mopts.malloc_cache; i++) {
		r = &d->free_regions[i];
		if (r->p >= p && r->p <= (void *)((char *)p + len)) {
			rsz = r->size << MALLOC_PAGESHIFT;
			if (munmap(r->p, rsz))
				wrterror(d, "munmap %p", r->p);
			r->p = NULL;
			d->free_regions_size -= r->size;
			r->size = 0;
			STATS_SUB(d->malloc_used, rsz);
		}
	}
}

static void *
map(struct dir_info *d, void *hint, size_t sz, int zero_fill)
{
	size_t psz = sz >> MALLOC_PAGESHIFT;
	struct region_info *r, *big = NULL;
	u_int i, offset;
	void *p;

	if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)(uintptr_t)d) ||
	    d->canary1 != ~d->canary2)
		wrterror(d, "internal struct corrupt");
	if (sz != PAGEROUND(sz))
		wrterror(d, "map round");

	if (!hint && psz > d->free_regions_size) {
		_MALLOC_LEAVE(d);
		p = MMAP(sz);
		_MALLOC_ENTER(d);
		if (p != MAP_FAILED)
			STATS_ADD(d->malloc_used, sz);
		/* zero fill not needed */
		return p;
	}
	offset = getrbyte(d);
	for (i = 0; i < mopts.malloc_cache; i++) {
		r = &d->free_regions[(i + offset) & (mopts.malloc_cache - 1)];
		if (r->p != NULL) {
			if (hint && r->p != hint)
				continue;
			if (r->size == psz) {
				p = r->p;
				r->p = NULL;
				r->size = 0;
				d->free_regions_size -= psz;
				if (mopts.malloc_freeunmap)
					mprotect(p, sz, PROT_READ | PROT_WRITE);
				if (zero_fill)
					memset(p, 0, sz);
				else if (mopts.malloc_junk == 2 &&
				    mopts.malloc_freeunmap)
					memset(p, SOME_FREEJUNK, sz);
				return p;
			} else if (r->size > psz)
				big = r;
		}
	}
	if (big != NULL) {
		r = big;
		p = r->p;
		r->p = (char *)r->p + (psz << MALLOC_PAGESHIFT);
		if (mopts.malloc_freeunmap)
			mprotect(p, sz, PROT_READ | PROT_WRITE);
		r->size -= psz;
		d->free_regions_size -= psz;
		if (zero_fill)
			memset(p, 0, sz);
		else if (mopts.malloc_junk == 2 && mopts.malloc_freeunmap)
			memset(p, SOME_FREEJUNK, sz);
		return p;
	}
	if (hint)
		return MAP_FAILED;
	if (d->free_regions_size > mopts.malloc_cache)
		wrterror(d, "malloc cache");
	_MALLOC_LEAVE(d);
	p = MMAP(sz);
	_MALLOC_ENTER(d);
	if (p != MAP_FAILED)
		STATS_ADD(d->malloc_used, sz);
	/* zero fill not needed */
	return p;
}

static void
omalloc_parseopt(char opt)
{
	switch (opt) {
	case '>':
		mopts.malloc_cache <<= 1;
		if (mopts.malloc_cache > MALLOC_MAXCACHE)
			mopts.malloc_cache = MALLOC_MAXCACHE;
		break;
	case '<':
		mopts.malloc_cache >>= 1;
		break;
	case 'c':
		mopts.chunk_canaries = 0;
		break;
	case 'C':
		mopts.chunk_canaries = 1;
		break;
#ifdef MALLOC_STATS
	case 'd':
		mopts.malloc_stats = 0;
		break;
	case 'D':
		mopts.malloc_stats = 1;
		break;
#endif /* MALLOC_STATS */
	case 'f':
		mopts.malloc_freenow = 0;
		mopts.malloc_freeunmap = 0;
		break;
	case 'F':
		mopts.malloc_freenow = 1;
		mopts.malloc_freeunmap = 1;
		break;
	case 'g':
		mopts.malloc_guard = 0;
		break;
	case 'G':
		mopts.malloc_guard = MALLOC_PAGESIZE;
		break;
	case 'j':
		if (mopts.malloc_junk > 0)
			mopts.malloc_junk--;
		break;
	case 'J':
		if (mopts.malloc_junk < 2)
			mopts.malloc_junk++;
		break;
	case 'r':
		mopts.malloc_realloc = 0;
		break;
	case 'R':
		mopts.malloc_realloc = 1;
		break;
	case 'u':
		mopts.malloc_freeunmap = 0;
		break;
	case 'U':
		mopts.malloc_freeunmap = 1;
		break;
	case 'x':
		mopts.malloc_xmalloc = 0;
		break;
	case 'X':
		mopts.malloc_xmalloc = 1;
		break;
	default: {
		static const char q[] = "malloc() warning: "
		    "unknown char in MALLOC_OPTIONS\n";
		write(STDERR_FILENO, q, sizeof(q) - 1);
		break;
	}
	}
}

static void
omalloc_init(void)
{
	char *p, *q, b[64];
	int i, j;

	/*
	 * Default options
	 */
	mopts.malloc_junk = 1;
	mopts.malloc_cache = MALLOC_DEFAULT_CACHE;

	for (i = 0; i < 3; i++) {
		switch (i) {
		case 0:
			j = readlink("/etc/malloc.conf", b, sizeof b - 1);
			if (j <= 0)
				continue;
			b[j] = '\0';
			p = b;
			break;
		case 1:
			if (issetugid() == 0)
				p = getenv("MALLOC_OPTIONS");
			else
				continue;
			break;
		case 2:
			p = malloc_options;
			break;
		default:
			p = NULL;
		}

		for (; p != NULL && *p != '\0'; p++) {
			switch (*p) {
			case 'S':
				for (q = "CGJ"; *q != '\0'; q++)
					omalloc_parseopt(*q);
				mopts.malloc_cache = 0;
				break;
			case 's':
				for (q = "cgj"; *q != '\0'; q++)
					omalloc_parseopt(*q);
				mopts.malloc_cache = MALLOC_DEFAULT_CACHE;
				break;
			default:
				omalloc_parseopt(*p);
				break;
			}
		}
	}

#ifdef MALLOC_STATS
	if (mopts.malloc_stats && (atexit(malloc_exit) == -1)) {
		static const char q[] = "malloc() warning: atexit(2) failed."
		    " Will not be able to dump stats on exit\n";
		write(STDERR_FILENO, q, sizeof(q) - 1);
	}
#endif /* MALLOC_STATS */

	while ((mopts.malloc_canary = arc4random()) == 0)
		;
}

/*
 * Initialize a dir_info, which should have been cleared by caller
 */
static void
omalloc_poolinit(struct dir_info **dp)
{
	void *p;
	size_t d_avail, regioninfo_size;
	struct dir_info *d;
	int i, j;

	/*
	 * Allocate dir_info with a guard page on either side. Also
	 * randomise offset inside the page at which the dir_info
	 * lies (subject to alignment by 1 << MALLOC_MINSHIFT)
	 */
	if ((p = MMAP(DIR_INFO_RSZ + (MALLOC_PAGESIZE * 2))) == MAP_FAILED)
		wrterror(NULL, "malloc init mmap failed");
	mprotect(p, MALLOC_PAGESIZE, PROT_NONE);
	mprotect((char *)p + MALLOC_PAGESIZE + DIR_INFO_RSZ,
	    MALLOC_PAGESIZE, PROT_NONE);
	d_avail = (DIR_INFO_RSZ - sizeof(*d)) >> MALLOC_MINSHIFT;
	d = (struct dir_info *)((char *)p + MALLOC_PAGESIZE +
	    (arc4random_uniform(d_avail) << MALLOC_MINSHIFT));

	rbytes_init(d);
	d->regions_free = d->regions_total = MALLOC_INITIAL_REGIONS;
	regioninfo_size = d->regions_total * sizeof(struct region_info);
	d->r = MMAP(regioninfo_size);
	if (d->r == MAP_FAILED) {
		d->regions_total = 0;
		wrterror(NULL, "malloc init mmap failed");
	}
	for (i = 0; i <= MALLOC_MAXSHIFT; i++) {
		LIST_INIT(&d->chunk_info_list[i]);
		for (j = 0; j < MALLOC_CHUNK_LISTS; j++)
			LIST_INIT(&d->chunk_dir[i][j]);
	}
	STATS_ADD(d->malloc_used, regioninfo_size);
	d->canary1 = mopts.malloc_canary ^ (u_int32_t)(uintptr_t)d;
	d->canary2 = ~d->canary1;

	*dp = d;
}

static int
omalloc_grow(struct dir_info *d)
{
	size_t newtotal;
	size_t newsize;
	size_t mask;
	size_t i;
	struct region_info *p;

	if (d->regions_total > SIZE_MAX / sizeof(struct region_info) / 2)
		return 1;

	newtotal = d->regions_total * 2;
	newsize = newtotal * sizeof(struct region_info);
	mask = newtotal - 1;

	p = MMAP(newsize);
	if (p == MAP_FAILED)
		return 1;

	STATS_ADD(d->malloc_used, newsize);
	STATS_ZERO(d->inserts);
	STATS_ZERO(d->insert_collisions);
	for (i = 0; i < d->regions_total; i++) {
		void *q = d->r[i].p;
		if (q != NULL) {
			size_t index = hash(q) & mask;
			STATS_INC(d->inserts);
			while (p[index].p != NULL) {
				index = (index - 1) & mask;
				STATS_INC(d->insert_collisions);
			}
			p[index] = d->r[i];
		}
	}
	/* avoid letting pages that contain meta info end up in the cache */
	if (munmap(d->r, d->regions_total * sizeof(struct region_info)))
		wrterror(d, "munmap %p", (void *)d->r);
	else
		STATS_SUB(d->malloc_used,
		    d->regions_total * sizeof(struct region_info));
	d->regions_free = d->regions_free + d->regions_total;
	d->regions_total = newtotal;
	d->r = p;
	return 0;
}

static struct chunk_info *
alloc_chunk_info(struct dir_info *d, int bits)
{
	struct chunk_info *p;
	size_t size, count;

	if (bits == 0)
		count = MALLOC_PAGESIZE / MALLOC_MINSIZE;
	else
		count = MALLOC_PAGESIZE >> bits;

	size = howmany(count, MALLOC_BITS);
	size = sizeof(struct chunk_info) + (size - 1) * sizeof(u_short);
	if (mopts.chunk_canaries)
		size += count * sizeof(u_short);
	size = ALIGN(size);

	if (LIST_EMPTY(&d->chunk_info_list[bits])) {
		char *q;
		int i;

		q = MMAP(MALLOC_PAGESIZE);
		if (q == MAP_FAILED)
			return NULL;
		STATS_ADD(d->malloc_used, MALLOC_PAGESIZE);
		count = MALLOC_PAGESIZE / size;
		for (i = 0; i < count; i++, q += size)
			LIST_INSERT_HEAD(&d->chunk_info_list[bits],
			    (struct chunk_info *)q, entries);
	}
	p = LIST_FIRST(&d->chunk_info_list[bits]);
	LIST_REMOVE(p, entries);
	memset(p, 0, size);
	p->canary = d->canary1;
	return p;
}


/*
 * The hashtable uses the assumption that p is never NULL. This holds since
 * non-MAP_FIXED mappings with hint 0 start at BRKSIZ.
 */
static int
insert(struct dir_info *d, void *p, size_t sz, void *f)
{
	size_t index;
	size_t mask;
	void *q;

	if (d->regions_free * 4 < d->regions_total) {
		if (omalloc_grow(d))
			return 1;
	}
	mask = d->regions_total - 1;
	index = hash(p) & mask;
	q = d->r[index].p;
	STATS_INC(d->inserts);
	while (q != NULL) {
		index = (index - 1) & mask;
		q = d->r[index].p;
		STATS_INC(d->insert_collisions);
	}
	d->r[index].p = p;
	d->r[index].size = sz;
#ifdef MALLOC_STATS
	d->r[index].f = f;
#endif
	d->regions_free--;
	return 0;
}

static struct region_info *
find(struct dir_info *d, void *p)
{
	size_t index;
	size_t mask = d->regions_total - 1;
	void *q, *r;

	if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)(uintptr_t)d) ||
	    d->canary1 != ~d->canary2)
		wrterror(d, "internal struct corrupt");
	p = MASK_POINTER(p);
	index = hash(p) & mask;
	r = d->r[index].p;
	q = MASK_POINTER(r);
	STATS_INC(d->finds);
	while (q != p && r != NULL) {
		index = (index - 1) & mask;
		r = d->r[index].p;
		q = MASK_POINTER(r);
		STATS_INC(d->find_collisions);
	}
	return (q == p && r != NULL) ? &d->r[index] : NULL;
}

static void
delete(struct dir_info *d, struct region_info *ri)
{
	/* algorithm R, Knuth Vol III section 6.4 */
	size_t mask = d->regions_total - 1;
	size_t i, j, r;

	if (d->regions_total & (d->regions_total - 1))
		wrterror(d, "regions_total not 2^x");
	d->regions_free++;
	STATS_INC(d->deletes);

	i = ri - d->r;
	for (;;) {
		d->r[i].p = NULL;
		d->r[i].size = 0;
		j = i;
		for (;;) {
			i = (i - 1) & mask;
			if (d->r[i].p == NULL)
				return;
			r = hash(d->r[i].p) & mask;
			if ((i <= r && r < j) || (r < j && j < i) ||
			    (j < i && i <= r))
				continue;
			d->r[j] = d->r[i];
			STATS_INC(d->delete_moves);
			break;
		}

	}
}
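
/*
 * The cyclic test above checks whether the natural hash slot r of
 * entry i lies between the freed slot j and i (scanning backwards
 * with wraparound).  Entries whose probe sequence does not cross the
 * new hole are skipped; the first one that does is moved into the
 * hole, and the scan restarts from its old position.
 */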

/*
 * Allocate a page of chunks
 */
static struct chunk_info *
omalloc_make_chunks(struct dir_info *d, int bits, int listnum)
{
	struct chunk_info *bp;
	void		*pp;
	int		i, k;

	/* Allocate a new bucket */
	pp = map(d, NULL, MALLOC_PAGESIZE, 0);
	if (pp == MAP_FAILED)
		return NULL;

	bp = alloc_chunk_info(d, bits);
	if (bp == NULL) {
		unmap(d, pp, MALLOC_PAGESIZE);
		return NULL;
	}

	/* memory protect the page allocated in the malloc(0) case */
	if (bits == 0) {
		bp->size = 0;
		bp->shift = 1;
		i = MALLOC_MINSIZE - 1;
		while (i >>= 1)
			bp->shift++;
		bp->total = bp->free = MALLOC_PAGESIZE >> bp->shift;
		bp->page = pp;

		k = mprotect(pp, MALLOC_PAGESIZE, PROT_NONE);
		if (k < 0) {
			unmap(d, pp, MALLOC_PAGESIZE);
			LIST_INSERT_HEAD(&d->chunk_info_list[0], bp, entries);
			return NULL;
		}
	} else {
		bp->size = 1U << bits;
		bp->shift = bits;
		bp->total = bp->free = MALLOC_PAGESIZE >> bits;
		bp->offset = howmany(bp->total, MALLOC_BITS);
		bp->page = pp;
	}

	/* set all valid bits in the bitmap */
	k = bp->total;
	i = 0;

	/* Do a bunch at a time */
	for (; (k - i) >= MALLOC_BITS; i += MALLOC_BITS)
		bp->bits[i / MALLOC_BITS] = (u_short)~0U;

	for (; i < k; i++)
		bp->bits[i / MALLOC_BITS] |= (u_short)1U << (i % MALLOC_BITS);

	LIST_INSERT_HEAD(&d->chunk_dir[bits][listnum], bp, entries);

	bits++;
	if ((uintptr_t)pp & bits)
		wrterror(d, "pp & bits %p", pp);

	insert(d, (void *)((uintptr_t)pp | bits), (uintptr_t)bp, NULL);
	return bp;
}
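
/*
 * Note: the page was inserted with the shift count plus one OR'ed
 * into its low bits (1 for the malloc(0) page), which is exactly what
 * REALSIZE() decodes when the region is looked up later.
 */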


/*
 * Allocate a chunk
 */
static void *
malloc_bytes(struct dir_info *d, size_t argsize, void *f)
{
	int		i, j, listnum;
	size_t		k, size;
	u_short		u, *lp;
	struct chunk_info *bp;

	if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)(uintptr_t)d) ||
	    d->canary1 != ~d->canary2)
		wrterror(d, "internal struct corrupt");

	size = argsize;

	/* Don't bother with anything less than this, */
	/* unless we have a malloc(0) request */
	if (size != 0 && size < MALLOC_MINSIZE)
		size = MALLOC_MINSIZE;

	/* Find the right bucket */
	if (size == 0)
		j = 0;
	else {
		j = MALLOC_MINSHIFT;
		i = (size - 1) >> (MALLOC_MINSHIFT - 1);
		while (i >>= 1)
			j++;
	}

	listnum = getrbyte(d) % MALLOC_CHUNK_LISTS;
	/* If it's empty, make a page more of chunks of that size */
	if ((bp = LIST_FIRST(&d->chunk_dir[j][listnum])) == NULL) {
		bp = omalloc_make_chunks(d, j, listnum);
		if (bp == NULL)
			return NULL;
	}

	if (bp->canary != d->canary1)
		wrterror(d, "chunk info corrupted");

	i = d->chunk_start;
	if (bp->free > 1)
		i += getrbyte(d);
	if (i >= bp->total)
		i &= bp->total - 1;
	for (;;) {
		for (;;) {
			lp = &bp->bits[i / MALLOC_BITS];
			if (!*lp) {
				i += MALLOC_BITS;
				i &= ~(MALLOC_BITS - 1);
				if (i >= bp->total)
					i = 0;
			} else
				break;
		}
		k = i % MALLOC_BITS;
		u = 1 << k;
		if (*lp & u)
			break;
		if (++i >= bp->total)
			i = 0;
	}
	d->chunk_start += i + 1;
#ifdef MALLOC_STATS
	if (i == 0) {
		struct region_info *r = find(d, bp->page);
		r->f = f;
	}
#endif

	*lp ^= u;

	/* If there are no more free chunks, remove from the free list */
	if (!--bp->free)
		LIST_REMOVE(bp, entries);

	/* Adjust to the real offset of that chunk */
	k += (lp - bp->bits) * MALLOC_BITS;

	if (mopts.chunk_canaries)
		bp->bits[bp->offset + k] = argsize;

	k <<= bp->shift;

	if (bp->size > 0) {
		if (mopts.malloc_junk == 2)
			memset((char *)bp->page + k, SOME_JUNK, bp->size);
		else if (mopts.chunk_canaries) {
			size_t sz = bp->size - argsize;

			if (sz > CHUNK_CHECK_LENGTH)
				sz = CHUNK_CHECK_LENGTH;
			memset((char *)bp->page + k + argsize, SOME_JUNK, sz);
		}
	}
	return ((char *)bp->page + k);
}

static void
validate_canary(struct dir_info *d, u_char *ptr, size_t sz, size_t allocated)
{
	size_t check_sz = allocated - sz;
	u_char *p, *q;

	if (check_sz > CHUNK_CHECK_LENGTH)
		check_sz = CHUNK_CHECK_LENGTH;
	p = ptr + sz;
	q = p + check_sz;

	while (p < q) {
		if (*p++ != SOME_JUNK) {
			wrterror(d, "chunk canary corrupted %p %#tx@%#zx",
			    ptr, p - ptr - 1, sz);
		}
	}
}

static uint32_t
find_chunknum(struct dir_info *d, struct region_info *r, void *ptr, int check)
{
	struct chunk_info *info;
	uint32_t chunknum;

	info = (struct chunk_info *)r->size;
	if (info->canary != d->canary1)
		wrterror(d, "chunk info corrupted");

	/* Find the chunk number on the page */
	chunknum = ((uintptr_t)ptr & MALLOC_PAGEMASK) >> info->shift;
	if (check && info->size > 0) {
		validate_canary(d, ptr, info->bits[info->offset + chunknum],
		    info->size);
	}

	if ((uintptr_t)ptr & ((1U << (info->shift)) - 1))
		wrterror(d, "modified chunk-pointer %p", ptr);
	if (info->bits[chunknum / MALLOC_BITS] &
	    (1U << (chunknum % MALLOC_BITS)))
		wrterror(d, "chunk is already free %p", ptr);
	return chunknum;
}

/*
 * Free a chunk, and possibly the page it's on, if the page becomes empty.
 */
static void
free_bytes(struct dir_info *d, struct region_info *r, void *ptr)
{
	struct chunk_head *mp;
	struct chunk_info *info;
	uint32_t chunknum;
	int listnum;

	info = (struct chunk_info *)r->size;
	chunknum = find_chunknum(d, r, ptr, 0);

	info->bits[chunknum / MALLOC_BITS] |= 1U << (chunknum % MALLOC_BITS);
	info->free++;

	if (info->free == 1) {
		/* Page became non-full */
		listnum = getrbyte(d) % MALLOC_CHUNK_LISTS;
		if (info->size != 0)
			mp = &d->chunk_dir[info->shift][listnum];
		else
			mp = &d->chunk_dir[0][listnum];

		LIST_INSERT_HEAD(mp, info, entries);
		return;
	}

	if (info->free != info->total)
		return;

	LIST_REMOVE(info, entries);

	if (info->size == 0 && !mopts.malloc_freeunmap)
		mprotect(info->page, MALLOC_PAGESIZE, PROT_READ | PROT_WRITE);
	unmap(d, info->page, MALLOC_PAGESIZE);

	delete(d, r);
	if (info->size != 0)
		mp = &d->chunk_info_list[info->shift];
	else
		mp = &d->chunk_info_list[0];
	LIST_INSERT_HEAD(mp, info, entries);
}



static void *
omalloc(struct dir_info *pool, size_t sz, int zero_fill, void *f)
{
	void *p;
	size_t psz;

	if (sz > MALLOC_MAXCHUNK) {
		if (sz >= SIZE_MAX - mopts.malloc_guard - MALLOC_PAGESIZE) {
			errno = ENOMEM;
			return NULL;
		}
		sz += mopts.malloc_guard;
		psz = PAGEROUND(sz);
		p = map(pool, NULL, psz, zero_fill);
		if (p == MAP_FAILED) {
			errno = ENOMEM;
			return NULL;
		}
		if (insert(pool, p, sz, f)) {
			unmap(pool, p, psz);
			errno = ENOMEM;
			return NULL;
		}
		if (mopts.malloc_guard) {
			if (mprotect((char *)p + psz - mopts.malloc_guard,
			    mopts.malloc_guard, PROT_NONE))
				wrterror(pool, "mprotect");
			STATS_ADD(pool->malloc_guarded, mopts.malloc_guard);
		}

		if (sz - mopts.malloc_guard < MALLOC_PAGESIZE - MALLOC_LEEWAY) {
			/* fill whole allocation */
			if (mopts.malloc_junk == 2)
				memset(p, SOME_JUNK, psz - mopts.malloc_guard);
			/* shift towards the end */
			p = ((char *)p) + ((MALLOC_PAGESIZE - MALLOC_LEEWAY -
			    (sz - mopts.malloc_guard)) & ~(MALLOC_MINSIZE-1));
			/* re-zero if needed; the junk fill above overwrote it */
			if (zero_fill && mopts.malloc_junk == 2)
				memset(p, 0, sz - mopts.malloc_guard);
		} else {
			if (mopts.malloc_junk == 2) {
				if (zero_fill)
					memset((char *)p + sz - mopts.malloc_guard,
					    SOME_JUNK, psz - sz);
				else
					memset(p, SOME_JUNK,
					    psz - mopts.malloc_guard);
			}
			else if (mopts.chunk_canaries) {
				size_t csz = psz - sz;

				if (csz > CHUNK_CHECK_LENGTH)
					csz = CHUNK_CHECK_LENGTH;
				memset((char *)p + sz - mopts.malloc_guard,
				    SOME_JUNK, csz);
			}
		}

	} else {
		/* takes care of SOME_JUNK */
		p = malloc_bytes(pool, sz, f);
		if (zero_fill && p != NULL && sz > 0)
			memset(p, 0, sz);
	}

	return p;
}

/*
 * Common function for handling recursion.  Only
 * print the error message once, to avoid making the problem
 * potentially worse.
 */
static void
malloc_recurse(struct dir_info *d)
{
	static int noprint;

	if (noprint == 0) {
		noprint = 1;
		wrterror(d, "recursive call");
	}
	d->active--;
	_MALLOC_UNLOCK(d->mutex);
	errno = EDEADLK;
}

void
_malloc_init(int from_rthreads)
{
	int i, max;
	struct dir_info *d;

	_MALLOC_LOCK(0);
	if (!from_rthreads && mopts.malloc_pool[0]) {
		_MALLOC_UNLOCK(0);
		return;
	}
	if (!mopts.malloc_canary)
		omalloc_init();

	max = from_rthreads ? _MALLOC_MUTEXES : 1;
	if (((uintptr_t)&malloc_readonly & MALLOC_PAGEMASK) == 0)
		mprotect(&malloc_readonly, sizeof(malloc_readonly),
		     PROT_READ | PROT_WRITE);
	for (i = 0; i < max; i++) {
		if (mopts.malloc_pool[i])
			continue;
		omalloc_poolinit(&d);
		d->mutex = i;
		mopts.malloc_pool[i] = d;
	}

	if (from_rthreads)
		mopts.malloc_mt = 1;

	/*
	 * Options have been set and will never be reset.
	 * Prevent further tampering with them.
	 */
	if (((uintptr_t)&malloc_readonly & MALLOC_PAGEMASK) == 0)
		mprotect(&malloc_readonly, sizeof(malloc_readonly), PROT_READ);
	_MALLOC_UNLOCK(0);
}
DEF_STRONG(_malloc_init);

void *
malloc(size_t size)
{
	void *r;
	struct dir_info *d;
	int saved_errno = errno;

	d = getpool();
	if (d == NULL) {
		_malloc_init(0);
		d = getpool();
	}
	_MALLOC_LOCK(d->mutex);
	d->func = "malloc";

	if (d->active++) {
		malloc_recurse(d);
		return NULL;
	}
	r = omalloc(d, size, 0, CALLER);
	d->active--;
	_MALLOC_UNLOCK(d->mutex);
	if (r == NULL && mopts.malloc_xmalloc)
		wrterror(d, "out of memory");
	if (r != NULL)
		errno = saved_errno;
	return r;
}
/*DEF_STRONG(malloc);*/

static void
validate_junk(struct dir_info *pool, void *p)
{
	struct region_info *r;
	size_t byte, sz;

	if (p == NULL)
		return;
	r = find(pool, p);
	if (r == NULL)
		wrterror(pool, "bogus pointer in validate_junk %p", p);
	REALSIZE(sz, r);
	if (sz > CHUNK_CHECK_LENGTH)
		sz = CHUNK_CHECK_LENGTH;
	for (byte = 0; byte < sz; byte++) {
		if (((unsigned char *)p)[byte] != SOME_FREEJUNK)
			wrterror(pool, "use after free %p", p);
	}
}

static void
ofree(struct dir_info *argpool, void *p)
{
	struct dir_info *pool;
	struct region_info *r;
	size_t sz;
	int i;

	pool = argpool;
	r = find(pool, p);
	if (r == NULL) {
		if (mopts.malloc_mt) {
			for (i = 0; i < _MALLOC_MUTEXES; i++) {
				if (i == argpool->mutex)
					continue;
				pool->active--;
				_MALLOC_UNLOCK(pool->mutex);
				pool = mopts.malloc_pool[i];
				_MALLOC_LOCK(pool->mutex);
				pool->active++;
				r = find(pool, p);
				if (r != NULL)
					break;
			}
		}
		if (r == NULL)
			wrterror(pool, "bogus pointer (double free?) %p", p);
	}

	REALSIZE(sz, r);
	if (sz > MALLOC_MAXCHUNK) {
		if (sz - mopts.malloc_guard >= MALLOC_PAGESIZE -
		    MALLOC_LEEWAY) {
			if (r->p != p)
				wrterror(pool, "bogus pointer %p", p);
			if (mopts.chunk_canaries)
				validate_canary(pool, p,
				    sz - mopts.malloc_guard,
				    PAGEROUND(sz - mopts.malloc_guard));
		} else {
#if notyetbecause_of_realloc
			/* shifted towards the end */
			if (p != ((char *)r->p) + ((MALLOC_PAGESIZE -
			    MALLOC_MINSIZE - sz - mopts.malloc_guard) &
			    ~(MALLOC_MINSIZE-1))) {
			}
#endif
			p = r->p;
		}
		if (mopts.malloc_guard) {
			if (sz < mopts.malloc_guard)
				wrterror(pool, "guard size");
			if (!mopts.malloc_freeunmap) {
				if (mprotect((char *)p + PAGEROUND(sz) -
				    mopts.malloc_guard, mopts.malloc_guard,
				    PROT_READ | PROT_WRITE))
					wrterror(pool, "mprotect");
			}
			STATS_SUB(pool->malloc_guarded, mopts.malloc_guard);
		}
		unmap(pool, p, PAGEROUND(sz));
		delete(pool, r);
	} else {
		void *tmp;
		int i;

		/* Delayed free or canaries? Extra check */
		if (!mopts.malloc_freenow || mopts.chunk_canaries)
			find_chunknum(pool, r, p, mopts.chunk_canaries);
		if (!mopts.malloc_freenow) {
			if (mopts.malloc_junk && sz > 0)
				memset(p, SOME_FREEJUNK, sz);
			i = getrbyte(pool) & MALLOC_DELAYED_CHUNK_MASK;
			tmp = p;
			p = pool->delayed_chunks[i];
			if (tmp == p)
				wrterror(pool, "double free %p", tmp);
			if (mopts.malloc_junk)
				validate_junk(pool, p);
			pool->delayed_chunks[i] = tmp;
		} else {
			if (mopts.malloc_junk && sz > 0)
				memset(p, SOME_FREEJUNK, sz);
		}
		if (p != NULL) {
			r = find(pool, p);
			if (r == NULL)
				wrterror(pool,
				    "bogus pointer (double free?) %p", p);
			free_bytes(pool, r, p);
		}
	}

	if (argpool != pool) {
		pool->active--;
		_MALLOC_UNLOCK(pool->mutex);
		_MALLOC_LOCK(argpool->mutex);
		argpool->active++;
	}
}

void
free(void *ptr)
{
	struct dir_info *d;
	int saved_errno = errno;

	/* This is legal. */
	if (ptr == NULL)
		return;

	d = getpool();
	if (d == NULL)
		wrterror(d, "free() called before allocation");
	_MALLOC_LOCK(d->mutex);
	d->func = "free";
	if (d->active++) {
		malloc_recurse(d);
		return;
	}
	ofree(d, ptr);
	d->active--;
	_MALLOC_UNLOCK(d->mutex);
	errno = saved_errno;
}
/*DEF_STRONG(free);*/


static void *
orealloc(struct dir_info *argpool, void *p, size_t newsz, void *f)
{
	struct dir_info *pool;
	struct region_info *r;
	size_t oldsz, goldsz, gnewsz;
	void *q, *ret;
	int i;

	pool = argpool;

	if (p == NULL)
		return omalloc(pool, newsz, 0, f);

	r = find(pool, p);
	if (r == NULL) {
		if (mopts.malloc_mt) {
			for (i = 0; i < _MALLOC_MUTEXES; i++) {
				if (i == argpool->mutex)
					continue;
				pool->active--;
				_MALLOC_UNLOCK(pool->mutex);
				pool = mopts.malloc_pool[i];
				_MALLOC_LOCK(pool->mutex);
				pool->active++;
				r = find(pool, p);
				if (r != NULL)
					break;
			}
		}
		if (r == NULL)
			wrterror(pool, "bogus pointer (double free?) %p", p);
	}
	if (newsz >= SIZE_MAX - mopts.malloc_guard - MALLOC_PAGESIZE) {
		errno = ENOMEM;
		ret = NULL;
		goto done;
	}

	REALSIZE(oldsz, r);
	goldsz = oldsz;
	if (oldsz > MALLOC_MAXCHUNK) {
		if (oldsz < mopts.malloc_guard)
			wrterror(pool, "guard size");
		oldsz -= mopts.malloc_guard;
	}

	gnewsz = newsz;
	if (gnewsz > MALLOC_MAXCHUNK)
		gnewsz += mopts.malloc_guard;

	if (newsz > MALLOC_MAXCHUNK && oldsz > MALLOC_MAXCHUNK && p == r->p &&
	    !mopts.chunk_canaries && !mopts.malloc_realloc) {
		size_t roldsz = PAGEROUND(goldsz);
		size_t rnewsz = PAGEROUND(gnewsz);

		if (rnewsz > roldsz) {
			if (!mopts.malloc_guard) {
				void *hint = (char *)p + roldsz;
				size_t needed = rnewsz - roldsz;

				STATS_INC(pool->cheap_realloc_tries);
				q = map(pool, hint, needed, 0);
				if (q == hint)
					goto gotit;
				zapcacheregion(pool, hint, needed);
				q = MQUERY(hint, needed);
				if (q == hint)
					q = MMAPA(hint, needed);
				else
					q = MAP_FAILED;
				if (q == hint) {
gotit:
					STATS_ADD(pool->malloc_used, needed);
					if (mopts.malloc_junk == 2)
						memset(q, SOME_JUNK, needed);
					r->size = newsz;
					STATS_SETF(r, f);
					STATS_INC(pool->cheap_reallocs);
					ret = p;
					goto done;
				} else if (q != MAP_FAILED) {
					if (munmap(q, needed))
						wrterror(pool, "munmap %p", q);
				}
			}
		} else if (rnewsz < roldsz) {
			if (mopts.malloc_guard) {
				if (mprotect((char *)p + roldsz -
				    mopts.malloc_guard, mopts.malloc_guard,
				    PROT_READ | PROT_WRITE))
					wrterror(pool, "mprotect");
				if (mprotect((char *)p + rnewsz -
				    mopts.malloc_guard, mopts.malloc_guard,
				    PROT_NONE))
					wrterror(pool, "mprotect");
			}
			unmap(pool, (char *)p + rnewsz, roldsz - rnewsz);
			r->size = gnewsz;
			STATS_SETF(r, f);
			ret = p;
			goto done;
		} else {
			if (newsz > oldsz && mopts.malloc_junk == 2)
				memset((char *)p + newsz, SOME_JUNK,
				    rnewsz - mopts.malloc_guard - newsz);
			r->size = gnewsz;
			STATS_SETF(r, f);
			ret = p;
			goto done;
		}
	}
	if (newsz <= oldsz && newsz > oldsz / 2 && !mopts.chunk_canaries &&
	    !mopts.malloc_realloc) {
		if (mopts.malloc_junk == 2 && newsz > 0)
			memset((char *)p + newsz, SOME_JUNK, oldsz - newsz);
		STATS_SETF(r, f);
		ret = p;
	} else if (newsz != oldsz || mopts.chunk_canaries ||
	    mopts.malloc_realloc) {
		q = omalloc(pool, newsz, 0, f);
		if (q == NULL) {
			ret = NULL;
			goto done;
		}
		if (newsz != 0 && oldsz != 0)
			memcpy(q, p, oldsz < newsz ? oldsz : newsz);
		ofree(pool, p);
		ret = q;
	} else {
		STATS_SETF(r, f);
		ret = p;
	}
done:
	if (argpool != pool) {
		pool->active--;
		_MALLOC_UNLOCK(pool->mutex);
		_MALLOC_LOCK(argpool->mutex);
		argpool->active++;
	}
	return ret;
}

void *
realloc(void *ptr, size_t size)
{
	struct dir_info *d;
	void *r;
	int saved_errno = errno;

	d = getpool();
	if (d == NULL) {
		_malloc_init(0);
		d = getpool();
	}
	_MALLOC_LOCK(d->mutex);
	d->func = "realloc";
	if (d->active++) {
		malloc_recurse(d);
		return NULL;
	}
	r = orealloc(d, ptr, size, CALLER);

	d->active--;
	_MALLOC_UNLOCK(d->mutex);
	if (r == NULL && mopts.malloc_xmalloc)
		wrterror(d, "out of memory");
	if (r != NULL)
		errno = saved_errno;
	return r;
}
/*DEF_STRONG(realloc);*/


/*
 * This is sqrt(SIZE_MAX+1), as s1*s2 <= SIZE_MAX
 * if both s1 < MUL_NO_OVERFLOW and s2 < MUL_NO_OVERFLOW
 */
#define MUL_NO_OVERFLOW	(1UL << (sizeof(size_t) * 4))
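
/*
 * Worked example (assuming 64-bit size_t): MUL_NO_OVERFLOW is
 * 1UL << 32, so if both nmemb and size are below it their product
 * cannot overflow; only when one operand is large does calloc() pay
 * for the division test SIZE_MAX / nmemb < size.
 */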

void *
calloc(size_t nmemb, size_t size)
{
	struct dir_info *d;
	void *r;
	int saved_errno = errno;

	d = getpool();
	if (d == NULL) {
		_malloc_init(0);
		d = getpool();
	}
	_MALLOC_LOCK(d->mutex);
	d->func = "calloc";
	if ((nmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
	    nmemb > 0 && SIZE_MAX / nmemb < size) {
		_MALLOC_UNLOCK(d->mutex);
		if (mopts.malloc_xmalloc)
			wrterror(d, "out of memory");
		errno = ENOMEM;
		return NULL;
	}

	if (d->active++) {
		malloc_recurse(d);
		return NULL;
	}

	size *= nmemb;
	r = omalloc(d, size, 1, CALLER);

	d->active--;
	_MALLOC_UNLOCK(d->mutex);
	if (r == NULL && mopts.malloc_xmalloc)
		wrterror(d, "out of memory");
	if (r != NULL)
		errno = saved_errno;
	return r;
}
/*DEF_STRONG(calloc);*/

static void *
mapalign(struct dir_info *d, size_t alignment, size_t sz, int zero_fill)
{
	char *p, *q;

	if (alignment < MALLOC_PAGESIZE || ((alignment - 1) & alignment) != 0)
		wrterror(d, "mapalign bad alignment");
	if (sz != PAGEROUND(sz))
		wrterror(d, "mapalign round");

	/* Allocate sz + alignment bytes of memory, which must include a
	 * subrange of size bytes that is properly aligned.  Unmap the
	 * other bytes, and then return that subrange.
	 */

	/* We need sz + alignment to fit into a size_t. */
	if (alignment > SIZE_MAX - sz)
		return MAP_FAILED;

	p = map(d, NULL, sz + alignment, zero_fill);
	if (p == MAP_FAILED)
		return MAP_FAILED;
	q = (char *)(((uintptr_t)p + alignment - 1) & ~(alignment - 1));
	if (q != p) {
		if (munmap(p, q - p))
			wrterror(d, "munmap %p", p);
	}
	if (munmap(q + sz, alignment - (q - p)))
		wrterror(d, "munmap %p", q + sz);
	STATS_SUB(d->malloc_used, alignment);

	return q;
}
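
/*
 * E.g. a request with 64K alignment on 4K pages maps sz + 64K bytes,
 * rounds p up to the next 64K boundary q, then unmaps the [p, q) head
 * and the tail past q + sz, leaving an aligned region of exactly sz
 * bytes.
 */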

static void *
omemalign(struct dir_info *pool, size_t alignment, size_t sz, int zero_fill, void *f)
{
	size_t psz;
	void *p;

	if (alignment <= MALLOC_PAGESIZE) {
		/*
		 * max(size, alignment) is enough to assure the requested alignment,
		 * since the allocator always allocates power-of-two blocks.
		 */
		if (sz < alignment)
			sz = alignment;
		return omalloc(pool, sz, zero_fill, f);
	}

	if (sz >= SIZE_MAX - mopts.malloc_guard - MALLOC_PAGESIZE) {
		errno = ENOMEM;
		return NULL;
	}

	sz += mopts.malloc_guard;
	psz = PAGEROUND(sz);

	p = mapalign(pool, alignment, psz, zero_fill);
	if (p == NULL) {
		errno = ENOMEM;
		return NULL;
	}

	if (insert(pool, p, sz, f)) {
		unmap(pool, p, psz);
		errno = ENOMEM;
		return NULL;
	}

	if (mopts.malloc_guard) {
		if (mprotect((char *)p + psz - mopts.malloc_guard,
		    mopts.malloc_guard, PROT_NONE))
			wrterror(pool, "mprotect");
		STATS_ADD(pool->malloc_guarded, mopts.malloc_guard);
	}

	if (mopts.malloc_junk == 2) {
		if (zero_fill)
			memset((char *)p + sz - mopts.malloc_guard,
			    SOME_JUNK, psz - sz);
		else
			memset(p, SOME_JUNK, psz - mopts.malloc_guard);
	}

	return p;
}

int
posix_memalign(void **memptr, size_t alignment, size_t size)
{
	struct dir_info *d;
	int res, saved_errno = errno;
	void *r;

	/* Make sure that alignment is a large enough power of 2. */
	if (((alignment - 1) & alignment) != 0 || alignment < sizeof(void *))
		return EINVAL;

	d = getpool();
	if (d == NULL) {
		_malloc_init(0);
		d = getpool();
	}
	_MALLOC_LOCK(d->mutex);
	d->func = "posix_memalign";
	if (d->active++) {
		malloc_recurse(d);
		goto err;
	}
	r = omemalign(d, alignment, size, 0, CALLER);
	d->active--;
	_MALLOC_UNLOCK(d->mutex);
	if (r == NULL) {
		if (mopts.malloc_xmalloc)
			wrterror(d, "out of memory");
		goto err;
	}
	errno = saved_errno;
	*memptr = r;
	return 0;

err:
	res = errno;
	errno = saved_errno;
	return res;
}
/*DEF_STRONG(posix_memalign);*/

#ifdef MALLOC_STATS

struct malloc_leak {
	void (*f)();
	size_t total_size;
	int count;
};

struct leaknode {
	RB_ENTRY(leaknode) entry;
	struct malloc_leak d;
};

static int
leakcmp(struct leaknode *e1, struct leaknode *e2)
{
	return e1->d.f < e2->d.f ? -1 : e1->d.f > e2->d.f;
}

static RB_HEAD(leaktree, leaknode) leakhead;
RB_GENERATE_STATIC(leaktree, leaknode, entry, leakcmp)

static void
putleakinfo(void *f, size_t sz, int cnt)
{
	struct leaknode key, *p;
	static struct leaknode *page;
	static int used;

	if (cnt == 0 || page == MAP_FAILED)
		return;

	key.d.f = f;
	p = RB_FIND(leaktree, &leakhead, &key);
	if (p == NULL) {
		if (page == NULL ||
		    used >= MALLOC_PAGESIZE / sizeof(struct leaknode)) {
			page = MMAP(MALLOC_PAGESIZE);
			if (page == MAP_FAILED)
				return;
			used = 0;
		}
		p = &page[used++];
		p->d.f = f;
		p->d.total_size = sz * cnt;
		p->d.count = cnt;
		RB_INSERT(leaktree, &leakhead, p);
	} else {
		p->d.total_size += sz * cnt;
		p->d.count += cnt;
	}
}

static struct malloc_leak *malloc_leaks;

static void
writestr(int fd, const char *p)
{
	write(fd, p, strlen(p));
}

static void
dump_leaks(int fd)
{
	struct leaknode *p;
	char buf[64];
	int i = 0;

	writestr(fd, "Leak report\n");
	writestr(fd, "                 f     sum      #    avg\n");
	/* XXX only one page of summary */
	if (malloc_leaks == NULL)
		malloc_leaks = MMAP(MALLOC_PAGESIZE);
	if (malloc_leaks != MAP_FAILED)
		memset(malloc_leaks, 0, MALLOC_PAGESIZE);
	RB_FOREACH(p, leaktree, &leakhead) {
		snprintf(buf, sizeof(buf), "%18p %7zu %6u %6zu\n", p->d.f,
		    p->d.total_size, p->d.count, p->d.total_size / p->d.count);
		write(fd, buf, strlen(buf));
		if (malloc_leaks == MAP_FAILED ||
		    i >= MALLOC_PAGESIZE / sizeof(struct malloc_leak))
			continue;
		malloc_leaks[i].f = p->d.f;
		malloc_leaks[i].total_size = p->d.total_size;
		malloc_leaks[i].count = p->d.count;
		i++;
	}
}

static void
dump_chunk(int fd, struct chunk_info *p, void *f, int fromfreelist)
{
	char buf[64];

	while (p != NULL) {
		snprintf(buf, sizeof(buf), "chunk %18p %18p %4d %d/%d\n",
		    p->page, ((p->bits[0] & 1) ? NULL : f),
		    p->size, p->free, p->total);
		write(fd, buf, strlen(buf));
		if (!fromfreelist) {
			if (p->bits[0] & 1)
				putleakinfo(NULL, p->size, p->total - p->free);
			else {
				putleakinfo(f, p->size, 1);
				putleakinfo(NULL, p->size,
				    p->total - p->free - 1);
			}
			break;
		}
		p = LIST_NEXT(p, entries);
		if (p != NULL)
			writestr(fd, "        ");
	}
}

static void
dump_free_chunk_info(int fd, struct dir_info *d)
{
	char buf[64];
	int i, j, count;
	struct chunk_info *p;

	writestr(fd, "Free chunk structs:\n");
	for (i = 0; i <= MALLOC_MAXSHIFT; i++) {
		count = 0;
		LIST_FOREACH(p, &d->chunk_info_list[i], entries)
			count++;
		for (j = 0; j < MALLOC_CHUNK_LISTS; j++) {
			p = LIST_FIRST(&d->chunk_dir[i][j]);
			if (p == NULL && count == 0)
				continue;
			snprintf(buf, sizeof(buf), "%2d) %3d ", i, count);
			write(fd, buf, strlen(buf));
			if (p != NULL)
				dump_chunk(fd, p, NULL, 1);
			else
				write(fd, "\n", 1);
		}
	}

}

static void
dump_free_page_info(int fd, struct dir_info *d)
{
	char buf[64];
	int i;

	snprintf(buf, sizeof(buf), "Free pages cached: %zu\n",
	    d->free_regions_size);
	write(fd, buf, strlen(buf));
	for (i = 0; i < mopts.malloc_cache; i++) {
		if (d->free_regions[i].p != NULL) {
			snprintf(buf, sizeof(buf), "%2d) ", i);
			write(fd, buf, strlen(buf));
			snprintf(buf, sizeof(buf), "free at %p: %zu\n",
			    d->free_regions[i].p, d->free_regions[i].size);
			write(fd, buf, strlen(buf));
		}
	}
}

static void
malloc_dump1(int fd, int poolno, struct dir_info *d)
{
	char buf[100];
	size_t i, realsize;

	snprintf(buf, sizeof(buf), "Malloc dir of %s pool %d at %p\n", __progname, poolno, d);
	write(fd, buf, strlen(buf));
	if (d == NULL)
		return;
	snprintf(buf, sizeof(buf), "Region slots free %zu/%zu\n",
		d->regions_free, d->regions_total);
	write(fd, buf, strlen(buf));
	snprintf(buf, sizeof(buf), "Finds %zu/%zu\n", d->finds,
	    d->find_collisions);
	write(fd, buf, strlen(buf));
	snprintf(buf, sizeof(buf), "Inserts %zu/%zu\n", d->inserts,
	    d->insert_collisions);
	write(fd, buf, strlen(buf));
	snprintf(buf, sizeof(buf), "Deletes %zu/%zu\n", d->deletes,
	    d->delete_moves);
	write(fd, buf, strlen(buf));
	snprintf(buf, sizeof(buf), "Cheap reallocs %zu/%zu\n",
	    d->cheap_reallocs, d->cheap_realloc_tries);
	write(fd, buf, strlen(buf));
	snprintf(buf, sizeof(buf), "In use %zu\n", d->malloc_used);
	write(fd, buf, strlen(buf));
	snprintf(buf, sizeof(buf), "Guarded %zu\n", d->malloc_guarded);
	write(fd, buf, strlen(buf));
	dump_free_chunk_info(fd, d);
	dump_free_page_info(fd, d);
	writestr(fd,
	    "slot)  hash d  type               page                  f size [free/n]\n");
	for (i = 0; i < d->regions_total; i++) {
		if (d->r[i].p != NULL) {
			size_t h = hash(d->r[i].p) &
			    (d->regions_total - 1);
			snprintf(buf, sizeof(buf), "%4zx) #%4zx %zd ",
			    i, h, h - i);
			write(fd, buf, strlen(buf));
			REALSIZE(realsize, &d->r[i]);
			if (realsize > MALLOC_MAXCHUNK) {
				putleakinfo(d->r[i].f, realsize, 1);
				snprintf(buf, sizeof(buf),
				    "pages %18p %18p %zu\n", d->r[i].p,
				    d->r[i].f, realsize);
				write(fd, buf, strlen(buf));
			} else
				dump_chunk(fd,
				    (struct chunk_info *)d->r[i].size,
				    d->r[i].f, 0);
		}
	}
	dump_leaks(fd);
	write(fd, "\n", 1);
}

void
malloc_dump(int fd, int poolno, struct dir_info *pool)
{
	int i;
	void *p;
	struct region_info *r;
	int saved_errno = errno;

	if (pool == NULL)
		return;
	for (i = 0; i < MALLOC_DELAYED_CHUNK_MASK + 1; i++) {
		p = pool->delayed_chunks[i];
		if (p == NULL)
			continue;
		r = find(pool, p);
		if (r == NULL)
			wrterror(pool, "bogus pointer in malloc_dump %p", p);
		free_bytes(pool, r, p);
		pool->delayed_chunks[i] = NULL;
	}
	/* XXX leak when run multiple times */
	RB_INIT(&leakhead);
	malloc_dump1(fd, poolno, pool);
	errno = saved_errno;
}
DEF_WEAK(malloc_dump);

static void
malloc_exit(void)
{
	static const char q[] = "malloc() warning: Couldn't dump stats\n";
	int save_errno = errno, fd, i;
	char buf[100];

	fd = open("malloc.out", O_RDWR|O_APPEND);
	if (fd != -1) {
		snprintf(buf, sizeof(buf), "******** Start dump %s *******\n",
		     __progname);
		write(fd, buf, strlen(buf));
		snprintf(buf, sizeof(buf),
		    "MT=%d F=%d U=%d J=%d R=%d X=%d C=%d cache=%u G=%zu\n",
		    mopts.malloc_mt, mopts.malloc_freenow,
		    mopts.malloc_freeunmap, mopts.malloc_junk,
		    mopts.malloc_realloc, mopts.malloc_xmalloc,
		    mopts.chunk_canaries, mopts.malloc_cache,
		    mopts.malloc_guard);
		write(fd, buf, strlen(buf));

		for (i = 0; i < _MALLOC_MUTEXES; i++)
			malloc_dump(fd, i, mopts.malloc_pool[i]);
		snprintf(buf, sizeof(buf), "******** End dump %s *******\n",
		    __progname);
		write(fd, buf, strlen(buf));
		close(fd);
	} else
		write(STDERR_FILENO, q, sizeof(q) - 1);
	errno = save_errno;
}

#endif /* MALLOC_STATS */