xref: /openbsd-src/lib/libc/stdlib/malloc.c (revision 03adc85b7600a1f8f04886b8321c1c1c0c4933d4)
1 /*	$OpenBSD: malloc.c,v 1.212 2017/01/21 07:47:42 otto Exp $	*/
2 /*
3  * Copyright (c) 2008, 2010, 2011, 2016 Otto Moerbeek <otto@drijf.net>
4  * Copyright (c) 2012 Matthew Dempsky <matthew@openbsd.org>
5  * Copyright (c) 2008 Damien Miller <djm@openbsd.org>
6  * Copyright (c) 2000 Poul-Henning Kamp <phk@FreeBSD.org>
7  *
8  * Permission to use, copy, modify, and distribute this software for any
9  * purpose with or without fee is hereby granted, provided that the above
10  * copyright notice and this permission notice appear in all copies.
11  *
12  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19  */
20 
21 /*
22  * If we meet some day, and you think this stuff is worth it, you
23  * can buy me a beer in return. Poul-Henning Kamp
24  */
25 
26 /* #define MALLOC_STATS */
27 
28 #include <sys/types.h>
29 #include <sys/param.h>	/* PAGE_SHIFT ALIGN */
30 #include <sys/queue.h>
31 #include <sys/mman.h>
32 #include <sys/uio.h>
33 #include <errno.h>
34 #include <stdarg.h>
35 #include <stdint.h>
36 #include <stdlib.h>
37 #include <string.h>
38 #include <stdio.h>
39 #include <unistd.h>
40 
41 #ifdef MALLOC_STATS
42 #include <sys/tree.h>
43 #include <fcntl.h>
44 #endif
45 
46 #include "thread_private.h"
47 #include <tib.h>
48 
49 #if defined(__mips64__)
50 #define MALLOC_PAGESHIFT	(14U)
51 #else
52 #define MALLOC_PAGESHIFT	(PAGE_SHIFT)
53 #endif
54 
55 #define MALLOC_MINSHIFT		4
56 #define MALLOC_MAXSHIFT		(MALLOC_PAGESHIFT - 1)
57 #define MALLOC_PAGESIZE		(1UL << MALLOC_PAGESHIFT)
58 #define MALLOC_MINSIZE		(1UL << MALLOC_MINSHIFT)
59 #define MALLOC_PAGEMASK		(MALLOC_PAGESIZE - 1)
60 #define MASK_POINTER(p)		((void *)(((uintptr_t)(p)) & ~MALLOC_PAGEMASK))
61 
62 #define MALLOC_MAXCHUNK		(1 << MALLOC_MAXSHIFT)
63 #define MALLOC_MAXCACHE		256
64 #define MALLOC_DELAYED_CHUNK_MASK	15
65 #define MALLOC_INITIAL_REGIONS	512
66 #define MALLOC_DEFAULT_CACHE	64
67 #define MALLOC_CHUNK_LISTS	4
68 #define CHUNK_CHECK_LENGTH	32
69 
70 /*
71  * Allocations between half a page and a whole page in size are moved
72  * towards the end of the page, subject to alignment constraints.  This
73  * is the extra headroom we allow; set it to zero to be the most strict.
74  */
75 #define MALLOC_LEEWAY		0
76 #define MALLOC_MOVE_COND(sz)	((sz) - mopts.malloc_guard < 		\
77 				    MALLOC_PAGESIZE - MALLOC_LEEWAY)
78 #define MALLOC_MOVE(p, sz)  	(((char *)(p)) +			\
79 				    ((MALLOC_PAGESIZE - MALLOC_LEEWAY -	\
80 			    	    ((sz) - mopts.malloc_guard)) & 	\
81 				    ~(MALLOC_MINSIZE - 1)))
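
/*
 * For illustration, assuming MALLOC_PAGESIZE is 4096 and malloc_guard is 0:
 * a request of sz = 3000 satisfies MALLOC_MOVE_COND, and MALLOC_MOVE(p, 3000)
 * returns p + ((4096 - 0 - 3000) & ~15) = p + 1088, so the allocation ends
 * 8 bytes short of the page end (the slack comes from rounding the offset
 * down to MALLOC_MINSIZE alignment).
 */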
82 
83 #define PAGEROUND(x)  (((x) + (MALLOC_PAGEMASK)) & ~MALLOC_PAGEMASK)
84 
85 /*
86  * What to use for junk.  These are the byte values we fill memory with
87  * when the 'J' option is enabled: SOME_JUNK right after allocation,
88  * and SOME_FREEJUNK right before freeing.
89  */
90 #define SOME_JUNK		0xdb	/* deadbeef */
91 #define SOME_FREEJUNK		0xdf	/* dead, free */
92 
93 #define MMAP(sz)	mmap(NULL, (sz), PROT_READ | PROT_WRITE, \
94     MAP_ANON | MAP_PRIVATE, -1, 0)
95 
96 #define MMAPA(a,sz)	mmap((a), (sz), PROT_READ | PROT_WRITE, \
97     MAP_ANON | MAP_PRIVATE, -1, 0)
98 
99 #define MQUERY(a, sz)	mquery((a), (sz), PROT_READ | PROT_WRITE, \
100     MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0)
101 
102 struct region_info {
103 	void *p;		/* page; low bits used to mark chunks */
104 	uintptr_t size;		/* size for pages, or chunk_info pointer */
105 #ifdef MALLOC_STATS
106 	void *f;		/* where allocated from */
107 #endif
108 };
109 
110 LIST_HEAD(chunk_head, chunk_info);
111 
112 struct dir_info {
113 	u_int32_t canary1;
114 	int active;			/* status of malloc */
115 	struct region_info *r;		/* region slots */
116 	size_t regions_total;		/* number of region slots */
117 	size_t regions_free;		/* number of free slots */
118 					/* lists of free chunk info structs */
119 	struct chunk_head chunk_info_list[MALLOC_MAXSHIFT + 1];
120 					/* lists of chunks with free slots */
121 	struct chunk_head chunk_dir[MALLOC_MAXSHIFT + 1][MALLOC_CHUNK_LISTS];
122 	size_t free_regions_size;	/* free pages cached */
123 					/* free pages cache */
124 	struct region_info free_regions[MALLOC_MAXCACHE];
125 					/* delayed free chunk slots */
126 	void *delayed_chunks[MALLOC_DELAYED_CHUNK_MASK + 1];
127 	size_t rbytesused;		/* random bytes used */
128 	char *func;			/* current function */
129 	int mutex;
130 	u_char rbytes[32];		/* random bytes */
131 	u_short chunk_start;
132 #ifdef MALLOC_STATS
133 	size_t inserts;
134 	size_t insert_collisions;
135 	size_t finds;
136 	size_t find_collisions;
137 	size_t deletes;
138 	size_t delete_moves;
139 	size_t cheap_realloc_tries;
140 	size_t cheap_reallocs;
141 	size_t malloc_used;		/* bytes allocated */
142 	size_t malloc_guarded;		/* bytes used for guards */
143 #define STATS_ADD(x,y)	((x) += (y))
144 #define STATS_SUB(x,y)	((x) -= (y))
145 #define STATS_INC(x)	((x)++)
146 #define STATS_ZERO(x)	((x) = 0)
147 #define STATS_SETF(x,y)	((x)->f = (y))
148 #else
149 #define STATS_ADD(x,y)	/* nothing */
150 #define STATS_SUB(x,y)	/* nothing */
151 #define STATS_INC(x)	/* nothing */
152 #define STATS_ZERO(x)	/* nothing */
153 #define STATS_SETF(x,y)	/* nothing */
154 #endif /* MALLOC_STATS */
155 	u_int32_t canary2;
156 };
157 #define DIR_INFO_RSZ	((sizeof(struct dir_info) + MALLOC_PAGEMASK) & \
158 			~MALLOC_PAGEMASK)
159 
160 /* How many bits per u_short in the bitmap */
161 #define MALLOC_BITS		(NBBY * sizeof(u_short))
162 
163 /*
164  * This structure describes a page worth of chunks.
165  */
166 struct chunk_info {
167 	LIST_ENTRY(chunk_info) entries;
168 	void *page;			/* pointer to the page */
169 	u_int32_t canary;
170 	u_short size;			/* size of this page's chunks */
171 	u_short shift;			/* how far to shift for this size */
172 	u_short free;			/* how many free chunks */
173 	u_short total;			/* how many chunks */
174 	u_short offset;			/* requested size table offset */
175 					/* which chunks are free */
176 	u_short bits[1];
177 };
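
/*
 * A sizing sketch, assuming 4096-byte pages: a page of 16-byte chunks has
 * total = 256, so the bitmap needs howmany(256, MALLOC_BITS) = 256 / 16 =
 * 16 u_shorts.  With chunk canaries enabled, alloc_chunk_info() allocates
 * another "total" u_shorts after the bitmap to record requested sizes;
 * offset is the bits[] index where that table starts.
 */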
178 
179 struct malloc_readonly {
180 	struct dir_info *malloc_pool[_MALLOC_MUTEXES];	/* Main bookkeeping information */
181 	int	malloc_mt;		/* multi-threaded mode? */
182 	int	malloc_freenow;		/* Free quickly - disable chunk rnd */
183 	int	malloc_freeunmap;	/* mprotect free pages PROT_NONE? */
184 	int	malloc_junk;		/* junk fill? */
185 	int	malloc_realloc;		/* always realloc? */
186 	int	malloc_xmalloc;		/* xmalloc behaviour? */
187 	int	chunk_canaries;		/* use canaries after chunks? */
188 	u_int	malloc_cache;		/* free pages we cache */
189 	size_t	malloc_guard;		/* use guard pages after allocations? */
190 #ifdef MALLOC_STATS
191 	int	malloc_stats;		/* dump statistics at end */
192 #endif
193 	u_int32_t malloc_canary;	/* Matched against ones in malloc_pool */
194 };
195 
196 /* This object is mapped PROT_READ after initialisation to prevent tampering */
197 static union {
198 	struct malloc_readonly mopts;
199 	u_char _pad[MALLOC_PAGESIZE];
200 } malloc_readonly __attribute__((aligned(MALLOC_PAGESIZE)));
201 #define mopts	malloc_readonly.mopts
202 
203 char		*malloc_options;	/* compile-time options */
204 
205 static u_char getrbyte(struct dir_info *d);
206 static __dead void wrterror(struct dir_info *d, char *msg, ...)
207     __attribute__((__format__ (printf, 2, 3)));
208 static void fill_canary(char *ptr, size_t sz, size_t allocated);
209 
210 #ifdef MALLOC_STATS
211 void malloc_dump(int, int, struct dir_info *);
212 PROTO_NORMAL(malloc_dump);
213 static void malloc_exit(void);
214 #define CALLER	__builtin_return_address(0)
215 #else
216 #define CALLER	NULL
217 #endif
218 
219 /* low bits of r->p determine the size: 0 means >= page size, with r->size
220  * holding the real size; 1 means malloc(0); otherwise the chunk shift + 1
221  */
222 #define REALSIZE(sz, r)						\
223 	(sz) = (uintptr_t)(r)->p & MALLOC_PAGEMASK,		\
224 	(sz) = ((sz) == 0 ? (r)->size : ((sz) == 1 ? 0 : (1 << ((sz)-1))))
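
/*
 * Worked example of the encoding: a page of 64-byte chunks (shift 6) is
 * inserted with low bits 6 + 1 = 7, so REALSIZE() yields 1 << (7 - 1) = 64.
 * A low-bit value of 1 decodes to a malloc(0) region, and 0 to a region of
 * at least a page whose length is stored in r->size.
 */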
225 
226 static inline void
227 _MALLOC_LEAVE(struct dir_info *d)
228 {
229 	if (mopts.malloc_mt) {
230 		d->active--;
231 		_MALLOC_UNLOCK(d->mutex);
232 	}
233 }
234 
235 static inline void
236 _MALLOC_ENTER(struct dir_info *d)
237 {
238 	if (mopts.malloc_mt) {
239 		_MALLOC_LOCK(d->mutex);
240 		d->active++;
241 	}
242 }
243 
244 static inline size_t
245 hash(void *p)
246 {
247 	size_t sum;
248 	uintptr_t u;
249 
250 	u = (uintptr_t)p >> MALLOC_PAGESHIFT;
251 	sum = u;
252 	sum = (sum << 7) - sum + (u >> 16);
253 #ifdef __LP64__
254 	sum = (sum << 7) - sum + (u >> 32);
255 	sum = (sum << 7) - sum + (u >> 48);
256 #endif
257 	return sum;
258 }
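
/*
 * Note that the page offset bits are discarded before hashing, so every
 * address within one page hashes identically; e.g., assuming 4096-byte
 * pages, 0x7f0000001008 and 0x7f0000001ff0 land in the same slot.  This
 * is what allows find() to MASK_POINTER() its argument before lookup.
 */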
259 
260 static inline
261 struct dir_info *getpool(void)
262 {
263 	if (!mopts.malloc_mt)
264 		return mopts.malloc_pool[0];
265 	else
266 		return mopts.malloc_pool[TIB_GET()->tib_tid &
267 		    (_MALLOC_MUTEXES - 1)];
268 }
269 
270 static __dead void
271 wrterror(struct dir_info *d, char *msg, ...)
272 {
273 	struct iovec	iov[3];
274 	char		pidbuf[80];
275 	char		buf[80];
276 	int		saved_errno = errno;
277 	va_list		ap;
278 
279 	iov[0].iov_base = pidbuf;
280 	snprintf(pidbuf, sizeof(pidbuf), "%s(%d) in %s(): ", __progname,
281 	    getpid(), d->func ? d->func : "unknown");
282 	iov[0].iov_len = strlen(pidbuf);
283 	iov[1].iov_base = buf;
284 	va_start(ap, msg);
285 	vsnprintf(buf, sizeof(buf), msg, ap);
286 	va_end(ap);
287 	iov[1].iov_len = strlen(buf);
288 	iov[2].iov_base = "\n";
289 	iov[2].iov_len = 1;
290 	writev(STDERR_FILENO, iov, 3);
291 
292 #ifdef MALLOC_STATS
293 	if (mopts.malloc_stats) {
294 		int i;
295 
296 		for (i = 0; i < _MALLOC_MUTEXES; i++)
297 			malloc_dump(STDERR_FILENO, i, mopts.malloc_pool[i]);
298 	}
299 #endif /* MALLOC_STATS */
300 
301 	errno = saved_errno;
302 
303 	abort();
304 }
305 
306 static void
307 rbytes_init(struct dir_info *d)
308 {
309 	arc4random_buf(d->rbytes, sizeof(d->rbytes));
310 	/* add 1 to account for using d->rbytes[0] */
311 	d->rbytesused = 1 + d->rbytes[0] % (sizeof(d->rbytes) / 2);
312 }
313 
314 static inline u_char
315 getrbyte(struct dir_info *d)
316 {
317 	u_char x;
318 
319 	if (d->rbytesused >= sizeof(d->rbytes))
320 		rbytes_init(d);
321 	x = d->rbytes[d->rbytesused++];
322 	return x;
323 }
324 
325 /*
326  * Cache maintenance. We keep at most malloc_cache pages cached.
327  * If the cache is becoming full, unmap pages in the cache for real,
328  * and then add the region to the cache.
329  * Unlike the regular region data structure, the sizes in the
330  * cache are in MALLOC_PAGESIZE units.
331  */
332 static void
333 unmap(struct dir_info *d, void *p, size_t sz)
334 {
335 	size_t psz = sz >> MALLOC_PAGESHIFT;
336 	size_t rsz, tounmap;
337 	struct region_info *r;
338 	u_int i, offset;
339 
340 	if (sz != PAGEROUND(sz))
341 		wrterror(d, "munmap round");
342 
343 	if (psz > mopts.malloc_cache) {
344 		i = munmap(p, sz);
345 		if (i)
346 			wrterror(d, "munmap %p", p);
347 		STATS_SUB(d->malloc_used, sz);
348 		return;
349 	}
350 	tounmap = 0;
351 	rsz = mopts.malloc_cache - d->free_regions_size;
352 	if (psz > rsz)
353 		tounmap = psz - rsz;
354 	offset = getrbyte(d);
355 	for (i = 0; tounmap > 0 && i < mopts.malloc_cache; i++) {
356 		r = &d->free_regions[(i + offset) & (mopts.malloc_cache - 1)];
357 		if (r->p != NULL) {
358 			rsz = r->size << MALLOC_PAGESHIFT;
359 			if (munmap(r->p, rsz))
360 				wrterror(d, "munmap %p", r->p);
361 			r->p = NULL;
362 			if (tounmap > r->size)
363 				tounmap -= r->size;
364 			else
365 				tounmap = 0;
366 			d->free_regions_size -= r->size;
367 			r->size = 0;
368 			STATS_SUB(d->malloc_used, rsz);
369 		}
370 	}
371 	if (tounmap > 0)
372 		wrterror(d, "malloc cache underflow");
373 	for (i = 0; i < mopts.malloc_cache; i++) {
374 		r = &d->free_regions[(i + offset) & (mopts.malloc_cache - 1)];
375 		if (r->p == NULL) {
376 			if (mopts.malloc_junk && !mopts.malloc_freeunmap) {
377 				size_t amt = mopts.malloc_junk == 1 ?
378 				    MALLOC_MAXCHUNK : sz;
379 				memset(p, SOME_FREEJUNK, amt);
380 			}
381 			if (mopts.malloc_freeunmap)
382 				mprotect(p, sz, PROT_NONE);
383 			r->p = p;
384 			r->size = psz;
385 			d->free_regions_size += psz;
386 			break;
387 		}
388 	}
389 	if (i == mopts.malloc_cache)
390 		wrterror(d, "malloc free slot lost");
391 	if (d->free_regions_size > mopts.malloc_cache)
392 		wrterror(d, "malloc cache overflow");
393 }
394 
395 static void
396 zapcacheregion(struct dir_info *d, void *p, size_t len)
397 {
398 	u_int i;
399 	struct region_info *r;
400 	size_t rsz;
401 
402 	for (i = 0; i < mopts.malloc_cache; i++) {
403 		r = &d->free_regions[i];
404 		if (r->p >= p && r->p <= (void *)((char *)p + len)) {
405 			rsz = r->size << MALLOC_PAGESHIFT;
406 			if (munmap(r->p, rsz))
407 				wrterror(d, "munmap %p", r->p);
408 			r->p = NULL;
409 			d->free_regions_size -= r->size;
410 			r->size = 0;
411 			STATS_SUB(d->malloc_used, rsz);
412 		}
413 	}
414 }
415 
416 static void *
417 map(struct dir_info *d, void *hint, size_t sz, int zero_fill)
418 {
419 	size_t psz = sz >> MALLOC_PAGESHIFT;
420 	struct region_info *r, *big = NULL;
421 	u_int i, offset;
422 	void *p;
423 
424 	if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)(uintptr_t)d) ||
425 	    d->canary1 != ~d->canary2)
426 		wrterror(d, "internal struct corrupt");
427 	if (sz != PAGEROUND(sz))
428 		wrterror(d, "map round");
429 
430 	if (!hint && psz > d->free_regions_size) {
431 		_MALLOC_LEAVE(d);
432 		p = MMAP(sz);
433 		_MALLOC_ENTER(d);
434 		if (p != MAP_FAILED)
435 			STATS_ADD(d->malloc_used, sz);
436 		/* zero fill not needed, fresh mmap pages are zeroed */
437 		return p;
438 	}
439 	offset = getrbyte(d);
440 	for (i = 0; i < mopts.malloc_cache; i++) {
441 		r = &d->free_regions[(i + offset) & (mopts.malloc_cache - 1)];
442 		if (r->p != NULL) {
443 			if (hint && r->p != hint)
444 				continue;
445 			if (r->size == psz) {
446 				p = r->p;
447 				r->p = NULL;
448 				r->size = 0;
449 				d->free_regions_size -= psz;
450 				if (mopts.malloc_freeunmap)
451 					mprotect(p, sz, PROT_READ | PROT_WRITE);
452 				if (zero_fill)
453 					memset(p, 0, sz);
454 				else if (mopts.malloc_junk == 2 &&
455 				    mopts.malloc_freeunmap)
456 					memset(p, SOME_FREEJUNK, sz);
457 				return p;
458 			} else if (r->size > psz)
459 				big = r;
460 		}
461 	}
462 	if (big != NULL) {
463 		r = big;
464 		p = r->p;
465 		r->p = (char *)r->p + (psz << MALLOC_PAGESHIFT);
466 		if (mopts.malloc_freeunmap)
467 			mprotect(p, sz, PROT_READ | PROT_WRITE);
468 		r->size -= psz;
469 		d->free_regions_size -= psz;
470 		if (zero_fill)
471 			memset(p, 0, sz);
472 		else if (mopts.malloc_junk == 2 && mopts.malloc_freeunmap)
473 			memset(p, SOME_FREEJUNK, sz);
474 		return p;
475 	}
476 	if (hint)
477 		return MAP_FAILED;
478 	if (d->free_regions_size > mopts.malloc_cache)
479 		wrterror(d, "malloc cache");
480 	_MALLOC_LEAVE(d);
481 	p = MMAP(sz);
482 	_MALLOC_ENTER(d);
483 	if (p != MAP_FAILED)
484 		STATS_ADD(d->malloc_used, sz);
485 	/* zero fill not needed, fresh mmap pages are zeroed */
486 	return p;
487 }
488 
489 static void
490 omalloc_parseopt(char opt)
491 {
492 	switch (opt) {
493 	case '>':
494 		mopts.malloc_cache <<= 1;
495 		if (mopts.malloc_cache > MALLOC_MAXCACHE)
496 			mopts.malloc_cache = MALLOC_MAXCACHE;
497 		break;
498 	case '<':
499 		mopts.malloc_cache >>= 1;
500 		break;
501 	case 'c':
502 		mopts.chunk_canaries = 0;
503 		break;
504 	case 'C':
505 		mopts.chunk_canaries = 1;
506 		break;
507 #ifdef MALLOC_STATS
508 	case 'd':
509 		mopts.malloc_stats = 0;
510 		break;
511 	case 'D':
512 		mopts.malloc_stats = 1;
513 		break;
514 #endif /* MALLOC_STATS */
515 	case 'f':
516 		mopts.malloc_freenow = 0;
517 		mopts.malloc_freeunmap = 0;
518 		break;
519 	case 'F':
520 		mopts.malloc_freenow = 1;
521 		mopts.malloc_freeunmap = 1;
522 		break;
523 	case 'g':
524 		mopts.malloc_guard = 0;
525 		break;
526 	case 'G':
527 		mopts.malloc_guard = MALLOC_PAGESIZE;
528 		break;
529 	case 'j':
530 		if (mopts.malloc_junk > 0)
531 			mopts.malloc_junk--;
532 		break;
533 	case 'J':
534 		if (mopts.malloc_junk < 2)
535 			mopts.malloc_junk++;
536 		break;
537 	case 'r':
538 		mopts.malloc_realloc = 0;
539 		break;
540 	case 'R':
541 		mopts.malloc_realloc = 1;
542 		break;
543 	case 'u':
544 		mopts.malloc_freeunmap = 0;
545 		break;
546 	case 'U':
547 		mopts.malloc_freeunmap = 1;
548 		break;
549 	case 'x':
550 		mopts.malloc_xmalloc = 0;
551 		break;
552 	case 'X':
553 		mopts.malloc_xmalloc = 1;
554 		break;
555 	default: {
556 		static const char q[] = "malloc() warning: "
557 		    "unknown char in MALLOC_OPTIONS\n";
558 		write(STDERR_FILENO, q, sizeof(q) - 1);
559 		break;
560 	}
561 	}
562 }
563 
564 static void
565 omalloc_init(void)
566 {
567 	char *p, *q, b[64];
568 	int i, j;
569 
570 	/*
571 	 * Default options
572 	 */
573 	mopts.malloc_junk = 1;
574 	mopts.malloc_cache = MALLOC_DEFAULT_CACHE;
575 
576 	for (i = 0; i < 3; i++) {
577 		switch (i) {
578 		case 0:
579 			j = readlink("/etc/malloc.conf", b, sizeof b - 1);
580 			if (j <= 0)
581 				continue;
582 			b[j] = '\0';
583 			p = b;
584 			break;
585 		case 1:
586 			if (issetugid() == 0)
587 				p = getenv("MALLOC_OPTIONS");
588 			else
589 				continue;
590 			break;
591 		case 2:
592 			p = malloc_options;
593 			break;
594 		default:
595 			p = NULL;
596 		}
597 
598 		for (; p != NULL && *p != '\0'; p++) {
599 			switch (*p) {
600 			case 'S':
601 				for (q = "CGJ"; *q != '\0'; q++)
602 					omalloc_parseopt(*q);
603 				mopts.malloc_cache = 0;
604 				break;
605 			case 's':
606 				for (q = "cgj"; *q != '\0'; q++)
607 					omalloc_parseopt(*q);
608 				mopts.malloc_cache = MALLOC_DEFAULT_CACHE;
609 				break;
610 			default:
611 				omalloc_parseopt(*p);
612 				break;
613 			}
614 		}
615 	}
616 
617 #ifdef MALLOC_STATS
618 	if (mopts.malloc_stats && (atexit(malloc_exit) == -1)) {
619 		static const char q[] = "malloc() warning: atexit(2) failed."
620 		    " Will not be able to dump stats on exit\n";
621 		write(STDERR_FILENO, q, sizeof(q) - 1);
622 	}
623 #endif /* MALLOC_STATS */
624 
625 	while ((mopts.malloc_canary = arc4random()) == 0)
626 		;
627 }
628 
629 /*
630  * Initialize a dir_info, which should have been cleared by caller
631  */
632 static void
633 omalloc_poolinit(struct dir_info **dp)
634 {
635 	void *p;
636 	size_t d_avail, regioninfo_size;
637 	struct dir_info *d;
638 	int i, j;
639 
640 	/*
641 	 * Allocate dir_info with a guard page on either side. Also
642 	 * randomise offset inside the page at which the dir_info
643 	 * lies (subject to alignment by 1 << MALLOC_MINSHIFT)
644 	 */
645 	if ((p = MMAP(DIR_INFO_RSZ + (MALLOC_PAGESIZE * 2))) == MAP_FAILED)
646 		wrterror(NULL, "malloc init mmap failed");
647 	mprotect(p, MALLOC_PAGESIZE, PROT_NONE);
648 	mprotect((char *)p + MALLOC_PAGESIZE + DIR_INFO_RSZ,
649 	    MALLOC_PAGESIZE, PROT_NONE);
650 	d_avail = (DIR_INFO_RSZ - sizeof(*d)) >> MALLOC_MINSHIFT;
651 	d = (struct dir_info *)((char *)p + MALLOC_PAGESIZE +
652 	    (arc4random_uniform(d_avail) << MALLOC_MINSHIFT));
653 
654 	rbytes_init(d);
655 	d->regions_free = d->regions_total = MALLOC_INITIAL_REGIONS;
656 	regioninfo_size = d->regions_total * sizeof(struct region_info);
657 	d->r = MMAP(regioninfo_size);
658 	if (d->r == MAP_FAILED) {
659 		d->regions_total = 0;
660 		wrterror(NULL, "malloc init mmap failed");
661 	}
662 	for (i = 0; i <= MALLOC_MAXSHIFT; i++) {
663 		LIST_INIT(&d->chunk_info_list[i]);
664 		for (j = 0; j < MALLOC_CHUNK_LISTS; j++)
665 			LIST_INIT(&d->chunk_dir[i][j]);
666 	}
667 	STATS_ADD(d->malloc_used, regioninfo_size);
668 	d->canary1 = mopts.malloc_canary ^ (u_int32_t)(uintptr_t)d;
669 	d->canary2 = ~d->canary1;
670 
671 	*dp = d;
672 }
673 
674 static int
675 omalloc_grow(struct dir_info *d)
676 {
677 	size_t newtotal;
678 	size_t newsize;
679 	size_t mask;
680 	size_t i;
681 	struct region_info *p;
682 
683 	if (d->regions_total > SIZE_MAX / sizeof(struct region_info) / 2)
684 		return 1;
685 
686 	newtotal = d->regions_total * 2;
687 	newsize = newtotal * sizeof(struct region_info);
688 	mask = newtotal - 1;
689 
690 	p = MMAP(newsize);
691 	if (p == MAP_FAILED)
692 		return 1;
693 
694 	STATS_ADD(d->malloc_used, newsize);
695 	STATS_ZERO(d->inserts);
696 	STATS_ZERO(d->insert_collisions);
697 	for (i = 0; i < d->regions_total; i++) {
698 		void *q = d->r[i].p;
699 		if (q != NULL) {
700 			size_t index = hash(q) & mask;
701 			STATS_INC(d->inserts);
702 			while (p[index].p != NULL) {
703 				index = (index - 1) & mask;
704 				STATS_INC(d->insert_collisions);
705 			}
706 			p[index] = d->r[i];
707 		}
708 	}
709 	/* avoid pages containing meta info ending up in the cache */
710 	if (munmap(d->r, d->regions_total * sizeof(struct region_info)))
711 		wrterror(d, "munmap %p", (void *)d->r);
712 	else
713 		STATS_SUB(d->malloc_used,
714 		    d->regions_total * sizeof(struct region_info));
715 	d->regions_free = d->regions_free + d->regions_total;
716 	d->regions_total = newtotal;
717 	d->r = p;
718 	return 0;
719 }
720 
721 static struct chunk_info *
722 alloc_chunk_info(struct dir_info *d, int bits)
723 {
724 	struct chunk_info *p;
725 	size_t size, count;
726 
727 	if (bits == 0)
728 		count = MALLOC_PAGESIZE / MALLOC_MINSIZE;
729 	else
730 		count = MALLOC_PAGESIZE >> bits;
731 
732 	size = howmany(count, MALLOC_BITS);
733 	size = sizeof(struct chunk_info) + (size - 1) * sizeof(u_short);
734 	if (mopts.chunk_canaries)
735 		size += count * sizeof(u_short);
736 	size = ALIGN(size);
737 
738 	if (LIST_EMPTY(&d->chunk_info_list[bits])) {
739 		char *q;
740 		int i;
741 
742 		q = MMAP(MALLOC_PAGESIZE);
743 		if (q == MAP_FAILED)
744 			return NULL;
745 		STATS_ADD(d->malloc_used, MALLOC_PAGESIZE);
746 		count = MALLOC_PAGESIZE / size;
747 		for (i = 0; i < count; i++, q += size)
748 			LIST_INSERT_HEAD(&d->chunk_info_list[bits],
749 			    (struct chunk_info *)q, entries);
750 	}
751 	p = LIST_FIRST(&d->chunk_info_list[bits]);
752 	LIST_REMOVE(p, entries);
753 	memset(p, 0, size);
754 	p->canary = d->canary1;
755 	return p;
756 }
757 
758 
759 /*
760  * The hashtable uses the assumption that p is never NULL. This holds since
761  * non-MAP_FIXED mappings with hint 0 start at BRKSIZ.
762  */
763 static int
764 insert(struct dir_info *d, void *p, size_t sz, void *f)
765 {
766 	size_t index;
767 	size_t mask;
768 	void *q;
769 
770 	if (d->regions_free * 4 < d->regions_total) {
771 		if (omalloc_grow(d))
772 			return 1;
773 	}
774 	mask = d->regions_total - 1;
775 	index = hash(p) & mask;
776 	q = d->r[index].p;
777 	STATS_INC(d->inserts);
778 	while (q != NULL) {
779 		index = (index - 1) & mask;
780 		q = d->r[index].p;
781 		STATS_INC(d->insert_collisions);
782 	}
783 	d->r[index].p = p;
784 	d->r[index].size = sz;
785 #ifdef MALLOC_STATS
786 	d->r[index].f = f;
787 #endif
788 	d->regions_free--;
789 	return 0;
790 }
791 
792 static struct region_info *
793 find(struct dir_info *d, void *p)
794 {
795 	size_t index;
796 	size_t mask = d->regions_total - 1;
797 	void *q, *r;
798 
799 	if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)(uintptr_t)d) ||
800 	    d->canary1 != ~d->canary2)
801 		wrterror(d, "internal struct corrupt");
802 	p = MASK_POINTER(p);
803 	index = hash(p) & mask;
804 	r = d->r[index].p;
805 	q = MASK_POINTER(r);
806 	STATS_INC(d->finds);
807 	while (q != p && r != NULL) {
808 		index = (index - 1) & mask;
809 		r = d->r[index].p;
810 		q = MASK_POINTER(r);
811 		STATS_INC(d->find_collisions);
812 	}
813 	return (q == p && r != NULL) ? &d->r[index] : NULL;
814 }
815 
816 static void
817 delete(struct dir_info *d, struct region_info *ri)
818 {
819 	/* algorithm R, Knuth Vol III section 6.4 */
820 	size_t mask = d->regions_total - 1;
821 	size_t i, j, r;
822 
823 	if (d->regions_total & (d->regions_total - 1))
824 		wrterror(d, "regions_total not 2^x");
825 	d->regions_free++;
826 	STATS_INC(d->deletes);
827 
828 	i = ri - d->r;
829 	for (;;) {
830 		d->r[i].p = NULL;
831 		d->r[i].size = 0;
832 		j = i;
833 		for (;;) {
834 			i = (i - 1) & mask;
835 			if (d->r[i].p == NULL)
836 				return;
837 			r = hash(d->r[i].p) & mask;
838 			if ((i <= r && r < j) || (r < j && j < i) ||
839 			    (j < i && i <= r))
840 				continue;
841 			d->r[j] = d->r[i];
842 			STATS_INC(d->delete_moves);
843 			break;
844 		}
845 
846 	}
847 }
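
/*
 * A sketch of the invariant maintained above: with j the current hole and
 * probing downwards, an occupied slot i can keep its place only if the
 * downward probe path from its home slot r = hash(p) & mask to i does not
 * pass through the hole j; if it does, a later lookup would stop at the
 * empty slot j and miss the entry, so the entry is moved into j and the
 * hole shifts to i.
 */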
848 
849 /*
850  * Allocate a page of chunks
851  */
852 static struct chunk_info *
853 omalloc_make_chunks(struct dir_info *d, int bits, int listnum)
854 {
855 	struct chunk_info *bp;
856 	void		*pp;
857 	int		i, k;
858 
859 	/* Allocate a new bucket */
860 	pp = map(d, NULL, MALLOC_PAGESIZE, 0);
861 	if (pp == MAP_FAILED)
862 		return NULL;
863 
864 	bp = alloc_chunk_info(d, bits);
865 	if (bp == NULL) {
866 		unmap(d, pp, MALLOC_PAGESIZE);
867 		return NULL;
868 	}
869 
870 	/* memory protect the page allocated in the malloc(0) case */
871 	if (bits == 0) {
872 		bp->size = 0;
873 		bp->shift = 1;
874 		i = MALLOC_MINSIZE - 1;
875 		while (i >>= 1)
876 			bp->shift++;
877 		bp->total = bp->free = MALLOC_PAGESIZE >> bp->shift;
878 		bp->page = pp;
879 
880 		k = mprotect(pp, MALLOC_PAGESIZE, PROT_NONE);
881 		if (k < 0) {
882 			unmap(d, pp, MALLOC_PAGESIZE);
883 			LIST_INSERT_HEAD(&d->chunk_info_list[0], bp, entries);
884 			return NULL;
885 		}
886 	} else {
887 		bp->size = 1U << bits;
888 		bp->shift = bits;
889 		bp->total = bp->free = MALLOC_PAGESIZE >> bits;
890 		bp->offset = howmany(bp->total, MALLOC_BITS);
891 		bp->page = pp;
892 	}
893 
894 	/* set all valid bits in the bitmap */
895 	k = bp->total;
896 	i = 0;
897 
898 	/* Do a bunch at a time */
899 	for (; (k - i) >= MALLOC_BITS; i += MALLOC_BITS)
900 		bp->bits[i / MALLOC_BITS] = (u_short)~0U;
901 
902 	for (; i < k; i++)
903 		bp->bits[i / MALLOC_BITS] |= (u_short)1U << (i % MALLOC_BITS);
904 
905 	LIST_INSERT_HEAD(&d->chunk_dir[bits][listnum], bp, entries);
906 
907 	bits++;
908 	if ((uintptr_t)pp & bits)
909 		wrterror(d, "pp & bits %p", pp);
910 
911 	insert(d, (void *)((uintptr_t)pp | bits), (uintptr_t)bp, NULL);
912 	return bp;
913 }
914 
915 static int
916 find_chunksize(size_t size)
917 {
918 	int		i, j;
919 
920 	/* Don't bother with anything less than this */
921 	/* unless we have a malloc(0) request */
922 	if (size != 0 && size < MALLOC_MINSIZE)
923 		size = MALLOC_MINSIZE;
924 
925 	/* Find the right bucket */
926 	if (size == 0)
927 		j = 0;
928 	else {
929 		j = MALLOC_MINSHIFT;
930 		i = (size - 1) >> (MALLOC_MINSHIFT - 1);
931 		while (i >>= 1)
932 			j++;
933 	}
934 	return j;
935 }
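
/*
 * Worked example: for size = 100, i starts at 99 >> 3 = 12 and is shifted
 * right three times before becoming zero, so j ends at 4 + 3 = 7 and the
 * request is served from the 1 << 7 = 128 byte bucket.
 */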
936 
937 /*
938  * Allocate a chunk
939  */
940 static void *
941 malloc_bytes(struct dir_info *d, size_t size, void *f)
942 {
943 	int		i, j, listnum;
944 	size_t		k;
945 	u_short		u, *lp;
946 	struct chunk_info *bp;
947 
948 	if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)(uintptr_t)d) ||
949 	    d->canary1 != ~d->canary2)
950 		wrterror(d, "internal struct corrupt");
951 
952 	j = find_chunksize(size);
953 
954 	listnum = getrbyte(d) % MALLOC_CHUNK_LISTS;
955 	/* If the list is empty, allocate a new page of chunks of that size */
956 	if ((bp = LIST_FIRST(&d->chunk_dir[j][listnum])) == NULL) {
957 		bp = omalloc_make_chunks(d, j, listnum);
958 		if (bp == NULL)
959 			return NULL;
960 	}
961 
962 	if (bp->canary != d->canary1)
963 		wrterror(d, "chunk info corrupted");
964 
965 	i = d->chunk_start;
966 	if (bp->free > 1)
967 		i += getrbyte(d);
968 	if (i >= bp->total)
969 		i &= bp->total - 1;
970 	for (;;) {
971 		for (;;) {
972 			lp = &bp->bits[i / MALLOC_BITS];
973 			if (!*lp) {
974 				i += MALLOC_BITS;
975 				i &= ~(MALLOC_BITS - 1);
976 				if (i >= bp->total)
977 					i = 0;
978 			} else
979 				break;
980 		}
981 		k = i % MALLOC_BITS;
982 		u = 1 << k;
983 		if (*lp & u)
984 			break;
985 		if (++i >= bp->total)
986 			i = 0;
987 	}
988 	d->chunk_start += i + 1;
989 #ifdef MALLOC_STATS
990 	if (i == 0) {
991 		struct region_info *r = find(d, bp->page);
992 		r->f = f;
993 	}
994 #endif
995 
996 	*lp ^= u;
997 
998 	/* If there are no more free chunks, remove from the free list */
999 	if (!--bp->free)
1000 		LIST_REMOVE(bp, entries);
1001 
1002 	/* Adjust to the real offset of that chunk */
1003 	k += (lp - bp->bits) * MALLOC_BITS;
1004 
1005 	if (mopts.chunk_canaries)
1006 		bp->bits[bp->offset + k] = size;
1007 
1008 	k <<= bp->shift;
1009 
1010 	if (bp->size > 0) {
1011 		if (mopts.malloc_junk == 2)
1012 			memset((char *)bp->page + k, SOME_JUNK, bp->size);
1013 		else if (mopts.chunk_canaries)
1014 			fill_canary((char *)bp->page + k, size, bp->size);
1015 	}
1016 	return ((char *)bp->page + k);
1017 }
1018 
1019 static void
1020 fill_canary(char *ptr, size_t sz, size_t allocated)
1021 {
1022 	size_t check_sz = allocated - sz;
1023 
1024 	if (check_sz > CHUNK_CHECK_LENGTH)
1025 		check_sz = CHUNK_CHECK_LENGTH;
1026 	memset(ptr + sz, SOME_JUNK, check_sz);
1027 }
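
/*
 * Canary layout sketch: for a request of sz = 20 in a 32-byte chunk,
 * bytes 20..31 are junked; in a 256-byte chunk only the first
 * CHUNK_CHECK_LENGTH (32) bytes past the request, bytes 20..51, are
 * junked.  validate_canary() below re-checks the same range on free.
 */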
1028 
1029 static void
1030 validate_canary(struct dir_info *d, u_char *ptr, size_t sz, size_t allocated)
1031 {
1032 	size_t check_sz = allocated - sz;
1033 	u_char *p, *q;
1034 
1035 	if (check_sz > CHUNK_CHECK_LENGTH)
1036 		check_sz = CHUNK_CHECK_LENGTH;
1037 	p = ptr + sz;
1038 	q = p + check_sz;
1039 
1040 	while (p < q) {
1041 		if (*p++ != SOME_JUNK) {
1042 			wrterror(d, "chunk canary corrupted %p %#tx@%#zx",
1043 			    ptr, p - ptr - 1, sz);
1044 		}
1045 	}
1046 }
1047 
1048 static uint32_t
1049 find_chunknum(struct dir_info *d, struct region_info *r, void *ptr, int check)
1050 {
1051 	struct chunk_info *info;
1052 	uint32_t chunknum;
1053 
1054 	info = (struct chunk_info *)r->size;
1055 	if (info->canary != d->canary1)
1056 		wrterror(d, "chunk info corrupted");
1057 
1058 	/* Find the chunk number on the page */
1059 	chunknum = ((uintptr_t)ptr & MALLOC_PAGEMASK) >> info->shift;
1060 	if (check && info->size > 0) {
1061 		validate_canary(d, ptr, info->bits[info->offset + chunknum],
1062 		    info->size);
1063 	}
1064 
1065 	if ((uintptr_t)ptr & ((1U << (info->shift)) - 1))
1066 		wrterror(d, "modified chunk-pointer %p", ptr);
1067 	if (info->bits[chunknum / MALLOC_BITS] &
1068 	    (1U << (chunknum % MALLOC_BITS)))
1069 		wrterror(d, "chunk is already free %p", ptr);
1070 	return chunknum;
1071 }
1072 
1073 /*
1074  * Free a chunk, and possibly the page it's on, if the page becomes empty.
1075  */
1076 static void
1077 free_bytes(struct dir_info *d, struct region_info *r, void *ptr)
1078 {
1079 	struct chunk_head *mp;
1080 	struct chunk_info *info;
1081 	uint32_t chunknum;
1082 	int listnum;
1083 
1084 	info = (struct chunk_info *)r->size;
1085 	chunknum = find_chunknum(d, r, ptr, 0);
1086 
1087 	info->bits[chunknum / MALLOC_BITS] |= 1U << (chunknum % MALLOC_BITS);
1088 	info->free++;
1089 
1090 	if (info->free == 1) {
1091 		/* Page became non-full */
1092 		listnum = getrbyte(d) % MALLOC_CHUNK_LISTS;
1093 		if (info->size != 0)
1094 			mp = &d->chunk_dir[info->shift][listnum];
1095 		else
1096 			mp = &d->chunk_dir[0][listnum];
1097 
1098 		LIST_INSERT_HEAD(mp, info, entries);
1099 		return;
1100 	}
1101 
1102 	if (info->free != info->total)
1103 		return;
1104 
1105 	LIST_REMOVE(info, entries);
1106 
1107 	if (info->size == 0 && !mopts.malloc_freeunmap)
1108 		mprotect(info->page, MALLOC_PAGESIZE, PROT_READ | PROT_WRITE);
1109 	unmap(d, info->page, MALLOC_PAGESIZE);
1110 
1111 	delete(d, r);
1112 	if (info->size != 0)
1113 		mp = &d->chunk_info_list[info->shift];
1114 	else
1115 		mp = &d->chunk_info_list[0];
1116 	LIST_INSERT_HEAD(mp, info, entries);
1117 }
1118 
1119 
1120 
1121 static void *
1122 omalloc(struct dir_info *pool, size_t sz, int zero_fill, void *f)
1123 {
1124 	void *p;
1125 	size_t psz;
1126 
1127 	if (sz > MALLOC_MAXCHUNK) {
1128 		if (sz >= SIZE_MAX - mopts.malloc_guard - MALLOC_PAGESIZE) {
1129 			errno = ENOMEM;
1130 			return NULL;
1131 		}
1132 		sz += mopts.malloc_guard;
1133 		psz = PAGEROUND(sz);
1134 		p = map(pool, NULL, psz, zero_fill);
1135 		if (p == MAP_FAILED) {
1136 			errno = ENOMEM;
1137 			return NULL;
1138 		}
1139 		if (insert(pool, p, sz, f)) {
1140 			unmap(pool, p, psz);
1141 			errno = ENOMEM;
1142 			return NULL;
1143 		}
1144 		if (mopts.malloc_guard) {
1145 			if (mprotect((char *)p + psz - mopts.malloc_guard,
1146 			    mopts.malloc_guard, PROT_NONE))
1147 				wrterror(pool, "mprotect");
1148 			STATS_ADD(pool->malloc_guarded, mopts.malloc_guard);
1149 		}
1150 
1151 		if (MALLOC_MOVE_COND(sz)) {
1152 			/* fill whole allocation */
1153 			if (mopts.malloc_junk == 2)
1154 				memset(p, SOME_JUNK, psz - mopts.malloc_guard);
1155 			/* shift towards the end */
1156 			p = MALLOC_MOVE(p, sz);
1157 			/* re-zero if needed, the junk fill above overwrote it */
1158 			if (zero_fill && mopts.malloc_junk == 2)
1159 				memset(p, 0, sz - mopts.malloc_guard);
1160 		} else {
1161 			if (mopts.malloc_junk == 2) {
1162 				if (zero_fill)
1163 					memset((char *)p + sz - mopts.malloc_guard,
1164 					    SOME_JUNK, psz - sz);
1165 				else
1166 					memset(p, SOME_JUNK,
1167 					    psz - mopts.malloc_guard);
1168 			}
1169 			else if (mopts.chunk_canaries)
1170 				fill_canary(p, sz - mopts.malloc_guard,
1171 				    psz - mopts.malloc_guard);
1172 		}
1173 
1174 	} else {
1175 		/* takes care of SOME_JUNK */
1176 		p = malloc_bytes(pool, sz, f);
1177 		if (zero_fill && p != NULL && sz > 0)
1178 			memset(p, 0, sz);
1179 	}
1180 
1181 	return p;
1182 }
1183 
1184 /*
1185  * Common function for handling recursion.  Only
1186  * print the error message once, to avoid making the problem
1187  * potentially worse.
1188  */
1189 static void
1190 malloc_recurse(struct dir_info *d)
1191 {
1192 	static int noprint;
1193 
1194 	if (noprint == 0) {
1195 		noprint = 1;
1196 		wrterror(d, "recursive call");
1197 	}
1198 	d->active--;
1199 	_MALLOC_UNLOCK(d->mutex);
1200 	errno = EDEADLK;
1201 }
1202 
1203 void
1204 _malloc_init(int from_rthreads)
1205 {
1206 	int i, max;
1207 	struct dir_info *d;
1208 
1209 	_MALLOC_LOCK(0);
1210 	if (!from_rthreads && mopts.malloc_pool[0]) {
1211 		_MALLOC_UNLOCK(0);
1212 		return;
1213 	}
1214 	if (!mopts.malloc_canary)
1215 		omalloc_init();
1216 
1217 	max = from_rthreads ? _MALLOC_MUTEXES : 1;
1218 	if (((uintptr_t)&malloc_readonly & MALLOC_PAGEMASK) == 0)
1219 		mprotect(&malloc_readonly, sizeof(malloc_readonly),
1220 		     PROT_READ | PROT_WRITE);
1221 	for (i = 0; i < max; i++) {
1222 		if (mopts.malloc_pool[i])
1223 			continue;
1224 		omalloc_poolinit(&d);
1225 		d->mutex = i;
1226 		mopts.malloc_pool[i] = d;
1227 	}
1228 
1229 	if (from_rthreads)
1230 		mopts.malloc_mt = 1;
1231 
1232 	/*
1233 	 * Options have been set and will never be reset.
1234 	 * Prevent further tampering with them.
1235 	 */
1236 	if (((uintptr_t)&malloc_readonly & MALLOC_PAGEMASK) == 0)
1237 		mprotect(&malloc_readonly, sizeof(malloc_readonly), PROT_READ);
1238 	_MALLOC_UNLOCK(0);
1239 }
1240 DEF_STRONG(_malloc_init);
1241 
1242 void *
1243 malloc(size_t size)
1244 {
1245 	void *r;
1246 	struct dir_info *d;
1247 	int saved_errno = errno;
1248 
1249 	d = getpool();
1250 	if (d == NULL) {
1251 		_malloc_init(0);
1252 		d = getpool();
1253 	}
1254 	_MALLOC_LOCK(d->mutex);
1255 	d->func = "malloc";
1256 
1257 	if (d->active++) {
1258 		malloc_recurse(d);
1259 		return NULL;
1260 	}
1261 	r = omalloc(d, size, 0, CALLER);
1262 	d->active--;
1263 	_MALLOC_UNLOCK(d->mutex);
1264 	if (r == NULL && mopts.malloc_xmalloc)
1265 		wrterror(d, "out of memory");
1266 	if (r != NULL)
1267 		errno = saved_errno;
1268 	return r;
1269 }
1270 /*DEF_STRONG(malloc);*/
1271 
1272 static void
1273 validate_junk(struct dir_info *pool, void *p)
1274 {
1275 	struct region_info *r;
1276 	size_t byte, sz;
1277 
1278 	if (p == NULL)
1279 		return;
1280 	r = find(pool, p);
1281 	if (r == NULL)
1282 		wrterror(pool, "bogus pointer in validate_junk %p", p);
1283 	REALSIZE(sz, r);
1284 	if (sz > CHUNK_CHECK_LENGTH)
1285 		sz = CHUNK_CHECK_LENGTH;
1286 	for (byte = 0; byte < sz; byte++) {
1287 		if (((unsigned char *)p)[byte] != SOME_FREEJUNK)
1288 			wrterror(pool, "use after free %p", p);
1289 	}
1290 }
1291 
1292 static void
1293 ofree(struct dir_info *argpool, void *p)
1294 {
1295 	struct dir_info *pool;
1296 	struct region_info *r;
1297 	size_t sz;
1298 	int i;
1299 
1300 	pool = argpool;
1301 	r = find(pool, p);
1302 	if (r == NULL) {
1303 		if (mopts.malloc_mt)  {
1304 			for (i = 0; i < _MALLOC_MUTEXES; i++) {
1305 				if (i == argpool->mutex)
1306 					continue;
1307 				pool->active--;
1308 				_MALLOC_UNLOCK(pool->mutex);
1309 				pool = mopts.malloc_pool[i];
1310 				_MALLOC_LOCK(pool->mutex);
1311 				pool->active++;
1312 				r = find(pool, p);
1313 				if (r != NULL)
1314 					break;
1315 			}
1316 		}
1317 		if (r == NULL)
1318 			wrterror(pool, "bogus pointer (double free?) %p", p);
1319 	}
1320 
1321 	REALSIZE(sz, r);
1322 	if (sz > MALLOC_MAXCHUNK) {
1323 		if (!MALLOC_MOVE_COND(sz)) {
1324 			if (r->p != p)
1325 				wrterror(pool, "bogus pointer %p", p);
1326 			if (mopts.chunk_canaries)
1327 				validate_canary(pool, p,
1328 				    sz - mopts.malloc_guard,
1329 				    PAGEROUND(sz - mopts.malloc_guard));
1330 		} else {
1331 #if notyetbecause_of_realloc
1332 			/* shifted towards the end */
1333 			if (p != ((char *)r->p) + ((MALLOC_PAGESIZE -
1334 			    MALLOC_MINSIZE - sz - mopts.malloc_guard) &
1335 			    ~(MALLOC_MINSIZE-1))) {
1336 			}
1337 #endif
1338 			p = r->p;
1339 		}
1340 		if (mopts.malloc_guard) {
1341 			if (sz < mopts.malloc_guard)
1342 				wrterror(pool, "guard size");
1343 			if (!mopts.malloc_freeunmap) {
1344 				if (mprotect((char *)p + PAGEROUND(sz) -
1345 				    mopts.malloc_guard, mopts.malloc_guard,
1346 				    PROT_READ | PROT_WRITE))
1347 					wrterror(pool, "mprotect");
1348 			}
1349 			STATS_SUB(pool->malloc_guarded, mopts.malloc_guard);
1350 		}
1351 		unmap(pool, p, PAGEROUND(sz));
1352 		delete(pool, r);
1353 	} else {
1354 		void *tmp;
1355 		int i;
1356 
1357 		/* Delayed free or canaries? Extra check */
1358 		if (!mopts.malloc_freenow || mopts.chunk_canaries)
1359 			find_chunknum(pool, r, p, mopts.chunk_canaries);
1360 		if (!mopts.malloc_freenow) {
1361 			if (mopts.malloc_junk && sz > 0)
1362 				memset(p, SOME_FREEJUNK, sz);
1363 			i = getrbyte(pool) & MALLOC_DELAYED_CHUNK_MASK;
1364 			tmp = p;
1365 			p = pool->delayed_chunks[i];
1366 			if (tmp == p)
1367 				wrterror(pool, "double free %p", tmp);
1368 			if (mopts.malloc_junk)
1369 				validate_junk(pool, p);
1370 			pool->delayed_chunks[i] = tmp;
1371 		} else {
1372 			if (mopts.malloc_junk && sz > 0)
1373 				memset(p, SOME_FREEJUNK, sz);
1374 		}
1375 		if (p != NULL) {
1376 			r = find(pool, p);
1377 			if (r == NULL)
1378 				wrterror(pool,
1379 				    "bogus pointer (double free?) %p", p);
1380 			free_bytes(pool, r, p);
1381 		}
1382 	}
1383 
1384 	if (argpool != pool) {
1385 		pool->active--;
1386 		_MALLOC_UNLOCK(pool->mutex);
1387 		_MALLOC_LOCK(argpool->mutex);
1388 		argpool->active++;
1389 	}
1390 }
1391 
1392 void
1393 free(void *ptr)
1394 {
1395 	struct dir_info *d;
1396 	int saved_errno = errno;
1397 
1398 	/* This is legal. */
1399 	if (ptr == NULL)
1400 		return;
1401 
1402 	d = getpool();
1403 	if (d == NULL)
1404 		wrterror(d, "free() called before allocation");
1405 	_MALLOC_LOCK(d->mutex);
1406 	d->func = "free";
1407 	if (d->active++) {
1408 		malloc_recurse(d);
1409 		return;
1410 	}
1411 	ofree(d, ptr);
1412 	d->active--;
1413 	_MALLOC_UNLOCK(d->mutex);
1414 	errno = saved_errno;
1415 }
1416 /*DEF_STRONG(free);*/
1417 
1418 
1419 static void *
1420 orealloc(struct dir_info *argpool, void *p, size_t newsz, void *f)
1421 {
1422 	struct dir_info *pool;
1423 	struct region_info *r;
1424 	struct chunk_info *info;
1425 	size_t oldsz, goldsz, gnewsz;
1426 	void *q, *ret;
1427 	int i;
1428 	uint32_t chunknum;
1429 
1430 	pool = argpool;
1431 
1432 	if (p == NULL)
1433 		return omalloc(pool, newsz, 0, f);
1434 
1435 	r = find(pool, p);
1436 	if (r == NULL) {
1437 		if (mopts.malloc_mt) {
1438 			for (i = 0; i < _MALLOC_MUTEXES; i++) {
1439 				if (i == argpool->mutex)
1440 					continue;
1441 				pool->active--;
1442 				_MALLOC_UNLOCK(pool->mutex);
1443 				pool = mopts.malloc_pool[i];
1444 				_MALLOC_LOCK(pool->mutex);
1445 				pool->active++;
1446 				r = find(pool, p);
1447 				if (r != NULL)
1448 					break;
1449 			}
1450 		}
1451 		if (r == NULL)
1452 			wrterror(pool, "bogus pointer (double free?) %p", p);
1453 	}
1454 	if (newsz >= SIZE_MAX - mopts.malloc_guard - MALLOC_PAGESIZE) {
1455 		errno = ENOMEM;
1456 		ret = NULL;
1457 		goto done;
1458 	}
1459 
1460 	REALSIZE(oldsz, r);
1461 	if (mopts.chunk_canaries && oldsz <= MALLOC_MAXCHUNK) {
1462 		chunknum = find_chunknum(pool, r, p, 0);
1463 		info = (struct chunk_info *)r->size;
1464 	}
1465 
1466 	goldsz = oldsz;
1467 	if (oldsz > MALLOC_MAXCHUNK) {
1468 		if (oldsz < mopts.malloc_guard)
1469 			wrterror(pool, "guard size");
1470 		oldsz -= mopts.malloc_guard;
1471 	}
1472 
1473 	gnewsz = newsz;
1474 	if (gnewsz > MALLOC_MAXCHUNK)
1475 		gnewsz += mopts.malloc_guard;
1476 
1477 	if (newsz > MALLOC_MAXCHUNK && oldsz > MALLOC_MAXCHUNK && p == r->p &&
1478 	    !mopts.malloc_realloc) {
1479 		/* First case: resizing from an n-page allocation to an m-page
1480 		   allocation, with no malloc_move in effect */
1481 		size_t roldsz = PAGEROUND(goldsz);
1482 		size_t rnewsz = PAGEROUND(gnewsz);
1483 
1484 		if (rnewsz > roldsz) {
1485 			/* try to extend existing region */
1486 			if (!mopts.malloc_guard) {
1487 				void *hint = (char *)p + roldsz;
1488 				size_t needed = rnewsz - roldsz;
1489 
1490 				STATS_INC(pool->cheap_realloc_tries);
1491 				q = map(pool, hint, needed, 0);
1492 				if (q == hint)
1493 					goto gotit;
1494 				zapcacheregion(pool, hint, needed);
1495 				q = MQUERY(hint, needed);
1496 				if (q == hint)
1497 					q = MMAPA(hint, needed);
1498 				else
1499 					q = MAP_FAILED;
1500 				if (q == hint) {
1501 gotit:
1502 					STATS_ADD(pool->malloc_used, needed);
1503 					if (mopts.malloc_junk == 2)
1504 						memset(q, SOME_JUNK, needed);
1505 					r->size = newsz;
1506 					if (mopts.chunk_canaries)
1507 						fill_canary(p, newsz, PAGEROUND(newsz));
1508 					STATS_SETF(r, f);
1509 					STATS_INC(pool->cheap_reallocs);
1510 					ret = p;
1511 					goto done;
1512 				} else if (q != MAP_FAILED) {
1513 					if (munmap(q, needed))
1514 						wrterror(pool, "munmap %p", q);
1515 				}
1516 			}
1517 		} else if (rnewsz < roldsz) {
1518 			/* shrink number of pages */
1519 			if (mopts.malloc_guard) {
1520 				if (mprotect((char *)p + roldsz -
1521 				    mopts.malloc_guard, mopts.malloc_guard,
1522 				    PROT_READ | PROT_WRITE))
1523 					wrterror(pool, "mprotect");
1524 				if (mprotect((char *)p + rnewsz -
1525 				    mopts.malloc_guard, mopts.malloc_guard,
1526 				    PROT_NONE))
1527 					wrterror(pool, "mprotect");
1528 			}
1529 			unmap(pool, (char *)p + rnewsz, roldsz - rnewsz);
1530 			r->size = gnewsz;
1531 			if (mopts.chunk_canaries)
1532 				fill_canary(p, newsz, PAGEROUND(newsz));
1533 			STATS_SETF(r, f);
1534 			ret = p;
1535 			goto done;
1536 		} else {
1537 			/* number of pages remains the same */
1538 			if (newsz > oldsz && mopts.malloc_junk == 2)
1539 				memset((char *)p + newsz, SOME_JUNK,
1540 				    rnewsz - mopts.malloc_guard - newsz);
1541 			r->size = gnewsz;
1542 			if (mopts.chunk_canaries)
1543 				fill_canary(p, newsz, PAGEROUND(newsz));
1544 			STATS_SETF(r, f);
1545 			ret = p;
1546 			goto done;
1547 		}
1548 	}
1549 	if (oldsz <= MALLOC_MAXCHUNK && oldsz > 0 &&
1550 	    newsz <= MALLOC_MAXCHUNK && newsz > 0 &&
1551 	    1 << find_chunksize(newsz) == oldsz && !mopts.malloc_realloc) {
1552 		/* do not reallocate if the new size fits well in the existing chunk */
1553 		if (mopts.malloc_junk == 2)
1554 			memset((char *)p + newsz, SOME_JUNK, oldsz - newsz);
1555 		if (mopts.chunk_canaries) {
1556 			info->bits[info->offset + chunknum] = newsz;
1557 			fill_canary(p, newsz, info->size);
1558 		}
1559 		STATS_SETF(r, f);
1560 		ret = p;
1561 	} else if (newsz != oldsz || mopts.malloc_realloc) {
1562 		/* create new allocation */
1563 		q = omalloc(pool, newsz, 0, f);
1564 		if (q == NULL) {
1565 			ret = NULL;
1566 			goto done;
1567 		}
1568 		if (newsz != 0 && oldsz != 0)
1569 			memcpy(q, p, oldsz < newsz ? oldsz : newsz);
1570 		ofree(pool, p);
1571 		ret = q;
1572 	} else {
1573 		/* size did not change, the allocation is reused as is */
1574 		if (mopts.chunk_canaries && oldsz <= MALLOC_MAXCHUNK) {
1575 			info->bits[info->offset + chunknum] = newsz;
1576 			if (info->size > 0)
1577 				fill_canary(p, newsz, info->size);
1578 		}
1579 		STATS_SETF(r, f);
1580 		ret = p;
1581 	}
1582 done:
1583 	if (argpool != pool) {
1584 		pool->active--;
1585 		_MALLOC_UNLOCK(pool->mutex);
1586 		_MALLOC_LOCK(argpool->mutex);
1587 		argpool->active++;
1588 	}
1589 	return ret;
1590 }
1591 
1592 void *
1593 realloc(void *ptr, size_t size)
1594 {
1595 	struct dir_info *d;
1596 	void *r;
1597 	int saved_errno = errno;
1598 
1599 	d = getpool();
1600 	if (d == NULL) {
1601 		_malloc_init(0);
1602 		d = getpool();
1603 	}
1604 	_MALLOC_LOCK(d->mutex);
1605 	d->func = "realloc";
1606 	if (d->active++) {
1607 		malloc_recurse(d);
1608 		return NULL;
1609 	}
1610 	r = orealloc(d, ptr, size, CALLER);
1611 
1612 	d->active--;
1613 	_MALLOC_UNLOCK(d->mutex);
1614 	if (r == NULL && mopts.malloc_xmalloc)
1615 		wrterror(d, "out of memory");
1616 	if (r != NULL)
1617 		errno = saved_errno;
1618 	return r;
1619 }
1620 /*DEF_STRONG(realloc);*/
1621 
1622 
1623 /*
1624  * This is sqrt(SIZE_MAX+1), as s1*s2 <= SIZE_MAX
1625  * if both s1 < MUL_NO_OVERFLOW and s2 < MUL_NO_OVERFLOW
1626  */
1627 #define MUL_NO_OVERFLOW	(1UL << (sizeof(size_t) * 4))
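
/*
 * For illustration on a 64-bit system: MUL_NO_OVERFLOW is 1UL << 32, so
 * if both nmemb and size are below 2^32 their product is below 2^64 and
 * cannot wrap; calloc() only pays for the SIZE_MAX / nmemb division when
 * at least one factor is that large.
 */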
1628 
1629 void *
1630 calloc(size_t nmemb, size_t size)
1631 {
1632 	struct dir_info *d;
1633 	void *r;
1634 	int saved_errno = errno;
1635 
1636 	d = getpool();
1637 	if (d == NULL) {
1638 		_malloc_init(0);
1639 		d = getpool();
1640 	}
1641 	_MALLOC_LOCK(d->mutex);
1642 	d->func = "calloc";
1643 	if ((nmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
1644 	    nmemb > 0 && SIZE_MAX / nmemb < size) {
1645 		_MALLOC_UNLOCK(d->mutex);
1646 		if (mopts.malloc_xmalloc)
1647 			wrterror(d, "out of memory");
1648 		errno = ENOMEM;
1649 		return NULL;
1650 	}
1651 
1652 	if (d->active++) {
1653 		malloc_recurse(d);
1654 		return NULL;
1655 	}
1656 
1657 	size *= nmemb;
1658 	r = omalloc(d, size, 1, CALLER);
1659 
1660 	d->active--;
1661 	_MALLOC_UNLOCK(d->mutex);
1662 	if (r == NULL && mopts.malloc_xmalloc)
1663 		wrterror(d, "out of memory");
1664 	if (r != NULL)
1665 		errno = saved_errno;
1666 	return r;
1667 }
1668 /*DEF_STRONG(calloc);*/
1669 
1670 static void *
1671 mapalign(struct dir_info *d, size_t alignment, size_t sz, int zero_fill)
1672 {
1673 	char *p, *q;
1674 
1675 	if (alignment < MALLOC_PAGESIZE || ((alignment - 1) & alignment) != 0)
1676 		wrterror(d, "mapalign bad alignment");
1677 	if (sz != PAGEROUND(sz))
1678 		wrterror(d, "mapalign round");
1679 
1680 	/* Allocate sz + alignment bytes of memory, which must include a
1681 	 * subrange of sz bytes that is properly aligned.  Unmap the
1682 	 * other bytes, and then return that subrange.
1683 	 */
1684 
1685 	/* We need sz + alignment to fit into a size_t. */
1686 	if (alignment > SIZE_MAX - sz)
1687 		return MAP_FAILED;
1688 
1689 	p = map(d, NULL, sz + alignment, zero_fill);
1690 	if (p == MAP_FAILED)
1691 		return MAP_FAILED;
1692 	q = (char *)(((uintptr_t)p + alignment - 1) & ~(alignment - 1));
1693 	if (q != p) {
1694 		if (munmap(p, q - p))
1695 			wrterror(d, "munmap %p", p);
1696 	}
1697 	if (munmap(q + sz, alignment - (q - p)))
1698 		wrterror(d, "munmap %p", q + sz);
1699 	STATS_SUB(d->malloc_used, alignment);
1700 
1701 	return q;
1702 }
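
/*
 * A sketch of the trim, assuming 4096-byte pages: for alignment = 8192 and
 * sz = 4096, map() returns 12288 bytes at some p; q is p rounded up to the
 * next 8192 boundary, at most 8191 bytes in.  The [p, q) head and the
 * [q + sz, p + 12288) tail are unmapped, leaving one properly aligned page.
 */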
1703 
1704 static void *
1705 omemalign(struct dir_info *pool, size_t alignment, size_t sz, int zero_fill, void *f)
1706 {
1707 	size_t psz;
1708 	void *p;
1709 
1710 	if (alignment <= MALLOC_PAGESIZE) {
1711 		/*
1712 		 * max(sz, alignment) is enough to ensure the requested alignment,
1713 		 * since the allocator always allocates power-of-two blocks.
1714 		 */
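		/*
		 * e.g. a hypothetical request with alignment = 64 and
		 * sz = 10 is bumped to 64 bytes and served from the
		 * 64-byte chunk bucket, whose slots are naturally
		 * 64-byte aligned within their page.
		 */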
1715 		if (sz < alignment)
1716 			sz = alignment;
1717 		return omalloc(pool, sz, zero_fill, f);
1718 	}
1719 
1720 	if (sz >= SIZE_MAX - mopts.malloc_guard - MALLOC_PAGESIZE) {
1721 		errno = ENOMEM;
1722 		return NULL;
1723 	}
1724 
1725 	sz += mopts.malloc_guard;
1726 	psz = PAGEROUND(sz);
1727 
1728 	p = mapalign(pool, alignment, psz, zero_fill);
1729 	if (p == NULL) {
1730 		errno = ENOMEM;
1731 		return NULL;
1732 	}
1733 
1734 	if (insert(pool, p, sz, f)) {
1735 		unmap(pool, p, psz);
1736 		errno = ENOMEM;
1737 		return NULL;
1738 	}
1739 
1740 	if (mopts.malloc_guard) {
1741 		if (mprotect((char *)p + psz - mopts.malloc_guard,
1742 		    mopts.malloc_guard, PROT_NONE))
1743 			wrterror(pool, "mprotect");
1744 		STATS_ADD(pool->malloc_guarded, mopts.malloc_guard);
1745 	}
1746 
1747 	if (mopts.malloc_junk == 2) {
1748 		if (zero_fill)
1749 			memset((char *)p + sz - mopts.malloc_guard,
1750 			    SOME_JUNK, psz - sz);
1751 		else
1752 			memset(p, SOME_JUNK, psz - mopts.malloc_guard);
1753 	}
1754 
1755 	return p;
1756 }
1757 
1758 int
1759 posix_memalign(void **memptr, size_t alignment, size_t size)
1760 {
1761 	struct dir_info *d;
1762 	int res, saved_errno = errno;
1763 	void *r;
1764 
1765 	/* Make sure that alignment is a large enough power of 2. */
1766 	if (((alignment - 1) & alignment) != 0 || alignment < sizeof(void *))
1767 		return EINVAL;
1768 
1769 	d = getpool();
1770 	if (d == NULL) {
1771 		_malloc_init(0);
1772 		d = getpool();
1773 	}
1774 	_MALLOC_LOCK(d->mutex);
1775 	d->func = "posix_memalign";
1776 	if (d->active++) {
1777 		malloc_recurse(d);
1778 		goto err;
1779 	}
1780 	r = omemalign(d, alignment, size, 0, CALLER);
1781 	d->active--;
1782 	_MALLOC_UNLOCK(d->mutex);
1783 	if (r == NULL) {
1784 		if (mopts.malloc_xmalloc)
1785 			wrterror(d, "out of memory");
1786 		goto err;
1787 	}
1788 	errno = saved_errno;
1789 	*memptr = r;
1790 	return 0;
1791 
1792 err:
1793 	res = errno;
1794 	errno = saved_errno;
1795 	return res;
1796 }
1797 /*DEF_STRONG(posix_memalign);*/
1798 
1799 #ifdef MALLOC_STATS
1800 
1801 struct malloc_leak {
1802 	void (*f)();
1803 	size_t total_size;
1804 	int count;
1805 };
1806 
1807 struct leaknode {
1808 	RB_ENTRY(leaknode) entry;
1809 	struct malloc_leak d;
1810 };
1811 
1812 static int
1813 leakcmp(struct leaknode *e1, struct leaknode *e2)
1814 {
1815 	return e1->d.f < e2->d.f ? -1 : e1->d.f > e2->d.f;
1816 }
1817 
1818 static RB_HEAD(leaktree, leaknode) leakhead;
1819 RB_GENERATE_STATIC(leaktree, leaknode, entry, leakcmp)
1820 
1821 static void
1822 putleakinfo(void *f, size_t sz, int cnt)
1823 {
1824 	struct leaknode key, *p;
1825 	static struct leaknode *page;
1826 	static int used;
1827 
1828 	if (cnt == 0 || page == MAP_FAILED)
1829 		return;
1830 
1831 	key.d.f = f;
1832 	p = RB_FIND(leaktree, &leakhead, &key);
1833 	if (p == NULL) {
1834 		if (page == NULL ||
1835 		    used >= MALLOC_PAGESIZE / sizeof(struct leaknode)) {
1836 			page = MMAP(MALLOC_PAGESIZE);
1837 			if (page == MAP_FAILED)
1838 				return;
1839 			used = 0;
1840 		}
1841 		p = &page[used++];
1842 		p->d.f = f;
1843 		p->d.total_size = sz * cnt;
1844 		p->d.count = cnt;
1845 		RB_INSERT(leaktree, &leakhead, p);
1846 	} else {
1847 		p->d.total_size += sz * cnt;
1848 		p->d.count += cnt;
1849 	}
1850 }
1851 
1852 static struct malloc_leak *malloc_leaks;
1853 
1854 static void
1855 writestr(int fd, const char *p)
1856 {
1857 	write(fd, p, strlen(p));
1858 }
1859 
1860 static void
1861 dump_leaks(int fd)
1862 {
1863 	struct leaknode *p;
1864 	char buf[64];
1865 	int i = 0;
1866 
1867 	writestr(fd, "Leak report\n");
1868 	writestr(fd, "                 f     sum      #    avg\n");
1869 	/* XXX only one page of summary */
1870 	if (malloc_leaks == NULL)
1871 		malloc_leaks = MMAP(MALLOC_PAGESIZE);
1872 	if (malloc_leaks != MAP_FAILED)
1873 		memset(malloc_leaks, 0, MALLOC_PAGESIZE);
1874 	RB_FOREACH(p, leaktree, &leakhead) {
1875 		snprintf(buf, sizeof(buf), "%18p %7zu %6u %6zu\n", p->d.f,
1876 		    p->d.total_size, p->d.count, p->d.total_size / p->d.count);
1877 		write(fd, buf, strlen(buf));
1878 		if (malloc_leaks == MAP_FAILED ||
1879 		    i >= MALLOC_PAGESIZE / sizeof(struct malloc_leak))
1880 			continue;
1881 		malloc_leaks[i].f = p->d.f;
1882 		malloc_leaks[i].total_size = p->d.total_size;
1883 		malloc_leaks[i].count = p->d.count;
1884 		i++;
1885 	}
1886 }
1887 
1888 static void
1889 dump_chunk(int fd, struct chunk_info *p, void *f, int fromfreelist)
1890 {
1891 	char buf[64];
1892 
1893 	while (p != NULL) {
1894 		snprintf(buf, sizeof(buf), "chunk %18p %18p %4d %d/%d\n",
1895 		    p->page, ((p->bits[0] & 1) ? NULL : f),
1896 		    p->size, p->free, p->total);
1897 		write(fd, buf, strlen(buf));
1898 		if (!fromfreelist) {
1899 			if (p->bits[0] & 1)
1900 				putleakinfo(NULL, p->size, p->total - p->free);
1901 			else {
1902 				putleakinfo(f, p->size, 1);
1903 				putleakinfo(NULL, p->size,
1904 				    p->total - p->free - 1);
1905 			}
1906 			break;
1907 		}
1908 		p = LIST_NEXT(p, entries);
1909 		if (p != NULL)
1910 			writestr(fd, "        ");
1911 	}
1912 }
1913 
1914 static void
1915 dump_free_chunk_info(int fd, struct dir_info *d)
1916 {
1917 	char buf[64];
1918 	int i, j, count;
1919 	struct chunk_info *p;
1920 
1921 	writestr(fd, "Free chunk structs:\n");
1922 	for (i = 0; i <= MALLOC_MAXSHIFT; i++) {
1923 		count = 0;
1924 		LIST_FOREACH(p, &d->chunk_info_list[i], entries)
1925 			count++;
1926 		for (j = 0; j < MALLOC_CHUNK_LISTS; j++) {
1927 			p = LIST_FIRST(&d->chunk_dir[i][j]);
1928 			if (p == NULL && count == 0)
1929 				continue;
1930 			snprintf(buf, sizeof(buf), "%2d) %3d ", i, count);
1931 			write(fd, buf, strlen(buf));
1932 			if (p != NULL)
1933 				dump_chunk(fd, p, NULL, 1);
1934 			else
1935 				write(fd, "\n", 1);
1936 		}
1937 	}
1938 
1939 }
1940 
1941 static void
1942 dump_free_page_info(int fd, struct dir_info *d)
1943 {
1944 	char buf[64];
1945 	int i;
1946 
1947 	snprintf(buf, sizeof(buf), "Free pages cached: %zu\n",
1948 	    d->free_regions_size);
1949 	write(fd, buf, strlen(buf));
1950 	for (i = 0; i < mopts.malloc_cache; i++) {
1951 		if (d->free_regions[i].p != NULL) {
1952 			snprintf(buf, sizeof(buf), "%2d) ", i);
1953 			write(fd, buf, strlen(buf));
1954 			snprintf(buf, sizeof(buf), "free at %p: %zu\n",
1955 			    d->free_regions[i].p, d->free_regions[i].size);
1956 			write(fd, buf, strlen(buf));
1957 		}
1958 	}
1959 }
1960 
1961 static void
1962 malloc_dump1(int fd, int poolno, struct dir_info *d)
1963 {
1964 	char buf[100];
1965 	size_t i, realsize;
1966 
1967 	snprintf(buf, sizeof(buf), "Malloc dir of %s pool %d at %p\n", __progname, poolno, d);
1968 	write(fd, buf, strlen(buf));
1969 	if (d == NULL)
1970 		return;
1971 	snprintf(buf, sizeof(buf), "Region slots free %zu/%zu\n",
1972 		d->regions_free, d->regions_total);
1973 	write(fd, buf, strlen(buf));
1974 	snprintf(buf, sizeof(buf), "Finds %zu/%zu\n", d->finds,
1975 	    d->find_collisions);
1976 	write(fd, buf, strlen(buf));
1977 	snprintf(buf, sizeof(buf), "Inserts %zu/%zu\n", d->inserts,
1978 	    d->insert_collisions);
1979 	write(fd, buf, strlen(buf));
1980 	snprintf(buf, sizeof(buf), "Deletes %zu/%zu\n", d->deletes,
1981 	    d->delete_moves);
1982 	write(fd, buf, strlen(buf));
1983 	snprintf(buf, sizeof(buf), "Cheap reallocs %zu/%zu\n",
1984 	    d->cheap_reallocs, d->cheap_realloc_tries);
1985 	write(fd, buf, strlen(buf));
1986 	snprintf(buf, sizeof(buf), "In use %zu\n", d->malloc_used);
1987 	write(fd, buf, strlen(buf));
1988 	snprintf(buf, sizeof(buf), "Guarded %zu\n", d->malloc_guarded);
1989 	write(fd, buf, strlen(buf));
1990 	dump_free_chunk_info(fd, d);
1991 	dump_free_page_info(fd, d);
1992 	writestr(fd,
1993 	    "slot)  hash d  type               page                  f size [free/n]\n");
1994 	for (i = 0; i < d->regions_total; i++) {
1995 		if (d->r[i].p != NULL) {
1996 			size_t h = hash(d->r[i].p) &
1997 			    (d->regions_total - 1);
1998 			snprintf(buf, sizeof(buf), "%4zx) #%4zx %zd ",
1999 			    i, h, h - i);
2000 			write(fd, buf, strlen(buf));
2001 			REALSIZE(realsize, &d->r[i]);
2002 			if (realsize > MALLOC_MAXCHUNK) {
2003 				putleakinfo(d->r[i].f, realsize, 1);
2004 				snprintf(buf, sizeof(buf),
2005 				    "pages %18p %18p %zu\n", d->r[i].p,
2006 				    d->r[i].f, realsize);
2007 				write(fd, buf, strlen(buf));
2008 			} else
2009 				dump_chunk(fd,
2010 				    (struct chunk_info *)d->r[i].size,
2011 				    d->r[i].f, 0);
2012 		}
2013 	}
2014 	dump_leaks(fd);
2015 	write(fd, "\n", 1);
2016 }
2017 
2018 void
2019 malloc_dump(int fd, int poolno, struct dir_info *pool)
2020 {
2021 	int i;
2022 	void *p;
2023 	struct region_info *r;
2024 	int saved_errno = errno;
2025 
2026 	if (pool == NULL)
2027 		return;
2028 	for (i = 0; i < MALLOC_DELAYED_CHUNK_MASK + 1; i++) {
2029 		p = pool->delayed_chunks[i];
2030 		if (p == NULL)
2031 			continue;
2032 		r = find(pool, p);
2033 		if (r == NULL)
2034 			wrterror(pool, "bogus pointer in malloc_dump %p", p);
2035 		free_bytes(pool, r, p);
2036 		pool->delayed_chunks[i] = NULL;
2037 	}
2038 	/* XXX leak when run multiple times */
2039 	RB_INIT(&leakhead);
2040 	malloc_dump1(fd, poolno, pool);
2041 	errno = saved_errno;
2042 }
2043 DEF_WEAK(malloc_dump);
2044 
2045 static void
2046 malloc_exit(void)
2047 {
2048 	static const char q[] = "malloc() warning: Couldn't dump stats\n";
2049 	int save_errno = errno, fd, i;
2050 	char buf[100];
2051 
2052 	fd = open("malloc.out", O_RDWR|O_APPEND);
2053 	if (fd != -1) {
2054 		snprintf(buf, sizeof(buf), "******** Start dump %s *******\n",
2055 		     __progname);
2056 		write(fd, buf, strlen(buf));
2057 		snprintf(buf, sizeof(buf),
2058 		    "MT=%d F=%d U=%d J=%d R=%d X=%d C=%d cache=%u G=%zu\n",
2059 		    mopts.malloc_mt, mopts.malloc_freenow,
2060 		    mopts.malloc_freeunmap, mopts.malloc_junk,
2061 		    mopts.malloc_realloc, mopts.malloc_xmalloc,
2062 		    mopts.chunk_canaries, mopts.malloc_cache,
2063 		    mopts.malloc_guard);
2064 		write(fd, buf, strlen(buf));
2065 
2066 		for (i = 0; i < _MALLOC_MUTEXES; i++)
2067 			malloc_dump(fd, i, mopts.malloc_pool[i]);
2068 		snprintf(buf, sizeof(buf), "******** End dump %s *******\n",
2069 		    __progname);
2070 		write(fd, buf, strlen(buf));
2071 		close(fd);
2072 	} else
2073 		write(STDERR_FILENO, q, sizeof(q) - 1);
2074 	errno = save_errno;
2075 }
2076 
2077 #endif /* MALLOC_STATS */
2078