xref: /openbsd-src/lib/libc/stdlib/malloc.c (revision 0778079a1a8964ffa14104e44e3032d5c3accda4)
1 /*	$OpenBSD: malloc.c,v 1.291 2023/10/22 12:19:26 otto Exp $	*/
2 /*
3  * Copyright (c) 2008, 2010, 2011, 2016, 2023 Otto Moerbeek <otto@drijf.net>
4  * Copyright (c) 2012 Matthew Dempsky <matthew@openbsd.org>
5  * Copyright (c) 2008 Damien Miller <djm@openbsd.org>
6  * Copyright (c) 2000 Poul-Henning Kamp <phk@FreeBSD.org>
7  *
8  * Permission to use, copy, modify, and distribute this software for any
9  * purpose with or without fee is hereby granted, provided that the above
10  * copyright notice and this permission notice appear in all copies.
11  *
12  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19  */
20 
21 /*
22  * If we meet some day, and you think this stuff is worth it, you
23  * can buy me a beer in return. Poul-Henning Kamp
24  */
25 
26 #ifndef MALLOC_SMALL
27 #define MALLOC_STATS
28 #endif
29 
30 #include <sys/types.h>
31 #include <sys/queue.h>
32 #include <sys/mman.h>
33 #include <sys/sysctl.h>
34 #include <uvm/uvmexp.h>
35 #include <errno.h>
36 #include <stdarg.h>
37 #include <stdint.h>
38 #include <stdio.h>
39 #include <stdlib.h>
40 #include <string.h>
41 #include <unistd.h>
42 
43 #ifdef MALLOC_STATS
44 #include <sys/tree.h>
45 #include <sys/ktrace.h>
46 #include <dlfcn.h>
47 #endif
48 
49 #include "thread_private.h"
50 #include <tib.h>
51 
52 #define MALLOC_PAGESHIFT	_MAX_PAGE_SHIFT
53 
54 #define MALLOC_MINSHIFT		4
55 #define MALLOC_MAXSHIFT		(MALLOC_PAGESHIFT - 1)
56 #define MALLOC_PAGESIZE		(1UL << MALLOC_PAGESHIFT)
57 #define MALLOC_MINSIZE		(1UL << MALLOC_MINSHIFT)
58 #define MALLOC_PAGEMASK		(MALLOC_PAGESIZE - 1)
59 #define MASK_POINTER(p)		((void *)(((uintptr_t)(p)) & ~MALLOC_PAGEMASK))
60 
61 #define MALLOC_MAXCHUNK		(1 << MALLOC_MAXSHIFT)
62 #define MALLOC_MAXCACHE		256
63 #define MALLOC_DELAYED_CHUNK_MASK	15
64 #ifdef MALLOC_STATS
65 #define MALLOC_INITIAL_REGIONS	512
66 #else
67 #define MALLOC_INITIAL_REGIONS	(MALLOC_PAGESIZE / sizeof(struct region_info))
68 #endif
69 #define MALLOC_DEFAULT_CACHE	64
70 #define MALLOC_CHUNK_LISTS	4
71 #define CHUNK_CHECK_LENGTH	32
72 
73 #define B2SIZE(b)		((b) * MALLOC_MINSIZE)
74 #define B2ALLOC(b)		((b) == 0 ? MALLOC_MINSIZE : \
75 				    (b) * MALLOC_MINSIZE)
76 #define BUCKETS 		(MALLOC_MAXCHUNK / MALLOC_MINSIZE)
77 
78 /*
79  * We move allocations between half a page and a whole page towards the end,
80  * subject to alignment constraints. This is the extra headroom we allow.
81  * Set to zero to be the most strict.
82  */
83 #define MALLOC_LEEWAY		0
84 #define MALLOC_MOVE_COND(sz)	((sz) - mopts.malloc_guard < 		\
85 				    MALLOC_PAGESIZE - MALLOC_LEEWAY)
86 #define MALLOC_MOVE(p, sz)  	(((char *)(p)) +			\
87 				    ((MALLOC_PAGESIZE - MALLOC_LEEWAY -	\
88 			    	    ((sz) - mopts.malloc_guard)) & 	\
89 				    ~(MALLOC_MINSIZE - 1)))
90 
91 #define PAGEROUND(x)  (((x) + (MALLOC_PAGEMASK)) & ~MALLOC_PAGEMASK)
92 
93 /*
94  * What to use for Junk.  This is the byte value we use to fill with
95  * when the 'J' option is enabled. Use SOME_JUNK right after alloc,
96  * and SOME_FREEJUNK right before free.
97  */
98 #define SOME_JUNK		0xdb	/* deadbeef */
99 #define SOME_FREEJUNK		0xdf	/* dead, free */
100 #define SOME_FREEJUNK_ULL	0xdfdfdfdfdfdfdfdfULL
101 
102 #define MMAP(sz,f)	mmap(NULL, (sz), PROT_READ | PROT_WRITE, \
103     MAP_ANON | MAP_PRIVATE | (f), -1, 0)
104 
105 #define MMAPNONE(sz,f)	mmap(NULL, (sz), PROT_NONE, \
106     MAP_ANON | MAP_PRIVATE | (f), -1, 0)
107 
108 #define MMAPA(a,sz,f)	mmap((a), (sz), PROT_READ | PROT_WRITE, \
109     MAP_ANON | MAP_PRIVATE | (f), -1, 0)
110 
111 struct region_info {
112 	void *p;		/* page; low bits used to mark chunks */
113 	uintptr_t size;		/* size for pages, or chunk_info pointer */
114 #ifdef MALLOC_STATS
115 	void **f;		/* where allocated from */
116 #endif
117 };
118 
119 LIST_HEAD(chunk_head, chunk_info);
120 
121 /*
122  * Two caches, one for "small" regions, one for "big".
123  * Small cache is an array per size, big cache is one array with different
124  * sized regions
125  */
126 #define MAX_SMALLCACHEABLE_SIZE	32
127 #define MAX_BIGCACHEABLE_SIZE	512
128 /* If the total # of pages is larger than this, evict before inserting */
129 #define BIGCACHE_FILL(sz)	(MAX_BIGCACHEABLE_SIZE * (sz) / 4)
130 
131 struct smallcache {
132 	void **pages;
133 	ushort length;
134 	ushort max;
135 };
136 
137 struct bigcache {
138 	void *page;
139 	size_t psize;
140 };
141 
142 struct dir_info {
143 	u_int32_t canary1;
144 	int active;			/* status of malloc */
145 	struct region_info *r;		/* region slots */
146 	size_t regions_total;		/* number of region slots */
147 	size_t regions_free;		/* number of free slots */
148 	size_t rbytesused;		/* random bytes used */
149 	const char *func;		/* current function */
150 	int malloc_junk;		/* junk fill? */
151 	int mmap_flag;			/* extra flag for mmap */
152 	int mutex;
153 	int malloc_mt;			/* multi-threaded mode? */
154 					/* lists of free chunk info structs */
155 	struct chunk_head chunk_info_list[BUCKETS + 1];
156 					/* lists of chunks with free slots */
157 	struct chunk_head chunk_dir[BUCKETS + 1][MALLOC_CHUNK_LISTS];
158 					/* delayed free chunk slots */
159 	void *delayed_chunks[MALLOC_DELAYED_CHUNK_MASK + 1];
160 	u_char rbytes[32];		/* random bytes */
161 					/* free pages cache */
162 	struct smallcache smallcache[MAX_SMALLCACHEABLE_SIZE];
163 	size_t bigcache_used;
164 	size_t bigcache_size;
165 	struct bigcache *bigcache;
166 	void *chunk_pages;
167 	size_t chunk_pages_used;
168 #ifdef MALLOC_STATS
169 	void *caller;
170 	size_t inserts;
171 	size_t insert_collisions;
172 	size_t finds;
173 	size_t find_collisions;
174 	size_t deletes;
175 	size_t delete_moves;
176 	size_t cheap_realloc_tries;
177 	size_t cheap_reallocs;
178 	size_t malloc_used;		/* bytes allocated */
179 	size_t malloc_guarded;		/* bytes used for guards */
180 	size_t pool_searches;		/* searches for pool */
181 	size_t other_pool;		/* searches in other pool */
182 #define STATS_ADD(x,y)	((x) += (y))
183 #define STATS_SUB(x,y)	((x) -= (y))
184 #define STATS_INC(x)	((x)++)
185 #define STATS_ZERO(x)	((x) = 0)
186 #define STATS_SETF(x,y)	((x)->f = (y))
187 #define STATS_SETFN(x,k,y)	((x)->f[k] = (y))
188 #define SET_CALLER(x,y)	if (DO_STATS) ((x)->caller = (y))
189 #else
190 #define STATS_ADD(x,y)	/* nothing */
191 #define STATS_SUB(x,y)	/* nothing */
192 #define STATS_INC(x)	/* nothing */
193 #define STATS_ZERO(x)	/* nothing */
194 #define STATS_SETF(x,y)	/* nothing */
195 #define STATS_SETFN(x,k,y)	/* nothing */
196 #define SET_CALLER(x,y)	/* nothing */
197 #endif /* MALLOC_STATS */
198 	u_int32_t canary2;
199 };
200 
201 static void unmap(struct dir_info *d, void *p, size_t sz, size_t clear);
202 
203 /*
204  * This structure describes a page worth of chunks.
205  *
206  * How many bits per u_short in the bitmap
207  */
208 #define MALLOC_BITS		(NBBY * sizeof(u_short))
209 struct chunk_info {
210 	LIST_ENTRY(chunk_info) entries;
211 	void *page;			/* pointer to the page */
212 	u_short canary;
213 	u_short bucket;
214 	u_short free;			/* how many free chunks */
215 	u_short total;			/* how many chunks */
216 	u_short offset;			/* requested size table offset */
217 	u_short bits[1];		/* which chunks are free */
218 };
219 
220 #define CHUNK_FREE(i, n)	((i)->bits[(n) / MALLOC_BITS] & (1U << ((n) % MALLOC_BITS)))
221 
222 struct malloc_readonly {
223 					/* Main bookkeeping information */
224 	struct dir_info *malloc_pool[_MALLOC_MUTEXES];
225 	u_int	malloc_mutexes;		/* how much in actual use? */
226 	int	malloc_freecheck;	/* Extensive double free check */
227 	int	malloc_freeunmap;	/* mprotect free pages PROT_NONE? */
228 	int	def_malloc_junk;	/* junk fill? */
229 	int	malloc_realloc;		/* always realloc? */
230 	int	malloc_xmalloc;		/* xmalloc behaviour? */
231 	u_int	chunk_canaries;		/* use canaries after chunks? */
232 	int	internal_funcs;		/* use better recallocarray/freezero? */
233 	u_int	def_maxcache;		/* free pages we cache */
234 	u_int	junk_loc;		/* variation in location of junk */
235 	size_t	malloc_guard;		/* use guard pages after allocations? */
236 #ifdef MALLOC_STATS
237 	int	malloc_stats;		/* save callers, dump leak report at end */
238 	int	malloc_verbose;		/* dump verbose statistics at end */
239 #define	DO_STATS	mopts.malloc_stats
240 #else
241 #define	DO_STATS	0
242 #endif
243 	u_int32_t malloc_canary;	/* Matched against ones in pool */
244 };
245 
246 
247 /* This object is mapped PROT_READ after initialisation to prevent tampering */
248 static union {
249 	struct malloc_readonly mopts;
250 	u_char _pad[MALLOC_PAGESIZE];
251 } malloc_readonly __attribute__((aligned(MALLOC_PAGESIZE)))
252 		__attribute__((section(".openbsd.mutable")));
253 #define mopts	malloc_readonly.mopts
254 
255 char		*malloc_options;	/* compile-time options */
256 
257 static __dead void wrterror(struct dir_info *d, char *msg, ...)
258     __attribute__((__format__ (printf, 2, 3)));
259 
260 #ifdef MALLOC_STATS
261 void malloc_dump(void);
262 PROTO_NORMAL(malloc_dump);
263 static void malloc_exit(void);
264 static void print_chunk_details(struct dir_info *, void *, size_t, size_t);
265 #endif
266 
267 #if defined(__aarch64__) || \
268 	defined(__amd64__) || \
269 	defined(__arm__)
270 static inline void* caller(void)
271 {
272 	void *p;
273 
274 	switch (DO_STATS) {
275 	case 0:
276 	default:
277 		return NULL;
278 	case 1:
279 		p = __builtin_return_address(0);
280 		break;
281 	case 2:
282 		p = __builtin_return_address(1);
283 		break;
284 	case 3:
285 		p = __builtin_return_address(2);
286 		break;
287 	}
288 	return __builtin_extract_return_addr(p);
289 }
290 #else
291 static inline void* caller(void)
292 {
293 	return DO_STATS == 0 ? NULL :
294 	    __builtin_extract_return_addr(__builtin_return_address(0));
295 }
296 #endif
297 
298 /* low bits of r->p determine size: 0 means >= page size and r->size holding
299  * real size, otherwise low bits is the bucket + 1
300  */
301 #define REALSIZE(sz, r)						\
302 	(sz) = (uintptr_t)(r)->p & MALLOC_PAGEMASK,		\
303 	(sz) = ((sz) == 0 ? (r)->size : B2SIZE((sz) - 1))
304 
305 static inline size_t
306 hash(void *p)
307 {
308 	size_t sum;
309 	uintptr_t u;
310 
311 	u = (uintptr_t)p >> MALLOC_PAGESHIFT;
312 	sum = u;
313 	sum = (sum << 7) - sum + (u >> 16);
314 #ifdef __LP64__
315 	sum = (sum << 7) - sum + (u >> 32);
316 	sum = (sum << 7) - sum + (u >> 48);
317 #endif
318 	return sum;
319 }
320 
321 static inline struct dir_info *
322 getpool(void)
323 {
324 	if (mopts.malloc_pool[1] == NULL || !mopts.malloc_pool[1]->malloc_mt)
325 		return mopts.malloc_pool[1];
326 	else	/* first one reserved for special pool */
327 		return mopts.malloc_pool[1 + TIB_GET()->tib_tid %
328 		    (mopts.malloc_mutexes - 1)];
329 }
330 
331 static __dead void
332 wrterror(struct dir_info *d, char *msg, ...)
333 {
334 	int		saved_errno = errno;
335 	va_list		ap;
336 
337 	dprintf(STDERR_FILENO, "%s(%d) in %s(): ", __progname,
338 	    getpid(), (d != NULL && d->func) ? d->func : "unknown");
339 	va_start(ap, msg);
340 	vdprintf(STDERR_FILENO, msg, ap);
341 	va_end(ap);
342 	dprintf(STDERR_FILENO, "\n");
343 
344 #ifdef MALLOC_STATS
345 	if (DO_STATS && mopts.malloc_verbose)
346 		malloc_dump();
347 #endif
348 
349 	errno = saved_errno;
350 
351 	abort();
352 }
353 
354 static void
355 rbytes_init(struct dir_info *d)
356 {
357 	arc4random_buf(d->rbytes, sizeof(d->rbytes));
358 	/* add 1 to account for using d->rbytes[0] */
359 	d->rbytesused = 1 + d->rbytes[0] % (sizeof(d->rbytes) / 2);
360 }
361 
362 static inline u_char
363 getrbyte(struct dir_info *d)
364 {
365 	u_char x;
366 
367 	if (d->rbytesused >= sizeof(d->rbytes))
368 		rbytes_init(d);
369 	x = d->rbytes[d->rbytesused++];
370 	return x;
371 }
372 
373 static void
374 omalloc_parseopt(char opt)
375 {
376 	switch (opt) {
377 	case '+':
378 		mopts.malloc_mutexes <<= 1;
379 		if (mopts.malloc_mutexes > _MALLOC_MUTEXES)
380 			mopts.malloc_mutexes = _MALLOC_MUTEXES;
381 		break;
382 	case '-':
383 		mopts.malloc_mutexes >>= 1;
384 		if (mopts.malloc_mutexes < 2)
385 			mopts.malloc_mutexes = 2;
386 		break;
387 	case '>':
388 		mopts.def_maxcache <<= 1;
389 		if (mopts.def_maxcache > MALLOC_MAXCACHE)
390 			mopts.def_maxcache = MALLOC_MAXCACHE;
391 		break;
392 	case '<':
393 		mopts.def_maxcache >>= 1;
394 		break;
395 	case 'c':
396 		mopts.chunk_canaries = 0;
397 		break;
398 	case 'C':
399 		mopts.chunk_canaries = 1;
400 		break;
401 #ifdef MALLOC_STATS
402 	case 'd':
403 		mopts.malloc_stats = 0;
404 		break;
405 	case 'D':
406 	case '1':
407 		mopts.malloc_stats = 1;
408 		break;
409 	case '2':
410 		mopts.malloc_stats = 2;
411 		break;
412 	case '3':
413 		mopts.malloc_stats = 3;
414 		break;
415 #endif /* MALLOC_STATS */
416 	case 'f':
417 		mopts.malloc_freecheck = 0;
418 		mopts.malloc_freeunmap = 0;
419 		break;
420 	case 'F':
421 		mopts.malloc_freecheck = 1;
422 		mopts.malloc_freeunmap = 1;
423 		break;
424 	case 'g':
425 		mopts.malloc_guard = 0;
426 		break;
427 	case 'G':
428 		mopts.malloc_guard = MALLOC_PAGESIZE;
429 		break;
430 	case 'j':
431 		if (mopts.def_malloc_junk > 0)
432 			mopts.def_malloc_junk--;
433 		break;
434 	case 'J':
435 		if (mopts.def_malloc_junk < 2)
436 			mopts.def_malloc_junk++;
437 		break;
438 	case 'r':
439 		mopts.malloc_realloc = 0;
440 		break;
441 	case 'R':
442 		mopts.malloc_realloc = 1;
443 		break;
444 	case 'u':
445 		mopts.malloc_freeunmap = 0;
446 		break;
447 	case 'U':
448 		mopts.malloc_freeunmap = 1;
449 		break;
450 #ifdef MALLOC_STATS
451 	case 'v':
452 		mopts.malloc_verbose = 0;
453 		break;
454 	case 'V':
455 		mopts.malloc_verbose = 1;
456 		break;
457 #endif /* MALLOC_STATS */
458 	case 'x':
459 		mopts.malloc_xmalloc = 0;
460 		break;
461 	case 'X':
462 		mopts.malloc_xmalloc = 1;
463 		break;
464 	default:
465 		dprintf(STDERR_FILENO, "malloc() warning: "
466                     "unknown char in MALLOC_OPTIONS\n");
467 		break;
468 	}
469 }
470 
471 static void
472 omalloc_init(void)
473 {
474 	char *p, *q, b[16];
475 	int i, j;
476 	const int mib[2] = { CTL_VM, VM_MALLOC_CONF };
477 	size_t sb;
478 
479 	/*
480 	 * Default options
481 	 */
482 	mopts.malloc_mutexes = 8;
483 	mopts.def_malloc_junk = 1;
484 	mopts.def_maxcache = MALLOC_DEFAULT_CACHE;
485 
486 	for (i = 0; i < 3; i++) {
487 		switch (i) {
488 		case 0:
489 			sb = sizeof(b);
490 			j = sysctl(mib, 2, b, &sb, NULL, 0);
491 			if (j != 0)
492 				continue;
493 			p = b;
494 			break;
495 		case 1:
496 			if (issetugid() == 0)
497 				p = getenv("MALLOC_OPTIONS");
498 			else
499 				continue;
500 			break;
501 		case 2:
502 			p = malloc_options;
503 			break;
504 		default:
505 			p = NULL;
506 		}
507 
508 		for (; p != NULL && *p != '\0'; p++) {
509 			switch (*p) {
510 			case 'S':
511 				for (q = "CFGJ"; *q != '\0'; q++)
512 					omalloc_parseopt(*q);
513 				mopts.def_maxcache = 0;
514 				break;
515 			case 's':
516 				for (q = "cfgj"; *q != '\0'; q++)
517 					omalloc_parseopt(*q);
518 				mopts.def_maxcache = MALLOC_DEFAULT_CACHE;
519 				break;
520 			default:
521 				omalloc_parseopt(*p);
522 				break;
523 			}
524 		}
525 	}
526 
527 #ifdef MALLOC_STATS
528 	if (DO_STATS && (atexit(malloc_exit) == -1)) {
529 		dprintf(STDERR_FILENO, "malloc() warning: atexit(2) failed."
530 		    " Will not be able to dump stats on exit\n");
531 	}
532 #endif
533 
534 	while ((mopts.malloc_canary = arc4random()) == 0)
535 		;
536 	mopts.junk_loc = arc4random();
537 	if (mopts.chunk_canaries)
538 		do {
539 			mopts.chunk_canaries = arc4random();
540 		} while ((u_char)mopts.chunk_canaries == 0 ||
541 		    (u_char)mopts.chunk_canaries == SOME_FREEJUNK);
542 }
543 
544 static void
545 omalloc_poolinit(struct dir_info *d, int mmap_flag)
546 {
547 	int i, j;
548 
549 	d->r = NULL;
550 	d->rbytesused = sizeof(d->rbytes);
551 	d->regions_free = d->regions_total = 0;
552 	for (i = 0; i <= BUCKETS; i++) {
553 		LIST_INIT(&d->chunk_info_list[i]);
554 		for (j = 0; j < MALLOC_CHUNK_LISTS; j++)
555 			LIST_INIT(&d->chunk_dir[i][j]);
556 	}
557 	d->mmap_flag = mmap_flag;
558 	d->malloc_junk = mopts.def_malloc_junk;
559 	d->canary1 = mopts.malloc_canary ^ (u_int32_t)(uintptr_t)d;
560 	d->canary2 = ~d->canary1;
561 }
562 
563 static int
564 omalloc_grow(struct dir_info *d)
565 {
566 	size_t newtotal;
567 	size_t newsize;
568 	size_t mask;
569 	size_t i, oldpsz;
570 	struct region_info *p;
571 
572 	if (d->regions_total > SIZE_MAX / sizeof(struct region_info) / 2)
573 		return 1;
574 
575 	newtotal = d->regions_total == 0 ? MALLOC_INITIAL_REGIONS :
576 	    d->regions_total * 2;
577 	newsize = PAGEROUND(newtotal * sizeof(struct region_info));
578 	mask = newtotal - 1;
579 
580 	/* Don't use cache here, we don't want user uaf touch this */
581 	p = MMAP(newsize, d->mmap_flag);
582 	if (p == MAP_FAILED)
583 		return 1;
584 
585 	STATS_ADD(d->malloc_used, newsize);
586 	STATS_ZERO(d->inserts);
587 	STATS_ZERO(d->insert_collisions);
588 	for (i = 0; i < d->regions_total; i++) {
589 		void *q = d->r[i].p;
590 		if (q != NULL) {
591 			size_t index = hash(q) & mask;
592 			STATS_INC(d->inserts);
593 			while (p[index].p != NULL) {
594 				index = (index - 1) & mask;
595 				STATS_INC(d->insert_collisions);
596 			}
597 			p[index] = d->r[i];
598 		}
599 	}
600 
601 	if (d->regions_total > 0) {
602 		oldpsz = PAGEROUND(d->regions_total * sizeof(struct region_info));
603 		/* clear to avoid meta info ending up in the cache */
604 		unmap(d, d->r, oldpsz, oldpsz);
605 	}
606 	d->regions_free += newtotal - d->regions_total;
607 	d->regions_total = newtotal;
608 	d->r = p;
609 	return 0;
610 }
611 
612 /*
613  * The hashtable uses the assumption that p is never NULL. This holds since
614  * non-MAP_FIXED mappings with hint 0 start at BRKSIZ.
615  */
616 static int
617 insert(struct dir_info *d, void *p, size_t sz, void *f)
618 {
619 	size_t index;
620 	size_t mask;
621 	void *q;
622 
623 	if (d->regions_free * 4 < d->regions_total || d->regions_total == 0) {
624 		if (omalloc_grow(d))
625 			return 1;
626 	}
627 	mask = d->regions_total - 1;
628 	index = hash(p) & mask;
629 	q = d->r[index].p;
630 	STATS_INC(d->inserts);
631 	while (q != NULL) {
632 		index = (index - 1) & mask;
633 		q = d->r[index].p;
634 		STATS_INC(d->insert_collisions);
635 	}
636 	d->r[index].p = p;
637 	d->r[index].size = sz;
638 	STATS_SETF(&d->r[index], f);
639 	d->regions_free--;
640 	return 0;
641 }
642 
643 static struct region_info *
644 find(struct dir_info *d, void *p)
645 {
646 	size_t index;
647 	size_t mask = d->regions_total - 1;
648 	void *q, *r;
649 
650 	if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)(uintptr_t)d) ||
651 	    d->canary1 != ~d->canary2)
652 		wrterror(d, "internal struct corrupt");
653 	if (d->r == NULL)
654 		return NULL;
655 	p = MASK_POINTER(p);
656 	index = hash(p) & mask;
657 	r = d->r[index].p;
658 	q = MASK_POINTER(r);
659 	STATS_INC(d->finds);
660 	while (q != p && r != NULL) {
661 		index = (index - 1) & mask;
662 		r = d->r[index].p;
663 		q = MASK_POINTER(r);
664 		STATS_INC(d->find_collisions);
665 	}
666 	return (q == p && r != NULL) ? &d->r[index] : NULL;
667 }
668 
669 static void
670 delete(struct dir_info *d, struct region_info *ri)
671 {
672 	/* algorithm R, Knuth Vol III section 6.4 */
673 	size_t mask = d->regions_total - 1;
674 	size_t i, j, r;
675 
676 	if (d->regions_total & (d->regions_total - 1))
677 		wrterror(d, "regions_total not 2^x");
678 	d->regions_free++;
679 	STATS_INC(d->deletes);
680 
681 	i = ri - d->r;
682 	for (;;) {
683 		d->r[i].p = NULL;
684 		d->r[i].size = 0;
685 		j = i;
686 		for (;;) {
687 			i = (i - 1) & mask;
688 			if (d->r[i].p == NULL)
689 				return;
690 			r = hash(d->r[i].p) & mask;
691 			if ((i <= r && r < j) || (r < j && j < i) ||
692 			    (j < i && i <= r))
693 				continue;
694 			d->r[j] = d->r[i];
695 			STATS_INC(d->delete_moves);
696 			break;
697 		}
698 
699 	}
700 }
701 
702 static inline void
703 junk_free(int junk, void *p, size_t sz)
704 {
705 	size_t i, step = 1;
706 	uint64_t *lp = p;
707 
708 	if (junk == 0 || sz == 0)
709 		return;
710 	sz /= sizeof(uint64_t);
711 	if (junk == 1) {
712 		if (sz > MALLOC_PAGESIZE / sizeof(uint64_t))
713 			sz = MALLOC_PAGESIZE / sizeof(uint64_t);
714 		step = sz / 4;
715 		if (step == 0)
716 			step = 1;
717 	}
718 	/* Do not always put the free junk bytes in the same spot.
719 	   There is modulo bias here, but we ignore that. */
720 	for (i = mopts.junk_loc % step; i < sz; i += step)
721 		lp[i] = SOME_FREEJUNK_ULL;
722 }
723 
724 static inline void
725 validate_junk(struct dir_info *pool, void *p, size_t argsz)
726 {
727 	size_t i, sz, step = 1;
728 	uint64_t *lp = p;
729 
730 	if (pool->malloc_junk == 0 || argsz == 0)
731 		return;
732 	sz = argsz / sizeof(uint64_t);
733 	if (pool->malloc_junk == 1) {
734 		if (sz > MALLOC_PAGESIZE / sizeof(uint64_t))
735 			sz = MALLOC_PAGESIZE / sizeof(uint64_t);
736 		step = sz / 4;
737 		if (step == 0)
738 			step = 1;
739 	}
740 	/* see junk_free */
741 	for (i = mopts.junk_loc % step; i < sz; i += step) {
742 		if (lp[i] != SOME_FREEJUNK_ULL) {
743 #ifdef MALLOC_STATS
744 			if (DO_STATS && argsz <= MALLOC_MAXCHUNK)
745 				print_chunk_details(pool, lp, argsz, i);
746 			else
747 #endif
748 				wrterror(pool,
749 				    "write to free mem %p[%zu..%zu]@%zu",
750 				    lp, i * sizeof(uint64_t),
751 				    (i + 1) * sizeof(uint64_t) - 1, argsz);
752 		}
753 	}
754 }
755 
756 
757 /*
758  * Cache maintenance.
759  * Opposed to the regular region data structure, the sizes in the
760  * cache are in MALLOC_PAGESIZE units.
761  */
762 static void
763 unmap(struct dir_info *d, void *p, size_t sz, size_t clear)
764 {
765 	size_t psz = sz >> MALLOC_PAGESHIFT;
766 	void *r;
767 	u_short i;
768 	struct smallcache *cache;
769 
770 	if (sz != PAGEROUND(sz) || psz == 0)
771 		wrterror(d, "munmap round");
772 
773 	if (d->bigcache_size > 0 && psz > MAX_SMALLCACHEABLE_SIZE &&
774 	    psz <= MAX_BIGCACHEABLE_SIZE) {
775 		u_short base = getrbyte(d);
776 		u_short j;
777 
778 		/* don't look through all slots */
779 		for (j = 0; j < d->bigcache_size / 4; j++) {
780 			i = (base + j) & (d->bigcache_size - 1);
781 			if (d->bigcache_used <
782 			    BIGCACHE_FILL(d->bigcache_size))  {
783 				if (d->bigcache[i].psize == 0)
784 					break;
785 			} else {
786 				if (d->bigcache[i].psize != 0)
787 					break;
788 			}
789 		}
790 		/* if we didn't find a preferred slot, use random one */
791 		if (d->bigcache[i].psize != 0) {
792 			size_t tmp;
793 
794 			r = d->bigcache[i].page;
795 			d->bigcache_used -= d->bigcache[i].psize;
796 			tmp = d->bigcache[i].psize << MALLOC_PAGESHIFT;
797 			if (!mopts.malloc_freeunmap)
798 				validate_junk(d, r, tmp);
799 			if (munmap(r, tmp))
800 				 wrterror(d, "munmap %p", r);
801 			STATS_SUB(d->malloc_used, tmp);
802 		}
803 
804 		if (clear > 0)
805 			explicit_bzero(p, clear);
806 		if (mopts.malloc_freeunmap) {
807 			if (mprotect(p, sz, PROT_NONE))
808 				wrterror(d, "mprotect %p", r);
809 		} else
810 			junk_free(d->malloc_junk, p, sz);
811 		d->bigcache[i].page = p;
812 		d->bigcache[i].psize = psz;
813 		d->bigcache_used += psz;
814 		return;
815 	}
816 	if (psz > MAX_SMALLCACHEABLE_SIZE || d->smallcache[psz - 1].max == 0) {
817 		if (munmap(p, sz))
818 			wrterror(d, "munmap %p", p);
819 		STATS_SUB(d->malloc_used, sz);
820 		return;
821 	}
822 	cache = &d->smallcache[psz - 1];
823 	if (cache->length == cache->max) {
824 		int fresh;
825 		/* use a random slot */
826 		i = getrbyte(d) & (cache->max - 1);
827 		r = cache->pages[i];
828 		fresh = (uintptr_t)r & 1;
829 		*(uintptr_t*)&r &= ~1UL;
830 		if (!fresh && !mopts.malloc_freeunmap)
831 			validate_junk(d, r, sz);
832 		if (munmap(r, sz))
833 			wrterror(d, "munmap %p", r);
834 		STATS_SUB(d->malloc_used, sz);
835 		cache->length--;
836 	} else
837 		i = cache->length;
838 
839 	/* fill slot */
840 	if (clear > 0)
841 		explicit_bzero(p, clear);
842 	if (mopts.malloc_freeunmap)
843 		mprotect(p, sz, PROT_NONE);
844 	else
845 		junk_free(d->malloc_junk, p, sz);
846 	cache->pages[i] = p;
847 	cache->length++;
848 }
849 
850 static void *
851 map(struct dir_info *d, size_t sz, int zero_fill)
852 {
853 	size_t psz = sz >> MALLOC_PAGESHIFT;
854 	u_short i;
855 	void *p;
856 	struct smallcache *cache;
857 
858 	if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)(uintptr_t)d) ||
859 	    d->canary1 != ~d->canary2)
860 		wrterror(d, "internal struct corrupt");
861 	if (sz != PAGEROUND(sz) || psz == 0)
862 		wrterror(d, "map round");
863 
864 
865 	if (d->bigcache_size > 0 && psz > MAX_SMALLCACHEABLE_SIZE &&
866 	    psz <= MAX_BIGCACHEABLE_SIZE) {
867 		size_t base = getrbyte(d);
868 		size_t cached = d->bigcache_used;
869 		ushort j;
870 
871 		for (j = 0; j < d->bigcache_size && cached >= psz; j++) {
872 			i = (j + base) & (d->bigcache_size - 1);
873 			if (d->bigcache[i].psize == psz) {
874 				p = d->bigcache[i].page;
875 				d->bigcache_used -= psz;
876 				d->bigcache[i].page = NULL;
877 				d->bigcache[i].psize = 0;
878 
879 				if (!mopts.malloc_freeunmap)
880 					validate_junk(d, p, sz);
881 				if (mopts.malloc_freeunmap)
882 					mprotect(p, sz, PROT_READ | PROT_WRITE);
883 				if (zero_fill)
884 					memset(p, 0, sz);
885 				else if (mopts.malloc_freeunmap)
886 					junk_free(d->malloc_junk, p, sz);
887 				return p;
888 			}
889 			cached -= d->bigcache[i].psize;
890 		}
891 	}
892 	if (psz <= MAX_SMALLCACHEABLE_SIZE && d->smallcache[psz - 1].max > 0) {
893 		cache = &d->smallcache[psz - 1];
894 		if (cache->length > 0) {
895 			int fresh;
896 			if (cache->length == 1)
897 				p = cache->pages[--cache->length];
898 			else {
899 				i = getrbyte(d) % cache->length;
900 				p = cache->pages[i];
901 				cache->pages[i] = cache->pages[--cache->length];
902 			}
903 			/* check if page was not junked, i.e. "fresh
904 			   we use the lsb of the pointer for that */
905 			fresh = (uintptr_t)p & 1UL;
906 			*(uintptr_t*)&p &= ~1UL;
907 			if (!fresh && !mopts.malloc_freeunmap)
908 				validate_junk(d, p, sz);
909 			if (mopts.malloc_freeunmap)
910 				mprotect(p, sz, PROT_READ | PROT_WRITE);
911 			if (zero_fill)
912 				memset(p, 0, sz);
913 			else if (mopts.malloc_freeunmap)
914 				junk_free(d->malloc_junk, p, sz);
915 			return p;
916 		}
917 		if (psz <= 1) {
918 			p = MMAP(cache->max * sz, d->mmap_flag);
919 			if (p != MAP_FAILED) {
920 				STATS_ADD(d->malloc_used, cache->max * sz);
921 				cache->length = cache->max - 1;
922 				for (i = 0; i < cache->max - 1; i++) {
923 					void *q = (char*)p + i * sz;
924 					cache->pages[i] = q;
925 					/* mark pointer in slot as not junked */
926 					*(uintptr_t*)&cache->pages[i] |= 1UL;
927 				}
928 				if (mopts.malloc_freeunmap)
929 					mprotect(p, (cache->max - 1) * sz,
930 					    PROT_NONE);
931 				p = (char*)p + (cache->max - 1) * sz;
932 				/* zero fill not needed, freshly mmapped */
933 				return p;
934 			}
935 		}
936 
937 	}
938 	p = MMAP(sz, d->mmap_flag);
939 	if (p != MAP_FAILED)
940 		STATS_ADD(d->malloc_used, sz);
941 	/* zero fill not needed */
942 	return p;
943 }
944 
945 static void
946 init_chunk_info(struct dir_info *d, struct chunk_info *p, u_int bucket)
947 {
948 	u_int i;
949 
950 	p->bucket = bucket;
951 	p->total = p->free = MALLOC_PAGESIZE / B2ALLOC(bucket);
952 	p->offset = bucket == 0 ? 0xdead : howmany(p->total, MALLOC_BITS);
953 	p->canary = (u_short)d->canary1;
954 
955 	/* set all valid bits in the bitmap */
956  	i = p->total - 1;
957 	memset(p->bits, 0xff, sizeof(p->bits[0]) * (i / MALLOC_BITS));
958 	p->bits[i / MALLOC_BITS] = (2U << (i % MALLOC_BITS)) - 1;
959 }
960 
961 static struct chunk_info *
962 alloc_chunk_info(struct dir_info *d, u_int bucket)
963 {
964 	struct chunk_info *p;
965 
966 	if (LIST_EMPTY(&d->chunk_info_list[bucket])) {
967 		const size_t chunk_pages = 64;
968 		size_t size, count, i;
969 		char *q;
970 
971 		count = MALLOC_PAGESIZE / B2ALLOC(bucket);
972 
973 		size = howmany(count, MALLOC_BITS);
974 		size = sizeof(struct chunk_info) + (size - 1) * sizeof(u_short);
975 		if (mopts.chunk_canaries)
976 			size += count * sizeof(u_short);
977 		size = _ALIGN(size);
978 		count = MALLOC_PAGESIZE / size;
979 
980 		/* Don't use cache here, we don't want user uaf touch this */
981 		if (d->chunk_pages_used == chunk_pages ||
982 		     d->chunk_pages == NULL) {
983 			q = MMAP(MALLOC_PAGESIZE * chunk_pages, d->mmap_flag);
984 			if (q == MAP_FAILED)
985 				return NULL;
986 			d->chunk_pages = q;
987 			d->chunk_pages_used = 0;
988 			STATS_ADD(d->malloc_used, MALLOC_PAGESIZE *
989 			    chunk_pages);
990 		}
991 		q = (char *)d->chunk_pages + d->chunk_pages_used *
992 		    MALLOC_PAGESIZE;
993 		d->chunk_pages_used++;
994 
995 		for (i = 0; i < count; i++, q += size) {
996 			p = (struct chunk_info *)q;
997 			LIST_INSERT_HEAD(&d->chunk_info_list[bucket], p, entries);
998 		}
999 	}
1000 	p = LIST_FIRST(&d->chunk_info_list[bucket]);
1001 	LIST_REMOVE(p, entries);
1002 	if (p->total == 0)
1003 		init_chunk_info(d, p, bucket);
1004 	return p;
1005 }
1006 
1007 /*
1008  * Allocate a page of chunks
1009  */
1010 static struct chunk_info *
1011 omalloc_make_chunks(struct dir_info *d, u_int bucket, u_int listnum)
1012 {
1013 	struct chunk_info *bp;
1014 	void *pp;
1015 	void *ff = NULL;
1016 
1017 	/* Allocate a new bucket */
1018 	pp = map(d, MALLOC_PAGESIZE, 0);
1019 	if (pp == MAP_FAILED)
1020 		return NULL;
1021 	if (DO_STATS) {
1022 		ff = map(d, MALLOC_PAGESIZE, 0);
1023 		if (ff == MAP_FAILED)
1024 			goto err;
1025 		memset(ff, 0, sizeof(void *) * MALLOC_PAGESIZE / B2ALLOC(bucket));
1026 	}
1027 
1028 	/* memory protect the page allocated in the malloc(0) case */
1029 	if (bucket == 0 && mprotect(pp, MALLOC_PAGESIZE, PROT_NONE) == -1)
1030 		goto err;
1031 
1032 	bp = alloc_chunk_info(d, bucket);
1033 	if (bp == NULL)
1034 		goto err;
1035 	bp->page = pp;
1036 
1037 	if (insert(d, (void *)((uintptr_t)pp | (bucket + 1)), (uintptr_t)bp,
1038 	    ff))
1039 		goto err;
1040 	LIST_INSERT_HEAD(&d->chunk_dir[bucket][listnum], bp, entries);
1041 
1042 	if (bucket > 0 && d->malloc_junk != 0)
1043 		memset(pp, SOME_FREEJUNK, MALLOC_PAGESIZE);
1044 
1045 	return bp;
1046 
1047 err:
1048 	unmap(d, pp, MALLOC_PAGESIZE, 0);
1049 	if (ff != NULL && ff != MAP_FAILED)
1050 		unmap(d, ff, MALLOC_PAGESIZE, 0);
1051 	return NULL;
1052 }
1053 
1054 #if defined(__GNUC__) && __GNUC__ < 4
1055 static inline unsigned int
1056 lb(u_int x)
1057 {
1058 #if defined(__m88k__)
1059 	__asm__ __volatile__ ("ff1 %0, %0" : "=r" (x) : "0" (x));
1060 	return x;
1061 #else
1062 	/* portable version */
1063 	unsigned int count = 0;
1064 	while ((x & (1U << (sizeof(int) * CHAR_BIT - 1))) == 0) {
1065 		count++;
1066 		x <<= 1;
1067 	}
1068 	return (sizeof(int) * CHAR_BIT - 1) - count;
1069 #endif
1070 }
1071 #else
1072 /* using built-in function version */
1073 static inline unsigned int
1074 lb(u_int x)
1075 {
1076 	/* I need an extension just for integer-length (: */
1077 	return (sizeof(int) * CHAR_BIT - 1) - __builtin_clz(x);
1078 }
1079 #endif
1080 
1081 /* https://pvk.ca/Blog/2015/06/27/linear-log-bucketing-fast-versatile-simple/
1082    via Tony Finch */
1083 static inline unsigned int
1084 bin_of(unsigned int size)
1085 {
1086 	const unsigned int linear = 6;
1087 	const unsigned int subbin = 2;
1088 
1089 	unsigned int mask, range, rounded, sub_index, rounded_size;
1090 	unsigned int n_bits, shift;
1091 
1092 	n_bits = lb(size | (1U << linear));
1093 	shift = n_bits - subbin;
1094 	mask = (1ULL << shift) - 1;
1095 	rounded = size + mask; /* XXX: overflow. */
1096 	sub_index = rounded >> shift;
1097 	range = n_bits - linear;
1098 
1099 	rounded_size = rounded & ~mask;
1100 	return rounded_size;
1101 }
1102 
1103 static inline u_short
1104 find_bucket(u_short size)
1105 {
1106 	/* malloc(0) is special */
1107 	if (size == 0)
1108 		return 0;
1109 	if (size < MALLOC_MINSIZE)
1110 		size = MALLOC_MINSIZE;
1111 	if (mopts.def_maxcache != 0)
1112 		size = bin_of(size);
1113 	return howmany(size, MALLOC_MINSIZE);
1114 }
1115 
1116 static void
1117 fill_canary(char *ptr, size_t sz, size_t allocated)
1118 {
1119 	size_t check_sz = allocated - sz;
1120 
1121 	if (check_sz > CHUNK_CHECK_LENGTH)
1122 		check_sz = CHUNK_CHECK_LENGTH;
1123 	memset(ptr + sz, mopts.chunk_canaries, check_sz);
1124 }
1125 
1126 /*
1127  * Allocate a chunk
1128  */
1129 static void *
1130 malloc_bytes(struct dir_info *d, size_t size)
1131 {
1132 	u_int i, r, bucket, listnum;
1133 	size_t k;
1134 	u_short	*lp;
1135 	struct chunk_info *bp;
1136 	void *p;
1137 
1138 	if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)(uintptr_t)d) ||
1139 	    d->canary1 != ~d->canary2)
1140 		wrterror(d, "internal struct corrupt");
1141 
1142 	bucket = find_bucket(size);
1143 
1144 	r = ((u_int)getrbyte(d) << 8) | getrbyte(d);
1145 	listnum = r % MALLOC_CHUNK_LISTS;
1146 
1147 	/* If it's empty, make a page more of that size chunks */
1148 	if ((bp = LIST_FIRST(&d->chunk_dir[bucket][listnum])) == NULL) {
1149 		bp = omalloc_make_chunks(d, bucket, listnum);
1150 		if (bp == NULL)
1151 			return NULL;
1152 	}
1153 
1154 	if (bp->canary != (u_short)d->canary1)
1155 		wrterror(d, "chunk info corrupted");
1156 
1157 	/* bias, as bp->total is not a power of 2 */
1158 	i = (r / MALLOC_CHUNK_LISTS) % bp->total;
1159 
1160 	/* potentially start somewhere in a short */
1161 	lp = &bp->bits[i / MALLOC_BITS];
1162 	if (*lp) {
1163 		int j = i % MALLOC_BITS; /* j must be signed */
1164 		k = ffs(*lp >> j);
1165 		if (k != 0) {
1166 			k += j - 1;
1167 			goto found;
1168 		}
1169 	}
1170 	/* no bit halfway, go to next full short */
1171 	i /= MALLOC_BITS;
1172 	for (;;) {
1173 		if (++i >= howmany(bp->total, MALLOC_BITS))
1174 			i = 0;
1175 		lp = &bp->bits[i];
1176 		if (*lp) {
1177 			k = ffs(*lp) - 1;
1178 			break;
1179 		}
1180 	}
1181 found:
1182 	*lp ^= 1 << k;
1183 
1184 	/* If there are no more free, remove from free-list */
1185 	if (--bp->free == 0)
1186 		LIST_REMOVE(bp, entries);
1187 
1188 	/* Adjust to the real offset of that chunk */
1189 	k += (lp - bp->bits) * MALLOC_BITS;
1190 
1191 	if (mopts.chunk_canaries && size > 0)
1192 		bp->bits[bp->offset + k] = size;
1193 
1194 	if (DO_STATS) {
1195 		struct region_info *r = find(d, bp->page);
1196 		STATS_SETFN(r, k, d->caller);
1197 	}
1198 
1199 	k *= B2ALLOC(bp->bucket);
1200 
1201 	p = (char *)bp->page + k;
1202 	if (bp->bucket > 0) {
1203 		validate_junk(d, p, B2SIZE(bp->bucket));
1204 		if (mopts.chunk_canaries)
1205 			fill_canary(p, size, B2SIZE(bp->bucket));
1206 	}
1207 	return p;
1208 }
1209 
1210 static void
1211 validate_canary(struct dir_info *d, u_char *ptr, size_t sz, size_t allocated)
1212 {
1213 	size_t check_sz = allocated - sz;
1214 	u_char *p, *q;
1215 
1216 	if (check_sz > CHUNK_CHECK_LENGTH)
1217 		check_sz = CHUNK_CHECK_LENGTH;
1218 	p = ptr + sz;
1219 	q = p + check_sz;
1220 
1221 	while (p < q) {
1222 		if (*p != (u_char)mopts.chunk_canaries && *p != SOME_JUNK) {
1223 			wrterror(d, "canary corrupted %p[%tu]@%zu/%zu%s",
1224 			    ptr, p - ptr, sz, allocated,
1225 			    *p == SOME_FREEJUNK ? " (double free?)" : "");
1226 		}
1227 		p++;
1228 	}
1229 }
1230 
1231 static uint32_t
1232 find_chunknum(struct dir_info *d, struct chunk_info *info, void *ptr, int check)
1233 {
1234 	uint32_t chunknum;
1235 
1236 	if (info->canary != (u_short)d->canary1)
1237 		wrterror(d, "chunk info corrupted");
1238 
1239 	/* Find the chunk number on the page */
1240 	chunknum = ((uintptr_t)ptr & MALLOC_PAGEMASK) / B2ALLOC(info->bucket);
1241 
1242 	if ((uintptr_t)ptr & (MALLOC_MINSIZE - 1))
1243 		wrterror(d, "modified chunk-pointer %p", ptr);
1244 	if (CHUNK_FREE(info, chunknum))
1245 		wrterror(d, "double free %p", ptr);
1246 	if (check && info->bucket > 0) {
1247 		validate_canary(d, ptr, info->bits[info->offset + chunknum],
1248 		    B2SIZE(info->bucket));
1249 	}
1250 	return chunknum;
1251 }
1252 
1253 /*
1254  * Free a chunk, and possibly the page it's on, if the page becomes empty.
1255  */
1256 static void
1257 free_bytes(struct dir_info *d, struct region_info *r, void *ptr)
1258 {
1259 	struct chunk_head *mp;
1260 	struct chunk_info *info;
1261 	uint32_t chunknum;
1262 	uint32_t listnum;
1263 
1264 	info = (struct chunk_info *)r->size;
1265 	chunknum = find_chunknum(d, info, ptr, 0);
1266 
1267 	info->bits[chunknum / MALLOC_BITS] |= 1U << (chunknum % MALLOC_BITS);
1268 	info->free++;
1269 
1270 	if (info->free == 1) {
1271 		/* Page became non-full */
1272 		listnum = getrbyte(d) % MALLOC_CHUNK_LISTS;
1273 		mp = &d->chunk_dir[info->bucket][listnum];
1274 		LIST_INSERT_HEAD(mp, info, entries);
1275 		return;
1276 	}
1277 
1278 	if (info->free != info->total)
1279 		return;
1280 
1281 	LIST_REMOVE(info, entries);
1282 
1283 	if (info->bucket == 0 && !mopts.malloc_freeunmap)
1284 		mprotect(info->page, MALLOC_PAGESIZE, PROT_READ | PROT_WRITE);
1285 	unmap(d, info->page, MALLOC_PAGESIZE, 0);
1286 #ifdef MALLOC_STATS
1287 	if (r->f != NULL) {
1288 		unmap(d, r->f, MALLOC_PAGESIZE, MALLOC_PAGESIZE);
1289 		r->f = NULL;
1290 	}
1291 #endif
1292 
1293 	delete(d, r);
1294 	mp = &d->chunk_info_list[info->bucket];
1295 	LIST_INSERT_HEAD(mp, info, entries);
1296 }
1297 
1298 static void *
1299 omalloc(struct dir_info *pool, size_t sz, int zero_fill)
1300 {
1301 	void *p, *caller = NULL;
1302 	size_t psz;
1303 
1304 	if (sz > MALLOC_MAXCHUNK) {
1305 		if (sz >= SIZE_MAX - mopts.malloc_guard - MALLOC_PAGESIZE) {
1306 			errno = ENOMEM;
1307 			return NULL;
1308 		}
1309 		sz += mopts.malloc_guard;
1310 		psz = PAGEROUND(sz);
1311 		p = map(pool, psz, zero_fill);
1312 		if (p == MAP_FAILED) {
1313 			errno = ENOMEM;
1314 			return NULL;
1315 		}
1316 #ifdef MALLOC_STATS
1317 		if (DO_STATS)
1318 			caller = pool->caller;
1319 #endif
1320 		if (insert(pool, p, sz, caller)) {
1321 			unmap(pool, p, psz, 0);
1322 			errno = ENOMEM;
1323 			return NULL;
1324 		}
1325 		if (mopts.malloc_guard) {
1326 			if (mprotect((char *)p + psz - mopts.malloc_guard,
1327 			    mopts.malloc_guard, PROT_NONE))
1328 				wrterror(pool, "mprotect");
1329 			STATS_ADD(pool->malloc_guarded, mopts.malloc_guard);
1330 		}
1331 
1332 		if (MALLOC_MOVE_COND(sz)) {
1333 			/* fill whole allocation */
1334 			if (pool->malloc_junk == 2)
1335 				memset(p, SOME_JUNK, psz - mopts.malloc_guard);
1336 			/* shift towards the end */
1337 			p = MALLOC_MOVE(p, sz);
1338 			/* fill zeros if needed and overwritten above */
1339 			if (zero_fill && pool->malloc_junk == 2)
1340 				memset(p, 0, sz - mopts.malloc_guard);
1341 		} else {
1342 			if (pool->malloc_junk == 2) {
1343 				if (zero_fill)
1344 					memset((char *)p + sz -
1345 					    mopts.malloc_guard, SOME_JUNK,
1346 					    psz - sz);
1347 				else
1348 					memset(p, SOME_JUNK,
1349 					    psz - mopts.malloc_guard);
1350 			} else if (mopts.chunk_canaries)
1351 				fill_canary(p, sz - mopts.malloc_guard,
1352 				    psz - mopts.malloc_guard);
1353 		}
1354 
1355 	} else {
1356 		/* takes care of SOME_JUNK */
1357 		p = malloc_bytes(pool, sz);
1358 		if (zero_fill && p != NULL && sz > 0)
1359 			memset(p, 0, sz);
1360 	}
1361 
1362 	return p;
1363 }
1364 
1365 /*
1366  * Common function for handling recursion.  Only
1367  * print the error message once, to avoid making the problem
1368  * potentially worse.
1369  */
1370 static void
1371 malloc_recurse(struct dir_info *d)
1372 {
1373 	static int noprint;
1374 
1375 	if (noprint == 0) {
1376 		noprint = 1;
1377 		wrterror(d, "recursive call");
1378 	}
1379 	d->active--;
1380 	_MALLOC_UNLOCK(d->mutex);
1381 	errno = EDEADLK;
1382 }
1383 
1384 void
1385 _malloc_init(int from_rthreads)
1386 {
1387 	u_int i, j, nmutexes;
1388 	struct dir_info *d;
1389 
1390 	_MALLOC_LOCK(1);
1391 	if (!from_rthreads && mopts.malloc_pool[1]) {
1392 		_MALLOC_UNLOCK(1);
1393 		return;
1394 	}
1395 	if (!mopts.malloc_canary) {
1396 		char *p;
1397 		size_t sz, d_avail;
1398 
1399 		omalloc_init();
1400 		/*
1401 		 * Allocate dir_infos with a guard page on either side. Also
1402 		 * randomise offset inside the page at which the dir_infos
1403 		 * lay (subject to alignment by 1 << MALLOC_MINSHIFT)
1404 		 */
1405 		sz = mopts.malloc_mutexes * sizeof(*d) + 2 * MALLOC_PAGESIZE;
1406 		if ((p = MMAPNONE(sz, 0)) == MAP_FAILED)
1407 			wrterror(NULL, "malloc_init mmap1 failed");
1408 		if (mprotect(p + MALLOC_PAGESIZE, mopts.malloc_mutexes * sizeof(*d),
1409 		    PROT_READ | PROT_WRITE))
1410 			wrterror(NULL, "malloc_init mprotect1 failed");
1411 		if (mimmutable(p, sz))
1412 			wrterror(NULL, "malloc_init mimmutable1 failed");
1413 		d_avail = (((mopts.malloc_mutexes * sizeof(*d) + MALLOC_PAGEMASK) &
1414 		    ~MALLOC_PAGEMASK) - (mopts.malloc_mutexes * sizeof(*d))) >>
1415 		    MALLOC_MINSHIFT;
1416 		d = (struct dir_info *)(p + MALLOC_PAGESIZE +
1417 		    (arc4random_uniform(d_avail) << MALLOC_MINSHIFT));
1418 		STATS_ADD(d[1].malloc_used, sz);
1419 		for (i = 0; i < mopts.malloc_mutexes; i++)
1420 			mopts.malloc_pool[i] = &d[i];
1421 		mopts.internal_funcs = 1;
1422 		if (((uintptr_t)&malloc_readonly & MALLOC_PAGEMASK) == 0) {
1423 			if (mprotect(&malloc_readonly, sizeof(malloc_readonly),
1424 			    PROT_READ))
1425 				wrterror(NULL, "malloc_init mprotect r/o failed");
1426 			if (mimmutable(&malloc_readonly, sizeof(malloc_readonly)))
1427 				wrterror(NULL, "malloc_init mimmutable r/o failed");
1428 		}
1429 	}
1430 
1431 	nmutexes = from_rthreads ? mopts.malloc_mutexes : 2;
1432 	for (i = 0; i < nmutexes; i++) {
1433 		d = mopts.malloc_pool[i];
1434 		d->malloc_mt = from_rthreads;
1435 		if (d->canary1 == ~d->canary2)
1436 			continue;
1437 		if (i == 0) {
1438 			omalloc_poolinit(d, MAP_CONCEAL);
1439 			d->malloc_junk = 2;
1440 			d->bigcache_size = 0;
1441 			for (j = 0; j < MAX_SMALLCACHEABLE_SIZE; j++)
1442 				d->smallcache[j].max = 0;
1443 		} else {
1444 			size_t sz = 0;
1445 
1446 			omalloc_poolinit(d, 0);
1447 			d->malloc_junk = mopts.def_malloc_junk;
1448 			d->bigcache_size = mopts.def_maxcache;
1449 			for (j = 0; j < MAX_SMALLCACHEABLE_SIZE; j++) {
1450 				d->smallcache[j].max =
1451 				    mopts.def_maxcache >> (j / 8);
1452 				sz += d->smallcache[j].max * sizeof(void *);
1453 			}
1454 			sz += d->bigcache_size * sizeof(struct bigcache);
1455 			if (sz > 0) {
1456 				void *p = MMAP(sz, 0);
1457 				if (p == MAP_FAILED)
1458 					wrterror(NULL,
1459 					    "malloc_init mmap2 failed");
1460 				if (mimmutable(p, sz))
1461 					wrterror(NULL, "malloc_init mimmutable2 failed");
1462 				for (j = 0; j < MAX_SMALLCACHEABLE_SIZE; j++) {
1463 					d->smallcache[j].pages = p;
1464 					p = (char *)p + d->smallcache[j].max *
1465 					    sizeof(void *);
1466 				}
1467 				d->bigcache = p;
1468 			}
1469 		}
1470 		d->mutex = i;
1471 	}
1472 
1473 	_MALLOC_UNLOCK(1);
1474 }
1475 DEF_STRONG(_malloc_init);
1476 
1477 #define PROLOGUE(p, fn)			\
1478 	d = (p); 			\
1479 	if (d == NULL) { 		\
1480 		_malloc_init(0);	\
1481 		d = (p);		\
1482 	}				\
1483 	_MALLOC_LOCK(d->mutex);		\
1484 	d->func = fn;			\
1485 	if (d->active++) {		\
1486 		malloc_recurse(d);	\
1487 		return NULL;		\
1488 	}				\
1489 
1490 #define EPILOGUE()				\
1491 	d->active--;				\
1492 	_MALLOC_UNLOCK(d->mutex);		\
1493 	if (r == NULL && mopts.malloc_xmalloc)	\
1494 		wrterror(d, "out of memory");	\
1495 	if (r != NULL)				\
1496 		errno = saved_errno;		\
1497 
1498 void *
1499 malloc(size_t size)
1500 {
1501 	void *r;
1502 	struct dir_info *d;
1503 	int saved_errno = errno;
1504 
1505 	PROLOGUE(getpool(), "malloc")
1506 	SET_CALLER(d, caller());
1507 	r = omalloc(d, size, 0);
1508 	EPILOGUE()
1509 	return r;
1510 }
1511 DEF_STRONG(malloc);
1512 
1513 void *
1514 malloc_conceal(size_t size)
1515 {
1516 	void *r;
1517 	struct dir_info *d;
1518 	int saved_errno = errno;
1519 
1520 	PROLOGUE(mopts.malloc_pool[0], "malloc_conceal")
1521 	SET_CALLER(d, caller());
1522 	r = omalloc(d, size, 0);
1523 	EPILOGUE()
1524 	return r;
1525 }
1526 DEF_WEAK(malloc_conceal);
1527 
1528 static struct region_info *
1529 findpool(void *p, struct dir_info *argpool, struct dir_info **foundpool,
1530     const char ** saved_function)
1531 {
1532 	struct dir_info *pool = argpool;
1533 	struct region_info *r = find(pool, p);
1534 
1535 	STATS_INC(pool->pool_searches);
1536 	if (r == NULL) {
1537 		u_int i, nmutexes;
1538 
1539 		nmutexes = mopts.malloc_pool[1]->malloc_mt ? mopts.malloc_mutexes : 2;
1540 		STATS_INC(pool->other_pool);
1541 		for (i = 1; i < nmutexes; i++) {
1542 			u_int j = (argpool->mutex + i) & (nmutexes - 1);
1543 
1544 			pool->active--;
1545 			_MALLOC_UNLOCK(pool->mutex);
1546 			pool = mopts.malloc_pool[j];
1547 			_MALLOC_LOCK(pool->mutex);
1548 			pool->active++;
1549 			r = find(pool, p);
1550 			if (r != NULL) {
1551 				*saved_function = pool->func;
1552 				pool->func = argpool->func;
1553 				break;
1554 			}
1555 		}
1556 		if (r == NULL)
1557 			wrterror(argpool, "bogus pointer (double free?) %p", p);
1558 	}
1559 	*foundpool = pool;
1560 	return r;
1561 }
1562 
1563 static void
1564 ofree(struct dir_info **argpool, void *p, int clear, int check, size_t argsz)
1565 {
1566 	struct region_info *r;
1567 	struct dir_info *pool;
1568 	const char *saved_function;
1569 	size_t sz;
1570 
1571 	r = findpool(p, *argpool, &pool, &saved_function);
1572 
1573 	REALSIZE(sz, r);
1574 	if (pool->mmap_flag) {
1575 		clear = 1;
1576 		if (!check) {
1577 			argsz = sz;
1578 			if (sz > MALLOC_MAXCHUNK)
1579 				argsz -= mopts.malloc_guard;
1580 		}
1581 	}
1582 	if (check) {
1583 		if (sz <= MALLOC_MAXCHUNK) {
1584 			if (mopts.chunk_canaries && sz > 0) {
1585 				struct chunk_info *info =
1586 				    (struct chunk_info *)r->size;
1587 				uint32_t chunknum =
1588 				    find_chunknum(pool, info, p, 0);
1589 
1590 				if (info->bits[info->offset + chunknum] < argsz)
1591 					wrterror(pool, "recorded size %hu"
1592 					    " < %zu",
1593 					    info->bits[info->offset + chunknum],
1594 					    argsz);
1595 			} else {
1596 				if (sz < argsz)
1597 					wrterror(pool, "chunk size %zu < %zu",
1598 					    sz, argsz);
1599 			}
1600 		} else if (sz - mopts.malloc_guard < argsz) {
1601 			wrterror(pool, "recorded size %zu < %zu",
1602 			    sz - mopts.malloc_guard, argsz);
1603 		}
1604 	}
1605 	if (sz > MALLOC_MAXCHUNK) {
1606 		if (!MALLOC_MOVE_COND(sz)) {
1607 			if (r->p != p)
1608 				wrterror(pool, "bogus pointer %p", p);
1609 			if (mopts.chunk_canaries)
1610 				validate_canary(pool, p,
1611 				    sz - mopts.malloc_guard,
1612 				    PAGEROUND(sz - mopts.malloc_guard));
1613 		} else {
1614 			/* shifted towards the end */
1615 			if (p != MALLOC_MOVE(r->p, sz))
1616 				wrterror(pool, "bogus moved pointer %p", p);
1617 			p = r->p;
1618 		}
1619 		if (mopts.malloc_guard) {
1620 			if (sz < mopts.malloc_guard)
1621 				wrterror(pool, "guard size");
1622 			if (!mopts.malloc_freeunmap) {
1623 				if (mprotect((char *)p + PAGEROUND(sz) -
1624 				    mopts.malloc_guard, mopts.malloc_guard,
1625 				    PROT_READ | PROT_WRITE))
1626 					wrterror(pool, "mprotect");
1627 			}
1628 			STATS_SUB(pool->malloc_guarded, mopts.malloc_guard);
1629 		}
1630 		unmap(pool, p, PAGEROUND(sz), clear ? argsz : 0);
1631 		delete(pool, r);
1632 	} else {
1633 		void *tmp;
1634 		u_int i;
1635 
1636 		/* Validate and optionally canary check */
1637 		struct chunk_info *info = (struct chunk_info *)r->size;
1638 		if (B2SIZE(info->bucket) != sz)
1639 			wrterror(pool, "internal struct corrupt");
1640 		find_chunknum(pool, info, p, mopts.chunk_canaries);
1641 
1642 		if (mopts.malloc_freecheck) {
1643 			for (i = 0; i <= MALLOC_DELAYED_CHUNK_MASK; i++) {
1644 				tmp = pool->delayed_chunks[i];
1645 				if (tmp == p)
1646 					wrterror(pool,
1647 					    "double free %p", p);
1648 				if (tmp != NULL) {
1649 					size_t tmpsz;
1650 
1651 					r = find(pool, tmp);
1652 					if (r == NULL)
1653 						wrterror(pool,
1654 						    "bogus pointer ("
1655 						    "double free?) %p", tmp);
1656 					REALSIZE(tmpsz, r);
1657 					validate_junk(pool, tmp, tmpsz);
1658 				}
1659 			}
1660 		}
1661 
1662 		if (clear && argsz > 0)
1663 			explicit_bzero(p, argsz);
1664 		junk_free(pool->malloc_junk, p, sz);
1665 
1666 		i = getrbyte(pool) & MALLOC_DELAYED_CHUNK_MASK;
1667 		tmp = p;
1668 		p = pool->delayed_chunks[i];
1669 		if (tmp == p)
1670 			wrterror(pool, "double free %p", p);
1671 		pool->delayed_chunks[i] = tmp;
1672 		if (p != NULL) {
1673 			r = find(pool, p);
1674 			if (r == NULL)
1675 				wrterror(pool,
1676 				    "bogus pointer (double free?) %p", p);
1677 			if (!mopts.malloc_freecheck) {
1678 				REALSIZE(sz, r);
1679 				validate_junk(pool, p, sz);
1680 			}
1681 			free_bytes(pool, r, p);
1682 		}
1683 	}
1684 
1685 	if (*argpool != pool) {
1686 		pool->func = saved_function;
1687 		*argpool = pool;
1688 	}
1689 }
1690 
1691 void
1692 free(void *ptr)
1693 {
1694 	struct dir_info *d;
1695 	int saved_errno = errno;
1696 
1697 	/* This is legal. */
1698 	if (ptr == NULL)
1699 		return;
1700 
1701 	d = getpool();
1702 	if (d == NULL)
1703 		wrterror(d, "free() called before allocation");
1704 	_MALLOC_LOCK(d->mutex);
1705 	d->func = "free";
1706 	if (d->active++) {
1707 		malloc_recurse(d);
1708 		return;
1709 	}
1710 	ofree(&d, ptr, 0, 0, 0);
1711 	d->active--;
1712 	_MALLOC_UNLOCK(d->mutex);
1713 	errno = saved_errno;
1714 }
1715 DEF_STRONG(free);
1716 
1717 static void
1718 freezero_p(void *ptr, size_t sz)
1719 {
1720 	explicit_bzero(ptr, sz);
1721 	free(ptr);
1722 }
1723 
1724 void
1725 freezero(void *ptr, size_t sz)
1726 {
1727 	struct dir_info *d;
1728 	int saved_errno = errno;
1729 
1730 	/* This is legal. */
1731 	if (ptr == NULL)
1732 		return;
1733 
1734 	if (!mopts.internal_funcs) {
1735 		freezero_p(ptr, sz);
1736 		return;
1737 	}
1738 
1739 	d = getpool();
1740 	if (d == NULL)
1741 		wrterror(d, "freezero() called before allocation");
1742 	_MALLOC_LOCK(d->mutex);
1743 	d->func = "freezero";
1744 	if (d->active++) {
1745 		malloc_recurse(d);
1746 		return;
1747 	}
1748 	ofree(&d, ptr, 1, 1, sz);
1749 	d->active--;
1750 	_MALLOC_UNLOCK(d->mutex);
1751 	errno = saved_errno;
1752 }
1753 DEF_WEAK(freezero);
1754 
1755 static void *
1756 orealloc(struct dir_info **argpool, void *p, size_t newsz)
1757 {
1758 	struct region_info *r;
1759 	struct dir_info *pool;
1760 	const char *saved_function;
1761 	struct chunk_info *info;
1762 	size_t oldsz, goldsz, gnewsz;
1763 	void *q, *ret;
1764 	uint32_t chunknum;
1765 	int forced;
1766 
1767 	if (p == NULL)
1768 		return omalloc(*argpool, newsz, 0);
1769 
1770 	if (newsz >= SIZE_MAX - mopts.malloc_guard - MALLOC_PAGESIZE) {
1771 		errno = ENOMEM;
1772 		return  NULL;
1773 	}
1774 
1775 	r = findpool(p, *argpool, &pool, &saved_function);
1776 
1777 	REALSIZE(oldsz, r);
1778 	if (oldsz <= MALLOC_MAXCHUNK) {
1779 		if (DO_STATS || mopts.chunk_canaries) {
1780 			info = (struct chunk_info *)r->size;
1781 			chunknum = find_chunknum(pool, info, p, 0);
1782 		}
1783 	}
1784 
1785 	goldsz = oldsz;
1786 	if (oldsz > MALLOC_MAXCHUNK) {
1787 		if (oldsz < mopts.malloc_guard)
1788 			wrterror(pool, "guard size");
1789 		oldsz -= mopts.malloc_guard;
1790 	}
1791 
1792 	gnewsz = newsz;
1793 	if (gnewsz > MALLOC_MAXCHUNK)
1794 		gnewsz += mopts.malloc_guard;
1795 
1796 	forced = mopts.malloc_realloc || pool->mmap_flag;
1797 	if (newsz > MALLOC_MAXCHUNK && oldsz > MALLOC_MAXCHUNK && !forced) {
1798 		/* First case: from n pages sized allocation to m pages sized
1799 		   allocation, m > n */
1800 		size_t roldsz = PAGEROUND(goldsz);
1801 		size_t rnewsz = PAGEROUND(gnewsz);
1802 
1803 		if (rnewsz < roldsz && rnewsz > roldsz / 2 &&
1804 		    roldsz - rnewsz < mopts.def_maxcache * MALLOC_PAGESIZE &&
1805 		    !mopts.malloc_guard) {
1806 
1807 			ret = p;
1808 			goto done;
1809 		}
1810 
1811 		if (rnewsz > roldsz) {
1812 			/* try to extend existing region */
1813 			if (!mopts.malloc_guard) {
1814 				void *hint = (char *)r->p + roldsz;
1815 				size_t needed = rnewsz - roldsz;
1816 
1817 				STATS_INC(pool->cheap_realloc_tries);
1818 				q = MMAPA(hint, needed, MAP_FIXED | __MAP_NOREPLACE | pool->mmap_flag);
1819 				if (q == hint) {
1820 					STATS_ADD(pool->malloc_used, needed);
1821 					if (pool->malloc_junk == 2)
1822 						memset(q, SOME_JUNK, needed);
1823 					r->size = gnewsz;
1824 					if (r->p != p) {
1825 						/* old pointer is moved */
1826 						memmove(r->p, p, oldsz);
1827 						p = r->p;
1828 					}
1829 					if (mopts.chunk_canaries)
1830 						fill_canary(p, newsz,
1831 						    PAGEROUND(newsz));
1832 					STATS_SETF(r, (*argpool)->caller);
1833 					STATS_INC(pool->cheap_reallocs);
1834 					ret = p;
1835 					goto done;
1836 				}
1837 			}
1838 		} else if (rnewsz < roldsz) {
1839 			/* shrink number of pages */
1840 			if (mopts.malloc_guard) {
1841 				if (mprotect((char *)r->p + rnewsz -
1842 				    mopts.malloc_guard, mopts.malloc_guard,
1843 				    PROT_NONE))
1844 					wrterror(pool, "mprotect");
1845 			}
1846 			if (munmap((char *)r->p + rnewsz, roldsz - rnewsz))
1847 				wrterror(pool, "munmap %p", (char *)r->p +
1848 				    rnewsz);
1849 			STATS_SUB(pool->malloc_used, roldsz - rnewsz);
1850 			r->size = gnewsz;
1851 			if (MALLOC_MOVE_COND(gnewsz)) {
1852 				void *pp = MALLOC_MOVE(r->p, gnewsz);
1853 				memmove(pp, p, newsz);
1854 				p = pp;
1855 			} else if (mopts.chunk_canaries)
1856 				fill_canary(p, newsz, PAGEROUND(newsz));
1857 			STATS_SETF(r, (*argpool)->caller);
1858 			ret = p;
1859 			goto done;
1860 		} else {
1861 			/* number of pages remains the same */
1862 			void *pp = r->p;
1863 
1864 			r->size = gnewsz;
1865 			if (MALLOC_MOVE_COND(gnewsz))
1866 				pp = MALLOC_MOVE(r->p, gnewsz);
1867 			if (p != pp) {
1868 				memmove(pp, p, oldsz < newsz ? oldsz : newsz);
1869 				p = pp;
1870 			}
1871 			if (p == r->p) {
1872 				if (newsz > oldsz && pool->malloc_junk == 2)
1873 					memset((char *)p + newsz, SOME_JUNK,
1874 					    rnewsz - mopts.malloc_guard -
1875 					    newsz);
1876 				if (mopts.chunk_canaries)
1877 					fill_canary(p, newsz, PAGEROUND(newsz));
1878 			}
1879 			STATS_SETF(r, (*argpool)->caller);
1880 			ret = p;
1881 			goto done;
1882 		}
1883 	}
1884 	if (oldsz <= MALLOC_MAXCHUNK && oldsz > 0 &&
1885 	    newsz <= MALLOC_MAXCHUNK && newsz > 0 &&
1886 	    !forced && find_bucket(newsz) == find_bucket(oldsz)) {
1887 		/* do not reallocate if new size fits good in existing chunk */
1888 		if (pool->malloc_junk == 2)
1889 			memset((char *)p + newsz, SOME_JUNK, oldsz - newsz);
1890 		if (mopts.chunk_canaries) {
1891 			info->bits[info->offset + chunknum] = newsz;
1892 			fill_canary(p, newsz, B2SIZE(info->bucket));
1893 		}
1894 		if (DO_STATS)
1895 			STATS_SETFN(r, chunknum, (*argpool)->caller);
1896 		ret = p;
1897 	} else if (newsz != oldsz || forced) {
1898 		/* create new allocation */
1899 		q = omalloc(pool, newsz, 0);
1900 		if (q == NULL) {
1901 			ret = NULL;
1902 			goto done;
1903 		}
1904 		if (newsz != 0 && oldsz != 0)
1905 			memcpy(q, p, oldsz < newsz ? oldsz : newsz);
1906 		ofree(&pool, p, 0, 0, 0);
1907 		ret = q;
1908 	} else {
1909 		/* oldsz == newsz */
1910 		if (newsz != 0)
1911 			wrterror(pool, "realloc internal inconsistency");
1912 		if (DO_STATS)
1913 			STATS_SETFN(r, chunknum, (*argpool)->caller);
1914 		ret = p;
1915 	}
1916 done:
1917 	if (*argpool != pool) {
1918 		pool->func = saved_function;
1919 		*argpool = pool;
1920 	}
1921 	return ret;
1922 }
1923 
1924 void *
1925 realloc(void *ptr, size_t size)
1926 {
1927 	struct dir_info *d;
1928 	void *r;
1929 	int saved_errno = errno;
1930 
1931 	PROLOGUE(getpool(), "realloc")
1932 	SET_CALLER(d, caller());
1933 	r = orealloc(&d, ptr, size);
1934 	EPILOGUE()
1935 	return r;
1936 }
1937 DEF_STRONG(realloc);
1938 
1939 /*
1940  * This is sqrt(SIZE_MAX+1), as s1*s2 <= SIZE_MAX
1941  * if both s1 < MUL_NO_OVERFLOW and s2 < MUL_NO_OVERFLOW
1942  */
1943 #define MUL_NO_OVERFLOW	(1UL << (sizeof(size_t) * 4))
1944 
1945 void *
1946 calloc(size_t nmemb, size_t size)
1947 {
1948 	struct dir_info *d;
1949 	void *r;
1950 	int saved_errno = errno;
1951 
1952 	PROLOGUE(getpool(), "calloc")
1953 	SET_CALLER(d, caller());
1954 	if ((nmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
1955 	    nmemb > 0 && SIZE_MAX / nmemb < size) {
1956 		d->active--;
1957 		_MALLOC_UNLOCK(d->mutex);
1958 		if (mopts.malloc_xmalloc)
1959 			wrterror(d, "out of memory");
1960 		errno = ENOMEM;
1961 		return NULL;
1962 	}
1963 
1964 	size *= nmemb;
1965 	r = omalloc(d, size, 1);
1966 	EPILOGUE()
1967 	return r;
1968 }
1969 DEF_STRONG(calloc);
1970 
1971 void *
1972 calloc_conceal(size_t nmemb, size_t size)
1973 {
1974 	struct dir_info *d;
1975 	void *r;
1976 	int saved_errno = errno;
1977 
1978 	PROLOGUE(mopts.malloc_pool[0], "calloc_conceal")
1979 	SET_CALLER(d, caller());
1980 	if ((nmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
1981 	    nmemb > 0 && SIZE_MAX / nmemb < size) {
1982 		d->active--;
1983 		_MALLOC_UNLOCK(d->mutex);
1984 		if (mopts.malloc_xmalloc)
1985 			wrterror(d, "out of memory");
1986 		errno = ENOMEM;
1987 		return NULL;
1988 	}
1989 
1990 	size *= nmemb;
1991 	r = omalloc(d, size, 1);
1992 	EPILOGUE()
1993 	return r;
1994 }
1995 DEF_WEAK(calloc_conceal);
1996 
1997 static void *
1998 orecallocarray(struct dir_info **argpool, void *p, size_t oldsize,
1999     size_t newsize)
2000 {
2001 	struct region_info *r;
2002 	struct dir_info *pool;
2003 	const char *saved_function;
2004 	void *newptr;
2005 	size_t sz;
2006 
2007 	if (p == NULL)
2008 		return omalloc(*argpool, newsize, 1);
2009 
2010 	if (oldsize == newsize)
2011 		return p;
2012 
2013 	r = findpool(p, *argpool, &pool, &saved_function);
2014 
2015 	REALSIZE(sz, r);
2016 	if (sz <= MALLOC_MAXCHUNK) {
2017 		if (mopts.chunk_canaries && sz > 0) {
2018 			struct chunk_info *info = (struct chunk_info *)r->size;
2019 			uint32_t chunknum = find_chunknum(pool, info, p, 0);
2020 
2021 			if (info->bits[info->offset + chunknum] != oldsize)
2022 				wrterror(pool, "recorded size %hu != %zu",
2023 				    info->bits[info->offset + chunknum],
2024 				    oldsize);
2025 		} else {
2026 			if (sz < oldsize)
2027 				wrterror(pool, "chunk size %zu < %zu",
2028 				    sz, oldsize);
2029 		}
2030 	} else {
2031 		if (sz - mopts.malloc_guard < oldsize)
2032 			wrterror(pool, "recorded size %zu < %zu",
2033 			    sz - mopts.malloc_guard, oldsize);
2034 		if (oldsize < (sz - mopts.malloc_guard) / 2)
2035 			wrterror(pool, "recorded size %zu inconsistent with %zu",
2036 			    sz - mopts.malloc_guard, oldsize);
2037 	}
2038 
2039 	newptr = omalloc(pool, newsize, 0);
2040 	if (newptr == NULL)
2041 		goto done;
2042 
2043 	if (newsize > oldsize) {
2044 		memcpy(newptr, p, oldsize);
2045 		memset((char *)newptr + oldsize, 0, newsize - oldsize);
2046 	} else
2047 		memcpy(newptr, p, newsize);
2048 
2049 	ofree(&pool, p, 1, 0, oldsize);
2050 
2051 done:
2052 	if (*argpool != pool) {
2053 		pool->func = saved_function;
2054 		*argpool = pool;
2055 	}
2056 
2057 	return newptr;
2058 }
2059 
2060 static void *
2061 recallocarray_p(void *ptr, size_t oldnmemb, size_t newnmemb, size_t size)
2062 {
2063 	size_t oldsize, newsize;
2064 	void *newptr;
2065 
2066 	if (ptr == NULL)
2067 		return calloc(newnmemb, size);
2068 
2069 	if ((newnmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
2070 	    newnmemb > 0 && SIZE_MAX / newnmemb < size) {
2071 		errno = ENOMEM;
2072 		return NULL;
2073 	}
2074 	newsize = newnmemb * size;
2075 
2076 	if ((oldnmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
2077 	    oldnmemb > 0 && SIZE_MAX / oldnmemb < size) {
2078 		errno = EINVAL;
2079 		return NULL;
2080 	}
2081 	oldsize = oldnmemb * size;
2082 
2083 	/*
2084 	 * Don't bother too much if we're shrinking just a bit,
2085 	 * we do not shrink for series of small steps, oh well.
2086 	 */
2087 	if (newsize <= oldsize) {
2088 		size_t d = oldsize - newsize;
2089 
2090 		if (d < oldsize / 2 && d < MALLOC_PAGESIZE) {
2091 			memset((char *)ptr + newsize, 0, d);
2092 			return ptr;
2093 		}
2094 	}
2095 
2096 	newptr = malloc(newsize);
2097 	if (newptr == NULL)
2098 		return NULL;
2099 
2100 	if (newsize > oldsize) {
2101 		memcpy(newptr, ptr, oldsize);
2102 		memset((char *)newptr + oldsize, 0, newsize - oldsize);
2103 	} else
2104 		memcpy(newptr, ptr, newsize);
2105 
2106 	explicit_bzero(ptr, oldsize);
2107 	free(ptr);
2108 
2109 	return newptr;
2110 }
2111 
2112 void *
2113 recallocarray(void *ptr, size_t oldnmemb, size_t newnmemb, size_t size)
2114 {
2115 	struct dir_info *d;
2116 	size_t oldsize = 0, newsize;
2117 	void *r;
2118 	int saved_errno = errno;
2119 
2120 	if (!mopts.internal_funcs)
2121 		return recallocarray_p(ptr, oldnmemb, newnmemb, size);
2122 
2123 	PROLOGUE(getpool(), "recallocarray")
2124 	SET_CALLER(d, caller());
2125 
2126 	if ((newnmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
2127 	    newnmemb > 0 && SIZE_MAX / newnmemb < size) {
2128 		d->active--;
2129 		_MALLOC_UNLOCK(d->mutex);
2130 		if (mopts.malloc_xmalloc)
2131 			wrterror(d, "out of memory");
2132 		errno = ENOMEM;
2133 		return NULL;
2134 	}
2135 	newsize = newnmemb * size;
2136 
2137 	if (ptr != NULL) {
2138 		if ((oldnmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
2139 		    oldnmemb > 0 && SIZE_MAX / oldnmemb < size) {
2140 			d->active--;
2141 			_MALLOC_UNLOCK(d->mutex);
2142 			errno = EINVAL;
2143 			return NULL;
2144 		}
2145 		oldsize = oldnmemb * size;
2146 	}
2147 
2148 	r = orecallocarray(&d, ptr, oldsize, newsize);
2149 	EPILOGUE()
2150 	return r;
2151 }
2152 DEF_WEAK(recallocarray);
2153 
2154 static void *
2155 mapalign(struct dir_info *d, size_t alignment, size_t sz, int zero_fill)
2156 {
2157 	char *p, *q;
2158 
2159 	if (alignment < MALLOC_PAGESIZE || ((alignment - 1) & alignment) != 0)
2160 		wrterror(d, "mapalign bad alignment");
2161 	if (sz != PAGEROUND(sz))
2162 		wrterror(d, "mapalign round");
2163 
2164 	/* Allocate sz + alignment bytes of memory, which must include a
2165 	 * subrange of size bytes that is properly aligned.  Unmap the
2166 	 * other bytes, and then return that subrange.
2167 	 */
2168 
2169 	/* We need sz + alignment to fit into a size_t. */
2170 	if (alignment > SIZE_MAX - sz)
2171 		return MAP_FAILED;
2172 
2173 	p = map(d, sz + alignment, zero_fill);
2174 	if (p == MAP_FAILED)
2175 		return MAP_FAILED;
2176 	q = (char *)(((uintptr_t)p + alignment - 1) & ~(alignment - 1));
2177 	if (q != p) {
2178 		if (munmap(p, q - p))
2179 			wrterror(d, "munmap %p", p);
2180 	}
2181 	if (munmap(q + sz, alignment - (q - p)))
2182 		wrterror(d, "munmap %p", q + sz);
2183 	STATS_SUB(d->malloc_used, alignment);
2184 
2185 	return q;
2186 }
2187 
2188 static void *
2189 omemalign(struct dir_info *pool, size_t alignment, size_t sz, int zero_fill)
2190 {
2191 	size_t psz;
2192 	void *p, *caller = NULL;
2193 
2194 	/* If between half a page and a page, avoid MALLOC_MOVE. */
2195 	if (sz > MALLOC_MAXCHUNK && sz < MALLOC_PAGESIZE)
2196 		sz = MALLOC_PAGESIZE;
2197 	if (alignment <= MALLOC_PAGESIZE) {
2198 		size_t pof2;
2199 		/*
2200 		 * max(size, alignment) rounded up to power of 2 is enough
2201 		 * to assure the requested alignment. Large regions are
2202 		 * always page aligned.
2203 		 */
2204 		if (sz < alignment)
2205 			sz = alignment;
2206 		if (sz < MALLOC_PAGESIZE) {
2207 			pof2 = MALLOC_MINSIZE;
2208 			while (pof2 < sz)
2209 				pof2 <<= 1;
2210 		} else
2211 			pof2 = sz;
2212 		return omalloc(pool, pof2, zero_fill);
2213 	}
2214 
2215 	if (sz >= SIZE_MAX - mopts.malloc_guard - MALLOC_PAGESIZE) {
2216 		errno = ENOMEM;
2217 		return NULL;
2218 	}
2219 
2220 	if (sz < MALLOC_PAGESIZE)
2221 		sz = MALLOC_PAGESIZE;
2222 	sz += mopts.malloc_guard;
2223 	psz = PAGEROUND(sz);
2224 
2225 	p = mapalign(pool, alignment, psz, zero_fill);
2226 	if (p == MAP_FAILED) {
2227 		errno = ENOMEM;
2228 		return NULL;
2229 	}
2230 
2231 #ifdef MALLOC_STATS
2232 	if (DO_STATS)
2233 		caller = pool->caller;
2234 #endif
2235 	if (insert(pool, p, sz, caller)) {
2236 		unmap(pool, p, psz, 0);
2237 		errno = ENOMEM;
2238 		return NULL;
2239 	}
2240 
2241 	if (mopts.malloc_guard) {
2242 		if (mprotect((char *)p + psz - mopts.malloc_guard,
2243 		    mopts.malloc_guard, PROT_NONE))
2244 			wrterror(pool, "mprotect");
2245 		STATS_ADD(pool->malloc_guarded, mopts.malloc_guard);
2246 	}
2247 
2248 	if (pool->malloc_junk == 2) {
2249 		if (zero_fill)
2250 			memset((char *)p + sz - mopts.malloc_guard,
2251 			    SOME_JUNK, psz - sz);
2252 		else
2253 			memset(p, SOME_JUNK, psz - mopts.malloc_guard);
2254 	} else if (mopts.chunk_canaries)
2255 		fill_canary(p, sz - mopts.malloc_guard,
2256 		    psz - mopts.malloc_guard);
2257 
2258 	return p;
2259 }
2260 
2261 int
2262 posix_memalign(void **memptr, size_t alignment, size_t size)
2263 {
2264 	struct dir_info *d;
2265 	int res, saved_errno = errno;
2266 	void *r;
2267 
2268 	/* Make sure that alignment is a large enough power of 2. */
2269 	if (((alignment - 1) & alignment) != 0 || alignment < sizeof(void *))
2270 		return EINVAL;
2271 
2272 	d = getpool();
2273 	if (d == NULL) {
2274 		_malloc_init(0);
2275 		d = getpool();
2276 	}
2277 	_MALLOC_LOCK(d->mutex);
2278 	d->func = "posix_memalign";
2279 	if (d->active++) {
2280 		malloc_recurse(d);
2281 		goto err;
2282 	}
2283 	SET_CALLER(d, caller());
2284 	r = omemalign(d, alignment, size, 0);
2285 	d->active--;
2286 	_MALLOC_UNLOCK(d->mutex);
2287 	if (r == NULL) {
2288 		if (mopts.malloc_xmalloc)
2289 			wrterror(d, "out of memory");
2290 		goto err;
2291 	}
2292 	errno = saved_errno;
2293 	*memptr = r;
2294 	return 0;
2295 
2296 err:
2297 	res = errno;
2298 	errno = saved_errno;
2299 	return res;
2300 }
2301 DEF_STRONG(posix_memalign);
2302 
2303 void *
2304 aligned_alloc(size_t alignment, size_t size)
2305 {
2306 	struct dir_info *d;
2307 	int saved_errno = errno;
2308 	void *r;
2309 
2310 	/* Make sure that alignment is a positive power of 2. */
2311 	if (((alignment - 1) & alignment) != 0 || alignment == 0) {
2312 		errno = EINVAL;
2313 		return NULL;
2314 	};
2315 	/* Per spec, size should be a multiple of alignment */
2316 	if ((size & (alignment - 1)) != 0) {
2317 		errno = EINVAL;
2318 		return NULL;
2319 	}
2320 
2321 	PROLOGUE(getpool(), "aligned_alloc")
2322 	SET_CALLER(d, caller());
2323 	r = omemalign(d, alignment, size, 0);
2324 	EPILOGUE()
2325 	return r;
2326 }
2327 DEF_STRONG(aligned_alloc);
2328 
2329 #ifdef MALLOC_STATS
2330 
2331 static void
2332 print_chunk_details(struct dir_info *pool, void *p, size_t sz, size_t i)
2333 {
2334 	struct region_info *r;
2335 	struct chunk_info *chunkinfo;
2336 	uint32_t chunknum;
2337 	Dl_info info;
2338 	const char *caller, *pcaller = NULL;
2339 	const char *object = ".";
2340 	const char *pobject = ".";
2341 	const char *msg = "";
2342 
2343 	r = find(pool, p);
2344 	chunkinfo = (struct chunk_info *)r->size;
2345 	chunknum = find_chunknum(pool, chunkinfo, p, 0);
2346 	caller = r->f[chunknum];
2347 	if (dladdr(caller, &info) != 0) {
2348 		caller -= (uintptr_t)info.dli_fbase;
2349 		object = info.dli_fname;
2350 	}
2351 	if (chunknum > 0) {
2352 		chunknum--;
2353 		pcaller = r->f[chunknum];
2354 		if (dladdr(pcaller, &info) != 0) {
2355 			pcaller -= (uintptr_t)info.dli_fbase;
2356 			pobject = info.dli_fname;
2357 		}
2358 		if (CHUNK_FREE(chunkinfo, chunknum))
2359 			msg = " (now free)";
2360 	}
2361 
2362 	wrterror(pool,
2363 	    "write to free chunk %p[%zu..%zu]@%zu allocated at %s %p "
2364 	    "(preceding chunk %p allocated at %s %p%s)",
2365 	    p, i * sizeof(uint64_t),
2366 	    (i + 1) * sizeof(uint64_t) - 1, sz, object, caller, p - sz,
2367 	    pobject, pcaller, msg);
2368 }
2369 
2370 static void
2371 ulog(const char *format, ...)
2372 {
2373 	va_list ap;
2374 	static char* buf;
2375 	static size_t filled;
2376 	int len;
2377 
2378 	if (buf == NULL)
2379 		buf = MMAP(KTR_USER_MAXLEN, 0);
2380 	if (buf == MAP_FAILED)
2381 		return;
2382 
2383 	va_start(ap, format);
2384 	len = vsnprintf(buf + filled, KTR_USER_MAXLEN - filled, format, ap);
2385 	va_end(ap);
2386 	if (len < 0)
2387 		return;
2388 	if (len > KTR_USER_MAXLEN - filled)
2389 		len = KTR_USER_MAXLEN - filled;
2390 	filled += len;
2391 	if (filled > 0) {
2392 		if (filled == KTR_USER_MAXLEN || buf[filled - 1] == '\n') {
2393 			utrace("malloc", buf, filled);
2394 			filled = 0;
2395 		}
2396 	}
2397 }
2398 
2399 struct malloc_leak {
2400 	void *f;
2401 	size_t total_size;
2402 	int count;
2403 };
2404 
2405 struct leaknode {
2406 	RBT_ENTRY(leaknode) entry;
2407 	struct malloc_leak d;
2408 };
2409 
2410 static inline int
2411 leakcmp(const struct leaknode *e1, const struct leaknode *e2)
2412 {
2413 	return e1->d.f < e2->d.f ? -1 : e1->d.f > e2->d.f;
2414 }
2415 
2416 RBT_HEAD(leaktree, leaknode);
2417 RBT_PROTOTYPE(leaktree, leaknode, entry, leakcmp);
2418 RBT_GENERATE(leaktree, leaknode, entry, leakcmp);
2419 
2420 static void
2421 wrtwarning(const char *func, char *msg, ...)
2422 {
2423 	int		saved_errno = errno;
2424 	va_list		ap;
2425 
2426 	dprintf(STDERR_FILENO, "%s(%d) in %s(): ", __progname,
2427 	    getpid(), func != NULL ? func : "unknown");
2428 	va_start(ap, msg);
2429 	vdprintf(STDERR_FILENO, msg, ap);
2430 	va_end(ap);
2431 	dprintf(STDERR_FILENO, "\n");
2432 
2433 	errno = saved_errno;
2434 }
2435 
2436 static void
2437 putleakinfo(struct leaktree *leaks, void *f, size_t sz, int cnt)
2438 {
2439 	struct leaknode key, *p;
2440 	static struct leaknode *page;
2441 	static unsigned int used;
2442 
2443 	if (cnt == 0 || page == MAP_FAILED)
2444 		return;
2445 
2446 	key.d.f = f;
2447 	p = RBT_FIND(leaktree, leaks, &key);
2448 	if (p == NULL) {
2449 		if (page == NULL ||
2450 		    used >= MALLOC_PAGESIZE / sizeof(struct leaknode)) {
2451 			page = MMAP(MALLOC_PAGESIZE, 0);
2452 			if (page == MAP_FAILED) {
2453 				wrtwarning(__func__, strerror(errno));
2454 				return;
2455 			}
2456 			used = 0;
2457 		}
2458 		p = &page[used++];
2459 		p->d.f = f;
2460 		p->d.total_size = sz * cnt;
2461 		p->d.count = cnt;
2462 		RBT_INSERT(leaktree, leaks, p);
2463 	} else {
2464 		p->d.total_size += sz * cnt;
2465 		p->d.count += cnt;
2466 	}
2467 }
2468 
2469 static void
2470 dump_leaks(struct leaktree *leaks)
2471 {
2472 	struct leaknode *p;
2473 
2474 	ulog("Leak report:\n");
2475 	ulog("                 f     sum      #    avg\n");
2476 
2477 	RBT_FOREACH(p, leaktree, leaks) {
2478 		Dl_info info;
2479 		const char *caller = p->d.f;
2480 		const char *object = ".";
2481 
2482 		if (caller != NULL) {
2483 			if (dladdr(p->d.f, &info) != 0) {
2484 				caller -= (uintptr_t)info.dli_fbase;
2485 				object = info.dli_fname;
2486 			}
2487 		}
2488 		ulog("%18p %7zu %6u %6zu addr2line -e %s %p\n",
2489 		    p->d.f, p->d.total_size, p->d.count,
2490 		    p->d.total_size / p->d.count,
2491 		    object, caller);
2492 	}
2493 }
2494 
2495 static void
2496 dump_chunk(struct leaktree* leaks, struct chunk_info *p, void **f,
2497     int fromfreelist)
2498 {
2499 	while (p != NULL) {
2500 		if (mopts.malloc_verbose)
2501 			ulog("chunk %18p %18p %4zu %d/%d\n",
2502 			    p->page, NULL,
2503 			    B2SIZE(p->bucket), p->free, p->total);
2504 		if (!fromfreelist) {
2505 			size_t i, sz =  B2SIZE(p->bucket);
2506 			for (i = 0; i < p->total; i++) {
2507 				if (!CHUNK_FREE(p, i))
2508 					putleakinfo(leaks, f[i], sz, 1);
2509 			}
2510 			break;
2511 		}
2512 		p = LIST_NEXT(p, entries);
2513 		if (mopts.malloc_verbose && p != NULL)
2514 			ulog("       ->");
2515 	}
2516 }
2517 
2518 static void
2519 dump_free_chunk_info(struct dir_info *d, struct leaktree *leaks)
2520 {
2521 	int i, j, count;
2522 	struct chunk_info *p;
2523 
2524 	ulog("Free chunk structs:\n");
2525 	ulog("Bkt) #CI                     page"
2526 	    "                  f size free/n\n");
2527 	for (i = 0; i <= BUCKETS; i++) {
2528 		count = 0;
2529 		LIST_FOREACH(p, &d->chunk_info_list[i], entries)
2530 			count++;
2531 		for (j = 0; j < MALLOC_CHUNK_LISTS; j++) {
2532 			p = LIST_FIRST(&d->chunk_dir[i][j]);
2533 			if (p == NULL && count == 0)
2534 				continue;
2535 			if (j == 0)
2536 				ulog("%3d) %3d ", i, count);
2537 			else
2538 				ulog("         ");
2539 			if (p != NULL)
2540 				dump_chunk(leaks, p, NULL, 1);
2541 			else
2542 				ulog(".\n");
2543 		}
2544 	}
2545 
2546 }
2547 
2548 static void
2549 dump_free_page_info(struct dir_info *d)
2550 {
2551 	struct smallcache *cache;
2552 	size_t i, total = 0;
2553 
2554 	ulog("Cached in small cache:\n");
2555 	for (i = 0; i < MAX_SMALLCACHEABLE_SIZE; i++) {
2556 		cache = &d->smallcache[i];
2557 		if (cache->length != 0)
2558 			ulog("%zu(%u): %u = %zu\n", i + 1, cache->max,
2559 			    cache->length, cache->length * (i + 1));
2560 		total += cache->length * (i + 1);
2561 	}
2562 
2563 	ulog("Cached in big cache: %zu/%zu\n", d->bigcache_used,
2564 	    d->bigcache_size);
2565 	for (i = 0; i < d->bigcache_size; i++) {
2566 		if (d->bigcache[i].psize != 0)
2567 			ulog("%zu: %zu\n", i, d->bigcache[i].psize);
2568 		total += d->bigcache[i].psize;
2569 	}
2570 	ulog("Free pages cached: %zu\n", total);
2571 }
2572 
2573 static void
2574 malloc_dump1(int poolno, struct dir_info *d, struct leaktree *leaks)
2575 {
2576 	size_t i, realsize;
2577 
2578 	if (mopts.malloc_verbose) {
2579 		ulog("Malloc dir of %s pool %d at %p\n", __progname, poolno, d);
2580 		ulog("MT=%d J=%d Fl=%#x\n", d->malloc_mt, d->malloc_junk,
2581 		    d->mmap_flag);
2582 		ulog("Region slots free %zu/%zu\n",
2583 			d->regions_free, d->regions_total);
2584 		ulog("Finds %zu/%zu\n", d->finds, d->find_collisions);
2585 		ulog("Inserts %zu/%zu\n", d->inserts, d->insert_collisions);
2586 		ulog("Deletes %zu/%zu\n", d->deletes, d->delete_moves);
2587 		ulog("Cheap reallocs %zu/%zu\n",
2588 		    d->cheap_reallocs, d->cheap_realloc_tries);
2589 		ulog("Other pool searches %zu/%zu\n",
2590 		    d->other_pool, d->pool_searches);
2591 		ulog("In use %zu\n", d->malloc_used);
2592 		ulog("Guarded %zu\n", d->malloc_guarded);
2593 		dump_free_chunk_info(d, leaks);
2594 		dump_free_page_info(d);
2595 		ulog("Hash table:\n");
2596 		ulog("slot)  hash d  type               page                  "
2597 		    "f size [free/n]\n");
2598 	}
2599 	for (i = 0; i < d->regions_total; i++) {
2600 		if (d->r[i].p != NULL) {
2601 			size_t h = hash(d->r[i].p) &
2602 			    (d->regions_total - 1);
2603 			if (mopts.malloc_verbose)
2604 				ulog("%4zx) #%4zx %zd ",
2605 			        i, h, h - i);
2606 			REALSIZE(realsize, &d->r[i]);
2607 			if (realsize > MALLOC_MAXCHUNK) {
2608 				putleakinfo(leaks, d->r[i].f, realsize, 1);
2609 				if (mopts.malloc_verbose)
2610 					ulog("pages %18p %18p %zu\n", d->r[i].p,
2611 				        d->r[i].f, realsize);
2612 			} else
2613 				dump_chunk(leaks,
2614 				    (struct chunk_info *)d->r[i].size,
2615 				    d->r[i].f, 0);
2616 		}
2617 	}
2618 	if (mopts.malloc_verbose)
2619 		ulog("\n");
2620 }
2621 
2622 static void
2623 malloc_dump0(int poolno, struct dir_info *pool, struct leaktree *leaks)
2624 {
2625 	int i;
2626 	void *p;
2627 	struct region_info *r;
2628 
2629 	if (pool == NULL || pool->r == NULL)
2630 		return;
2631 	for (i = 0; i < MALLOC_DELAYED_CHUNK_MASK + 1; i++) {
2632 		p = pool->delayed_chunks[i];
2633 		if (p == NULL)
2634 			continue;
2635 		r = find(pool, p);
2636 		if (r == NULL)
2637 			wrterror(pool, "bogus pointer in malloc_dump %p", p);
2638 		free_bytes(pool, r, p);
2639 		pool->delayed_chunks[i] = NULL;
2640 	}
2641 	malloc_dump1(poolno, pool, leaks);
2642 }
2643 
2644 void
2645 malloc_dump(void)
2646 {
2647 	int i;
2648 	int saved_errno = errno;
2649 
2650 	/* XXX leak when run multiple times */
2651 	struct leaktree leaks = RBT_INITIALIZER(&leaks);
2652 
2653 	for (i = 0; i < mopts.malloc_mutexes; i++)
2654 		malloc_dump0(i, mopts.malloc_pool[i], &leaks);
2655 
2656 	dump_leaks(&leaks);
2657 	ulog("\n");
2658 	errno = saved_errno;
2659 }
2660 DEF_WEAK(malloc_dump);
2661 
2662 static void
2663 malloc_exit(void)
2664 {
2665 	int save_errno = errno;
2666 
2667 	ulog("******** Start dump %s *******\n", __progname);
2668 	ulog("M=%u I=%d F=%d U=%d J=%d R=%d X=%d C=%#x cache=%u "
2669 	    "G=%zu\n",
2670 	    mopts.malloc_mutexes,
2671 	    mopts.internal_funcs, mopts.malloc_freecheck,
2672 	    mopts.malloc_freeunmap, mopts.def_malloc_junk,
2673 	    mopts.malloc_realloc, mopts.malloc_xmalloc,
2674 	    mopts.chunk_canaries, mopts.def_maxcache,
2675 	    mopts.malloc_guard);
2676 
2677 	malloc_dump();
2678 	ulog("******** End dump %s *******\n", __progname);
2679 	errno = save_errno;
2680 }
2681 
2682 #endif /* MALLOC_STATS */
2683