1 /*	$OpenBSD: malloc.c,v 1.292 2023/10/26 17:59:16 otto Exp $	*/
2 /*
3  * Copyright (c) 2008, 2010, 2011, 2016, 2023 Otto Moerbeek <otto@drijf.net>
4  * Copyright (c) 2012 Matthew Dempsky <matthew@openbsd.org>
5  * Copyright (c) 2008 Damien Miller <djm@openbsd.org>
6  * Copyright (c) 2000 Poul-Henning Kamp <phk@FreeBSD.org>
7  *
8  * Permission to use, copy, modify, and distribute this software for any
9  * purpose with or without fee is hereby granted, provided that the above
10  * copyright notice and this permission notice appear in all copies.
11  *
12  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19  */
20 
21 /*
22  * If we meet some day, and you think this stuff is worth it, you
23  * can buy me a beer in return. Poul-Henning Kamp
24  */
25 
26 #ifndef MALLOC_SMALL
27 #define MALLOC_STATS
28 #endif
29 
30 #include <sys/types.h>
31 #include <sys/queue.h>
32 #include <sys/mman.h>
33 #include <sys/sysctl.h>
34 #include <uvm/uvmexp.h>
35 #include <errno.h>
36 #include <stdarg.h>
37 #include <stdint.h>
38 #include <stdio.h>
39 #include <stdlib.h>
40 #include <string.h>
41 #include <unistd.h>
42 
43 #ifdef MALLOC_STATS
44 #include <sys/tree.h>
45 #include <sys/ktrace.h>
46 #include <dlfcn.h>
47 #endif
48 
49 #include "thread_private.h"
50 #include <tib.h>
51 
52 #define MALLOC_PAGESHIFT	_MAX_PAGE_SHIFT
53 
54 #define MALLOC_MINSHIFT		4
55 #define MALLOC_MAXSHIFT		(MALLOC_PAGESHIFT - 1)
56 #define MALLOC_PAGESIZE		(1UL << MALLOC_PAGESHIFT)
57 #define MALLOC_MINSIZE		(1UL << MALLOC_MINSHIFT)
58 #define MALLOC_PAGEMASK		(MALLOC_PAGESIZE - 1)
59 #define MASK_POINTER(p)		((void *)(((uintptr_t)(p)) & ~MALLOC_PAGEMASK))
60 
61 #define MALLOC_MAXCHUNK		(1 << MALLOC_MAXSHIFT)
62 #define MALLOC_MAXCACHE		256
63 #define MALLOC_DELAYED_CHUNK_MASK	15
64 #ifdef MALLOC_STATS
65 #define MALLOC_INITIAL_REGIONS	512
66 #else
67 #define MALLOC_INITIAL_REGIONS	(MALLOC_PAGESIZE / sizeof(struct region_info))
68 #endif
69 #define MALLOC_DEFAULT_CACHE	64
70 #define MALLOC_CHUNK_LISTS	4
71 #define CHUNK_CHECK_LENGTH	32
72 
73 #define B2SIZE(b)		((b) * MALLOC_MINSIZE)
74 #define B2ALLOC(b)		((b) == 0 ? MALLOC_MINSIZE : \
75 				    (b) * MALLOC_MINSIZE)
76 #define BUCKETS 		(MALLOC_MAXCHUNK / MALLOC_MINSIZE)
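
/*
 * Example (assuming a 4 KB page, i.e. MALLOC_PAGESHIFT == 12): MALLOC_MINSIZE
 * is 16, MALLOC_MAXCHUNK is 2048 and BUCKETS is 128.  Bucket b then holds
 * chunks of b * 16 bytes; bucket 0 is reserved for malloc(0), whose slots
 * still occupy MALLOC_MINSIZE bytes (see B2ALLOC).  Requests larger than
 * MALLOC_MAXCHUNK bypass the chunk allocator and are served as whole pages.
 */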
77 
78 /*
79  * We move allocations between half a page and a whole page towards the end,
80  * subject to alignment constraints. This is the extra headroom we allow.
81  * Set to zero to be the most strict.
82  */
83 #define MALLOC_LEEWAY		0
84 #define MALLOC_MOVE_COND(sz)	((sz) - mopts.malloc_guard < 		\
85 				    MALLOC_PAGESIZE - MALLOC_LEEWAY)
86 #define MALLOC_MOVE(p, sz)  	(((char *)(p)) +			\
87 				    ((MALLOC_PAGESIZE - MALLOC_LEEWAY -	\
88 			    	    ((sz) - mopts.malloc_guard)) & 	\
89 				    ~(MALLOC_MINSIZE - 1)))
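
/*
 * Example (assuming a 4 KB page, no guard page and MALLOC_LEEWAY == 0):
 * a page-sized allocation of 3000 bytes satisfies MALLOC_MOVE_COND(), so
 * MALLOC_MOVE() shifts it to offset (4096 - 3000) & ~15 == 1088 within its
 * page, leaving only 8 bytes of slack between the end of the allocation and
 * the page (or guard page) boundary, as close as MALLOC_MINSIZE alignment
 * allows.
 */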
90 
91 #define PAGEROUND(x)  (((x) + (MALLOC_PAGEMASK)) & ~MALLOC_PAGEMASK)
92 
93 /*
94  * What to use for Junk.  This is the byte value we use to fill with
95  * when the 'J' option is enabled. Use SOME_JUNK right after alloc,
96  * and SOME_FREEJUNK right before free.
97  */
98 #define SOME_JUNK		0xdb	/* deadbeef */
99 #define SOME_FREEJUNK		0xdf	/* dead, free */
100 #define SOME_FREEJUNK_ULL	0xdfdfdfdfdfdfdfdfULL
101 
102 #define MMAP(sz,f)	mmap(NULL, (sz), PROT_READ | PROT_WRITE, \
103     MAP_ANON | MAP_PRIVATE | (f), -1, 0)
104 
105 #define MMAPNONE(sz,f)	mmap(NULL, (sz), PROT_NONE, \
106     MAP_ANON | MAP_PRIVATE | (f), -1, 0)
107 
108 #define MMAPA(a,sz,f)	mmap((a), (sz), PROT_READ | PROT_WRITE, \
109     MAP_ANON | MAP_PRIVATE | (f), -1, 0)
110 
111 struct region_info {
112 	void *p;		/* page; low bits used to mark chunks */
113 	uintptr_t size;		/* size for pages, or chunk_info pointer */
114 #ifdef MALLOC_STATS
115 	void **f;		/* where allocated from */
116 #endif
117 };
118 
119 LIST_HEAD(chunk_head, chunk_info);
120 
121 /*
122  * Two caches, one for "small" regions, one for "big".
123  * The small cache is an array of free pages per region size; the big cache
124  * is a single array holding regions of varying sizes.
125  */
126 #define MAX_SMALLCACHEABLE_SIZE	32
127 #define MAX_BIGCACHEABLE_SIZE	512
128 /* If the total # of pages is larger than this, evict before inserting */
129 #define BIGCACHE_FILL(sz)	(MAX_BIGCACHEABLE_SIZE * (sz) / 4)
130 
131 struct smallcache {
132 	void **pages;
133 	ushort length;
134 	ushort max;
135 };
136 
137 struct bigcache {
138 	void *page;
139 	size_t psize;
140 };
141 
142 struct dir_info {
143 	u_int32_t canary1;
144 	int active;			/* status of malloc */
145 	struct region_info *r;		/* region slots */
146 	size_t regions_total;		/* number of region slots */
147 	size_t regions_free;		/* number of free slots */
148 	size_t rbytesused;		/* random bytes used */
149 	const char *func;		/* current function */
150 	int malloc_junk;		/* junk fill? */
151 	int mmap_flag;			/* extra flag for mmap */
152 	int mutex;
153 	int malloc_mt;			/* multi-threaded mode? */
154 					/* lists of free chunk info structs */
155 	struct chunk_head chunk_info_list[BUCKETS + 1];
156 					/* lists of chunks with free slots */
157 	struct chunk_head chunk_dir[BUCKETS + 1][MALLOC_CHUNK_LISTS];
158 					/* delayed free chunk slots */
159 	void *delayed_chunks[MALLOC_DELAYED_CHUNK_MASK + 1];
160 	u_char rbytes[32];		/* random bytes */
161 					/* free pages cache */
162 	struct smallcache smallcache[MAX_SMALLCACHEABLE_SIZE];
163 	size_t bigcache_used;
164 	size_t bigcache_size;
165 	struct bigcache *bigcache;
166 	void *chunk_pages;
167 	size_t chunk_pages_used;
168 #ifdef MALLOC_STATS
169 	void *caller;
170 	size_t inserts;
171 	size_t insert_collisions;
172 	size_t deletes;
173 	size_t delete_moves;
174 	size_t cheap_realloc_tries;
175 	size_t cheap_reallocs;
176 	size_t malloc_used;		/* bytes allocated */
177 	size_t malloc_guarded;		/* bytes used for guards */
178 #define STATS_ADD(x,y)	((x) += (y))
179 #define STATS_SUB(x,y)	((x) -= (y))
180 #define STATS_INC(x)	((x)++)
181 #define STATS_ZERO(x)	((x) = 0)
182 #define STATS_SETF(x,y)	((x)->f = (y))
183 #define STATS_SETFN(x,k,y)	((x)->f[k] = (y))
184 #define SET_CALLER(x,y)	if (DO_STATS) ((x)->caller = (y))
185 #else
186 #define STATS_ADD(x,y)	/* nothing */
187 #define STATS_SUB(x,y)	/* nothing */
188 #define STATS_INC(x)	/* nothing */
189 #define STATS_ZERO(x)	/* nothing */
190 #define STATS_SETF(x,y)	/* nothing */
191 #define STATS_SETFN(x,k,y)	/* nothing */
192 #define SET_CALLER(x,y)	/* nothing */
193 #endif /* MALLOC_STATS */
194 	u_int32_t canary2;
195 };
196 
197 static void unmap(struct dir_info *d, void *p, size_t sz, size_t clear);
198 
199 /*
200  * This structure describes a page worth of chunks.
201  *
202  * How many bits per u_short in the bitmap
203  */
204 #define MALLOC_BITS		(NBBY * sizeof(u_short))
205 struct chunk_info {
206 	LIST_ENTRY(chunk_info) entries;
207 	void *page;			/* pointer to the page */
208 	/* number of shorts should add up to 8, check alloc_chunk_info() */
209 	u_short canary;
210 	u_short bucket;
211 	u_short free;			/* how many free chunks */
212 	u_short total;			/* how many chunks */
213 	u_short offset;			/* requested size table offset */
214 #define CHUNK_INFO_TAIL			3
215 	u_short bits[CHUNK_INFO_TAIL];	/* which chunks are free */
216 };
217 
218 #define CHUNK_FREE(i, n)	((i)->bits[(n) / MALLOC_BITS] & (1U << ((n) % MALLOC_BITS)))
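
/*
 * Example (assuming a 4 KB page): for bucket 1 (16-byte chunks) a page holds
 * 4096 / 16 == 256 chunks, so the free bitmap needs 256 / 16 == 16 u_shorts
 * and offset is 16.  A set bit means the chunk is free.  When chunk canaries
 * are enabled, the requested size of chunk n is stored right after the
 * bitmap, in bits[offset + n].
 */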
219 
220 struct malloc_readonly {
221 					/* Main bookkeeping information */
222 	struct dir_info *malloc_pool[_MALLOC_MUTEXES];
223 	u_int	malloc_mutexes;		/* how many are in actual use? */
224 	int	malloc_freecheck;	/* Extensive double free check */
225 	int	malloc_freeunmap;	/* mprotect free pages PROT_NONE? */
226 	int	def_malloc_junk;	/* junk fill? */
227 	int	malloc_realloc;		/* always realloc? */
228 	int	malloc_xmalloc;		/* xmalloc behaviour? */
229 	u_int	chunk_canaries;		/* use canaries after chunks? */
230 	int	internal_funcs;		/* use better recallocarray/freezero? */
231 	u_int	def_maxcache;		/* free pages we cache */
232 	u_int	junk_loc;		/* variation in location of junk */
233 	size_t	malloc_guard;		/* use guard pages after allocations? */
234 #ifdef MALLOC_STATS
235 	int	malloc_stats;		/* save callers, dump leak report at end */
236 	int	malloc_verbose;		/* dump verbose statistics at end */
237 #define	DO_STATS	mopts.malloc_stats
238 #else
239 #define	DO_STATS	0
240 #endif
241 	u_int32_t malloc_canary;	/* Matched against ones in pool */
242 };
243 
244 
245 /* This object is mapped PROT_READ after initialisation to prevent tampering */
246 static union {
247 	struct malloc_readonly mopts;
248 	u_char _pad[MALLOC_PAGESIZE];
249 } malloc_readonly __attribute__((aligned(MALLOC_PAGESIZE)))
250 		__attribute__((section(".openbsd.mutable")));
251 #define mopts	malloc_readonly.mopts
252 
253 char		*malloc_options;	/* compile-time options */
254 
255 static __dead void wrterror(struct dir_info *d, char *msg, ...)
256     __attribute__((__format__ (printf, 2, 3)));
257 
258 #ifdef MALLOC_STATS
259 void malloc_dump(void);
260 PROTO_NORMAL(malloc_dump);
261 static void malloc_exit(void);
262 static void print_chunk_details(struct dir_info *, void *, size_t, size_t);
263 #endif
264 
265 #if defined(__aarch64__) || \
266 	defined(__amd64__) || \
267 	defined(__arm__)
268 static inline void* caller(void)
269 {
270 	void *p;
271 
272 	switch (DO_STATS) {
273 	case 0:
274 	default:
275 		return NULL;
276 	case 1:
277 		p = __builtin_return_address(0);
278 		break;
279 	case 2:
280 		p = __builtin_return_address(1);
281 		break;
282 	case 3:
283 		p = __builtin_return_address(2);
284 		break;
285 	}
286 	return __builtin_extract_return_addr(p);
287 }
288 #else
289 static inline void* caller(void)
290 {
291 	return DO_STATS == 0 ? NULL :
292 	    __builtin_extract_return_addr(__builtin_return_address(0));
293 }
294 #endif
295 
296 /* The low bits of r->p determine the size: 0 means >= page size, with
297  * r->size holding the real size; otherwise the low bits are bucket + 1.
298  */
299 #define REALSIZE(sz, r)						\
300 	(sz) = (uintptr_t)(r)->p & MALLOC_PAGEMASK,		\
301 	(sz) = ((sz) == 0 ? (r)->size : B2SIZE((sz) - 1))
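
/*
 * Example: a page of bucket-3 chunks is inserted with the low bits of r->p
 * set to 4 (bucket + 1, see omalloc_make_chunks()), so REALSIZE() yields
 * B2SIZE(3) == 3 * MALLOC_MINSIZE; a multi-page region has low bits 0 and
 * its size is read from r->size directly.
 */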
302 
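/*
 * Hash the page number of a pointer: the running sum is repeatedly scaled
 * by 127 ((sum << 7) - sum) while folding in u >> 16, and on LP64 also
 * u >> 32 and u >> 48, so the higher bits of the page number also influence
 * the result.
 */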
303 static inline size_t
304 hash(void *p)
305 {
306 	size_t sum;
307 	uintptr_t u;
308 
309 	u = (uintptr_t)p >> MALLOC_PAGESHIFT;
310 	sum = u;
311 	sum = (sum << 7) - sum + (u >> 16);
312 #ifdef __LP64__
313 	sum = (sum << 7) - sum + (u >> 32);
314 	sum = (sum << 7) - sum + (u >> 48);
315 #endif
316 	return sum;
317 }
318 
319 static inline struct dir_info *
320 getpool(void)
321 {
322 	if (mopts.malloc_pool[1] == NULL || !mopts.malloc_pool[1]->malloc_mt)
323 		return mopts.malloc_pool[1];
324 	else	/* first one reserved for special pool */
325 		return mopts.malloc_pool[1 + TIB_GET()->tib_tid %
326 		    (mopts.malloc_mutexes - 1)];
327 }
328 
329 static __dead void
330 wrterror(struct dir_info *d, char *msg, ...)
331 {
332 	int		saved_errno = errno;
333 	va_list		ap;
334 
335 	dprintf(STDERR_FILENO, "%s(%d) in %s(): ", __progname,
336 	    getpid(), (d != NULL && d->func) ? d->func : "unknown");
337 	va_start(ap, msg);
338 	vdprintf(STDERR_FILENO, msg, ap);
339 	va_end(ap);
340 	dprintf(STDERR_FILENO, "\n");
341 
342 #ifdef MALLOC_STATS
343 	if (DO_STATS && mopts.malloc_verbose)
344 		malloc_dump();
345 #endif
346 
347 	errno = saved_errno;
348 
349 	abort();
350 }
351 
352 static void
353 rbytes_init(struct dir_info *d)
354 {
355 	arc4random_buf(d->rbytes, sizeof(d->rbytes));
356 	/* add 1 to account for using d->rbytes[0] */
357 	d->rbytesused = 1 + d->rbytes[0] % (sizeof(d->rbytes) / 2);
358 }
359 
360 static inline u_char
361 getrbyte(struct dir_info *d)
362 {
363 	u_char x;
364 
365 	if (d->rbytesused >= sizeof(d->rbytes))
366 		rbytes_init(d);
367 	x = d->rbytes[d->rbytesused++];
368 	return x;
369 }
370 
371 static void
372 omalloc_parseopt(char opt)
373 {
374 	switch (opt) {
375 	case '+':
376 		mopts.malloc_mutexes <<= 1;
377 		if (mopts.malloc_mutexes > _MALLOC_MUTEXES)
378 			mopts.malloc_mutexes = _MALLOC_MUTEXES;
379 		break;
380 	case '-':
381 		mopts.malloc_mutexes >>= 1;
382 		if (mopts.malloc_mutexes < 2)
383 			mopts.malloc_mutexes = 2;
384 		break;
385 	case '>':
386 		mopts.def_maxcache <<= 1;
387 		if (mopts.def_maxcache > MALLOC_MAXCACHE)
388 			mopts.def_maxcache = MALLOC_MAXCACHE;
389 		break;
390 	case '<':
391 		mopts.def_maxcache >>= 1;
392 		break;
393 	case 'c':
394 		mopts.chunk_canaries = 0;
395 		break;
396 	case 'C':
397 		mopts.chunk_canaries = 1;
398 		break;
399 #ifdef MALLOC_STATS
400 	case 'd':
401 		mopts.malloc_stats = 0;
402 		break;
403 	case 'D':
404 	case '1':
405 		mopts.malloc_stats = 1;
406 		break;
407 	case '2':
408 		mopts.malloc_stats = 2;
409 		break;
410 	case '3':
411 		mopts.malloc_stats = 3;
412 		break;
413 #endif /* MALLOC_STATS */
414 	case 'f':
415 		mopts.malloc_freecheck = 0;
416 		mopts.malloc_freeunmap = 0;
417 		break;
418 	case 'F':
419 		mopts.malloc_freecheck = 1;
420 		mopts.malloc_freeunmap = 1;
421 		break;
422 	case 'g':
423 		mopts.malloc_guard = 0;
424 		break;
425 	case 'G':
426 		mopts.malloc_guard = MALLOC_PAGESIZE;
427 		break;
428 	case 'j':
429 		if (mopts.def_malloc_junk > 0)
430 			mopts.def_malloc_junk--;
431 		break;
432 	case 'J':
433 		if (mopts.def_malloc_junk < 2)
434 			mopts.def_malloc_junk++;
435 		break;
436 	case 'r':
437 		mopts.malloc_realloc = 0;
438 		break;
439 	case 'R':
440 		mopts.malloc_realloc = 1;
441 		break;
442 	case 'u':
443 		mopts.malloc_freeunmap = 0;
444 		break;
445 	case 'U':
446 		mopts.malloc_freeunmap = 1;
447 		break;
448 #ifdef MALLOC_STATS
449 	case 'v':
450 		mopts.malloc_verbose = 0;
451 		break;
452 	case 'V':
453 		mopts.malloc_verbose = 1;
454 		break;
455 #endif /* MALLOC_STATS */
456 	case 'x':
457 		mopts.malloc_xmalloc = 0;
458 		break;
459 	case 'X':
460 		mopts.malloc_xmalloc = 1;
461 		break;
462 	default:
463 		dprintf(STDERR_FILENO, "malloc() warning: "
464 		    "unknown char in MALLOC_OPTIONS\n");
465 		break;
466 	}
467 }
468 
469 static void
470 omalloc_init(void)
471 {
472 	char *p, *q, b[16];
473 	int i, j;
474 	const int mib[2] = { CTL_VM, VM_MALLOC_CONF };
475 	size_t sb;
476 
477 	/*
478 	 * Default options
479 	 */
480 	mopts.malloc_mutexes = 8;
481 	mopts.def_malloc_junk = 1;
482 	mopts.def_maxcache = MALLOC_DEFAULT_CACHE;
483 
484 	for (i = 0; i < 3; i++) {
485 		switch (i) {
486 		case 0:
487 			sb = sizeof(b);
488 			j = sysctl(mib, 2, b, &sb, NULL, 0);
489 			if (j != 0)
490 				continue;
491 			p = b;
492 			break;
493 		case 1:
494 			if (issetugid() == 0)
495 				p = getenv("MALLOC_OPTIONS");
496 			else
497 				continue;
498 			break;
499 		case 2:
500 			p = malloc_options;
501 			break;
502 		default:
503 			p = NULL;
504 		}
505 
506 		for (; p != NULL && *p != '\0'; p++) {
507 			switch (*p) {
508 			case 'S':
509 				for (q = "CFGJ"; *q != '\0'; q++)
510 					omalloc_parseopt(*q);
511 				mopts.def_maxcache = 0;
512 				break;
513 			case 's':
514 				for (q = "cfgj"; *q != '\0'; q++)
515 					omalloc_parseopt(*q);
516 				mopts.def_maxcache = MALLOC_DEFAULT_CACHE;
517 				break;
518 			default:
519 				omalloc_parseopt(*p);
520 				break;
521 			}
522 		}
523 	}
524 
525 #ifdef MALLOC_STATS
526 	if (DO_STATS && (atexit(malloc_exit) == -1)) {
527 		dprintf(STDERR_FILENO, "malloc() warning: atexit(2) failed."
528 		    " Will not be able to dump stats on exit\n");
529 	}
530 #endif
531 
532 	while ((mopts.malloc_canary = arc4random()) == 0)
533 		;
534 	mopts.junk_loc = arc4random();
535 	if (mopts.chunk_canaries)
536 		do {
537 			mopts.chunk_canaries = arc4random();
538 		} while ((u_char)mopts.chunk_canaries == 0 ||
539 		    (u_char)mopts.chunk_canaries == SOME_FREEJUNK);
540 }
541 
542 static void
543 omalloc_poolinit(struct dir_info *d, int mmap_flag)
544 {
545 	int i, j;
546 
547 	d->r = NULL;
548 	d->rbytesused = sizeof(d->rbytes);
549 	d->regions_free = d->regions_total = 0;
550 	for (i = 0; i <= BUCKETS; i++) {
551 		LIST_INIT(&d->chunk_info_list[i]);
552 		for (j = 0; j < MALLOC_CHUNK_LISTS; j++)
553 			LIST_INIT(&d->chunk_dir[i][j]);
554 	}
555 	d->mmap_flag = mmap_flag;
556 	d->malloc_junk = mopts.def_malloc_junk;
557 	d->canary1 = mopts.malloc_canary ^ (u_int32_t)(uintptr_t)d;
558 	d->canary2 = ~d->canary1;
559 }
560 
561 static int
562 omalloc_grow(struct dir_info *d)
563 {
564 	size_t newtotal;
565 	size_t newsize;
566 	size_t mask;
567 	size_t i, oldpsz;
568 	struct region_info *p;
569 
570 	if (d->regions_total > SIZE_MAX / sizeof(struct region_info) / 2)
571 		return 1;
572 
573 	newtotal = d->regions_total == 0 ? MALLOC_INITIAL_REGIONS :
574 	    d->regions_total * 2;
575 	newsize = PAGEROUND(newtotal * sizeof(struct region_info));
576 	mask = newtotal - 1;
577 
578 	/* Don't use the cache here; a user use-after-free must not touch this metadata */
579 	p = MMAP(newsize, d->mmap_flag);
580 	if (p == MAP_FAILED)
581 		return 1;
582 
583 	STATS_ADD(d->malloc_used, newsize);
584 	STATS_ZERO(d->inserts);
585 	STATS_ZERO(d->insert_collisions);
586 	for (i = 0; i < d->regions_total; i++) {
587 		void *q = d->r[i].p;
588 		if (q != NULL) {
589 			size_t index = hash(q) & mask;
590 			STATS_INC(d->inserts);
591 			while (p[index].p != NULL) {
592 				index = (index - 1) & mask;
593 				STATS_INC(d->insert_collisions);
594 			}
595 			p[index] = d->r[i];
596 		}
597 	}
598 
599 	if (d->regions_total > 0) {
600 		oldpsz = PAGEROUND(d->regions_total * sizeof(struct region_info));
601 		/* clear to avoid meta info ending up in the cache */
602 		unmap(d, d->r, oldpsz, oldpsz);
603 	}
604 	d->regions_free += newtotal - d->regions_total;
605 	d->regions_total = newtotal;
606 	d->r = p;
607 	return 0;
608 }
609 
610 /*
611  * The hashtable uses the assumption that p is never NULL. This holds since
612  * non-MAP_FIXED mappings with hint 0 start at BRKSIZ.
613  */
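/*
 * The region table is an open-addressing hash table: insert() and find()
 * probe downwards ((index - 1) & mask) from hash(p), delete() re-packs the
 * cluster using Knuth's Algorithm R, and omalloc_grow() doubles the table
 * once fewer than a quarter of the slots are free.
 */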
614 static int
615 insert(struct dir_info *d, void *p, size_t sz, void *f)
616 {
617 	size_t index;
618 	size_t mask;
619 	void *q;
620 
621 	if (d->regions_free * 4 < d->regions_total || d->regions_total == 0) {
622 		if (omalloc_grow(d))
623 			return 1;
624 	}
625 	mask = d->regions_total - 1;
626 	index = hash(p) & mask;
627 	q = d->r[index].p;
628 	STATS_INC(d->inserts);
629 	while (q != NULL) {
630 		index = (index - 1) & mask;
631 		q = d->r[index].p;
632 		STATS_INC(d->insert_collisions);
633 	}
634 	d->r[index].p = p;
635 	d->r[index].size = sz;
636 	STATS_SETF(&d->r[index], f);
637 	d->regions_free--;
638 	return 0;
639 }
640 
641 static struct region_info *
642 find(struct dir_info *d, void *p)
643 {
644 	size_t index;
645 	size_t mask = d->regions_total - 1;
646 	void *q, *r;
647 
648 	if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)(uintptr_t)d) ||
649 	    d->canary1 != ~d->canary2)
650 		wrterror(d, "internal struct corrupt");
651 	if (d->r == NULL)
652 		return NULL;
653 	p = MASK_POINTER(p);
654 	index = hash(p) & mask;
655 	r = d->r[index].p;
656 	q = MASK_POINTER(r);
657 	while (q != p && r != NULL) {
658 		index = (index - 1) & mask;
659 		r = d->r[index].p;
660 		q = MASK_POINTER(r);
661 	}
662 	return (q == p && r != NULL) ? &d->r[index] : NULL;
663 }
664 
665 static void
666 delete(struct dir_info *d, struct region_info *ri)
667 {
668 	/* algorithm R, Knuth Vol III section 6.4 */
669 	size_t mask = d->regions_total - 1;
670 	size_t i, j, r;
671 
672 	if (d->regions_total & (d->regions_total - 1))
673 		wrterror(d, "regions_total not 2^x");
674 	d->regions_free++;
675 	STATS_INC(d->deletes);
676 
677 	i = ri - d->r;
678 	for (;;) {
679 		d->r[i].p = NULL;
680 		d->r[i].size = 0;
681 		j = i;
682 		for (;;) {
683 			i = (i - 1) & mask;
684 			if (d->r[i].p == NULL)
685 				return;
686 			r = hash(d->r[i].p) & mask;
687 			if ((i <= r && r < j) || (r < j && j < i) ||
688 			    (j < i && i <= r))
689 				continue;
690 			d->r[j] = d->r[i];
691 			STATS_INC(d->delete_moves);
692 			break;
693 		}
694 
695 	}
696 }
697 
698 static inline void
699 junk_free(int junk, void *p, size_t sz)
700 {
701 	size_t i, step = 1;
702 	uint64_t *lp = p;
703 
704 	if (junk == 0 || sz == 0)
705 		return;
706 	sz /= sizeof(uint64_t);
707 	if (junk == 1) {
708 		if (sz > MALLOC_PAGESIZE / sizeof(uint64_t))
709 			sz = MALLOC_PAGESIZE / sizeof(uint64_t);
710 		step = sz / 4;
711 		if (step == 0)
712 			step = 1;
713 	}
714 	/* Do not always put the free junk bytes in the same spot.
715 	   There is modulo bias here, but we ignore that. */
716 	for (i = mopts.junk_loc % step; i < sz; i += step)
717 		lp[i] = SOME_FREEJUNK_ULL;
718 }
719 
720 static inline void
721 validate_junk(struct dir_info *pool, void *p, size_t argsz)
722 {
723 	size_t i, sz, step = 1;
724 	uint64_t *lp = p;
725 
726 	if (pool->malloc_junk == 0 || argsz == 0)
727 		return;
728 	sz = argsz / sizeof(uint64_t);
729 	if (pool->malloc_junk == 1) {
730 		if (sz > MALLOC_PAGESIZE / sizeof(uint64_t))
731 			sz = MALLOC_PAGESIZE / sizeof(uint64_t);
732 		step = sz / 4;
733 		if (step == 0)
734 			step = 1;
735 	}
736 	/* see junk_free */
737 	for (i = mopts.junk_loc % step; i < sz; i += step) {
738 		if (lp[i] != SOME_FREEJUNK_ULL) {
739 #ifdef MALLOC_STATS
740 			if (DO_STATS && argsz <= MALLOC_MAXCHUNK)
741 				print_chunk_details(pool, lp, argsz, i);
742 			else
743 #endif
744 				wrterror(pool,
745 				    "write to free mem %p[%zu..%zu]@%zu",
746 				    lp, i * sizeof(uint64_t),
747 				    (i + 1) * sizeof(uint64_t) - 1, argsz);
748 		}
749 	}
750 }
751 
752 
753 /*
754  * Cache maintenance.
755  * Unlike the regular region data structure, the sizes in the
756  * cache are in MALLOC_PAGESIZE units.
757  */
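/*
 * Freed regions of up to MAX_SMALLCACHEABLE_SIZE pages go into a per-size
 * array of page slots; larger regions of up to MAX_BIGCACHEABLE_SIZE pages
 * share a single array.  Before a big region is cached, an existing entry
 * may be evicted, preferably once the total number of cached pages has
 * reached BIGCACHE_FILL(bigcache_size).
 */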
758 static void
759 unmap(struct dir_info *d, void *p, size_t sz, size_t clear)
760 {
761 	size_t psz = sz >> MALLOC_PAGESHIFT;
762 	void *r;
763 	u_short i;
764 	struct smallcache *cache;
765 
766 	if (sz != PAGEROUND(sz) || psz == 0)
767 		wrterror(d, "munmap round");
768 
769 	if (d->bigcache_size > 0 && psz > MAX_SMALLCACHEABLE_SIZE &&
770 	    psz <= MAX_BIGCACHEABLE_SIZE) {
771 		u_short base = getrbyte(d);
772 		u_short j;
773 
774 		/* don't look through all slots */
775 		for (j = 0; j < d->bigcache_size / 4; j++) {
776 			i = (base + j) & (d->bigcache_size - 1);
777 			if (d->bigcache_used <
778 			    BIGCACHE_FILL(d->bigcache_size))  {
779 				if (d->bigcache[i].psize == 0)
780 					break;
781 			} else {
782 				if (d->bigcache[i].psize != 0)
783 					break;
784 			}
785 		}
786 		/* if we didn't find a preferred slot, use random one */
787 		if (d->bigcache[i].psize != 0) {
788 			size_t tmp;
789 
790 			r = d->bigcache[i].page;
791 			d->bigcache_used -= d->bigcache[i].psize;
792 			tmp = d->bigcache[i].psize << MALLOC_PAGESHIFT;
793 			if (!mopts.malloc_freeunmap)
794 				validate_junk(d, r, tmp);
795 			if (munmap(r, tmp))
796 				 wrterror(d, "munmap %p", r);
797 			STATS_SUB(d->malloc_used, tmp);
798 		}
799 
800 		if (clear > 0)
801 			explicit_bzero(p, clear);
802 		if (mopts.malloc_freeunmap) {
803 			if (mprotect(p, sz, PROT_NONE))
804 				wrterror(d, "mprotect %p", p);
805 		} else
806 			junk_free(d->malloc_junk, p, sz);
807 		d->bigcache[i].page = p;
808 		d->bigcache[i].psize = psz;
809 		d->bigcache_used += psz;
810 		return;
811 	}
812 	if (psz > MAX_SMALLCACHEABLE_SIZE || d->smallcache[psz - 1].max == 0) {
813 		if (munmap(p, sz))
814 			wrterror(d, "munmap %p", p);
815 		STATS_SUB(d->malloc_used, sz);
816 		return;
817 	}
818 	cache = &d->smallcache[psz - 1];
819 	if (cache->length == cache->max) {
820 		int fresh;
821 		/* use a random slot */
822 		i = getrbyte(d) & (cache->max - 1);
823 		r = cache->pages[i];
824 		fresh = (uintptr_t)r & 1;
825 		*(uintptr_t*)&r &= ~1UL;
826 		if (!fresh && !mopts.malloc_freeunmap)
827 			validate_junk(d, r, sz);
828 		if (munmap(r, sz))
829 			wrterror(d, "munmap %p", r);
830 		STATS_SUB(d->malloc_used, sz);
831 		cache->length--;
832 	} else
833 		i = cache->length;
834 
835 	/* fill slot */
836 	if (clear > 0)
837 		explicit_bzero(p, clear);
838 	if (mopts.malloc_freeunmap)
839 		mprotect(p, sz, PROT_NONE);
840 	else
841 		junk_free(d->malloc_junk, p, sz);
842 	cache->pages[i] = p;
843 	cache->length++;
844 }
845 
846 static void *
847 map(struct dir_info *d, size_t sz, int zero_fill)
848 {
849 	size_t psz = sz >> MALLOC_PAGESHIFT;
850 	u_short i;
851 	void *p;
852 	struct smallcache *cache;
853 
854 	if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)(uintptr_t)d) ||
855 	    d->canary1 != ~d->canary2)
856 		wrterror(d, "internal struct corrupt");
857 	if (sz != PAGEROUND(sz) || psz == 0)
858 		wrterror(d, "map round");
859 
860 
861 	if (d->bigcache_size > 0 && psz > MAX_SMALLCACHEABLE_SIZE &&
862 	    psz <= MAX_BIGCACHEABLE_SIZE) {
863 		size_t base = getrbyte(d);
864 		size_t cached = d->bigcache_used;
865 		ushort j;
866 
867 		for (j = 0; j < d->bigcache_size && cached >= psz; j++) {
868 			i = (j + base) & (d->bigcache_size - 1);
869 			if (d->bigcache[i].psize == psz) {
870 				p = d->bigcache[i].page;
871 				d->bigcache_used -= psz;
872 				d->bigcache[i].page = NULL;
873 				d->bigcache[i].psize = 0;
874 
875 				if (!mopts.malloc_freeunmap)
876 					validate_junk(d, p, sz);
877 				if (mopts.malloc_freeunmap)
878 					mprotect(p, sz, PROT_READ | PROT_WRITE);
879 				if (zero_fill)
880 					memset(p, 0, sz);
881 				else if (mopts.malloc_freeunmap)
882 					junk_free(d->malloc_junk, p, sz);
883 				return p;
884 			}
885 			cached -= d->bigcache[i].psize;
886 		}
887 	}
888 	if (psz <= MAX_SMALLCACHEABLE_SIZE && d->smallcache[psz - 1].max > 0) {
889 		cache = &d->smallcache[psz - 1];
890 		if (cache->length > 0) {
891 			int fresh;
892 			if (cache->length == 1)
893 				p = cache->pages[--cache->length];
894 			else {
895 				i = getrbyte(d) % cache->length;
896 				p = cache->pages[i];
897 				cache->pages[i] = cache->pages[--cache->length];
898 			}
899 			/* check if the page was not junked, i.e. "fresh";
900 			   we use the lsb of the pointer for that */
901 			fresh = (uintptr_t)p & 1UL;
902 			*(uintptr_t*)&p &= ~1UL;
903 			if (!fresh && !mopts.malloc_freeunmap)
904 				validate_junk(d, p, sz);
905 			if (mopts.malloc_freeunmap)
906 				mprotect(p, sz, PROT_READ | PROT_WRITE);
907 			if (zero_fill)
908 				memset(p, 0, sz);
909 			else if (mopts.malloc_freeunmap)
910 				junk_free(d->malloc_junk, p, sz);
911 			return p;
912 		}
913 		if (psz <= 1) {
914 			p = MMAP(cache->max * sz, d->mmap_flag);
915 			if (p != MAP_FAILED) {
916 				STATS_ADD(d->malloc_used, cache->max * sz);
917 				cache->length = cache->max - 1;
918 				for (i = 0; i < cache->max - 1; i++) {
919 					void *q = (char*)p + i * sz;
920 					cache->pages[i] = q;
921 					/* mark pointer in slot as not junked */
922 					*(uintptr_t*)&cache->pages[i] |= 1UL;
923 				}
924 				if (mopts.malloc_freeunmap)
925 					mprotect(p, (cache->max - 1) * sz,
926 					    PROT_NONE);
927 				p = (char*)p + (cache->max - 1) * sz;
928 				/* zero fill not needed, freshly mmapped */
929 				return p;
930 			}
931 		}
932 
933 	}
934 	p = MMAP(sz, d->mmap_flag);
935 	if (p != MAP_FAILED)
936 		STATS_ADD(d->malloc_used, sz);
937 	/* zero fill not needed */
938 	return p;
939 }
940 
941 static void
942 init_chunk_info(struct dir_info *d, struct chunk_info *p, u_int bucket)
943 {
944 	u_int i;
945 
946 	p->bucket = bucket;
947 	p->total = p->free = MALLOC_PAGESIZE / B2ALLOC(bucket);
948 	p->offset = howmany(p->total, MALLOC_BITS);
949 	p->canary = (u_short)d->canary1;
950 
951 	/* set all valid bits in the bitmap */
952  	i = p->total - 1;
953 	memset(p->bits, 0xff, sizeof(p->bits[0]) * (i / MALLOC_BITS));
954 	p->bits[i / MALLOC_BITS] = (2U << (i % MALLOC_BITS)) - 1;
955 }
956 
957 static struct chunk_info *
958 alloc_chunk_info(struct dir_info *d, u_int bucket)
959 {
960 	struct chunk_info *p;
961 
962 	if (LIST_EMPTY(&d->chunk_info_list[bucket])) {
963 		const size_t chunk_pages = 64;
964 		size_t size, count, i;
965 		char *q;
966 
967 		count = MALLOC_PAGESIZE / B2ALLOC(bucket);
968 
969 		size = howmany(count, MALLOC_BITS);
970 		/* see declaration of struct chunk_info */
971 		if (size <= CHUNK_INFO_TAIL)
972 			size = 0;
973 		else
974 			size -= CHUNK_INFO_TAIL;
975 		size = sizeof(struct chunk_info) + size * sizeof(u_short);
976 		if (mopts.chunk_canaries && bucket > 0)
977 			size += count * sizeof(u_short);
978 		size = _ALIGN(size);
979 		count = MALLOC_PAGESIZE / size;
980 
981 		/* Don't use the cache here; a user use-after-free must not touch this metadata */
982 		if (d->chunk_pages_used == chunk_pages ||
983 		     d->chunk_pages == NULL) {
984 			q = MMAP(MALLOC_PAGESIZE * chunk_pages, d->mmap_flag);
985 			if (q == MAP_FAILED)
986 				return NULL;
987 			d->chunk_pages = q;
988 			d->chunk_pages_used = 0;
989 			STATS_ADD(d->malloc_used, MALLOC_PAGESIZE *
990 			    chunk_pages);
991 		}
992 		q = (char *)d->chunk_pages + d->chunk_pages_used *
993 		    MALLOC_PAGESIZE;
994 		d->chunk_pages_used++;
995 
996 		for (i = 0; i < count; i++, q += size) {
997 			p = (struct chunk_info *)q;
998 			LIST_INSERT_HEAD(&d->chunk_info_list[bucket], p, entries);
999 		}
1000 	}
1001 	p = LIST_FIRST(&d->chunk_info_list[bucket]);
1002 	LIST_REMOVE(p, entries);
1003 	if (p->total == 0)
1004 		init_chunk_info(d, p, bucket);
1005 	return p;
1006 }
1007 
1008 /*
1009  * Allocate a page of chunks
1010  */
1011 static struct chunk_info *
1012 omalloc_make_chunks(struct dir_info *d, u_int bucket, u_int listnum)
1013 {
1014 	struct chunk_info *bp;
1015 	void *pp;
1016 	void *ff = NULL;
1017 
1018 	/* Allocate a new bucket */
1019 	pp = map(d, MALLOC_PAGESIZE, 0);
1020 	if (pp == MAP_FAILED)
1021 		return NULL;
1022 	if (DO_STATS) {
1023 		ff = map(d, MALLOC_PAGESIZE, 0);
1024 		if (ff == MAP_FAILED)
1025 			goto err;
1026 		memset(ff, 0, sizeof(void *) * MALLOC_PAGESIZE / B2ALLOC(bucket));
1027 	}
1028 
1029 	/* memory protect the page allocated in the malloc(0) case */
1030 	if (bucket == 0 && mprotect(pp, MALLOC_PAGESIZE, PROT_NONE) == -1)
1031 		goto err;
1032 
1033 	bp = alloc_chunk_info(d, bucket);
1034 	if (bp == NULL)
1035 		goto err;
1036 	bp->page = pp;
1037 
1038 	if (insert(d, (void *)((uintptr_t)pp | (bucket + 1)), (uintptr_t)bp,
1039 	    ff))
1040 		goto err;
1041 	LIST_INSERT_HEAD(&d->chunk_dir[bucket][listnum], bp, entries);
1042 
1043 	if (bucket > 0 && d->malloc_junk != 0)
1044 		memset(pp, SOME_FREEJUNK, MALLOC_PAGESIZE);
1045 
1046 	return bp;
1047 
1048 err:
1049 	unmap(d, pp, MALLOC_PAGESIZE, 0);
1050 	if (ff != NULL && ff != MAP_FAILED)
1051 		unmap(d, ff, MALLOC_PAGESIZE, 0);
1052 	return NULL;
1053 }
1054 
1055 #if defined(__GNUC__) && __GNUC__ < 4
1056 static inline unsigned int
1057 lb(u_int x)
1058 {
1059 #if defined(__m88k__)
1060 	__asm__ __volatile__ ("ff1 %0, %0" : "=r" (x) : "0" (x));
1061 	return x;
1062 #else
1063 	/* portable version */
1064 	unsigned int count = 0;
1065 	while ((x & (1U << (sizeof(int) * CHAR_BIT - 1))) == 0) {
1066 		count++;
1067 		x <<= 1;
1068 	}
1069 	return (sizeof(int) * CHAR_BIT - 1) - count;
1070 #endif
1071 }
1072 #else
1073 /* using built-in function version */
1074 static inline unsigned int
1075 lb(u_int x)
1076 {
1077 	/* I need an extension just for integer-length (: */
1078 	return (sizeof(int) * CHAR_BIT - 1) - __builtin_clz(x);
1079 }
1080 #endif
1081 
1082 /* https://pvk.ca/Blog/2015/06/27/linear-log-bucketing-fast-versatile-simple/
1083    via Tony Finch */
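/*
 * Examples with linear == 6 and subbin == 2: requests below 128 bytes are
 * rounded up to a multiple of 16, larger requests to a multiple of
 * 2^(lb(size) - 2), i.e. four sub-bins per power of two.  For instance
 * 20 -> 32, 100 -> 112, 130 -> 160 and 1000 -> 1024.
 */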
1084 static inline unsigned int
1085 bin_of(unsigned int size)
1086 {
1087 	const unsigned int linear = 6;
1088 	const unsigned int subbin = 2;
1089 
1090 	unsigned int mask, range, rounded, sub_index, rounded_size;
1091 	unsigned int n_bits, shift;
1092 
1093 	n_bits = lb(size | (1U << linear));
1094 	shift = n_bits - subbin;
1095 	mask = (1ULL << shift) - 1;
1096 	rounded = size + mask; /* XXX: overflow. */
1097 	sub_index = rounded >> shift;
1098 	range = n_bits - linear;
1099 
1100 	rounded_size = rounded & ~mask;
1101 	return rounded_size;
1102 }
1103 
1104 static inline u_short
1105 find_bucket(u_short size)
1106 {
1107 	/* malloc(0) is special */
1108 	if (size == 0)
1109 		return 0;
1110 	if (size < MALLOC_MINSIZE)
1111 		size = MALLOC_MINSIZE;
1112 	if (mopts.def_maxcache != 0)
1113 		size = bin_of(size);
1114 	return howmany(size, MALLOC_MINSIZE);
1115 }
1116 
1117 static void
1118 fill_canary(char *ptr, size_t sz, size_t allocated)
1119 {
1120 	size_t check_sz = allocated - sz;
1121 
1122 	if (check_sz > CHUNK_CHECK_LENGTH)
1123 		check_sz = CHUNK_CHECK_LENGTH;
1124 	memset(ptr + sz, mopts.chunk_canaries, check_sz);
1125 }
1126 
1127 /*
1128  * Allocate a chunk
1129  */
1130 static void *
1131 malloc_bytes(struct dir_info *d, size_t size)
1132 {
1133 	u_int i, k, r, bucket, listnum;
1134 	u_short	*lp;
1135 	struct chunk_info *bp;
1136 	void *p;
1137 
1138 	if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)(uintptr_t)d) ||
1139 	    d->canary1 != ~d->canary2)
1140 		wrterror(d, "internal struct corrupt");
1141 
1142 	bucket = find_bucket(size);
1143 
1144 	r = ((u_int)getrbyte(d) << 8) | getrbyte(d);
1145 	listnum = r % MALLOC_CHUNK_LISTS;
1146 
1147 	/* If the list is empty, allocate another page worth of chunks of this size */
1148 	if ((bp = LIST_FIRST(&d->chunk_dir[bucket][listnum])) == NULL) {
1149 		bp = omalloc_make_chunks(d, bucket, listnum);
1150 		if (bp == NULL)
1151 			return NULL;
1152 	}
1153 
1154 	if (bp->canary != (u_short)d->canary1)
1155 		wrterror(d, "chunk info corrupted");
1156 
1157 	/* bias, as bp->total is not a power of 2 */
1158 	i = (r / MALLOC_CHUNK_LISTS) % bp->total;
1159 
1160 	/* potentially start somewhere in a short */
1161 	lp = &bp->bits[i / MALLOC_BITS];
1162 	if (*lp) {
1163 		int j = i % MALLOC_BITS; /* j must be signed */
1164 		k = ffs(*lp >> j);
1165 		if (k != 0) {
1166 			k += j - 1;
1167 			goto found;
1168 		}
1169 	}
1170 	/* no bit halfway, go to next full short */
1171 	i /= MALLOC_BITS;
1172 	for (;;) {
1173 		if (++i >= bp->offset)
1174 			i = 0;
1175 		lp = &bp->bits[i];
1176 		if (*lp) {
1177 			k = ffs(*lp) - 1;
1178 			break;
1179 		}
1180 	}
1181 found:
1182 	*lp ^= 1 << k;
1183 
1184 	/* If there are no more free, remove from free-list */
1185 	if (--bp->free == 0)
1186 		LIST_REMOVE(bp, entries);
1187 
1188 	/* Adjust to the real offset of that chunk */
1189 	k += (lp - bp->bits) * MALLOC_BITS;
1190 
1191 	if (mopts.chunk_canaries && size > 0)
1192 		bp->bits[bp->offset + k] = size;
1193 
1194 	if (DO_STATS) {
1195 		struct region_info *r = find(d, bp->page);
1196 		STATS_SETFN(r, k, d->caller);
1197 	}
1198 
1199 	k *= B2ALLOC(bp->bucket);
1200 
1201 	p = (char *)bp->page + k;
1202 	if (bp->bucket > 0) {
1203 		validate_junk(d, p, B2SIZE(bp->bucket));
1204 		if (mopts.chunk_canaries)
1205 			fill_canary(p, size, B2SIZE(bp->bucket));
1206 	}
1207 	return p;
1208 }
1209 
1210 static void
1211 validate_canary(struct dir_info *d, u_char *ptr, size_t sz, size_t allocated)
1212 {
1213 	size_t check_sz = allocated - sz;
1214 	u_char *p, *q;
1215 
1216 	if (check_sz > CHUNK_CHECK_LENGTH)
1217 		check_sz = CHUNK_CHECK_LENGTH;
1218 	p = ptr + sz;
1219 	q = p + check_sz;
1220 
1221 	while (p < q) {
1222 		if (*p != (u_char)mopts.chunk_canaries && *p != SOME_JUNK) {
1223 			wrterror(d, "canary corrupted %p[%tu]@%zu/%zu%s",
1224 			    ptr, p - ptr, sz, allocated,
1225 			    *p == SOME_FREEJUNK ? " (double free?)" : "");
1226 		}
1227 		p++;
1228 	}
1229 }
1230 
1231 static inline uint32_t
1232 find_chunknum(struct dir_info *d, struct chunk_info *info, void *ptr, int check)
1233 {
1234 	uint32_t chunknum;
1235 
1236 	if (info->canary != (u_short)d->canary1)
1237 		wrterror(d, "chunk info corrupted");
1238 
1239 	/* Find the chunk number on the page */
1240 	chunknum = ((uintptr_t)ptr & MALLOC_PAGEMASK) / B2ALLOC(info->bucket);
1241 
1242 	if ((uintptr_t)ptr & (MALLOC_MINSIZE - 1))
1243 		wrterror(d, "modified chunk-pointer %p", ptr);
1244 	if (CHUNK_FREE(info, chunknum))
1245 		wrterror(d, "double free %p", ptr);
1246 	if (check && info->bucket > 0) {
1247 		validate_canary(d, ptr, info->bits[info->offset + chunknum],
1248 		    B2SIZE(info->bucket));
1249 	}
1250 	return chunknum;
1251 }
1252 
1253 /*
1254  * Free a chunk, and possibly the page it's on, if the page becomes empty.
1255  */
1256 static void
1257 free_bytes(struct dir_info *d, struct region_info *r, void *ptr)
1258 {
1259 	struct chunk_head *mp;
1260 	struct chunk_info *info;
1261 	uint32_t chunknum;
1262 	uint32_t listnum;
1263 
1264 	info = (struct chunk_info *)r->size;
1265 	chunknum = find_chunknum(d, info, ptr, 0);
1266 
1267 	info->bits[chunknum / MALLOC_BITS] |= 1U << (chunknum % MALLOC_BITS);
1268 	info->free++;
1269 
1270 	if (info->free == 1) {
1271 		/* Page became non-full */
1272 		listnum = getrbyte(d) % MALLOC_CHUNK_LISTS;
1273 		mp = &d->chunk_dir[info->bucket][listnum];
1274 		LIST_INSERT_HEAD(mp, info, entries);
1275 		return;
1276 	}
1277 
1278 	if (info->free != info->total)
1279 		return;
1280 
1281 	LIST_REMOVE(info, entries);
1282 
1283 	if (info->bucket == 0 && !mopts.malloc_freeunmap)
1284 		mprotect(info->page, MALLOC_PAGESIZE, PROT_READ | PROT_WRITE);
1285 	unmap(d, info->page, MALLOC_PAGESIZE, 0);
1286 #ifdef MALLOC_STATS
1287 	if (r->f != NULL) {
1288 		unmap(d, r->f, MALLOC_PAGESIZE, MALLOC_PAGESIZE);
1289 		r->f = NULL;
1290 	}
1291 #endif
1292 
1293 	delete(d, r);
1294 	mp = &d->chunk_info_list[info->bucket];
1295 	LIST_INSERT_HEAD(mp, info, entries);
1296 }
1297 
1298 static void *
1299 omalloc(struct dir_info *pool, size_t sz, int zero_fill)
1300 {
1301 	void *p, *caller = NULL;
1302 	size_t psz;
1303 
1304 	if (sz > MALLOC_MAXCHUNK) {
1305 		if (sz >= SIZE_MAX - mopts.malloc_guard - MALLOC_PAGESIZE) {
1306 			errno = ENOMEM;
1307 			return NULL;
1308 		}
1309 		sz += mopts.malloc_guard;
1310 		psz = PAGEROUND(sz);
1311 		p = map(pool, psz, zero_fill);
1312 		if (p == MAP_FAILED) {
1313 			errno = ENOMEM;
1314 			return NULL;
1315 		}
1316 #ifdef MALLOC_STATS
1317 		if (DO_STATS)
1318 			caller = pool->caller;
1319 #endif
1320 		if (insert(pool, p, sz, caller)) {
1321 			unmap(pool, p, psz, 0);
1322 			errno = ENOMEM;
1323 			return NULL;
1324 		}
1325 		if (mopts.malloc_guard) {
1326 			if (mprotect((char *)p + psz - mopts.malloc_guard,
1327 			    mopts.malloc_guard, PROT_NONE))
1328 				wrterror(pool, "mprotect");
1329 			STATS_ADD(pool->malloc_guarded, mopts.malloc_guard);
1330 		}
1331 
1332 		if (MALLOC_MOVE_COND(sz)) {
1333 			/* fill whole allocation */
1334 			if (pool->malloc_junk == 2)
1335 				memset(p, SOME_JUNK, psz - mopts.malloc_guard);
1336 			/* shift towards the end */
1337 			p = MALLOC_MOVE(p, sz);
1338 			/* fill zeros if needed and overwritten above */
1339 			if (zero_fill && pool->malloc_junk == 2)
1340 				memset(p, 0, sz - mopts.malloc_guard);
1341 		} else {
1342 			if (pool->malloc_junk == 2) {
1343 				if (zero_fill)
1344 					memset((char *)p + sz -
1345 					    mopts.malloc_guard, SOME_JUNK,
1346 					    psz - sz);
1347 				else
1348 					memset(p, SOME_JUNK,
1349 					    psz - mopts.malloc_guard);
1350 			} else if (mopts.chunk_canaries)
1351 				fill_canary(p, sz - mopts.malloc_guard,
1352 				    psz - mopts.malloc_guard);
1353 		}
1354 
1355 	} else {
1356 		/* takes care of SOME_JUNK */
1357 		p = malloc_bytes(pool, sz);
1358 		if (zero_fill && p != NULL && sz > 0)
1359 			memset(p, 0, sz);
1360 	}
1361 
1362 	return p;
1363 }
1364 
1365 /*
1366  * Common function for handling recursion.  Only
1367  * print the error message once, to avoid making the problem
1368  * potentially worse.
1369  */
1370 static void
1371 malloc_recurse(struct dir_info *d)
1372 {
1373 	static int noprint;
1374 
1375 	if (noprint == 0) {
1376 		noprint = 1;
1377 		wrterror(d, "recursive call");
1378 	}
1379 	d->active--;
1380 	_MALLOC_UNLOCK(d->mutex);
1381 	errno = EDEADLK;
1382 }
1383 
1384 void
1385 _malloc_init(int from_rthreads)
1386 {
1387 	u_int i, j, nmutexes;
1388 	struct dir_info *d;
1389 
1390 	_MALLOC_LOCK(1);
1391 	if (!from_rthreads && mopts.malloc_pool[1]) {
1392 		_MALLOC_UNLOCK(1);
1393 		return;
1394 	}
1395 	if (!mopts.malloc_canary) {
1396 		char *p;
1397 		size_t sz, d_avail;
1398 
1399 		omalloc_init();
1400 		/*
1401 		 * Allocate dir_infos with a guard page on either side. Also
1402 		 * randomise offset inside the page at which the dir_infos
1403 		 * lie (subject to alignment by 1 << MALLOC_MINSHIFT)
1404 		 */
1405 		sz = mopts.malloc_mutexes * sizeof(*d) + 2 * MALLOC_PAGESIZE;
1406 		if ((p = MMAPNONE(sz, 0)) == MAP_FAILED)
1407 			wrterror(NULL, "malloc_init mmap1 failed");
1408 		if (mprotect(p + MALLOC_PAGESIZE, mopts.malloc_mutexes * sizeof(*d),
1409 		    PROT_READ | PROT_WRITE))
1410 			wrterror(NULL, "malloc_init mprotect1 failed");
1411 		if (mimmutable(p, sz))
1412 			wrterror(NULL, "malloc_init mimmutable1 failed");
1413 		d_avail = (((mopts.malloc_mutexes * sizeof(*d) + MALLOC_PAGEMASK) &
1414 		    ~MALLOC_PAGEMASK) - (mopts.malloc_mutexes * sizeof(*d))) >>
1415 		    MALLOC_MINSHIFT;
1416 		d = (struct dir_info *)(p + MALLOC_PAGESIZE +
1417 		    (arc4random_uniform(d_avail) << MALLOC_MINSHIFT));
1418 		STATS_ADD(d[1].malloc_used, sz);
1419 		for (i = 0; i < mopts.malloc_mutexes; i++)
1420 			mopts.malloc_pool[i] = &d[i];
1421 		mopts.internal_funcs = 1;
1422 		if (((uintptr_t)&malloc_readonly & MALLOC_PAGEMASK) == 0) {
1423 			if (mprotect(&malloc_readonly, sizeof(malloc_readonly),
1424 			    PROT_READ))
1425 				wrterror(NULL, "malloc_init mprotect r/o failed");
1426 			if (mimmutable(&malloc_readonly, sizeof(malloc_readonly)))
1427 				wrterror(NULL, "malloc_init mimmutable r/o failed");
1428 		}
1429 	}
1430 
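	/*
	 * Pool 0 is reserved for malloc_conceal() and calloc_conceal(): it
	 * maps with MAP_CONCEAL, uses full junking (level 2) and never
	 * caches freed pages.  The remaining pools serve regular
	 * allocations, one per mutex.
	 */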
1431 	nmutexes = from_rthreads ? mopts.malloc_mutexes : 2;
1432 	for (i = 0; i < nmutexes; i++) {
1433 		d = mopts.malloc_pool[i];
1434 		d->malloc_mt = from_rthreads;
1435 		if (d->canary1 == ~d->canary2)
1436 			continue;
1437 		if (i == 0) {
1438 			omalloc_poolinit(d, MAP_CONCEAL);
1439 			d->malloc_junk = 2;
1440 			d->bigcache_size = 0;
1441 			for (j = 0; j < MAX_SMALLCACHEABLE_SIZE; j++)
1442 				d->smallcache[j].max = 0;
1443 		} else {
1444 			size_t sz = 0;
1445 
1446 			omalloc_poolinit(d, 0);
1447 			d->malloc_junk = mopts.def_malloc_junk;
1448 			d->bigcache_size = mopts.def_maxcache;
1449 			for (j = 0; j < MAX_SMALLCACHEABLE_SIZE; j++) {
1450 				d->smallcache[j].max =
1451 				    mopts.def_maxcache >> (j / 8);
1452 				sz += d->smallcache[j].max * sizeof(void *);
1453 			}
1454 			sz += d->bigcache_size * sizeof(struct bigcache);
1455 			if (sz > 0) {
1456 				void *p = MMAP(sz, 0);
1457 				if (p == MAP_FAILED)
1458 					wrterror(NULL,
1459 					    "malloc_init mmap2 failed");
1460 				if (mimmutable(p, sz))
1461 					wrterror(NULL, "malloc_init mimmutable2 failed");
1462 				for (j = 0; j < MAX_SMALLCACHEABLE_SIZE; j++) {
1463 					d->smallcache[j].pages = p;
1464 					p = (char *)p + d->smallcache[j].max *
1465 					    sizeof(void *);
1466 				}
1467 				d->bigcache = p;
1468 			}
1469 		}
1470 		d->mutex = i;
1471 	}
1472 
1473 	_MALLOC_UNLOCK(1);
1474 }
1475 DEF_STRONG(_malloc_init);
1476 
1477 #define PROLOGUE(p, fn)			\
1478 	d = (p); 			\
1479 	if (d == NULL) { 		\
1480 		_malloc_init(0);	\
1481 		d = (p);		\
1482 	}				\
1483 	_MALLOC_LOCK(d->mutex);		\
1484 	d->func = fn;			\
1485 	if (d->active++) {		\
1486 		malloc_recurse(d);	\
1487 		return NULL;		\
1488 	}				\
1489 
1490 #define EPILOGUE()				\
1491 	d->active--;				\
1492 	_MALLOC_UNLOCK(d->mutex);		\
1493 	if (r == NULL && mopts.malloc_xmalloc)	\
1494 		wrterror(d, "out of memory");	\
1495 	if (r != NULL)				\
1496 		errno = saved_errno;		\
1497 
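/*
 * Every public entry point follows the same pattern: PROLOGUE initialises
 * malloc on first use, picks and locks a pool and detects recursive calls;
 * EPILOGUE drops the lock, honours the X option by aborting on failure and
 * restores the caller's errno on success.
 */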
1498 void *
1499 malloc(size_t size)
1500 {
1501 	void *r;
1502 	struct dir_info *d;
1503 	int saved_errno = errno;
1504 
1505 	PROLOGUE(getpool(), "malloc")
1506 	SET_CALLER(d, caller());
1507 	r = omalloc(d, size, 0);
1508 	EPILOGUE()
1509 	return r;
1510 }
1511 DEF_STRONG(malloc);
1512 
1513 void *
1514 malloc_conceal(size_t size)
1515 {
1516 	void *r;
1517 	struct dir_info *d;
1518 	int saved_errno = errno;
1519 
1520 	PROLOGUE(mopts.malloc_pool[0], "malloc_conceal")
1521 	SET_CALLER(d, caller());
1522 	r = omalloc(d, size, 0);
1523 	EPILOGUE()
1524 	return r;
1525 }
1526 DEF_WEAK(malloc_conceal);
1527 
1528 static struct region_info *
1529 findpool(void *p, struct dir_info *argpool, struct dir_info **foundpool,
1530     const char ** saved_function)
1531 {
1532 	struct dir_info *pool = argpool;
1533 	struct region_info *r = find(pool, p);
1534 
1535 	if (r == NULL) {
1536 		u_int i, nmutexes;
1537 
1538 		nmutexes = mopts.malloc_pool[1]->malloc_mt ? mopts.malloc_mutexes : 2;
1539 		for (i = 1; i < nmutexes; i++) {
1540 			u_int j = (argpool->mutex + i) & (nmutexes - 1);
1541 
1542 			pool->active--;
1543 			_MALLOC_UNLOCK(pool->mutex);
1544 			pool = mopts.malloc_pool[j];
1545 			_MALLOC_LOCK(pool->mutex);
1546 			pool->active++;
1547 			r = find(pool, p);
1548 			if (r != NULL) {
1549 				*saved_function = pool->func;
1550 				pool->func = argpool->func;
1551 				break;
1552 			}
1553 		}
1554 		if (r == NULL)
1555 			wrterror(argpool, "bogus pointer (double free?) %p", p);
1556 	}
1557 	*foundpool = pool;
1558 	return r;
1559 }
1560 
1561 static void
1562 ofree(struct dir_info **argpool, void *p, int clear, int check, size_t argsz)
1563 {
1564 	struct region_info *r;
1565 	struct dir_info *pool;
1566 	const char *saved_function;
1567 	size_t sz;
1568 
1569 	r = findpool(p, *argpool, &pool, &saved_function);
1570 
1571 	REALSIZE(sz, r);
1572 	if (pool->mmap_flag) {
1573 		clear = 1;
1574 		if (!check) {
1575 			argsz = sz;
1576 			if (sz > MALLOC_MAXCHUNK)
1577 				argsz -= mopts.malloc_guard;
1578 		}
1579 	}
1580 	if (check) {
1581 		if (sz <= MALLOC_MAXCHUNK) {
1582 			if (mopts.chunk_canaries && sz > 0) {
1583 				struct chunk_info *info =
1584 				    (struct chunk_info *)r->size;
1585 				uint32_t chunknum =
1586 				    find_chunknum(pool, info, p, 0);
1587 
1588 				if (info->bits[info->offset + chunknum] < argsz)
1589 					wrterror(pool, "recorded size %hu"
1590 					    " < %zu",
1591 					    info->bits[info->offset + chunknum],
1592 					    argsz);
1593 			} else {
1594 				if (sz < argsz)
1595 					wrterror(pool, "chunk size %zu < %zu",
1596 					    sz, argsz);
1597 			}
1598 		} else if (sz - mopts.malloc_guard < argsz) {
1599 			wrterror(pool, "recorded size %zu < %zu",
1600 			    sz - mopts.malloc_guard, argsz);
1601 		}
1602 	}
1603 	if (sz > MALLOC_MAXCHUNK) {
1604 		if (!MALLOC_MOVE_COND(sz)) {
1605 			if (r->p != p)
1606 				wrterror(pool, "bogus pointer %p", p);
1607 			if (mopts.chunk_canaries)
1608 				validate_canary(pool, p,
1609 				    sz - mopts.malloc_guard,
1610 				    PAGEROUND(sz - mopts.malloc_guard));
1611 		} else {
1612 			/* shifted towards the end */
1613 			if (p != MALLOC_MOVE(r->p, sz))
1614 				wrterror(pool, "bogus moved pointer %p", p);
1615 			p = r->p;
1616 		}
1617 		if (mopts.malloc_guard) {
1618 			if (sz < mopts.malloc_guard)
1619 				wrterror(pool, "guard size");
1620 			if (!mopts.malloc_freeunmap) {
1621 				if (mprotect((char *)p + PAGEROUND(sz) -
1622 				    mopts.malloc_guard, mopts.malloc_guard,
1623 				    PROT_READ | PROT_WRITE))
1624 					wrterror(pool, "mprotect");
1625 			}
1626 			STATS_SUB(pool->malloc_guarded, mopts.malloc_guard);
1627 		}
1628 		unmap(pool, p, PAGEROUND(sz), clear ? argsz : 0);
1629 		delete(pool, r);
1630 	} else {
1631 		void *tmp;
1632 		u_int i;
1633 
1634 		/* Validate and optionally canary check */
1635 		struct chunk_info *info = (struct chunk_info *)r->size;
1636 		if (B2SIZE(info->bucket) != sz)
1637 			wrterror(pool, "internal struct corrupt");
1638 		find_chunknum(pool, info, p, mopts.chunk_canaries);
1639 
1640 		if (mopts.malloc_freecheck) {
1641 			for (i = 0; i <= MALLOC_DELAYED_CHUNK_MASK; i++) {
1642 				tmp = pool->delayed_chunks[i];
1643 				if (tmp == p)
1644 					wrterror(pool,
1645 					    "double free %p", p);
1646 				if (tmp != NULL) {
1647 					size_t tmpsz;
1648 
1649 					r = find(pool, tmp);
1650 					if (r == NULL)
1651 						wrterror(pool,
1652 						    "bogus pointer ("
1653 						    "double free?) %p", tmp);
1654 					REALSIZE(tmpsz, r);
1655 					validate_junk(pool, tmp, tmpsz);
1656 				}
1657 			}
1658 		}
1659 
1660 		if (clear && argsz > 0)
1661 			explicit_bzero(p, argsz);
1662 		junk_free(pool->malloc_junk, p, sz);
1663 
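		/*
		 * Swap the freed chunk with a random slot in the delayed
		 * free ring; the chunk previously occupying that slot (if
		 * any) is the one actually released below, after its junk
		 * pattern has been validated.  This delays and reorders
		 * reuse of freed chunks.
		 */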
1664 		i = getrbyte(pool) & MALLOC_DELAYED_CHUNK_MASK;
1665 		tmp = p;
1666 		p = pool->delayed_chunks[i];
1667 		if (tmp == p)
1668 			wrterror(pool, "double free %p", p);
1669 		pool->delayed_chunks[i] = tmp;
1670 		if (p != NULL) {
1671 			r = find(pool, p);
1672 			if (r == NULL)
1673 				wrterror(pool,
1674 				    "bogus pointer (double free?) %p", p);
1675 			if (!mopts.malloc_freecheck) {
1676 				REALSIZE(sz, r);
1677 				validate_junk(pool, p, sz);
1678 			}
1679 			free_bytes(pool, r, p);
1680 		}
1681 	}
1682 
1683 	if (*argpool != pool) {
1684 		pool->func = saved_function;
1685 		*argpool = pool;
1686 	}
1687 }
1688 
1689 void
1690 free(void *ptr)
1691 {
1692 	struct dir_info *d;
1693 	int saved_errno = errno;
1694 
1695 	/* This is legal. */
1696 	if (ptr == NULL)
1697 		return;
1698 
1699 	d = getpool();
1700 	if (d == NULL)
1701 		wrterror(d, "free() called before allocation");
1702 	_MALLOC_LOCK(d->mutex);
1703 	d->func = "free";
1704 	if (d->active++) {
1705 		malloc_recurse(d);
1706 		return;
1707 	}
1708 	ofree(&d, ptr, 0, 0, 0);
1709 	d->active--;
1710 	_MALLOC_UNLOCK(d->mutex);
1711 	errno = saved_errno;
1712 }
1713 DEF_STRONG(free);
1714 
1715 static void
1716 freezero_p(void *ptr, size_t sz)
1717 {
1718 	explicit_bzero(ptr, sz);
1719 	free(ptr);
1720 }
1721 
1722 void
1723 freezero(void *ptr, size_t sz)
1724 {
1725 	struct dir_info *d;
1726 	int saved_errno = errno;
1727 
1728 	/* This is legal. */
1729 	if (ptr == NULL)
1730 		return;
1731 
1732 	if (!mopts.internal_funcs) {
1733 		freezero_p(ptr, sz);
1734 		return;
1735 	}
1736 
1737 	d = getpool();
1738 	if (d == NULL)
1739 		wrterror(d, "freezero() called before allocation");
1740 	_MALLOC_LOCK(d->mutex);
1741 	d->func = "freezero";
1742 	if (d->active++) {
1743 		malloc_recurse(d);
1744 		return;
1745 	}
1746 	ofree(&d, ptr, 1, 1, sz);
1747 	d->active--;
1748 	_MALLOC_UNLOCK(d->mutex);
1749 	errno = saved_errno;
1750 }
1751 DEF_WEAK(freezero);
1752 
1753 static void *
1754 orealloc(struct dir_info **argpool, void *p, size_t newsz)
1755 {
1756 	struct region_info *r;
1757 	struct dir_info *pool;
1758 	const char *saved_function;
1759 	struct chunk_info *info;
1760 	size_t oldsz, goldsz, gnewsz;
1761 	void *q, *ret;
1762 	uint32_t chunknum;
1763 	int forced;
1764 
1765 	if (p == NULL)
1766 		return omalloc(*argpool, newsz, 0);
1767 
1768 	if (newsz >= SIZE_MAX - mopts.malloc_guard - MALLOC_PAGESIZE) {
1769 		errno = ENOMEM;
1770 		return  NULL;
1771 	}
1772 
1773 	r = findpool(p, *argpool, &pool, &saved_function);
1774 
1775 	REALSIZE(oldsz, r);
1776 	if (oldsz <= MALLOC_MAXCHUNK) {
1777 		if (DO_STATS || mopts.chunk_canaries) {
1778 			info = (struct chunk_info *)r->size;
1779 			chunknum = find_chunknum(pool, info, p, 0);
1780 		}
1781 	}
1782 
1783 	goldsz = oldsz;
1784 	if (oldsz > MALLOC_MAXCHUNK) {
1785 		if (oldsz < mopts.malloc_guard)
1786 			wrterror(pool, "guard size");
1787 		oldsz -= mopts.malloc_guard;
1788 	}
1789 
1790 	gnewsz = newsz;
1791 	if (gnewsz > MALLOC_MAXCHUNK)
1792 		gnewsz += mopts.malloc_guard;
1793 
1794 	forced = mopts.malloc_realloc || pool->mmap_flag;
1795 	if (newsz > MALLOC_MAXCHUNK && oldsz > MALLOC_MAXCHUNK && !forced) {
1796 		/* Both the old and the new allocation are page-sized: try to
1797 		   grow, shrink or reuse the existing pages in place */
1798 		size_t roldsz = PAGEROUND(goldsz);
1799 		size_t rnewsz = PAGEROUND(gnewsz);
1800 
1801 		if (rnewsz < roldsz && rnewsz > roldsz / 2 &&
1802 		    roldsz - rnewsz < mopts.def_maxcache * MALLOC_PAGESIZE &&
1803 		    !mopts.malloc_guard) {
1804 
1805 			ret = p;
1806 			goto done;
1807 		}
1808 
1809 		if (rnewsz > roldsz) {
1810 			/* try to extend existing region */
1811 			if (!mopts.malloc_guard) {
1812 				void *hint = (char *)r->p + roldsz;
1813 				size_t needed = rnewsz - roldsz;
1814 
1815 				STATS_INC(pool->cheap_realloc_tries);
1816 				q = MMAPA(hint, needed, MAP_FIXED | __MAP_NOREPLACE | pool->mmap_flag);
1817 				if (q == hint) {
1818 					STATS_ADD(pool->malloc_used, needed);
1819 					if (pool->malloc_junk == 2)
1820 						memset(q, SOME_JUNK, needed);
1821 					r->size = gnewsz;
1822 					if (r->p != p) {
1823 						/* old pointer is moved */
1824 						memmove(r->p, p, oldsz);
1825 						p = r->p;
1826 					}
1827 					if (mopts.chunk_canaries)
1828 						fill_canary(p, newsz,
1829 						    PAGEROUND(newsz));
1830 					STATS_SETF(r, (*argpool)->caller);
1831 					STATS_INC(pool->cheap_reallocs);
1832 					ret = p;
1833 					goto done;
1834 				}
1835 			}
1836 		} else if (rnewsz < roldsz) {
1837 			/* shrink number of pages */
1838 			if (mopts.malloc_guard) {
1839 				if (mprotect((char *)r->p + rnewsz -
1840 				    mopts.malloc_guard, mopts.malloc_guard,
1841 				    PROT_NONE))
1842 					wrterror(pool, "mprotect");
1843 			}
1844 			if (munmap((char *)r->p + rnewsz, roldsz - rnewsz))
1845 				wrterror(pool, "munmap %p", (char *)r->p +
1846 				    rnewsz);
1847 			STATS_SUB(pool->malloc_used, roldsz - rnewsz);
1848 			r->size = gnewsz;
1849 			if (MALLOC_MOVE_COND(gnewsz)) {
1850 				void *pp = MALLOC_MOVE(r->p, gnewsz);
1851 				memmove(pp, p, newsz);
1852 				p = pp;
1853 			} else if (mopts.chunk_canaries)
1854 				fill_canary(p, newsz, PAGEROUND(newsz));
1855 			STATS_SETF(r, (*argpool)->caller);
1856 			ret = p;
1857 			goto done;
1858 		} else {
1859 			/* number of pages remains the same */
1860 			void *pp = r->p;
1861 
1862 			r->size = gnewsz;
1863 			if (MALLOC_MOVE_COND(gnewsz))
1864 				pp = MALLOC_MOVE(r->p, gnewsz);
1865 			if (p != pp) {
1866 				memmove(pp, p, oldsz < newsz ? oldsz : newsz);
1867 				p = pp;
1868 			}
1869 			if (p == r->p) {
1870 				if (newsz > oldsz && pool->malloc_junk == 2)
1871 					memset((char *)p + newsz, SOME_JUNK,
1872 					    rnewsz - mopts.malloc_guard -
1873 					    newsz);
1874 				if (mopts.chunk_canaries)
1875 					fill_canary(p, newsz, PAGEROUND(newsz));
1876 			}
1877 			STATS_SETF(r, (*argpool)->caller);
1878 			ret = p;
1879 			goto done;
1880 		}
1881 	}
1882 	if (oldsz <= MALLOC_MAXCHUNK && oldsz > 0 &&
1883 	    newsz <= MALLOC_MAXCHUNK && newsz > 0 &&
1884 	    !forced && find_bucket(newsz) == find_bucket(oldsz)) {
1885 		/* do not reallocate if the new size fits well in the existing chunk */
1886 		if (pool->malloc_junk == 2)
1887 			memset((char *)p + newsz, SOME_JUNK, oldsz - newsz);
1888 		if (mopts.chunk_canaries) {
1889 			info->bits[info->offset + chunknum] = newsz;
1890 			fill_canary(p, newsz, B2SIZE(info->bucket));
1891 		}
1892 		if (DO_STATS)
1893 			STATS_SETFN(r, chunknum, (*argpool)->caller);
1894 		ret = p;
1895 	} else if (newsz != oldsz || forced) {
1896 		/* create new allocation */
1897 		q = omalloc(pool, newsz, 0);
1898 		if (q == NULL) {
1899 			ret = NULL;
1900 			goto done;
1901 		}
1902 		if (newsz != 0 && oldsz != 0)
1903 			memcpy(q, p, oldsz < newsz ? oldsz : newsz);
1904 		ofree(&pool, p, 0, 0, 0);
1905 		ret = q;
1906 	} else {
1907 		/* oldsz == newsz */
1908 		if (newsz != 0)
1909 			wrterror(pool, "realloc internal inconsistency");
1910 		if (DO_STATS)
1911 			STATS_SETFN(r, chunknum, (*argpool)->caller);
1912 		ret = p;
1913 	}
1914 done:
1915 	if (*argpool != pool) {
1916 		pool->func = saved_function;
1917 		*argpool = pool;
1918 	}
1919 	return ret;
1920 }
1921 
1922 void *
1923 realloc(void *ptr, size_t size)
1924 {
1925 	struct dir_info *d;
1926 	void *r;
1927 	int saved_errno = errno;
1928 
1929 	PROLOGUE(getpool(), "realloc")
1930 	SET_CALLER(d, caller());
1931 	r = orealloc(&d, ptr, size);
1932 	EPILOGUE()
1933 	return r;
1934 }
1935 DEF_STRONG(realloc);
1936 
1937 /*
1938  * This is sqrt(SIZE_MAX+1), as s1*s2 <= SIZE_MAX
1939  * if both s1 < MUL_NO_OVERFLOW and s2 < MUL_NO_OVERFLOW
1940  */
1941 #define MUL_NO_OVERFLOW	(1UL << (sizeof(size_t) * 4))
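
/*
 * Example on LP64: MUL_NO_OVERFLOW is 2^32, so if both nmemb and size are
 * below 2^32 their product cannot exceed SIZE_MAX and no division is needed;
 * only when one operand is at least 2^32 does calloc() fall back to the
 * SIZE_MAX / nmemb < size check.
 */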
1942 
1943 void *
1944 calloc(size_t nmemb, size_t size)
1945 {
1946 	struct dir_info *d;
1947 	void *r;
1948 	int saved_errno = errno;
1949 
1950 	PROLOGUE(getpool(), "calloc")
1951 	SET_CALLER(d, caller());
1952 	if ((nmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
1953 	    nmemb > 0 && SIZE_MAX / nmemb < size) {
1954 		d->active--;
1955 		_MALLOC_UNLOCK(d->mutex);
1956 		if (mopts.malloc_xmalloc)
1957 			wrterror(d, "out of memory");
1958 		errno = ENOMEM;
1959 		return NULL;
1960 	}
1961 
1962 	size *= nmemb;
1963 	r = omalloc(d, size, 1);
1964 	EPILOGUE()
1965 	return r;
1966 }
1967 DEF_STRONG(calloc);
1968 
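/*
 * calloc_conceal() is identical to calloc() above except that it
 * always allocates from mopts.malloc_pool[0], the pool used by the
 * *_conceal functions, rather than from the calling thread's pool.
 */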
1969 void *
1970 calloc_conceal(size_t nmemb, size_t size)
1971 {
1972 	struct dir_info *d;
1973 	void *r;
1974 	int saved_errno = errno;
1975 
1976 	PROLOGUE(mopts.malloc_pool[0], "calloc_conceal")
1977 	SET_CALLER(d, caller());
1978 	if ((nmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
1979 	    nmemb > 0 && SIZE_MAX / nmemb < size) {
1980 		d->active--;
1981 		_MALLOC_UNLOCK(d->mutex);
1982 		if (mopts.malloc_xmalloc)
1983 			wrterror(d, "out of memory");
1984 		errno = ENOMEM;
1985 		return NULL;
1986 	}
1987 
1988 	size *= nmemb;
1989 	r = omalloc(d, size, 1);
1990 	EPILOGUE()
1991 	return r;
1992 }
1993 DEF_WEAK(calloc_conceal);
1994 
1995 static void *
1996 orecallocarray(struct dir_info **argpool, void *p, size_t oldsize,
1997     size_t newsize)
1998 {
1999 	struct region_info *r;
2000 	struct dir_info *pool;
2001 	const char *saved_function;
2002 	void *newptr;
2003 	size_t sz;
2004 
2005 	if (p == NULL)
2006 		return omalloc(*argpool, newsize, 1);
2007 
2008 	if (oldsize == newsize)
2009 		return p;
2010 
2011 	r = findpool(p, *argpool, &pool, &saved_function);
2012 
2013 	REALSIZE(sz, r);
2014 	if (sz <= MALLOC_MAXCHUNK) {
2015 		if (mopts.chunk_canaries && sz > 0) {
2016 			struct chunk_info *info = (struct chunk_info *)r->size;
2017 			uint32_t chunknum = find_chunknum(pool, info, p, 0);
2018 
2019 			if (info->bits[info->offset + chunknum] != oldsize)
2020 				wrterror(pool, "recorded size %hu != %zu",
2021 				    info->bits[info->offset + chunknum],
2022 				    oldsize);
2023 		} else {
2024 			if (sz < oldsize)
2025 				wrterror(pool, "chunk size %zu < %zu",
2026 				    sz, oldsize);
2027 		}
2028 	} else {
2029 		if (sz - mopts.malloc_guard < oldsize)
2030 			wrterror(pool, "recorded size %zu < %zu",
2031 			    sz - mopts.malloc_guard, oldsize);
2032 		if (oldsize < (sz - mopts.malloc_guard) / 2)
2033 			wrterror(pool, "recorded size %zu inconsistent with %zu",
2034 			    sz - mopts.malloc_guard, oldsize);
2035 	}
2036 
2037 	newptr = omalloc(pool, newsize, 0);
2038 	if (newptr == NULL)
2039 		goto done;
2040 
2041 	if (newsize > oldsize) {
2042 		memcpy(newptr, p, oldsize);
2043 		memset((char *)newptr + oldsize, 0, newsize - oldsize);
2044 	} else
2045 		memcpy(newptr, p, newsize);
2046 
2047 	ofree(&pool, p, 1, 0, oldsize);
2048 
2049 done:
2050 	if (*argpool != pool) {
2051 		pool->func = saved_function;
2052 		*argpool = pool;
2053 	}
2054 
2055 	return newptr;
2056 }
2057 
2058 static void *
2059 recallocarray_p(void *ptr, size_t oldnmemb, size_t newnmemb, size_t size)
2060 {
2061 	size_t oldsize, newsize;
2062 	void *newptr;
2063 
2064 	if (ptr == NULL)
2065 		return calloc(newnmemb, size);
2066 
2067 	if ((newnmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
2068 	    newnmemb > 0 && SIZE_MAX / newnmemb < size) {
2069 		errno = ENOMEM;
2070 		return NULL;
2071 	}
2072 	newsize = newnmemb * size;
2073 
2074 	if ((oldnmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
2075 	    oldnmemb > 0 && SIZE_MAX / oldnmemb < size) {
2076 		errno = EINVAL;
2077 		return NULL;
2078 	}
2079 	oldsize = oldnmemb * size;
2080 
2081 	/*
2082 	 * Don't bother reallocating if we're only shrinking a little;
2083 	 * note that a series of small shrink steps never shrinks, oh well.
2084 	 */
2085 	if (newsize <= oldsize) {
2086 		size_t d = oldsize - newsize;
2087 
2088 		if (d < oldsize / 2 && d < MALLOC_PAGESIZE) {
2089 			memset((char *)ptr + newsize, 0, d);
2090 			return ptr;
2091 		}
2092 	}
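	/*
	 * Worked example (illustrative, assuming 4 KB pages): shrinking
	 * 8000 bytes to 6000 gives d = 2000, less than both half the old
	 * size and a page, so the tail is zeroed and the same pointer is
	 * returned; shrinking to 3000 gives d = 5000 and falls through to
	 * a real reallocation below.
	 */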
2093 
2094 	newptr = malloc(newsize);
2095 	if (newptr == NULL)
2096 		return NULL;
2097 
2098 	if (newsize > oldsize) {
2099 		memcpy(newptr, ptr, oldsize);
2100 		memset((char *)newptr + oldsize, 0, newsize - oldsize);
2101 	} else
2102 		memcpy(newptr, ptr, newsize);
2103 
2104 	explicit_bzero(ptr, oldsize);
2105 	free(ptr);
2106 
2107 	return newptr;
2108 }
2109 
2110 void *
2111 recallocarray(void *ptr, size_t oldnmemb, size_t newnmemb, size_t size)
2112 {
2113 	struct dir_info *d;
2114 	size_t oldsize = 0, newsize;
2115 	void *r;
2116 	int saved_errno = errno;
2117 
2118 	if (!mopts.internal_funcs)
2119 		return recallocarray_p(ptr, oldnmemb, newnmemb, size);
2120 
2121 	PROLOGUE(getpool(), "recallocarray")
2122 	SET_CALLER(d, caller());
2123 
2124 	if ((newnmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
2125 	    newnmemb > 0 && SIZE_MAX / newnmemb < size) {
2126 		d->active--;
2127 		_MALLOC_UNLOCK(d->mutex);
2128 		if (mopts.malloc_xmalloc)
2129 			wrterror(d, "out of memory");
2130 		errno = ENOMEM;
2131 		return NULL;
2132 	}
2133 	newsize = newnmemb * size;
2134 
2135 	if (ptr != NULL) {
2136 		if ((oldnmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
2137 		    oldnmemb > 0 && SIZE_MAX / oldnmemb < size) {
2138 			d->active--;
2139 			_MALLOC_UNLOCK(d->mutex);
2140 			errno = EINVAL;
2141 			return NULL;
2142 		}
2143 		oldsize = oldnmemb * size;
2144 	}
2145 
2146 	r = orecallocarray(&d, ptr, oldsize, newsize);
2147 	EPILOGUE()
2148 	return r;
2149 }
2150 DEF_WEAK(recallocarray);
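/*
 * Illustrative use of recallocarray(): grow an array from n to n + 8
 * zeroed elements, with the released old memory cleared before it is
 * freed.
 *
 *	p = recallocarray(p, n, n + 8, sizeof(*p));
 *	if (p == NULL)
 *		err(1, NULL);
 *	n += 8;
 */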
2151 
2152 static void *
2153 mapalign(struct dir_info *d, size_t alignment, size_t sz, int zero_fill)
2154 {
2155 	char *p, *q;
2156 
2157 	if (alignment < MALLOC_PAGESIZE || ((alignment - 1) & alignment) != 0)
2158 		wrterror(d, "mapalign bad alignment");
2159 	if (sz != PAGEROUND(sz))
2160 		wrterror(d, "mapalign round");
2161 
2162 	/* Allocate sz + alignment bytes of memory, which must include a
2163 	 * subrange of sz bytes that is properly aligned.  Unmap the
2164 	 * other bytes, and then return that subrange.
2165 	 */
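	/*
	 * Worked example (illustrative, assuming 4 KB pages): for a one
	 * page sz and a 64 KB alignment, map() returns 17 pages, say at
	 * p = 0x209000.  q rounds up to 0x210000; the 7 pages in [p, q)
	 * and the 9 pages starting at q + sz are unmapped, leaving one
	 * page mapped on a 64 KB boundary.
	 */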
2166 
2167 	/* We need sz + alignment to fit into a size_t. */
2168 	if (alignment > SIZE_MAX - sz)
2169 		return MAP_FAILED;
2170 
2171 	p = map(d, sz + alignment, zero_fill);
2172 	if (p == MAP_FAILED)
2173 		return MAP_FAILED;
2174 	q = (char *)(((uintptr_t)p + alignment - 1) & ~(alignment - 1));
2175 	if (q != p) {
2176 		if (munmap(p, q - p))
2177 			wrterror(d, "munmap %p", p);
2178 	}
2179 	if (munmap(q + sz, alignment - (q - p)))
2180 		wrterror(d, "munmap %p", q + sz);
2181 	STATS_SUB(d->malloc_used, alignment);
2182 
2183 	return q;
2184 }
2185 
2186 static void *
2187 omemalign(struct dir_info *pool, size_t alignment, size_t sz, int zero_fill)
2188 {
2189 	size_t psz;
2190 	void *p, *caller = NULL;
2191 
2192 	/* If between half a page and a page, avoid MALLOC_MOVE. */
2193 	if (sz > MALLOC_MAXCHUNK && sz < MALLOC_PAGESIZE)
2194 		sz = MALLOC_PAGESIZE;
2195 	if (alignment <= MALLOC_PAGESIZE) {
2196 		size_t pof2;
2197 		/*
2198 		 * max(size, alignment) rounded up to a power of 2 is enough
2199 		 * to ensure the requested alignment. Large regions are
2200 		 * always page aligned.
2201 		 */
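		/*
		 * Worked example (illustrative): a request for 100 bytes
		 * aligned to 64 becomes pof2 = 128.  Power-of-two sized
		 * chunks start at multiples of their size within a page
		 * aligned chunk page, so the result is 128-byte aligned
		 * and therefore also 64-byte aligned.
		 */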
2202 		if (sz < alignment)
2203 			sz = alignment;
2204 		if (sz < MALLOC_PAGESIZE) {
2205 			pof2 = MALLOC_MINSIZE;
2206 			while (pof2 < sz)
2207 				pof2 <<= 1;
2208 		} else
2209 			pof2 = sz;
2210 		return omalloc(pool, pof2, zero_fill);
2211 	}
2212 
2213 	if (sz >= SIZE_MAX - mopts.malloc_guard - MALLOC_PAGESIZE) {
2214 		errno = ENOMEM;
2215 		return NULL;
2216 	}
2217 
2218 	if (sz < MALLOC_PAGESIZE)
2219 		sz = MALLOC_PAGESIZE;
2220 	sz += mopts.malloc_guard;
2221 	psz = PAGEROUND(sz);
2222 
2223 	p = mapalign(pool, alignment, psz, zero_fill);
2224 	if (p == MAP_FAILED) {
2225 		errno = ENOMEM;
2226 		return NULL;
2227 	}
2228 
2229 #ifdef MALLOC_STATS
2230 	if (DO_STATS)
2231 		caller = pool->caller;
2232 #endif
2233 	if (insert(pool, p, sz, caller)) {
2234 		unmap(pool, p, psz, 0);
2235 		errno = ENOMEM;
2236 		return NULL;
2237 	}
2238 
2239 	if (mopts.malloc_guard) {
2240 		if (mprotect((char *)p + psz - mopts.malloc_guard,
2241 		    mopts.malloc_guard, PROT_NONE))
2242 			wrterror(pool, "mprotect");
2243 		STATS_ADD(pool->malloc_guarded, mopts.malloc_guard);
2244 	}
2245 
2246 	if (pool->malloc_junk == 2) {
2247 		if (zero_fill)
2248 			memset((char *)p + sz - mopts.malloc_guard,
2249 			    SOME_JUNK, psz - sz);
2250 		else
2251 			memset(p, SOME_JUNK, psz - mopts.malloc_guard);
2252 	} else if (mopts.chunk_canaries)
2253 		fill_canary(p, sz - mopts.malloc_guard,
2254 		    psz - mopts.malloc_guard);
2255 
2256 	return p;
2257 }
2258 
2259 int
2260 posix_memalign(void **memptr, size_t alignment, size_t size)
2261 {
2262 	struct dir_info *d;
2263 	int res, saved_errno = errno;
2264 	void *r;
2265 
2266 	/* Make sure that alignment is a large enough power of 2. */
2267 	if (((alignment - 1) & alignment) != 0 || alignment < sizeof(void *))
2268 		return EINVAL;
2269 
2270 	d = getpool();
2271 	if (d == NULL) {
2272 		_malloc_init(0);
2273 		d = getpool();
2274 	}
2275 	_MALLOC_LOCK(d->mutex);
2276 	d->func = "posix_memalign";
2277 	if (d->active++) {
2278 		malloc_recurse(d);
2279 		goto err;
2280 	}
2281 	SET_CALLER(d, caller());
2282 	r = omemalign(d, alignment, size, 0);
2283 	d->active--;
2284 	_MALLOC_UNLOCK(d->mutex);
2285 	if (r == NULL) {
2286 		if (mopts.malloc_xmalloc)
2287 			wrterror(d, "out of memory");
2288 		goto err;
2289 	}
2290 	errno = saved_errno;
2291 	*memptr = r;
2292 	return 0;
2293 
2294 err:
2295 	res = errno;
2296 	errno = saved_errno;
2297 	return res;
2298 }
2299 DEF_STRONG(posix_memalign);
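/*
 * Illustrative use: unlike malloc(), posix_memalign() reports failure
 * through its return value and leaves errno unchanged.
 *
 *	void *buf;
 *	int error;
 *
 *	error = posix_memalign(&buf, 64, 8192);
 *	if (error != 0)
 *		errx(1, "posix_memalign: %s", strerror(error));
 */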
2300 
2301 void *
2302 aligned_alloc(size_t alignment, size_t size)
2303 {
2304 	struct dir_info *d;
2305 	int saved_errno = errno;
2306 	void *r;
2307 
2308 	/* Make sure that alignment is a positive power of 2. */
2309 	if (((alignment - 1) & alignment) != 0 || alignment == 0) {
2310 		errno = EINVAL;
2311 		return NULL;
2312 	}
2313 	/* Per spec, size should be a multiple of alignment */
2314 	if ((size & (alignment - 1)) != 0) {
2315 		errno = EINVAL;
2316 		return NULL;
2317 	}
2318 
2319 	PROLOGUE(getpool(), "aligned_alloc")
2320 	SET_CALLER(d, caller());
2321 	r = omemalign(d, alignment, size, 0);
2322 	EPILOGUE()
2323 	return r;
2324 }
2325 DEF_STRONG(aligned_alloc);
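/*
 * Illustrative behaviour of the checks above: aligned_alloc(64, 100)
 * fails with EINVAL since 100 is not a multiple of 64, whereas
 * aligned_alloc(64, 128) returns memory satisfying the 64-byte
 * alignment.
 */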
2326 
2327 #ifdef MALLOC_STATS
2328 
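/*
 * Report a write into a chunk that is currently free: the error names
 * the corrupted byte range, the allocation site of the chunk and the
 * allocation site of the chunk immediately preceding it in the page,
 * the most likely source of an overflow.
 */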
2329 static void
2330 print_chunk_details(struct dir_info *pool, void *p, size_t sz, size_t i)
2331 {
2332 	struct region_info *r;
2333 	struct chunk_info *chunkinfo;
2334 	uint32_t chunknum;
2335 	Dl_info info;
2336 	const char *caller, *pcaller = NULL;
2337 	const char *object = ".";
2338 	const char *pobject = ".";
2339 	const char *msg = "";
2340 
2341 	r = find(pool, p);
2342 	chunkinfo = (struct chunk_info *)r->size;
2343 	chunknum = find_chunknum(pool, chunkinfo, p, 0);
2344 	caller = r->f[chunknum];
2345 	if (dladdr(caller, &info) != 0) {
2346 		caller -= (uintptr_t)info.dli_fbase;
2347 		object = info.dli_fname;
2348 	}
2349 	if (chunknum > 0) {
2350 		chunknum--;
2351 		pcaller = r->f[chunknum];
2352 		if (dladdr(pcaller, &info) != 0) {
2353 			pcaller -= (uintptr_t)info.dli_fbase;
2354 			pobject = info.dli_fname;
2355 		}
2356 		if (CHUNK_FREE(chunkinfo, chunknum))
2357 			msg = " (now free)";
2358 	}
2359 
2360 	wrterror(pool,
2361 	    "write to free chunk %p[%zu..%zu]@%zu allocated at %s %p "
2362 	    "(preceding chunk %p allocated at %s %p%s)",
2363 	    p, i * sizeof(uint64_t),
2364 	    (i + 1) * sizeof(uint64_t) - 1, sz, object, caller, p - sz,
2365 	    pobject, pcaller, msg);
2366 }
2367 
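/*
 * ulog() appends formatted output to a single KTR_USER_MAXLEN sized
 * buffer (mmapped on first use) and hands it to ktrace via utrace(2)
 * once the buffer is full or the output ends in a newline, so the
 * dump routines below can build a line in several calls and still
 * have it arrive as one record.
 */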
2368 static void
2369 ulog(const char *format, ...)
2370 {
2371 	va_list ap;
2372 	static char *buf;
2373 	static size_t filled;
2374 	int len;
2375 
2376 	if (buf == NULL)
2377 		buf = MMAP(KTR_USER_MAXLEN, 0);
2378 	if (buf == MAP_FAILED)
2379 		return;
2380 
2381 	va_start(ap, format);
2382 	len = vsnprintf(buf + filled, KTR_USER_MAXLEN - filled, format, ap);
2383 	va_end(ap);
2384 	if (len < 0)
2385 		return;
2386 	if (len > KTR_USER_MAXLEN - filled)
2387 		len = KTR_USER_MAXLEN - filled;
2388 	filled += len;
2389 	if (filled > 0) {
2390 		if (filled == KTR_USER_MAXLEN || buf[filled - 1] == '\n') {
2391 			utrace("malloc", buf, filled);
2392 			filled = 0;
2393 		}
2394 	}
2395 }
2396 
2397 struct malloc_leak {
2398 	void *f;
2399 	size_t total_size;
2400 	int count;
2401 };
2402 
2403 struct leaknode {
2404 	RBT_ENTRY(leaknode) entry;
2405 	struct malloc_leak d;
2406 };
2407 
2408 static inline int
2409 leakcmp(const struct leaknode *e1, const struct leaknode *e2)
2410 {
2411 	return e1->d.f < e2->d.f ? -1 : e1->d.f > e2->d.f;
2412 }
2413 
2414 RBT_HEAD(leaktree, leaknode);
2415 RBT_PROTOTYPE(leaktree, leaknode, entry, leakcmp);
2416 RBT_GENERATE(leaktree, leaknode, entry, leakcmp);
2417 
2418 static void
2419 wrtwarning(const char *func, char *msg, ...)
2420 {
2421 	int		saved_errno = errno;
2422 	va_list		ap;
2423 
2424 	dprintf(STDERR_FILENO, "%s(%d) in %s(): ", __progname,
2425 	    getpid(), func != NULL ? func : "unknown");
2426 	va_start(ap, msg);
2427 	vdprintf(STDERR_FILENO, msg, ap);
2428 	va_end(ap);
2429 	dprintf(STDERR_FILENO, "\n");
2430 
2431 	errno = saved_errno;
2432 }
2433 
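/*
 * Record an allocation site in the leak tree.  Nodes are keyed by the
 * caller address f and bump-allocated from mmapped pages (a fresh page
 * is mapped once the current one is exhausted); an existing node just
 * accumulates the byte and allocation counts.
 */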
2434 static void
2435 putleakinfo(struct leaktree *leaks, void *f, size_t sz, int cnt)
2436 {
2437 	struct leaknode key, *p;
2438 	static struct leaknode *page;
2439 	static unsigned int used;
2440 
2441 	if (cnt == 0 || page == MAP_FAILED)
2442 		return;
2443 
2444 	key.d.f = f;
2445 	p = RBT_FIND(leaktree, leaks, &key);
2446 	if (p == NULL) {
2447 		if (page == NULL ||
2448 		    used >= MALLOC_PAGESIZE / sizeof(struct leaknode)) {
2449 			page = MMAP(MALLOC_PAGESIZE, 0);
2450 			if (page == MAP_FAILED) {
2451 				wrtwarning(__func__, strerror(errno));
2452 				return;
2453 			}
2454 			used = 0;
2455 		}
2456 		p = &page[used++];
2457 		p->d.f = f;
2458 		p->d.total_size = sz * cnt;
2459 		p->d.count = cnt;
2460 		RBT_INSERT(leaktree, leaks, p);
2461 	} else {
2462 		p->d.total_size += sz * cnt;
2463 		p->d.count += cnt;
2464 	}
2465 }
2466 
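/*
 * Print the accumulated leak records.  Each caller address is shown
 * both as recorded and as an offset from its object's load base, so
 * the suggested "addr2line -e object offset" invocation resolves to a
 * source line regardless of where the object was mapped.
 */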
2467 static void
2468 dump_leaks(struct leaktree *leaks)
2469 {
2470 	struct leaknode *p;
2471 
2472 	ulog("Leak report:\n");
2473 	ulog("                 f     sum      #    avg\n");
2474 
2475 	RBT_FOREACH(p, leaktree, leaks) {
2476 		Dl_info info;
2477 		const char *caller = p->d.f;
2478 		const char *object = ".";
2479 
2480 		if (caller != NULL) {
2481 			if (dladdr(p->d.f, &info) != 0) {
2482 				caller -= (uintptr_t)info.dli_fbase;
2483 				object = info.dli_fname;
2484 			}
2485 		}
2486 		ulog("%18p %7zu %6u %6zu addr2line -e %s %p\n",
2487 		    p->d.f, p->d.total_size, p->d.count,
2488 		    p->d.total_size / p->d.count,
2489 		    object, caller);
2490 	}
2491 }
2492 
2493 static void
2494 dump_chunk(struct leaktree* leaks, struct chunk_info *p, void **f,
2495     int fromfreelist)
2496 {
2497 	while (p != NULL) {
2498 		if (mopts.malloc_verbose)
2499 			ulog("chunk %18p %18p %4zu %d/%d\n",
2500 			    p->page, NULL,
2501 			    B2SIZE(p->bucket), p->free, p->total);
2502 		if (!fromfreelist) {
2503 			size_t i, sz = B2SIZE(p->bucket);
2504 			for (i = 0; i < p->total; i++) {
2505 				if (!CHUNK_FREE(p, i))
2506 					putleakinfo(leaks, f[i], sz, 1);
2507 			}
2508 			break;
2509 		}
2510 		p = LIST_NEXT(p, entries);
2511 		if (mopts.malloc_verbose && p != NULL)
2512 			ulog("       ->");
2513 	}
2514 }
2515 
2516 static void
2517 dump_free_chunk_info(struct dir_info *d, struct leaktree *leaks)
2518 {
2519 	int i, j, count;
2520 	struct chunk_info *p;
2521 
2522 	ulog("Free chunk structs:\n");
2523 	ulog("Bkt) #CI                     page"
2524 	    "                  f size free/n\n");
2525 	for (i = 0; i <= BUCKETS; i++) {
2526 		count = 0;
2527 		LIST_FOREACH(p, &d->chunk_info_list[i], entries)
2528 			count++;
2529 		for (j = 0; j < MALLOC_CHUNK_LISTS; j++) {
2530 			p = LIST_FIRST(&d->chunk_dir[i][j]);
2531 			if (p == NULL && count == 0)
2532 				continue;
2533 			if (j == 0)
2534 				ulog("%3d) %3d ", i, count);
2535 			else
2536 				ulog("         ");
2537 			if (p != NULL)
2538 				dump_chunk(leaks, p, NULL, 1);
2539 			else
2540 				ulog(".\n");
2541 		}
2542 	}
2543 
2544 }
2545 
2546 static void
2547 dump_free_page_info(struct dir_info *d)
2548 {
2549 	struct smallcache *cache;
2550 	size_t i, total = 0;
2551 
2552 	ulog("Cached in small cache:\n");
2553 	for (i = 0; i < MAX_SMALLCACHEABLE_SIZE; i++) {
2554 		cache = &d->smallcache[i];
2555 		if (cache->length != 0)
2556 			ulog("%zu(%u): %u = %zu\n", i + 1, cache->max,
2557 			    cache->length, cache->length * (i + 1));
2558 		total += cache->length * (i + 1);
2559 	}
2560 
2561 	ulog("Cached in big cache: %zu/%zu\n", d->bigcache_used,
2562 	    d->bigcache_size);
2563 	for (i = 0; i < d->bigcache_size; i++) {
2564 		if (d->bigcache[i].psize != 0)
2565 			ulog("%zu: %zu\n", i, d->bigcache[i].psize);
2566 		total += d->bigcache[i].psize;
2567 	}
2568 	ulog("Free pages cached: %zu\n", total);
2569 }
2570 
2571 static void
2572 malloc_dump1(int poolno, struct dir_info *d, struct leaktree *leaks)
2573 {
2574 	size_t i, realsize;
2575 
2576 	if (mopts.malloc_verbose) {
2577 		ulog("Malloc dir of %s pool %d at %p\n", __progname, poolno, d);
2578 		ulog("MT=%d J=%d Fl=%#x\n", d->malloc_mt, d->malloc_junk,
2579 		    d->mmap_flag);
2580 		ulog("Region slots free %zu/%zu\n",
2581 			d->regions_free, d->regions_total);
2582 		ulog("Inserts %zu/%zu\n", d->inserts, d->insert_collisions);
2583 		ulog("Deletes %zu/%zu\n", d->deletes, d->delete_moves);
2584 		ulog("Cheap reallocs %zu/%zu\n",
2585 		    d->cheap_reallocs, d->cheap_realloc_tries);
2586 		ulog("In use %zu\n", d->malloc_used);
2587 		ulog("Guarded %zu\n", d->malloc_guarded);
2588 		dump_free_chunk_info(d, leaks);
2589 		dump_free_page_info(d);
2590 		ulog("Hash table:\n");
2591 		ulog("slot)  hash d  type               page                  "
2592 		    "f size [free/n]\n");
2593 	}
2594 	for (i = 0; i < d->regions_total; i++) {
2595 		if (d->r[i].p != NULL) {
2596 			size_t h = hash(d->r[i].p) &
2597 			    (d->regions_total - 1);
2598 			if (mopts.malloc_verbose)
2599 				ulog("%4zx) #%4zx %zd ",
2600 			        i, h, h - i);
2601 			REALSIZE(realsize, &d->r[i]);
2602 			if (realsize > MALLOC_MAXCHUNK) {
2603 				putleakinfo(leaks, d->r[i].f, realsize, 1);
2604 				if (mopts.malloc_verbose)
2605 					ulog("pages %18p %18p %zu\n", d->r[i].p,
2606 				        d->r[i].f, realsize);
2607 			} else
2608 				dump_chunk(leaks,
2609 				    (struct chunk_info *)d->r[i].size,
2610 				    d->r[i].f, 0);
2611 		}
2612 	}
2613 	if (mopts.malloc_verbose)
2614 		ulog("\n");
2615 }
2616 
2617 static void
2618 malloc_dump0(int poolno, struct dir_info *pool, struct leaktree *leaks)
2619 {
2620 	int i;
2621 	void *p;
2622 	struct region_info *r;
2623 
2624 	if (pool == NULL || pool->r == NULL)
2625 		return;
2626 	for (i = 0; i < MALLOC_DELAYED_CHUNK_MASK + 1; i++) {
2627 		p = pool->delayed_chunks[i];
2628 		if (p == NULL)
2629 			continue;
2630 		r = find(pool, p);
2631 		if (r == NULL)
2632 			wrterror(pool, "bogus pointer in malloc_dump %p", p);
2633 		free_bytes(pool, r, p);
2634 		pool->delayed_chunks[i] = NULL;
2635 	}
2636 	malloc_dump1(poolno, pool, leaks);
2637 }
2638 
2639 void
2640 malloc_dump(void)
2641 {
2642 	int i;
2643 	int saved_errno = errno;
2644 
2645 	/* XXX leak when run multiple times */
2646 	struct leaktree leaks = RBT_INITIALIZER(&leaks);
2647 
2648 	for (i = 0; i < mopts.malloc_mutexes; i++)
2649 		malloc_dump0(i, mopts.malloc_pool[i], &leaks);
2650 
2651 	dump_leaks(&leaks);
2652 	ulog("\n");
2653 	errno = saved_errno;
2654 }
2655 DEF_WEAK(malloc_dump);
2656 
2657 static void
2658 malloc_exit(void)
2659 {
2660 	int save_errno = errno;
2661 
2662 	ulog("******** Start dump %s *******\n", __progname);
2663 	ulog("M=%u I=%d F=%d U=%d J=%d R=%d X=%d C=%#x cache=%u "
2664 	    "G=%zu\n",
2665 	    mopts.malloc_mutexes,
2666 	    mopts.internal_funcs, mopts.malloc_freecheck,
2667 	    mopts.malloc_freeunmap, mopts.def_malloc_junk,
2668 	    mopts.malloc_realloc, mopts.malloc_xmalloc,
2669 	    mopts.chunk_canaries, mopts.def_maxcache,
2670 	    mopts.malloc_guard);
2671 
2672 	malloc_dump();
2673 	ulog("******** End dump %s *******\n", __progname);
2674 	errno = save_errno;
2675 }
2676 
2677 #endif /* MALLOC_STATS */
2678