1 /*	$OpenBSD: malloc.c,v 1.289 2023/06/30 06:24:58 otto Exp $	*/
2 /*
3  * Copyright (c) 2008, 2010, 2011, 2016, 2023 Otto Moerbeek <otto@drijf.net>
4  * Copyright (c) 2012 Matthew Dempsky <matthew@openbsd.org>
5  * Copyright (c) 2008 Damien Miller <djm@openbsd.org>
6  * Copyright (c) 2000 Poul-Henning Kamp <phk@FreeBSD.org>
7  *
8  * Permission to use, copy, modify, and distribute this software for any
9  * purpose with or without fee is hereby granted, provided that the above
10  * copyright notice and this permission notice appear in all copies.
11  *
12  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19  */
20 
21 /*
22  * If we meet some day, and you think this stuff is worth it, you
23  * can buy me a beer in return. Poul-Henning Kamp
24  */
25 
26 #ifndef MALLOC_SMALL
27 #define MALLOC_STATS
28 #endif
29 
30 #include <sys/types.h>
31 #include <sys/queue.h>
32 #include <sys/mman.h>
33 #include <sys/sysctl.h>
34 #include <uvm/uvmexp.h>
35 #include <errno.h>
36 #include <stdarg.h>
37 #include <stdint.h>
38 #include <stdio.h>
39 #include <stdlib.h>
40 #include <string.h>
41 #include <unistd.h>
42 
43 #ifdef MALLOC_STATS
44 #include <sys/tree.h>
45 #include <sys/ktrace.h>
46 #include <dlfcn.h>
47 #endif
48 
49 #include "thread_private.h"
50 #include <tib.h>
51 
52 #define MALLOC_PAGESHIFT	_MAX_PAGE_SHIFT
53 
54 #define MALLOC_MINSHIFT		4
55 #define MALLOC_MAXSHIFT		(MALLOC_PAGESHIFT - 1)
56 #define MALLOC_PAGESIZE		(1UL << MALLOC_PAGESHIFT)
57 #define MALLOC_MINSIZE		(1UL << MALLOC_MINSHIFT)
58 #define MALLOC_PAGEMASK		(MALLOC_PAGESIZE - 1)
59 #define MASK_POINTER(p)		((void *)(((uintptr_t)(p)) & ~MALLOC_PAGEMASK))
60 
61 #define MALLOC_MAXCHUNK		(1 << MALLOC_MAXSHIFT)
62 #define MALLOC_MAXCACHE		256
63 #define MALLOC_DELAYED_CHUNK_MASK	15
64 #ifdef MALLOC_STATS
65 #define MALLOC_INITIAL_REGIONS	512
66 #else
67 #define MALLOC_INITIAL_REGIONS	(MALLOC_PAGESIZE / sizeof(struct region_info))
68 #endif
69 #define MALLOC_DEFAULT_CACHE	64
70 #define MALLOC_CHUNK_LISTS	4
71 #define CHUNK_CHECK_LENGTH	32
72 
73 #define B2SIZE(b)		((b) * MALLOC_MINSIZE)
74 #define B2ALLOC(b)		((b) == 0 ? MALLOC_MINSIZE : \
75 				    (b) * MALLOC_MINSIZE)
76 #define BUCKETS 		(MALLOC_MAXCHUNK / MALLOC_MINSIZE)
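
/*
 * Sizing example, assuming _MAX_PAGE_SHIFT is 12 (4K pages): with
 * MALLOC_MINSIZE 16, MALLOC_MAXCHUNK is 2048 and BUCKETS is 128, so
 * bucket b holds chunks of b * 16 bytes and a 2048-byte request lands
 * in bucket 128, which is why the chunk lists below are sized
 * BUCKETS + 1.
 */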
77 
78 /*
79  * We move allocations between half a page and a whole page towards the end,
80  * subject to alignment constraints. This is the extra headroom we allow.
81  * Set to zero to be the most strict.
82  */
83 #define MALLOC_LEEWAY		0
84 #define MALLOC_MOVE_COND(sz)	((sz) - mopts.malloc_guard < 		\
85 				    MALLOC_PAGESIZE - MALLOC_LEEWAY)
86 #define MALLOC_MOVE(p, sz)  	(((char *)(p)) +			\
87 				    ((MALLOC_PAGESIZE - MALLOC_LEEWAY -	\
88 			    	    ((sz) - mopts.malloc_guard)) & 	\
89 				    ~(MALLOC_MINSIZE - 1)))
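
/*
 * A worked example, assuming 4K pages, MALLOC_LEEWAY 0 and no guard
 * page: a 3000-byte allocation satisfies MALLOC_MOVE_COND and is moved
 * to p + ((4096 - 3000) & ~15) = p + 1088, so it ends 8 bytes short of
 * the end of its page and small overruns run into unmapped (or guard)
 * memory sooner.
 */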
90 
91 #define PAGEROUND(x)  (((x) + (MALLOC_PAGEMASK)) & ~MALLOC_PAGEMASK)
92 
93 /*
94  * What to use for Junk.  This is the byte value we use to fill with
95  * when the 'J' option is enabled. Use SOME_JUNK right after alloc,
96  * and SOME_FREEJUNK right before free.
97  */
98 #define SOME_JUNK		0xdb	/* deadbeef */
99 #define SOME_FREEJUNK		0xdf	/* dead, free */
100 #define SOME_FREEJUNK_ULL	0xdfdfdfdfdfdfdfdfULL
101 
102 #define MMAP(sz,f)	mmap(NULL, (sz), PROT_READ | PROT_WRITE, \
103     MAP_ANON | MAP_PRIVATE | (f), -1, 0)
104 
105 #define MMAPNONE(sz,f)	mmap(NULL, (sz), PROT_NONE, \
106     MAP_ANON | MAP_PRIVATE | (f), -1, 0)
107 
108 #define MMAPA(a,sz,f)	mmap((a), (sz), PROT_READ | PROT_WRITE, \
109     MAP_ANON | MAP_PRIVATE | (f), -1, 0)
110 
111 struct region_info {
112 	void *p;		/* page; low bits used to mark chunks */
113 	uintptr_t size;		/* size for pages, or chunk_info pointer */
114 #ifdef MALLOC_STATS
115 	void *f;		/* where allocated from */
116 #endif
117 };
118 
119 LIST_HEAD(chunk_head, chunk_info);
120 
121 /*
122  * Two caches, one for "small" regions, one for "big".
123  * Small cache is an array per size; big cache is one array with
124  * differently sized regions.
125  */
126 #define MAX_SMALLCACHEABLE_SIZE	32
127 #define MAX_BIGCACHEABLE_SIZE	512
128 /* If the total # of pages is larger than this, evict before inserting */
129 #define BIGCACHE_FILL(sz)	(MAX_BIGCACHEABLE_SIZE * (sz) / 4)
130 
131 struct smallcache {
132 	void **pages;
133 	ushort length;
134 	ushort max;
135 };
136 
137 struct bigcache {
138 	void *page;
139 	size_t psize;
140 };
141 
142 struct dir_info {
143 	u_int32_t canary1;
144 	int active;			/* status of malloc */
145 	struct region_info *r;		/* region slots */
146 	size_t regions_total;		/* number of region slots */
147 	size_t regions_free;		/* number of free slots */
148 	size_t rbytesused;		/* random bytes used */
149 	char *func;			/* current function */
150 	int malloc_junk;		/* junk fill? */
151 	int mmap_flag;			/* extra flag for mmap */
152 	int mutex;
153 	int malloc_mt;			/* multi-threaded mode? */
154 					/* lists of free chunk info structs */
155 	struct chunk_head chunk_info_list[BUCKETS + 1];
156 					/* lists of chunks with free slots */
157 	struct chunk_head chunk_dir[BUCKETS + 1][MALLOC_CHUNK_LISTS];
158 					/* delayed free chunk slots */
159 	void *delayed_chunks[MALLOC_DELAYED_CHUNK_MASK + 1];
160 	u_char rbytes[32];		/* random bytes */
161 					/* free pages cache */
162 	struct smallcache smallcache[MAX_SMALLCACHEABLE_SIZE];
163 	size_t bigcache_used;
164 	size_t bigcache_size;
165 	struct bigcache *bigcache;
166 	void *chunk_pages;
167 	size_t chunk_pages_used;
168 #ifdef MALLOC_STATS
169 	size_t inserts;
170 	size_t insert_collisions;
171 	size_t finds;
172 	size_t find_collisions;
173 	size_t deletes;
174 	size_t delete_moves;
175 	size_t cheap_realloc_tries;
176 	size_t cheap_reallocs;
177 	size_t malloc_used;		/* bytes allocated */
178 	size_t malloc_guarded;		/* bytes used for guards */
179 	size_t pool_searches;		/* searches for pool */
180 	size_t other_pool;		/* searches in other pool */
181 #define STATS_ADD(x,y)	((x) += (y))
182 #define STATS_SUB(x,y)	((x) -= (y))
183 #define STATS_INC(x)	((x)++)
184 #define STATS_ZERO(x)	((x) = 0)
185 #define STATS_SETF(x,y)	((x)->f = (y))
186 #else
187 #define STATS_ADD(x,y)	/* nothing */
188 #define STATS_SUB(x,y)	/* nothing */
189 #define STATS_INC(x)	/* nothing */
190 #define STATS_ZERO(x)	/* nothing */
191 #define STATS_SETF(x,y)	/* nothing */
192 #endif /* MALLOC_STATS */
193 	u_int32_t canary2;
194 };
195 
196 static void unmap(struct dir_info *d, void *p, size_t sz, size_t clear);
197 
198 /*
199  * This structure describes a page worth of chunks.
200  */
201 
202 /* How many bits per u_short in the bitmap */
203 #define MALLOC_BITS		(NBBY * sizeof(u_short))
204 struct chunk_info {
205 	LIST_ENTRY(chunk_info) entries;
206 	void *page;			/* pointer to the page */
207 	u_short canary;
208 	u_short bucket;
209 	u_short free;			/* how many free chunks */
210 	u_short total;			/* how many chunks */
211 	u_short offset;			/* requested size table offset */
212 	u_short bits[1];		/* which chunks are free */
213 };
214 
215 struct malloc_readonly {
216 					/* Main bookkeeping information */
217 	struct dir_info *malloc_pool[_MALLOC_MUTEXES];
218 	u_int	malloc_mutexes;		/* how many are in actual use? */
219 	int	malloc_freecheck;	/* Extensive double free check */
220 	int	malloc_freeunmap;	/* mprotect free pages PROT_NONE? */
221 	int	def_malloc_junk;	/* junk fill? */
222 	int	malloc_realloc;		/* always realloc? */
223 	int	malloc_xmalloc;		/* xmalloc behaviour? */
224 	u_int	chunk_canaries;		/* use canaries after chunks? */
225 	int	internal_funcs;		/* use better recallocarray/freezero? */
226 	u_int	def_maxcache;		/* free pages we cache */
227 	u_int	junk_loc;		/* variation in location of junk */
228 	size_t	malloc_guard;		/* use guard pages after allocations? */
229 #ifdef MALLOC_STATS
230 	int	malloc_stats;		/* dump leak report at end */
231 	int	malloc_verbose;		/* dump verbose statistics at end */
232 #define	DO_STATS	mopts.malloc_stats
233 #else
234 #define	DO_STATS	0
235 #endif
236 	u_int32_t malloc_canary;	/* Matched against ones in pool */
237 };
238 
239 
240 /* This object is mapped PROT_READ after initialisation to prevent tampering */
241 static union {
242 	struct malloc_readonly mopts;
243 	u_char _pad[MALLOC_PAGESIZE];
244 } malloc_readonly __attribute__((aligned(MALLOC_PAGESIZE)))
245 		__attribute__((section(".openbsd.mutable")));
246 #define mopts	malloc_readonly.mopts
247 
248 char		*malloc_options;	/* compile-time options */
249 
250 static __dead void wrterror(struct dir_info *d, char *msg, ...)
251     __attribute__((__format__ (printf, 2, 3)));
252 
253 #ifdef MALLOC_STATS
254 void malloc_dump(void);
255 PROTO_NORMAL(malloc_dump);
256 static void malloc_exit(void);
257 #endif
258 
259 #if defined(__aarch64__) || \
260 	defined(__amd64__) || \
261 	defined(__arm__)
262 static inline void* caller(void)
263 {
264 	void *p;
265 
266 	switch (DO_STATS) {
267 	case 0:
268 	default:
269 		return NULL;
270 	case 1:
271 		p = __builtin_return_address(0);
272 		break;
273 	case 2:
274 		p = __builtin_return_address(1);
275 		break;
276 	case 3:
277 		p = __builtin_return_address(2);
278 		break;
279 	}
280 	return __builtin_extract_return_addr(p);
281 }
282 #else
283 static inline void* caller(void)
284 {
285 	return DO_STATS == 0 ? NULL :
286 	    __builtin_extract_return_addr(__builtin_return_address(0));
287 }
288 #endif
289 
290 /* low bits of r->p determine size: 0 means >= page size and r->size holds
291  * the real size, otherwise the low bits are the bucket + 1
292  */
293 #define REALSIZE(sz, r)						\
294 	(sz) = (uintptr_t)(r)->p & MALLOC_PAGEMASK,		\
295 	(sz) = ((sz) == 0 ? (r)->size : B2SIZE((sz) - 1))
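
/*
 * Decoding sketch, assuming 4K pages: a region entry with
 * r->p == (void *)0x7f0000 and r->size == 8192 describes a two-page
 * allocation, while r->p == (void *)0x7f0003 describes a page of
 * bucket-2 chunks (B2SIZE(2) == 32 bytes with 16-byte MALLOC_MINSIZE)
 * whose chunk_info pointer is stored in r->size.
 */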
296 
297 static inline size_t
298 hash(void *p)
299 {
300 	size_t sum;
301 	uintptr_t u;
302 
303 	u = (uintptr_t)p >> MALLOC_PAGESHIFT;
304 	sum = u;
305 	sum = (sum << 7) - sum + (u >> 16);
306 #ifdef __LP64__
307 	sum = (sum << 7) - sum + (u >> 32);
308 	sum = (sum << 7) - sum + (u >> 48);
309 #endif
310 	return sum;
311 }
312 
313 static inline struct dir_info *
314 getpool(void)
315 {
316 	if (mopts.malloc_pool[1] == NULL || !mopts.malloc_pool[1]->malloc_mt)
317 		return mopts.malloc_pool[1];
318 	else	/* first one reserved for special pool */
319 		return mopts.malloc_pool[1 + TIB_GET()->tib_tid %
320 		    (mopts.malloc_mutexes - 1)];
321 }
322 
323 static __dead void
324 wrterror(struct dir_info *d, char *msg, ...)
325 {
326 	int		saved_errno = errno;
327 	va_list		ap;
328 
329 	dprintf(STDERR_FILENO, "%s(%d) in %s(): ", __progname,
330 	    getpid(), (d != NULL && d->func) ? d->func : "unknown");
331 	va_start(ap, msg);
332 	vdprintf(STDERR_FILENO, msg, ap);
333 	va_end(ap);
334 	dprintf(STDERR_FILENO, "\n");
335 
336 #ifdef MALLOC_STATS
337 	if (DO_STATS && mopts.malloc_verbose)
338 		malloc_dump();
339 #endif
340 
341 	errno = saved_errno;
342 
343 	abort();
344 }
345 
346 static void
347 rbytes_init(struct dir_info *d)
348 {
349 	arc4random_buf(d->rbytes, sizeof(d->rbytes));
350 	/* add 1 to account for using d->rbytes[0] */
351 	d->rbytesused = 1 + d->rbytes[0] % (sizeof(d->rbytes) / 2);
352 }
353 
354 static inline u_char
355 getrbyte(struct dir_info *d)
356 {
357 	u_char x;
358 
359 	if (d->rbytesused >= sizeof(d->rbytes))
360 		rbytes_init(d);
361 	x = d->rbytes[d->rbytesused++];
362 	return x;
363 }
364 
365 static void
366 omalloc_parseopt(char opt)
367 {
368 	switch (opt) {
369 	case '+':
370 		mopts.malloc_mutexes <<= 1;
371 		if (mopts.malloc_mutexes > _MALLOC_MUTEXES)
372 			mopts.malloc_mutexes = _MALLOC_MUTEXES;
373 		break;
374 	case '-':
375 		mopts.malloc_mutexes >>= 1;
376 		if (mopts.malloc_mutexes < 2)
377 			mopts.malloc_mutexes = 2;
378 		break;
379 	case '>':
380 		mopts.def_maxcache <<= 1;
381 		if (mopts.def_maxcache > MALLOC_MAXCACHE)
382 			mopts.def_maxcache = MALLOC_MAXCACHE;
383 		break;
384 	case '<':
385 		mopts.def_maxcache >>= 1;
386 		break;
387 	case 'c':
388 		mopts.chunk_canaries = 0;
389 		break;
390 	case 'C':
391 		mopts.chunk_canaries = 1;
392 		break;
393 #ifdef MALLOC_STATS
394 	case 'd':
395 		mopts.malloc_stats = 0;
396 		break;
397 	case 'D':
398 	case '1':
399 		mopts.malloc_stats = 1;
400 		break;
401 	case '2':
402 		mopts.malloc_stats = 2;
403 		break;
404 	case '3':
405 		mopts.malloc_stats = 3;
406 		break;
407 #endif /* MALLOC_STATS */
408 	case 'f':
409 		mopts.malloc_freecheck = 0;
410 		mopts.malloc_freeunmap = 0;
411 		break;
412 	case 'F':
413 		mopts.malloc_freecheck = 1;
414 		mopts.malloc_freeunmap = 1;
415 		break;
416 	case 'g':
417 		mopts.malloc_guard = 0;
418 		break;
419 	case 'G':
420 		mopts.malloc_guard = MALLOC_PAGESIZE;
421 		break;
422 	case 'j':
423 		if (mopts.def_malloc_junk > 0)
424 			mopts.def_malloc_junk--;
425 		break;
426 	case 'J':
427 		if (mopts.def_malloc_junk < 2)
428 			mopts.def_malloc_junk++;
429 		break;
430 	case 'r':
431 		mopts.malloc_realloc = 0;
432 		break;
433 	case 'R':
434 		mopts.malloc_realloc = 1;
435 		break;
436 	case 'u':
437 		mopts.malloc_freeunmap = 0;
438 		break;
439 	case 'U':
440 		mopts.malloc_freeunmap = 1;
441 		break;
442 #ifdef MALLOC_STATS
443 	case 'v':
444 		mopts.malloc_verbose = 0;
445 		break;
446 	case 'V':
447 		mopts.malloc_verbose = 1;
448 		break;
449 #endif /* MALLOC_STATS */
450 	case 'x':
451 		mopts.malloc_xmalloc = 0;
452 		break;
453 	case 'X':
454 		mopts.malloc_xmalloc = 1;
455 		break;
456 	default:
457 		dprintf(STDERR_FILENO, "malloc() warning: "
458 		    "unknown char in MALLOC_OPTIONS\n");
459 		break;
460 	}
461 }
462 
463 static void
464 omalloc_init(void)
465 {
466 	char *p, *q, b[16];
467 	int i, j;
468 	const int mib[2] = { CTL_VM, VM_MALLOC_CONF };
469 	size_t sb;
470 
471 	/*
472 	 * Default options
473 	 */
474 	mopts.malloc_mutexes = 8;
475 	mopts.def_malloc_junk = 1;
476 	mopts.def_maxcache = MALLOC_DEFAULT_CACHE;
477 
478 	for (i = 0; i < 3; i++) {
479 		switch (i) {
480 		case 0:
481 			sb = sizeof(b);
482 			j = sysctl(mib, 2, b, &sb, NULL, 0);
483 			if (j != 0)
484 				continue;
485 			p = b;
486 			break;
487 		case 1:
488 			if (issetugid() == 0)
489 				p = getenv("MALLOC_OPTIONS");
490 			else
491 				continue;
492 			break;
493 		case 2:
494 			p = malloc_options;
495 			break;
496 		default:
497 			p = NULL;
498 		}
499 
500 		for (; p != NULL && *p != '\0'; p++) {
501 			switch (*p) {
502 			case 'S':
503 				for (q = "CFGJ"; *q != '\0'; q++)
504 					omalloc_parseopt(*q);
505 				mopts.def_maxcache = 0;
506 				break;
507 			case 's':
508 				for (q = "cfgj"; *q != '\0'; q++)
509 					omalloc_parseopt(*q);
510 				mopts.def_maxcache = MALLOC_DEFAULT_CACHE;
511 				break;
512 			default:
513 				omalloc_parseopt(*p);
514 				break;
515 			}
516 		}
517 	}
518 
519 #ifdef MALLOC_STATS
520 	if (DO_STATS && (atexit(malloc_exit) == -1)) {
521 		dprintf(STDERR_FILENO, "malloc() warning: atexit(2) failed."
522 		    " Will not be able to dump stats on exit\n");
523 	}
524 #endif
525 
526 	while ((mopts.malloc_canary = arc4random()) == 0)
527 		;
528 	mopts.junk_loc = arc4random();
529 	if (mopts.chunk_canaries)
530 		do {
531 			mopts.chunk_canaries = arc4random();
532 		} while ((u_char)mopts.chunk_canaries == 0 ||
533 		    (u_char)mopts.chunk_canaries == SOME_FREEJUNK);
534 }
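
/*
 * Example of the option plumbing above: running a program with
 * MALLOC_OPTIONS=S parses C, F, G and J (chunk canaries, free checks
 * and unmapping, guard pages, full junking) and disables the page
 * cache, while MALLOC_OPTIONS=CFGJ selects the same checks but keeps
 * the default cache size.
 */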
535 
536 static void
537 omalloc_poolinit(struct dir_info *d, int mmap_flag)
538 {
539 	int i, j;
540 
541 	d->r = NULL;
542 	d->rbytesused = sizeof(d->rbytes);
543 	d->regions_free = d->regions_total = 0;
544 	for (i = 0; i <= BUCKETS; i++) {
545 		LIST_INIT(&d->chunk_info_list[i]);
546 		for (j = 0; j < MALLOC_CHUNK_LISTS; j++)
547 			LIST_INIT(&d->chunk_dir[i][j]);
548 	}
549 	d->mmap_flag = mmap_flag;
550 	d->malloc_junk = mopts.def_malloc_junk;
551 	d->canary1 = mopts.malloc_canary ^ (u_int32_t)(uintptr_t)d;
552 	d->canary2 = ~d->canary1;
553 }
554 
555 static int
556 omalloc_grow(struct dir_info *d)
557 {
558 	size_t newtotal;
559 	size_t newsize;
560 	size_t mask;
561 	size_t i, oldpsz;
562 	struct region_info *p;
563 
564 	if (d->regions_total > SIZE_MAX / sizeof(struct region_info) / 2)
565 		return 1;
566 
567 	newtotal = d->regions_total == 0 ? MALLOC_INITIAL_REGIONS :
568 	    d->regions_total * 2;
569 	newsize = PAGEROUND(newtotal * sizeof(struct region_info));
570 	mask = newtotal - 1;
571 
572 	/* Don't use the cache here; we don't want user UAF to touch this */
573 	p = MMAP(newsize, d->mmap_flag);
574 	if (p == MAP_FAILED)
575 		return 1;
576 
577 	STATS_ADD(d->malloc_used, newsize);
578 	STATS_ZERO(d->inserts);
579 	STATS_ZERO(d->insert_collisions);
580 	for (i = 0; i < d->regions_total; i++) {
581 		void *q = d->r[i].p;
582 		if (q != NULL) {
583 			size_t index = hash(q) & mask;
584 			STATS_INC(d->inserts);
585 			while (p[index].p != NULL) {
586 				index = (index - 1) & mask;
587 				STATS_INC(d->insert_collisions);
588 			}
589 			p[index] = d->r[i];
590 		}
591 	}
592 
593 	if (d->regions_total > 0) {
594 		oldpsz = PAGEROUND(d->regions_total * sizeof(struct region_info));
595 		/* clear to avoid meta info ending up in the cache */
596 		unmap(d, d->r, oldpsz, oldpsz);
597 	}
598 	d->regions_free += newtotal - d->regions_total;
599 	d->regions_total = newtotal;
600 	d->r = p;
601 	return 0;
602 }
603 
604 /*
605  * The hashtable uses the assumption that p is never NULL. This holds since
606  * non-MAP_FIXED mappings with hint 0 start at BRKSIZ.
607  */
608 static int
609 insert(struct dir_info *d, void *p, size_t sz, void *f)
610 {
611 	size_t index;
612 	size_t mask;
613 	void *q;
614 
615 	if (d->regions_free * 4 < d->regions_total || d->regions_total == 0) {
616 		if (omalloc_grow(d))
617 			return 1;
618 	}
619 	mask = d->regions_total - 1;
620 	index = hash(p) & mask;
621 	q = d->r[index].p;
622 	STATS_INC(d->inserts);
623 	while (q != NULL) {
624 		index = (index - 1) & mask;
625 		q = d->r[index].p;
626 		STATS_INC(d->insert_collisions);
627 	}
628 	d->r[index].p = p;
629 	d->r[index].size = sz;
630 	STATS_SETF(&d->r[index], f);
631 	d->regions_free--;
632 	return 0;
633 }
634 
635 static struct region_info *
636 find(struct dir_info *d, void *p)
637 {
638 	size_t index;
639 	size_t mask = d->regions_total - 1;
640 	void *q, *r;
641 
642 	if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)(uintptr_t)d) ||
643 	    d->canary1 != ~d->canary2)
644 		wrterror(d, "internal struct corrupt");
645 	if (d->r == NULL)
646 		return NULL;
647 	p = MASK_POINTER(p);
648 	index = hash(p) & mask;
649 	r = d->r[index].p;
650 	q = MASK_POINTER(r);
651 	STATS_INC(d->finds);
652 	while (q != p && r != NULL) {
653 		index = (index - 1) & mask;
654 		r = d->r[index].p;
655 		q = MASK_POINTER(r);
656 		STATS_INC(d->find_collisions);
657 	}
658 	return (q == p && r != NULL) ? &d->r[index] : NULL;
659 }
660 
661 static void
662 delete(struct dir_info *d, struct region_info *ri)
663 {
664 	/* algorithm R, Knuth Vol III section 6.4 */
665 	size_t mask = d->regions_total - 1;
666 	size_t i, j, r;
667 
668 	if (d->regions_total & (d->regions_total - 1))
669 		wrterror(d, "regions_total not 2^x");
670 	d->regions_free++;
671 	STATS_INC(d->deletes);
672 
673 	i = ri - d->r;
674 	for (;;) {
675 		d->r[i].p = NULL;
676 		d->r[i].size = 0;
677 		j = i;
678 		for (;;) {
679 			i = (i - 1) & mask;
680 			if (d->r[i].p == NULL)
681 				return;
682 			r = hash(d->r[i].p) & mask;
683 			if ((i <= r && r < j) || (r < j && j < i) ||
684 			    (j < i && i <= r))
685 				continue;
686 			d->r[j] = d->r[i];
687 			STATS_INC(d->delete_moves);
688 			break;
689 		}
690 
691 	}
692 }
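
/*
 * The backward-shift deletion above keeps the invariant that every
 * entry is reachable from its hash slot by probing downwards
 * ((i - 1) & mask): after clearing slot j, any entry whose home slot
 * would leave it stranded behind the hole is moved into the hole, so
 * find() can still stop at the first empty slot.
 */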
693 
694 static inline void
695 junk_free(int junk, void *p, size_t sz)
696 {
697 	size_t i, step = 1;
698 	uint64_t *lp = p;
699 
700 	if (junk == 0 || sz == 0)
701 		return;
702 	sz /= sizeof(uint64_t);
703 	if (junk == 1) {
704 		if (sz > MALLOC_PAGESIZE / sizeof(uint64_t))
705 			sz = MALLOC_PAGESIZE / sizeof(uint64_t);
706 		step = sz / 4;
707 		if (step == 0)
708 			step = 1;
709 	}
710 	/* Do not always put the free junk bytes in the same spot.
711 	   There is modulo bias here, but we ignore that. */
712 	for (i = mopts.junk_loc % step; i < sz; i += step)
713 		lp[i] = SOME_FREEJUNK_ULL;
714 }
715 
716 static inline void
717 validate_junk(struct dir_info *pool, void *p, size_t sz)
718 {
719 	size_t i, step = 1;
720 	uint64_t *lp = p;
721 
722 	if (pool->malloc_junk == 0 || sz == 0)
723 		return;
724 	sz /= sizeof(uint64_t);
725 	if (pool->malloc_junk == 1) {
726 		if (sz > MALLOC_PAGESIZE / sizeof(uint64_t))
727 			sz = MALLOC_PAGESIZE / sizeof(uint64_t);
728 		step = sz / 4;
729 		if (step == 0)
730 			step = 1;
731 	}
732 	/* see junk_free */
733 	for (i = mopts.junk_loc % step; i < sz; i += step) {
734 		if (lp[i] != SOME_FREEJUNK_ULL)
735 			wrterror(pool, "write after free %p", p);
736 	}
737 }
738 
739 
740 /*
741  * Cache maintenance.
742  * As opposed to the regular region data structure, the sizes in the
743  * cache are in MALLOC_PAGESIZE units.
744  */
745 static void
746 unmap(struct dir_info *d, void *p, size_t sz, size_t clear)
747 {
748 	size_t psz = sz >> MALLOC_PAGESHIFT;
749 	void *r;
750 	u_short i;
751 	struct smallcache *cache;
752 
753 	if (sz != PAGEROUND(sz) || psz == 0)
754 		wrterror(d, "munmap round");
755 
756 	if (d->bigcache_size > 0 && psz > MAX_SMALLCACHEABLE_SIZE &&
757 	    psz <= MAX_BIGCACHEABLE_SIZE) {
758 		u_short base = getrbyte(d);
759 		u_short j;
760 
761 		/* don't look through all slots */
762 		for (j = 0; j < d->bigcache_size / 4; j++) {
763 			i = (base + j) & (d->bigcache_size - 1);
764 			if (d->bigcache_used <
765 			    BIGCACHE_FILL(d->bigcache_size))  {
766 				if (d->bigcache[i].psize == 0)
767 					break;
768 			} else {
769 				if (d->bigcache[i].psize != 0)
770 					break;
771 			}
772 		}
773 		/* if we didn't find a preferred slot, use a random one */
774 		if (d->bigcache[i].psize != 0) {
775 			size_t tmp;
776 
777 			r = d->bigcache[i].page;
778 			d->bigcache_used -= d->bigcache[i].psize;
779 			tmp = d->bigcache[i].psize << MALLOC_PAGESHIFT;
780 			if (!mopts.malloc_freeunmap)
781 				validate_junk(d, r, tmp);
782 			if (munmap(r, tmp))
783 				 wrterror(d, "munmap %p", r);
784 			STATS_SUB(d->malloc_used, tmp);
785 		}
786 
787 		if (clear > 0)
788 			explicit_bzero(p, clear);
789 		if (mopts.malloc_freeunmap) {
790 			if (mprotect(p, sz, PROT_NONE))
791 				wrterror(d, "mprotect %p", r);
792 		} else
793 			junk_free(d->malloc_junk, p, sz);
794 		d->bigcache[i].page = p;
795 		d->bigcache[i].psize = psz;
796 		d->bigcache_used += psz;
797 		return;
798 	}
799 	if (psz > MAX_SMALLCACHEABLE_SIZE || d->smallcache[psz - 1].max == 0) {
800 		if (munmap(p, sz))
801 			wrterror(d, "munmap %p", p);
802 		STATS_SUB(d->malloc_used, sz);
803 		return;
804 	}
805 	cache = &d->smallcache[psz - 1];
806 	if (cache->length == cache->max) {
807 		int fresh;
808 		/* use a random slot */
809 		i = getrbyte(d) & (cache->max - 1);
810 		r = cache->pages[i];
811 		fresh = (uintptr_t)r & 1;
812 		*(uintptr_t*)&r &= ~1ULL;
813 		if (!fresh && !mopts.malloc_freeunmap)
814 			validate_junk(d, r, sz);
815 		if (munmap(r, sz))
816 			wrterror(d, "munmap %p", r);
817 		STATS_SUB(d->malloc_used, sz);
818 		cache->length--;
819 	} else
820 		i = cache->length;
821 
822 	/* fill slot */
823 	if (clear > 0)
824 		explicit_bzero(p, clear);
825 	if (mopts.malloc_freeunmap)
826 		mprotect(p, sz, PROT_NONE);
827 	else
828 		junk_free(d->malloc_junk, p, sz);
829 	cache->pages[i] = p;
830 	cache->length++;
831 }
832 
833 static void *
834 map(struct dir_info *d, size_t sz, int zero_fill)
835 {
836 	size_t psz = sz >> MALLOC_PAGESHIFT;
837 	u_short i;
838 	void *p;
839 	struct smallcache *cache;
840 
841 	if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)(uintptr_t)d) ||
842 	    d->canary1 != ~d->canary2)
843 		wrterror(d, "internal struct corrupt");
844 	if (sz != PAGEROUND(sz) || psz == 0)
845 		wrterror(d, "map round");
846 
847 
848 	if (d->bigcache_size > 0 && psz > MAX_SMALLCACHEABLE_SIZE &&
849 	    psz <= MAX_BIGCACHEABLE_SIZE) {
850 		size_t base = getrbyte(d);
851 		size_t cached = d->bigcache_used;
852 		ushort j;
853 
854 		for (j = 0; j < d->bigcache_size && cached >= psz; j++) {
855 			i = (j + base) & (d->bigcache_size - 1);
856 			if (d->bigcache[i].psize == psz) {
857 				p = d->bigcache[i].page;
858 				d->bigcache_used -= psz;
859 				d->bigcache[i].page = NULL;
860 				d->bigcache[i].psize = 0;
861 
862 				if (!mopts.malloc_freeunmap)
863 					validate_junk(d, p, sz);
864 				if (mopts.malloc_freeunmap)
865 					mprotect(p, sz, PROT_READ | PROT_WRITE);
866 				if (zero_fill)
867 					memset(p, 0, sz);
868 				else if (mopts.malloc_freeunmap)
869 					junk_free(d->malloc_junk, p, sz);
870 				return p;
871 			}
872 			cached -= d->bigcache[i].psize;
873 		}
874 	}
875 	if (psz <= MAX_SMALLCACHEABLE_SIZE && d->smallcache[psz - 1].max > 0) {
876 		cache = &d->smallcache[psz - 1];
877 		if (cache->length > 0) {
878 			int fresh;
879 			if (cache->length == 1)
880 				p = cache->pages[--cache->length];
881 			else {
882 				i = getrbyte(d) % cache->length;
883 				p = cache->pages[i];
884 				cache->pages[i] = cache->pages[--cache->length];
885 			}
886 			/* check if page was not junked, i.e. "fresh";
887 			   we use the lsb of the pointer for that */
888 			fresh = (uintptr_t)p & 1UL;
889 			*(uintptr_t*)&p &= ~1UL;
890 			if (!fresh && !mopts.malloc_freeunmap)
891 				validate_junk(d, p, sz);
892 			if (mopts.malloc_freeunmap)
893 				mprotect(p, sz, PROT_READ | PROT_WRITE);
894 			if (zero_fill)
895 				memset(p, 0, sz);
896 			else if (mopts.malloc_freeunmap)
897 				junk_free(d->malloc_junk, p, sz);
898 			return p;
899 		}
900 		if (psz <= 1) {
901 			p = MMAP(cache->max * sz, d->mmap_flag);
902 			if (p != MAP_FAILED) {
903 				STATS_ADD(d->malloc_used, cache->max * sz);
904 				cache->length = cache->max - 1;
905 				for (i = 0; i < cache->max - 1; i++) {
906 					void *q = (char*)p + i * sz;
907 					cache->pages[i] = q;
908 					/* mark pointer in slot as not junked */
909 					*(uintptr_t*)&cache->pages[i] |= 1UL;
910 				}
911 				if (mopts.malloc_freeunmap)
912 					mprotect(p, (cache->max - 1) * sz,
913 					    PROT_NONE);
914 				p = (char*)p + (cache->max - 1) * sz;
915 				/* zero fill not needed, freshly mmapped */
916 				return p;
917 			}
918 		}
919 
920 	}
921 	p = MMAP(sz, d->mmap_flag);
922 	if (p != MAP_FAILED)
923 		STATS_ADD(d->malloc_used, sz);
924 	/* zero fill not needed */
925 	return p;
926 }
927 
928 static void
929 init_chunk_info(struct dir_info *d, struct chunk_info *p, u_int bucket)
930 {
931 	u_int i;
932 
933 	p->bucket = bucket;
934 	p->total = p->free = MALLOC_PAGESIZE / B2ALLOC(bucket);
935 	p->offset = bucket == 0 ? 0xdead : howmany(p->total, MALLOC_BITS);
936 	p->canary = (u_short)d->canary1;
937 
938 	/* set all valid bits in the bitmap */
939  	i = p->total - 1;
940 	memset(p->bits, 0xff, sizeof(p->bits[0]) * (i / MALLOC_BITS));
941 	p->bits[i / MALLOC_BITS] = (2U << (i % MALLOC_BITS)) - 1;
942 }
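
/*
 * Bitmap example, assuming 4K pages and 16-byte MALLOC_MINSIZE:
 * bucket 0 has p->total == 256, i == 255, and the last of the sixteen
 * 16-bit words gets (2U << 15) - 1 == 0xffff, all bits set; bucket 10
 * (160-byte chunks) has p->total == 25, so bits[1] gets
 * (2U << 8) - 1 == 0x1ff, marking only the nine valid trailing slots.
 */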
943 
944 static struct chunk_info *
945 alloc_chunk_info(struct dir_info *d, u_int bucket)
946 {
947 	struct chunk_info *p;
948 
949 	if (LIST_EMPTY(&d->chunk_info_list[bucket])) {
950 		const size_t chunk_pages = 64;
951 		size_t size, count, i;
952 		char *q;
953 
954 		count = MALLOC_PAGESIZE / B2ALLOC(bucket);
955 
956 		size = howmany(count, MALLOC_BITS);
957 		size = sizeof(struct chunk_info) + (size - 1) * sizeof(u_short);
958 		if (mopts.chunk_canaries)
959 			size += count * sizeof(u_short);
960 		size = _ALIGN(size);
961 		count = MALLOC_PAGESIZE / size;
962 
963 		/* Don't use the cache here; we don't want user UAF to touch this */
964 		if (d->chunk_pages_used == chunk_pages ||
965 		     d->chunk_pages == NULL) {
966 			q = MMAP(MALLOC_PAGESIZE * chunk_pages, d->mmap_flag);
967 			if (q == MAP_FAILED)
968 				return NULL;
969 			d->chunk_pages = q;
970 			d->chunk_pages_used = 0;
971 			STATS_ADD(d->malloc_used, MALLOC_PAGESIZE *
972 			    chunk_pages);
973 		}
974 		q = (char *)d->chunk_pages + d->chunk_pages_used *
975 		    MALLOC_PAGESIZE;
976 		d->chunk_pages_used++;
977 
978 		for (i = 0; i < count; i++, q += size) {
979 			p = (struct chunk_info *)q;
980 			LIST_INSERT_HEAD(&d->chunk_info_list[bucket], p, entries);
981 		}
982 	}
983 	p = LIST_FIRST(&d->chunk_info_list[bucket]);
984 	LIST_REMOVE(p, entries);
985 	if (p->total == 0)
986 		init_chunk_info(d, p, bucket);
987 	return p;
988 }
989 
990 /*
991  * Allocate a page of chunks
992  */
993 static struct chunk_info *
994 omalloc_make_chunks(struct dir_info *d, u_int bucket, u_int listnum)
995 {
996 	struct chunk_info *bp;
997 	void *pp;
998 
999 	/* Allocate a new bucket */
1000 	pp = map(d, MALLOC_PAGESIZE, 0);
1001 	if (pp == MAP_FAILED)
1002 		return NULL;
1003 
1004 	/* memory protect the page allocated in the malloc(0) case */
1005 	if (bucket == 0 && mprotect(pp, MALLOC_PAGESIZE, PROT_NONE) == -1)
1006 		goto err;
1007 
1008 	bp = alloc_chunk_info(d, bucket);
1009 	if (bp == NULL)
1010 		goto err;
1011 	bp->page = pp;
1012 
1013 	if (insert(d, (void *)((uintptr_t)pp | (bucket + 1)), (uintptr_t)bp,
1014 	    NULL))
1015 		goto err;
1016 	LIST_INSERT_HEAD(&d->chunk_dir[bucket][listnum], bp, entries);
1017 
1018 	if (bucket > 0 && d->malloc_junk != 0)
1019 		memset(pp, SOME_FREEJUNK, MALLOC_PAGESIZE);
1020 
1021 	return bp;
1022 
1023 err:
1024 	unmap(d, pp, MALLOC_PAGESIZE, 0);
1025 	return NULL;
1026 }
1027 
1028 #if defined(__GNUC__) && __GNUC__ < 4
1029 static inline unsigned int
1030 lb(u_int x)
1031 {
1032 #if defined(__m88k__)
1033 	__asm__ __volatile__ ("ff1 %0, %0" : "=r" (x) : "0" (x));
1034 	return x;
1035 #else
1036 	/* portable version */
1037 	unsigned int count = 0;
1038 	while ((x & (1U << (sizeof(int) * CHAR_BIT - 1))) == 0) {
1039 		count++;
1040 		x <<= 1;
1041 	}
1042 	return (sizeof(int) * CHAR_BIT - 1) - count;
1043 #endif
1044 }
1045 #else
1046 /* using built-in function version */
1047 static inline unsigned int
1048 lb(u_int x)
1049 {
1050 	/* I need an extension just for integer-length (: */
1051 	return (sizeof(int) * CHAR_BIT - 1) - __builtin_clz(x);
1052 }
1053 #endif
1054 
1055 /* https://pvk.ca/Blog/2015/06/27/linear-log-bucketing-fast-versatile-simple/
1056    via Tony Finch */
1057 static inline unsigned int
1058 bin_of(unsigned int size)
1059 {
1060 	const unsigned int linear = 6;
1061 	const unsigned int subbin = 2;
1062 
1063 	unsigned int mask, range, rounded, sub_index, rounded_size;
1064 	unsigned int n_bits, shift;
1065 
1066 	n_bits = lb(size | (1U << linear));
1067 	shift = n_bits - subbin;
1068 	mask = (1ULL << shift) - 1;
1069 	rounded = size + mask; /* XXX: overflow. */
1070 	sub_index = rounded >> shift;
1071 	range = n_bits - linear;
1072 
1073 	rounded_size = rounded & ~mask;
1074 	return rounded_size;
1075 }
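
/*
 * Rounding sketch for the linear-log bucketing above (linear == 6,
 * subbin == 2): small requests round to the next multiple of 16, and
 * past the linear range each power-of-two range is split into four
 * sub-bins, so e.g. bin_of(100) rounds to 112 (step 16) and
 * bin_of(1000) rounds to 1024 (step 128 in the 512..1023 range).
 */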
1076 
1077 static inline u_short
1078 find_bucket(u_short size)
1079 {
1080 	/* malloc(0) is special */
1081 	if (size == 0)
1082 		return 0;
1083 	if (size < MALLOC_MINSIZE)
1084 		size = MALLOC_MINSIZE;
1085 	if (mopts.def_maxcache != 0)
1086 		size = bin_of(size);
1087 	return howmany(size, MALLOC_MINSIZE);
1088 }
1089 
1090 static void
1091 fill_canary(char *ptr, size_t sz, size_t allocated)
1092 {
1093 	size_t check_sz = allocated - sz;
1094 
1095 	if (check_sz > CHUNK_CHECK_LENGTH)
1096 		check_sz = CHUNK_CHECK_LENGTH;
1097 	memset(ptr + sz, mopts.chunk_canaries, check_sz);
1098 }
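
/*
 * Canary layout sketch: a malloc(40) served from the 48-byte bucket
 * with 'C' enabled gets fill_canary(ptr, 40, 48), writing the 8 slack
 * bytes after the requested size; validate_canary() later checks at
 * most CHUNK_CHECK_LENGTH (32) of those bytes on free.
 */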
1099 
1100 /*
1101  * Allocate a chunk
1102  */
1103 static void *
1104 malloc_bytes(struct dir_info *d, size_t size, void *f)
1105 {
1106 	u_int i, r, bucket, listnum;
1107 	size_t k;
1108 	u_short	*lp;
1109 	struct chunk_info *bp;
1110 	void *p;
1111 
1112 	if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)(uintptr_t)d) ||
1113 	    d->canary1 != ~d->canary2)
1114 		wrterror(d, "internal struct corrupt");
1115 
1116 	bucket = find_bucket(size);
1117 
1118 	r = ((u_int)getrbyte(d) << 8) | getrbyte(d);
1119 	listnum = r % MALLOC_CHUNK_LISTS;
1120 
1121 	/* If the list is empty, allocate a new page of chunks of that size */
1122 	if ((bp = LIST_FIRST(&d->chunk_dir[bucket][listnum])) == NULL) {
1123 		bp = omalloc_make_chunks(d, bucket, listnum);
1124 		if (bp == NULL)
1125 			return NULL;
1126 	}
1127 
1128 	if (bp->canary != (u_short)d->canary1)
1129 		wrterror(d, "chunk info corrupted");
1130 
1131 	/* bias, as bp->total is not a power of 2 */
1132 	i = (r / MALLOC_CHUNK_LISTS) % bp->total;
1133 
1134 	/* potentially start somewhere in a short */
1135 	lp = &bp->bits[i / MALLOC_BITS];
1136 	if (*lp) {
1137 		int j = i % MALLOC_BITS; /* j must be signed */
1138 		k = ffs(*lp >> j);
1139 		if (k != 0) {
1140 			k += j - 1;
1141 			goto found;
1142 		}
1143 	}
1144 	/* no bit halfway, go to next full short */
1145 	i /= MALLOC_BITS;
1146 	for (;;) {
1147 		if (++i >= howmany(bp->total, MALLOC_BITS))
1148 			i = 0;
1149 		lp = &bp->bits[i];
1150 		if (*lp) {
1151 			k = ffs(*lp) - 1;
1152 			break;
1153 		}
1154 	}
1155 found:
1156 	if (i == 0 && k == 0 && DO_STATS) {
1157 		struct region_info *r = find(d, bp->page);
1158 		STATS_SETF(r, f);
1159 	}
1160 
1161 	*lp ^= 1 << k;
1162 
1163 	/* If there are no more free chunks, remove from the free list */
1164 	if (--bp->free == 0)
1165 		LIST_REMOVE(bp, entries);
1166 
1167 	/* Adjust to the real offset of that chunk */
1168 	k += (lp - bp->bits) * MALLOC_BITS;
1169 
1170 	if (mopts.chunk_canaries && size > 0)
1171 		bp->bits[bp->offset + k] = size;
1172 
1173 	k *= B2ALLOC(bp->bucket);
1174 
1175 	p = (char *)bp->page + k;
1176 	if (bp->bucket > 0) {
1177 		validate_junk(d, p, B2SIZE(bp->bucket));
1178 		if (mopts.chunk_canaries)
1179 			fill_canary(p, size, B2SIZE(bp->bucket));
1180 	}
1181 	return p;
1182 }
1183 
1184 static void
1185 validate_canary(struct dir_info *d, u_char *ptr, size_t sz, size_t allocated)
1186 {
1187 	size_t check_sz = allocated - sz;
1188 	u_char *p, *q;
1189 
1190 	if (check_sz > CHUNK_CHECK_LENGTH)
1191 		check_sz = CHUNK_CHECK_LENGTH;
1192 	p = ptr + sz;
1193 	q = p + check_sz;
1194 
1195 	while (p < q) {
1196 		if (*p != (u_char)mopts.chunk_canaries && *p != SOME_JUNK) {
1197 			wrterror(d, "canary corrupted %p %#tx@%#zx%s",
1198 			    ptr, p - ptr, sz,
1199 			    *p == SOME_FREEJUNK ? " (double free?)" : "");
1200 		}
1201 		p++;
1202 	}
1203 }
1204 
1205 static uint32_t
1206 find_chunknum(struct dir_info *d, struct chunk_info *info, void *ptr, int check)
1207 {
1208 	uint32_t chunknum;
1209 
1210 	if (info->canary != (u_short)d->canary1)
1211 		wrterror(d, "chunk info corrupted");
1212 
1213 	/* Find the chunk number on the page */
1214 	chunknum = ((uintptr_t)ptr & MALLOC_PAGEMASK) / B2ALLOC(info->bucket);
1215 
1216 	if ((uintptr_t)ptr & (MALLOC_MINSIZE - 1))
1217 		wrterror(d, "modified chunk-pointer %p", ptr);
1218 	if (info->bits[chunknum / MALLOC_BITS] &
1219 	    (1U << (chunknum % MALLOC_BITS)))
1220 		wrterror(d, "double free %p", ptr);
1221 	if (check && info->bucket > 0) {
1222 		validate_canary(d, ptr, info->bits[info->offset + chunknum],
1223 		    B2SIZE(info->bucket));
1224 	}
1225 	return chunknum;
1226 }
1227 
1228 /*
1229  * Free a chunk, and possibly the page it's on, if the page becomes empty.
1230  */
1231 static void
1232 free_bytes(struct dir_info *d, struct region_info *r, void *ptr)
1233 {
1234 	struct chunk_head *mp;
1235 	struct chunk_info *info;
1236 	uint32_t chunknum;
1237 	uint32_t listnum;
1238 
1239 	info = (struct chunk_info *)r->size;
1240 	chunknum = find_chunknum(d, info, ptr, 0);
1241 
1242 	if (chunknum == 0)
1243 		STATS_SETF(r, NULL);
1244 
1245 	info->bits[chunknum / MALLOC_BITS] |= 1U << (chunknum % MALLOC_BITS);
1246 	info->free++;
1247 
1248 	if (info->free == 1) {
1249 		/* Page became non-full */
1250 		listnum = getrbyte(d) % MALLOC_CHUNK_LISTS;
1251 		mp = &d->chunk_dir[info->bucket][listnum];
1252 		LIST_INSERT_HEAD(mp, info, entries);
1253 		return;
1254 	}
1255 
1256 	if (info->free != info->total)
1257 		return;
1258 
1259 	LIST_REMOVE(info, entries);
1260 
1261 	if (info->bucket == 0 && !mopts.malloc_freeunmap)
1262 		mprotect(info->page, MALLOC_PAGESIZE, PROT_READ | PROT_WRITE);
1263 	unmap(d, info->page, MALLOC_PAGESIZE, 0);
1264 
1265 	delete(d, r);
1266 	mp = &d->chunk_info_list[info->bucket];
1267 	LIST_INSERT_HEAD(mp, info, entries);
1268 }
1269 
1270 
1271 
1272 static void *
1273 omalloc(struct dir_info *pool, size_t sz, int zero_fill, void *f)
1274 {
1275 	void *p;
1276 	size_t psz;
1277 
1278 	if (sz > MALLOC_MAXCHUNK) {
1279 		if (sz >= SIZE_MAX - mopts.malloc_guard - MALLOC_PAGESIZE) {
1280 			errno = ENOMEM;
1281 			return NULL;
1282 		}
1283 		sz += mopts.malloc_guard;
1284 		psz = PAGEROUND(sz);
1285 		p = map(pool, psz, zero_fill);
1286 		if (p == MAP_FAILED) {
1287 			errno = ENOMEM;
1288 			return NULL;
1289 		}
1290 		if (insert(pool, p, sz, f)) {
1291 			unmap(pool, p, psz, 0);
1292 			errno = ENOMEM;
1293 			return NULL;
1294 		}
1295 		if (mopts.malloc_guard) {
1296 			if (mprotect((char *)p + psz - mopts.malloc_guard,
1297 			    mopts.malloc_guard, PROT_NONE))
1298 				wrterror(pool, "mprotect");
1299 			STATS_ADD(pool->malloc_guarded, mopts.malloc_guard);
1300 		}
1301 
1302 		if (MALLOC_MOVE_COND(sz)) {
1303 			/* fill whole allocation */
1304 			if (pool->malloc_junk == 2)
1305 				memset(p, SOME_JUNK, psz - mopts.malloc_guard);
1306 			/* shift towards the end */
1307 			p = MALLOC_MOVE(p, sz);
1308 			/* zero-fill if needed; the junk fill above overwrote any zeros */
1309 			if (zero_fill && pool->malloc_junk == 2)
1310 				memset(p, 0, sz - mopts.malloc_guard);
1311 		} else {
1312 			if (pool->malloc_junk == 2) {
1313 				if (zero_fill)
1314 					memset((char *)p + sz -
1315 					    mopts.malloc_guard, SOME_JUNK,
1316 					    psz - sz);
1317 				else
1318 					memset(p, SOME_JUNK,
1319 					    psz - mopts.malloc_guard);
1320 			} else if (mopts.chunk_canaries)
1321 				fill_canary(p, sz - mopts.malloc_guard,
1322 				    psz - mopts.malloc_guard);
1323 		}
1324 
1325 	} else {
1326 		/* takes care of SOME_JUNK */
1327 		p = malloc_bytes(pool, sz, f);
1328 		if (zero_fill && p != NULL && sz > 0)
1329 			memset(p, 0, sz);
1330 	}
1331 
1332 	return p;
1333 }
1334 
1335 /*
1336  * Common function for handling recursion.  Only
1337  * print the error message once, to avoid making the problem
1338  * potentially worse.
1339  */
1340 static void
1341 malloc_recurse(struct dir_info *d)
1342 {
1343 	static int noprint;
1344 
1345 	if (noprint == 0) {
1346 		noprint = 1;
1347 		wrterror(d, "recursive call");
1348 	}
1349 	d->active--;
1350 	_MALLOC_UNLOCK(d->mutex);
1351 	errno = EDEADLK;
1352 }
1353 
1354 void
1355 _malloc_init(int from_rthreads)
1356 {
1357 	u_int i, j, nmutexes;
1358 	struct dir_info *d;
1359 
1360 	_MALLOC_LOCK(1);
1361 	if (!from_rthreads && mopts.malloc_pool[1]) {
1362 		_MALLOC_UNLOCK(1);
1363 		return;
1364 	}
1365 	if (!mopts.malloc_canary) {
1366 		char *p;
1367 		size_t sz, d_avail;
1368 
1369 		omalloc_init();
1370 		/*
1371 		 * Allocate dir_infos with a guard page on either side. Also
1372 		 * randomise offset inside the page at which the dir_infos
1373 		 * lie (subject to alignment by 1 << MALLOC_MINSHIFT)
1374 		 */
1375 		sz = mopts.malloc_mutexes * sizeof(*d) + 2 * MALLOC_PAGESIZE;
1376 		if ((p = MMAPNONE(sz, 0)) == MAP_FAILED)
1377 			wrterror(NULL, "malloc_init mmap1 failed");
1378 		if (mprotect(p + MALLOC_PAGESIZE, mopts.malloc_mutexes * sizeof(*d),
1379 		    PROT_READ | PROT_WRITE))
1380 			wrterror(NULL, "malloc_init mprotect1 failed");
1381 		if (mimmutable(p, sz))
1382 			wrterror(NULL, "malloc_init mimmutable1 failed");
1383 		d_avail = (((mopts.malloc_mutexes * sizeof(*d) + MALLOC_PAGEMASK) &
1384 		    ~MALLOC_PAGEMASK) - (mopts.malloc_mutexes * sizeof(*d))) >>
1385 		    MALLOC_MINSHIFT;
1386 		d = (struct dir_info *)(p + MALLOC_PAGESIZE +
1387 		    (arc4random_uniform(d_avail) << MALLOC_MINSHIFT));
1388 		STATS_ADD(d[1].malloc_used, sz);
1389 		for (i = 0; i < mopts.malloc_mutexes; i++)
1390 			mopts.malloc_pool[i] = &d[i];
1391 		mopts.internal_funcs = 1;
1392 		if (((uintptr_t)&malloc_readonly & MALLOC_PAGEMASK) == 0) {
1393 			if (mprotect(&malloc_readonly, sizeof(malloc_readonly),
1394 			    PROT_READ))
1395 				wrterror(NULL, "malloc_init mprotect r/o failed");
1396 			if (mimmutable(&malloc_readonly, sizeof(malloc_readonly)))
1397 				wrterror(NULL, "malloc_init mimmutable r/o failed");
1398 		}
1399 	}
1400 
1401 	nmutexes = from_rthreads ? mopts.malloc_mutexes : 2;
1402 	for (i = 0; i < nmutexes; i++) {
1403 		d = mopts.malloc_pool[i];
1404 		d->malloc_mt = from_rthreads;
1405 		if (d->canary1 == ~d->canary2)
1406 			continue;
1407 		if (i == 0) {
1408 			omalloc_poolinit(d, MAP_CONCEAL);
1409 			d->malloc_junk = 2;
1410 			d->bigcache_size = 0;
1411 			for (j = 0; j < MAX_SMALLCACHEABLE_SIZE; j++)
1412 				d->smallcache[j].max = 0;
1413 		} else {
1414 			size_t sz = 0;
1415 
1416 			omalloc_poolinit(d, 0);
1417 			d->malloc_junk = mopts.def_malloc_junk;
1418 			d->bigcache_size = mopts.def_maxcache;
1419 			for (j = 0; j < MAX_SMALLCACHEABLE_SIZE; j++) {
1420 				d->smallcache[j].max =
1421 				    mopts.def_maxcache >> (j / 8);
1422 				sz += d->smallcache[j].max * sizeof(void *);
1423 			}
1424 			sz += d->bigcache_size * sizeof(struct bigcache);
1425 			if (sz > 0) {
1426 				void *p = MMAP(sz, 0);
1427 				if (p == MAP_FAILED)
1428 					wrterror(NULL,
1429 					    "malloc_init mmap2 failed");
1430 				if (mimmutable(p, sz))
1431 					wrterror(NULL, "malloc_init mimmutable2 failed");
1432 				for (j = 0; j < MAX_SMALLCACHEABLE_SIZE; j++) {
1433 					d->smallcache[j].pages = p;
1434 					p = (char *)p + d->smallcache[j].max *
1435 					    sizeof(void *);
1436 				}
1437 				d->bigcache = p;
1438 			}
1439 		}
1440 		d->mutex = i;
1441 	}
1442 
1443 	_MALLOC_UNLOCK(1);
1444 }
1445 DEF_STRONG(_malloc_init);
1446 
1447 #define PROLOGUE(p, fn)			\
1448 	d = (p); 			\
1449 	if (d == NULL) { 		\
1450 		_malloc_init(0);	\
1451 		d = (p);		\
1452 	}				\
1453 	_MALLOC_LOCK(d->mutex);		\
1454 	d->func = fn;			\
1455 	if (d->active++) {		\
1456 		malloc_recurse(d);	\
1457 		return NULL;		\
1458 	}				\
1459 
1460 #define EPILOGUE()				\
1461 	d->active--;				\
1462 	_MALLOC_UNLOCK(d->mutex);		\
1463 	if (r == NULL && mopts.malloc_xmalloc)	\
1464 		wrterror(d, "out of memory");	\
1465 	if (r != NULL)				\
1466 		errno = saved_errno;		\
1467 
1468 void *
1469 malloc(size_t size)
1470 {
1471 	void *r;
1472 	struct dir_info *d;
1473 	int saved_errno = errno;
1474 
1475 	PROLOGUE(getpool(), "malloc")
1476 	r = omalloc(d, size, 0, caller());
1477 	EPILOGUE()
1478 	return r;
1479 }
1480 DEF_STRONG(malloc);
1481 
1482 void *
1483 malloc_conceal(size_t size)
1484 {
1485 	void *r;
1486 	struct dir_info *d;
1487 	int saved_errno = errno;
1488 
1489 	PROLOGUE(mopts.malloc_pool[0], "malloc_conceal")
1490 	r = omalloc(d, size, 0, caller());
1491 	EPILOGUE()
1492 	return r;
1493 }
1494 DEF_WEAK(malloc_conceal);
1495 
1496 static struct region_info *
1497 findpool(void *p, struct dir_info *argpool, struct dir_info **foundpool,
1498     char **saved_function)
1499 {
1500 	struct dir_info *pool = argpool;
1501 	struct region_info *r = find(pool, p);
1502 
1503 	STATS_INC(pool->pool_searches);
1504 	if (r == NULL) {
1505 		u_int i, nmutexes;
1506 
1507 		nmutexes = mopts.malloc_pool[1]->malloc_mt ? mopts.malloc_mutexes : 2;
1508 		STATS_INC(pool->other_pool);
1509 		for (i = 1; i < nmutexes; i++) {
1510 			u_int j = (argpool->mutex + i) & (nmutexes - 1);
1511 
1512 			pool->active--;
1513 			_MALLOC_UNLOCK(pool->mutex);
1514 			pool = mopts.malloc_pool[j];
1515 			_MALLOC_LOCK(pool->mutex);
1516 			pool->active++;
1517 			r = find(pool, p);
1518 			if (r != NULL) {
1519 				*saved_function = pool->func;
1520 				pool->func = argpool->func;
1521 				break;
1522 			}
1523 		}
1524 		if (r == NULL)
1525 			wrterror(argpool, "bogus pointer (double free?) %p", p);
1526 	}
1527 	*foundpool = pool;
1528 	return r;
1529 }
1530 
1531 static void
1532 ofree(struct dir_info **argpool, void *p, int clear, int check, size_t argsz)
1533 {
1534 	struct region_info *r;
1535 	struct dir_info *pool;
1536 	char *saved_function;
1537 	size_t sz;
1538 
1539 	r = findpool(p, *argpool, &pool, &saved_function);
1540 
1541 	REALSIZE(sz, r);
1542 	if (pool->mmap_flag) {
1543 		clear = 1;
1544 		if (!check) {
1545 			argsz = sz;
1546 			if (sz > MALLOC_MAXCHUNK)
1547 				argsz -= mopts.malloc_guard;
1548 		}
1549 	}
1550 	if (check) {
1551 		if (sz <= MALLOC_MAXCHUNK) {
1552 			if (mopts.chunk_canaries && sz > 0) {
1553 				struct chunk_info *info =
1554 				    (struct chunk_info *)r->size;
1555 				uint32_t chunknum =
1556 				    find_chunknum(pool, info, p, 0);
1557 
1558 				if (info->bits[info->offset + chunknum] < argsz)
1559 					wrterror(pool, "recorded size %hu"
1560 					    " < %zu",
1561 					    info->bits[info->offset + chunknum],
1562 					    argsz);
1563 			} else {
1564 				if (sz < argsz)
1565 					wrterror(pool, "chunk size %zu < %zu",
1566 					    sz, argsz);
1567 			}
1568 		} else if (sz - mopts.malloc_guard < argsz) {
1569 			wrterror(pool, "recorded size %zu < %zu",
1570 			    sz - mopts.malloc_guard, argsz);
1571 		}
1572 	}
1573 	if (sz > MALLOC_MAXCHUNK) {
1574 		if (!MALLOC_MOVE_COND(sz)) {
1575 			if (r->p != p)
1576 				wrterror(pool, "bogus pointer %p", p);
1577 			if (mopts.chunk_canaries)
1578 				validate_canary(pool, p,
1579 				    sz - mopts.malloc_guard,
1580 				    PAGEROUND(sz - mopts.malloc_guard));
1581 		} else {
1582 			/* shifted towards the end */
1583 			if (p != MALLOC_MOVE(r->p, sz))
1584 				wrterror(pool, "bogus moved pointer %p", p);
1585 			p = r->p;
1586 		}
1587 		if (mopts.malloc_guard) {
1588 			if (sz < mopts.malloc_guard)
1589 				wrterror(pool, "guard size");
1590 			if (!mopts.malloc_freeunmap) {
1591 				if (mprotect((char *)p + PAGEROUND(sz) -
1592 				    mopts.malloc_guard, mopts.malloc_guard,
1593 				    PROT_READ | PROT_WRITE))
1594 					wrterror(pool, "mprotect");
1595 			}
1596 			STATS_SUB(pool->malloc_guarded, mopts.malloc_guard);
1597 		}
1598 		unmap(pool, p, PAGEROUND(sz), clear ? argsz : 0);
1599 		delete(pool, r);
1600 	} else {
1601 		void *tmp;
1602 		u_int i;
1603 
1604 		/* Validate and optionally canary check */
1605 		struct chunk_info *info = (struct chunk_info *)r->size;
1606 		if (B2SIZE(info->bucket) != sz)
1607 			wrterror(pool, "internal struct corrupt");
1608 		find_chunknum(pool, info, p, mopts.chunk_canaries);
1609 
1610 		if (mopts.malloc_freecheck) {
1611 			for (i = 0; i <= MALLOC_DELAYED_CHUNK_MASK; i++) {
1612 				tmp = pool->delayed_chunks[i];
1613 				if (tmp == p)
1614 					wrterror(pool,
1615 					    "double free %p", p);
1616 				if (tmp != NULL) {
1617 					size_t tmpsz;
1618 
1619 					r = find(pool, tmp);
1620 					if (r == NULL)
1621 						wrterror(pool,
1622 						    "bogus pointer ("
1623 						    "double free?) %p", tmp);
1624 					REALSIZE(tmpsz, r);
1625 					validate_junk(pool, tmp, tmpsz);
1626 				}
1627 			}
1628 		}
1629 
1630 		if (clear && argsz > 0)
1631 			explicit_bzero(p, argsz);
1632 		junk_free(pool->malloc_junk, p, sz);
1633 
1634 		i = getrbyte(pool) & MALLOC_DELAYED_CHUNK_MASK;
1635 		tmp = p;
1636 		p = pool->delayed_chunks[i];
1637 		if (tmp == p)
1638 			wrterror(pool, "double free %p", p);
1639 		pool->delayed_chunks[i] = tmp;
1640 		if (p != NULL) {
1641 			r = find(pool, p);
1642 			if (r == NULL)
1643 				wrterror(pool,
1644 				    "bogus pointer (double free?) %p", p);
1645 			if (!mopts.malloc_freecheck) {
1646 				REALSIZE(sz, r);
1647 				validate_junk(pool, p, sz);
1648 			}
1649 			free_bytes(pool, r, p);
1650 		}
1651 	}
1652 
1653 	if (*argpool != pool) {
1654 		pool->func = saved_function;
1655 		*argpool = pool;
1656 	}
1657 }
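
/*
 * The delayed-chunk dance above means a freed chunk is not recycled
 * immediately: it is junk-filled and swapped into a random slot of
 * pool->delayed_chunks[], and the chunk evicted from that slot is
 * validated and handed to free_bytes(), which makes use-after-free of
 * recently freed chunks more detectable.
 */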
1658 
1659 void
1660 free(void *ptr)
1661 {
1662 	struct dir_info *d;
1663 	int saved_errno = errno;
1664 
1665 	/* This is legal. */
1666 	if (ptr == NULL)
1667 		return;
1668 
1669 	d = getpool();
1670 	if (d == NULL)
1671 		wrterror(d, "free() called before allocation");
1672 	_MALLOC_LOCK(d->mutex);
1673 	d->func = "free";
1674 	if (d->active++) {
1675 		malloc_recurse(d);
1676 		return;
1677 	}
1678 	ofree(&d, ptr, 0, 0, 0);
1679 	d->active--;
1680 	_MALLOC_UNLOCK(d->mutex);
1681 	errno = saved_errno;
1682 }
1683 DEF_STRONG(free);
1684 
1685 static void
1686 freezero_p(void *ptr, size_t sz)
1687 {
1688 	explicit_bzero(ptr, sz);
1689 	free(ptr);
1690 }
1691 
1692 void
1693 freezero(void *ptr, size_t sz)
1694 {
1695 	struct dir_info *d;
1696 	int saved_errno = errno;
1697 
1698 	/* This is legal. */
1699 	if (ptr == NULL)
1700 		return;
1701 
1702 	if (!mopts.internal_funcs) {
1703 		freezero_p(ptr, sz);
1704 		return;
1705 	}
1706 
1707 	d = getpool();
1708 	if (d == NULL)
1709 		wrterror(d, "freezero() called before allocation");
1710 	_MALLOC_LOCK(d->mutex);
1711 	d->func = "freezero";
1712 	if (d->active++) {
1713 		malloc_recurse(d);
1714 		return;
1715 	}
1716 	ofree(&d, ptr, 1, 1, sz);
1717 	d->active--;
1718 	_MALLOC_UNLOCK(d->mutex);
1719 	errno = saved_errno;
1720 }
1721 DEF_WEAK(freezero);
1722 
1723 static void *
1724 orealloc(struct dir_info **argpool, void *p, size_t newsz, void *f)
1725 {
1726 	struct region_info *r;
1727 	struct dir_info *pool;
1728 	char *saved_function;
1729 	struct chunk_info *info;
1730 	size_t oldsz, goldsz, gnewsz;
1731 	void *q, *ret;
1732 	uint32_t chunknum;
1733 	int forced;
1734 
1735 	if (p == NULL)
1736 		return omalloc(*argpool, newsz, 0, f);
1737 
1738 	if (newsz >= SIZE_MAX - mopts.malloc_guard - MALLOC_PAGESIZE) {
1739 		errno = ENOMEM;
1740 		return  NULL;
1741 	}
1742 
1743 	r = findpool(p, *argpool, &pool, &saved_function);
1744 
1745 	REALSIZE(oldsz, r);
1746 	if (oldsz <= MALLOC_MAXCHUNK) {
1747 		if (DO_STATS || mopts.chunk_canaries) {
1748 			info = (struct chunk_info *)r->size;
1749 			chunknum = find_chunknum(pool, info, p, 0);
1750 		}
1751 	}
1752 
1753 	goldsz = oldsz;
1754 	if (oldsz > MALLOC_MAXCHUNK) {
1755 		if (oldsz < mopts.malloc_guard)
1756 			wrterror(pool, "guard size");
1757 		oldsz -= mopts.malloc_guard;
1758 	}
1759 
1760 	gnewsz = newsz;
1761 	if (gnewsz > MALLOC_MAXCHUNK)
1762 		gnewsz += mopts.malloc_guard;
1763 
1764 	forced = mopts.malloc_realloc || pool->mmap_flag;
1765 	if (newsz > MALLOC_MAXCHUNK && oldsz > MALLOC_MAXCHUNK && !forced) {
1766 		/* First case: from an n-page-sized allocation to an
1767 		   m-page-sized allocation, m > n */
1768 		size_t roldsz = PAGEROUND(goldsz);
1769 		size_t rnewsz = PAGEROUND(gnewsz);
1770 
1771 		if (rnewsz < roldsz && rnewsz > roldsz / 2 &&
1772 		    roldsz - rnewsz < mopts.def_maxcache * MALLOC_PAGESIZE &&
1773 		    !mopts.malloc_guard) {
1774 
1775 			ret = p;
1776 			goto done;
1777 		}
1778 
1779 		if (rnewsz > roldsz) {
1780 			/* try to extend existing region */
1781 			if (!mopts.malloc_guard) {
1782 				void *hint = (char *)r->p + roldsz;
1783 				size_t needed = rnewsz - roldsz;
1784 
1785 				STATS_INC(pool->cheap_realloc_tries);
1786 				q = MMAPA(hint, needed, MAP_FIXED | __MAP_NOREPLACE | pool->mmap_flag);
1787 				if (q == hint) {
1788 					STATS_ADD(pool->malloc_used, needed);
1789 					if (pool->malloc_junk == 2)
1790 						memset(q, SOME_JUNK, needed);
1791 					r->size = gnewsz;
1792 					if (r->p != p) {
1793 						/* old pointer is moved */
1794 						memmove(r->p, p, oldsz);
1795 						p = r->p;
1796 					}
1797 					if (mopts.chunk_canaries)
1798 						fill_canary(p, newsz,
1799 						    PAGEROUND(newsz));
1800 					STATS_SETF(r, f);
1801 					STATS_INC(pool->cheap_reallocs);
1802 					ret = p;
1803 					goto done;
1804 				}
1805 			}
1806 		} else if (rnewsz < roldsz) {
1807 			/* shrink number of pages */
1808 			if (mopts.malloc_guard) {
1809 				if (mprotect((char *)r->p + rnewsz -
1810 				    mopts.malloc_guard, mopts.malloc_guard,
1811 				    PROT_NONE))
1812 					wrterror(pool, "mprotect");
1813 			}
1814 			if (munmap((char *)r->p + rnewsz, roldsz - rnewsz))
1815 				wrterror(pool, "munmap %p", (char *)r->p +
1816 				    rnewsz);
1817 			STATS_SUB(pool->malloc_used, roldsz - rnewsz);
1818 			r->size = gnewsz;
1819 			if (MALLOC_MOVE_COND(gnewsz)) {
1820 				void *pp = MALLOC_MOVE(r->p, gnewsz);
1821 				memmove(pp, p, newsz);
1822 				p = pp;
1823 			} else if (mopts.chunk_canaries)
1824 				fill_canary(p, newsz, PAGEROUND(newsz));
1825 			STATS_SETF(r, f);
1826 			ret = p;
1827 			goto done;
1828 		} else {
1829 			/* number of pages remains the same */
1830 			void *pp = r->p;
1831 
1832 			r->size = gnewsz;
1833 			if (MALLOC_MOVE_COND(gnewsz))
1834 				pp = MALLOC_MOVE(r->p, gnewsz);
1835 			if (p != pp) {
1836 				memmove(pp, p, oldsz < newsz ? oldsz : newsz);
1837 				p = pp;
1838 			}
1839 			if (p == r->p) {
1840 				if (newsz > oldsz && pool->malloc_junk == 2)
1841 					memset((char *)p + newsz, SOME_JUNK,
1842 					    rnewsz - mopts.malloc_guard -
1843 					    newsz);
1844 				if (mopts.chunk_canaries)
1845 					fill_canary(p, newsz, PAGEROUND(newsz));
1846 			}
1847 			STATS_SETF(r, f);
1848 			ret = p;
1849 			goto done;
1850 		}
1851 	}
1852 	if (oldsz <= MALLOC_MAXCHUNK && oldsz > 0 &&
1853 	    newsz <= MALLOC_MAXCHUNK && newsz > 0 &&
1854 	    !forced && find_bucket(newsz) == find_bucket(oldsz)) {
1855 		/* do not reallocate if the new size fits well in the existing chunk */
1856 		if (pool->malloc_junk == 2)
1857 			memset((char *)p + newsz, SOME_JUNK, oldsz - newsz);
1858 		if (mopts.chunk_canaries) {
1859 			info->bits[info->offset + chunknum] = newsz;
1860 			fill_canary(p, newsz, B2SIZE(info->bucket));
1861 		}
1862 		if (DO_STATS && chunknum == 0)
1863 			STATS_SETF(r, f);
1864 		ret = p;
1865 	} else if (newsz != oldsz || forced) {
1866 		/* create new allocation */
1867 		q = omalloc(pool, newsz, 0, f);
1868 		if (q == NULL) {
1869 			ret = NULL;
1870 			goto done;
1871 		}
1872 		if (newsz != 0 && oldsz != 0)
1873 			memcpy(q, p, oldsz < newsz ? oldsz : newsz);
1874 		ofree(&pool, p, 0, 0, 0);
1875 		ret = q;
1876 	} else {
1877 		/* oldsz == newsz */
1878 		if (newsz != 0)
1879 			wrterror(pool, "realloc internal inconsistency");
1880 		if (DO_STATS && chunknum == 0)
1881 			STATS_SETF(r, f);
1882 		ret = p;
1883 	}
1884 done:
1885 	if (*argpool != pool) {
1886 		pool->func = saved_function;
1887 		*argpool = pool;
1888 	}
1889 	return ret;
1890 }
1891 
1892 void *
1893 realloc(void *ptr, size_t size)
1894 {
1895 	struct dir_info *d;
1896 	void *r;
1897 	int saved_errno = errno;
1898 
1899 	PROLOGUE(getpool(), "realloc")
1900 	r = orealloc(&d, ptr, size, caller());
1901 	EPILOGUE()
1902 	return r;
1903 }
1904 DEF_STRONG(realloc);
1905 
1906 /*
1907  * This is sqrt(SIZE_MAX+1), as s1*s2 <= SIZE_MAX
1908  * if both s1 < MUL_NO_OVERFLOW and s2 < MUL_NO_OVERFLOW
1909  */
1910 #define MUL_NO_OVERFLOW	(1UL << (sizeof(size_t) * 4))
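
/*
 * Overflow-check sketch: on a 64-bit system MUL_NO_OVERFLOW is 2^32,
 * so nmemb * size can only overflow when at least one operand is
 * >= 2^32; only then is the slower SIZE_MAX / nmemb < size division
 * performed, as in the calloc() below.
 */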
1911 
1912 void *
1913 calloc(size_t nmemb, size_t size)
1914 {
1915 	struct dir_info *d;
1916 	void *r;
1917 	int saved_errno = errno;
1918 
1919 	PROLOGUE(getpool(), "calloc")
1920 	if ((nmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
1921 	    nmemb > 0 && SIZE_MAX / nmemb < size) {
1922 		d->active--;
1923 		_MALLOC_UNLOCK(d->mutex);
1924 		if (mopts.malloc_xmalloc)
1925 			wrterror(d, "out of memory");
1926 		errno = ENOMEM;
1927 		return NULL;
1928 	}
1929 
1930 	size *= nmemb;
1931 	r = omalloc(d, size, 1, caller());
1932 	EPILOGUE()
1933 	return r;
1934 }
1935 DEF_STRONG(calloc);
1936 
1937 void *
1938 calloc_conceal(size_t nmemb, size_t size)
1939 {
1940 	struct dir_info *d;
1941 	void *r;
1942 	int saved_errno = errno;
1943 
1944 	PROLOGUE(mopts.malloc_pool[0], "calloc_conceal")
1945 	if ((nmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
1946 	    nmemb > 0 && SIZE_MAX / nmemb < size) {
1947 		d->active--;
1948 		_MALLOC_UNLOCK(d->mutex);
1949 		if (mopts.malloc_xmalloc)
1950 			wrterror(d, "out of memory");
1951 		errno = ENOMEM;
1952 		return NULL;
1953 	}
1954 
1955 	size *= nmemb;
1956 	r = omalloc(d, size, 1, caller());
1957 	EPILOGUE()
1958 	return r;
1959 }
1960 DEF_WEAK(calloc_conceal);
1961 
1962 static void *
1963 orecallocarray(struct dir_info **argpool, void *p, size_t oldsize,
1964     size_t newsize, void *f)
1965 {
1966 	struct region_info *r;
1967 	struct dir_info *pool;
1968 	char *saved_function;
1969 	void *newptr;
1970 	size_t sz;
1971 
1972 	if (p == NULL)
1973 		return omalloc(*argpool, newsize, 1, f);
1974 
1975 	if (oldsize == newsize)
1976 		return p;
1977 
1978 	r = findpool(p, *argpool, &pool, &saved_function);
1979 
1980 	REALSIZE(sz, r);
1981 	if (sz <= MALLOC_MAXCHUNK) {
1982 		if (mopts.chunk_canaries && sz > 0) {
1983 			struct chunk_info *info = (struct chunk_info *)r->size;
1984 			uint32_t chunknum = find_chunknum(pool, info, p, 0);
1985 
1986 			if (info->bits[info->offset + chunknum] != oldsize)
1987 				wrterror(pool, "recorded size %hu != %zu",
1988 				    info->bits[info->offset + chunknum],
1989 				    oldsize);
1990 		} else {
1991 			if (sz < oldsize)
1992 				wrterror(pool, "chunk size %zu < %zu",
1993 				    sz, oldsize);
1994 		}
1995 	} else {
1996 		if (sz - mopts.malloc_guard < oldsize)
1997 			wrterror(pool, "recorded size %zu < %zu",
1998 			    sz - mopts.malloc_guard, oldsize);
1999 		if (oldsize < (sz - mopts.malloc_guard) / 2)
2000 			wrterror(pool, "recorded size %zu inconsistent with %zu",
2001 			    sz - mopts.malloc_guard, oldsize);
2002 	}
2003 
2004 	newptr = omalloc(pool, newsize, 0, f);
2005 	if (newptr == NULL)
2006 		goto done;
2007 
2008 	if (newsize > oldsize) {
2009 		memcpy(newptr, p, oldsize);
2010 		memset((char *)newptr + oldsize, 0, newsize - oldsize);
2011 	} else
2012 		memcpy(newptr, p, newsize);
2013 
2014 	ofree(&pool, p, 1, 0, oldsize);
2015 
2016 done:
2017 	if (*argpool != pool) {
2018 		pool->func = saved_function;
2019 		*argpool = pool;
2020 	}
2021 
2022 	return newptr;
2023 }
2024 
2025 static void *
2026 recallocarray_p(void *ptr, size_t oldnmemb, size_t newnmemb, size_t size)
2027 {
2028 	size_t oldsize, newsize;
2029 	void *newptr;
2030 
2031 	if (ptr == NULL)
2032 		return calloc(newnmemb, size);
2033 
2034 	if ((newnmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
2035 	    newnmemb > 0 && SIZE_MAX / newnmemb < size) {
2036 		errno = ENOMEM;
2037 		return NULL;
2038 	}
2039 	newsize = newnmemb * size;
2040 
2041 	if ((oldnmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
2042 	    oldnmemb > 0 && SIZE_MAX / oldnmemb < size) {
2043 		errno = EINVAL;
2044 		return NULL;
2045 	}
2046 	oldsize = oldnmemb * size;
2047 
2048 	/*
2049 	 * Don't bother too much if we're shrinking just a bit;
2050 	 * we do not shrink for a series of small steps, oh well.
2051 	 */
2052 	if (newsize <= oldsize) {
2053 		size_t d = oldsize - newsize;
2054 
2055 		if (d < oldsize / 2 && d < MALLOC_PAGESIZE) {
2056 			memset((char *)ptr + newsize, 0, d);
2057 			return ptr;
2058 		}
2059 	}
2060 
2061 	newptr = malloc(newsize);
2062 	if (newptr == NULL)
2063 		return NULL;
2064 
2065 	if (newsize > oldsize) {
2066 		memcpy(newptr, ptr, oldsize);
2067 		memset((char *)newptr + oldsize, 0, newsize - oldsize);
2068 	} else
2069 		memcpy(newptr, ptr, newsize);
2070 
2071 	explicit_bzero(ptr, oldsize);
2072 	free(ptr);
2073 
2074 	return newptr;
2075 }
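
/*
 * Editor's note: a worked example of the shrink heuristic above,
 * assuming a 4096-byte page.  Shrinking from 1000 to 900 bytes gives
 * d = 100, which is both < 500 (half of oldsize) and < 4096, so the
 * tail is zeroed and the same pointer is returned.  Shrinking from
 * 1000 to 400 gives d = 600 >= 500, so a fresh allocation is made.
 */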
2076 
2077 void *
2078 recallocarray(void *ptr, size_t oldnmemb, size_t newnmemb, size_t size)
2079 {
2080 	struct dir_info *d;
2081 	size_t oldsize = 0, newsize;
2082 	void *r;
2083 	int saved_errno = errno;
2084 
2085 	if (!mopts.internal_funcs)
2086 		return recallocarray_p(ptr, oldnmemb, newnmemb, size);
2087 
2088 	PROLOGUE(getpool(), "recallocarray")
2089 
2090 	if ((newnmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
2091 	    newnmemb > 0 && SIZE_MAX / newnmemb < size) {
2092 		d->active--;
2093 		_MALLOC_UNLOCK(d->mutex);
2094 		if (mopts.malloc_xmalloc)
2095 			wrterror(d, "out of memory");
2096 		errno = ENOMEM;
2097 		return NULL;
2098 	}
2099 	newsize = newnmemb * size;
2100 
2101 	if (ptr != NULL) {
2102 		if ((oldnmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
2103 		    oldnmemb > 0 && SIZE_MAX / oldnmemb < size) {
2104 			d->active--;
2105 			_MALLOC_UNLOCK(d->mutex);
2106 			errno = EINVAL;
2107 			return NULL;
2108 		}
2109 		oldsize = oldnmemb * size;
2110 	}
2111 
2112 	r = orecallocarray(&d, ptr, oldsize, newsize, caller());
2113 	EPILOGUE()
2114 	return r;
2115 }
2116 DEF_WEAK(recallocarray);
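
/*
 * Editor's note: a usage sketch for recallocarray(3) (under #if 0);
 * grow_table() and struct rec are hypothetical.  The old member count
 * must be passed back in: it is used to check the recorded size, to
 * zero the tail of a grown array, and to clear the old region before
 * it is freed.
 */
#if 0
#include <stdlib.h>

struct rec { int id; };

static struct rec *
grow_table(struct rec *v, size_t oldn, size_t newn)
{
	struct rec *nv;

	nv = recallocarray(v, oldn, newn, sizeof(*v));
	if (nv == NULL)
		return NULL;	/* v is untouched and still valid */
	return nv;		/* members oldn..newn-1 read as zero */
}
#endif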
2117 
2118 static void *
2119 mapalign(struct dir_info *d, size_t alignment, size_t sz, int zero_fill)
2120 {
2121 	char *p, *q;
2122 
2123 	if (alignment < MALLOC_PAGESIZE || ((alignment - 1) & alignment) != 0)
2124 		wrterror(d, "mapalign bad alignment");
2125 	if (sz != PAGEROUND(sz))
2126 		wrterror(d, "mapalign round");
2127 
2128 	/* Allocate sz + alignment bytes of memory, which must include a
2129 	 * subrange of sz bytes that is properly aligned.  Unmap the
2130 	 * other bytes, and then return that subrange.
2131 	 */
2132 
2133 	/* We need sz + alignment to fit into a size_t. */
2134 	if (alignment > SIZE_MAX - sz)
2135 		return MAP_FAILED;
2136 
2137 	p = map(d, sz + alignment, zero_fill);
2138 	if (p == MAP_FAILED)
2139 		return MAP_FAILED;
2140 	q = (char *)(((uintptr_t)p + alignment - 1) & ~(alignment - 1));
2141 	if (q != p) {
2142 		if (munmap(p, q - p))
2143 			wrterror(d, "munmap %p", p);
2144 	}
2145 	if (munmap(q + sz, alignment - (q - p)))
2146 		wrterror(d, "munmap %p", q + sz);
2147 	STATS_SUB(d->malloc_used, alignment);
2148 
2149 	return q;
2150 }
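
/*
 * Editor's note: a stand-alone sketch (under #if 0) of the
 * over-allocate-and-trim technique used by mapalign() above;
 * mmap_aligned() is a hypothetical helper, and it assumes alignment
 * is a power of two of at least a page and sz is page-rounded.
 */
#if 0
#include <sys/mman.h>
#include <stdint.h>

static void *
mmap_aligned(size_t alignment, size_t sz)
{
	char *p, *q;

	if (alignment > SIZE_MAX - sz)	/* sz + alignment must fit */
		return MAP_FAILED;
	p = mmap(NULL, sz + alignment, PROT_READ | PROT_WRITE,
	    MAP_ANON | MAP_PRIVATE, -1, 0);
	if (p == MAP_FAILED)
		return MAP_FAILED;
	/* First aligned address at or above p. */
	q = (char *)(((uintptr_t)p + alignment - 1) & ~(alignment - 1));
	if (q != p)
		(void)munmap(p, q - p);			/* drop the head */
	(void)munmap(q + sz, alignment - (q - p));	/* drop the tail */
	return q;
}
#endif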
2151 
2152 static void *
2153 omemalign(struct dir_info *pool, size_t alignment, size_t sz, int zero_fill,
2154     void *f)
2155 {
2156 	size_t psz;
2157 	void *p;
2158 
2159 	/* If between half a page and a page, avoid MALLOC_MOVE. */
2160 	if (sz > MALLOC_MAXCHUNK && sz < MALLOC_PAGESIZE)
2161 		sz = MALLOC_PAGESIZE;
2162 	if (alignment <= MALLOC_PAGESIZE) {
2163 		size_t pof2;
2164 		/*
2165 		 * max(sz, alignment) rounded up to a power of 2 is enough
2166 		 * to ensure the requested alignment. Large regions are
2167 		 * always page aligned.
2168 		 */
2169 		if (sz < alignment)
2170 			sz = alignment;
2171 		if (sz < MALLOC_PAGESIZE) {
2172 			pof2 = MALLOC_MINSIZE;
2173 			while (pof2 < sz)
2174 				pof2 <<= 1;
2175 		} else
2176 			pof2 = sz;
2177 		return omalloc(pool, pof2, zero_fill, f);
2178 	}
2179 
2180 	if (sz >= SIZE_MAX - mopts.malloc_guard - MALLOC_PAGESIZE) {
2181 		errno = ENOMEM;
2182 		return NULL;
2183 	}
2184 
2185 	if (sz < MALLOC_PAGESIZE)
2186 		sz = MALLOC_PAGESIZE;
2187 	sz += mopts.malloc_guard;
2188 	psz = PAGEROUND(sz);
2189 
2190 	p = mapalign(pool, alignment, psz, zero_fill);
2191 	if (p == MAP_FAILED) {
2192 		errno = ENOMEM;
2193 		return NULL;
2194 	}
2195 
2196 	if (insert(pool, p, sz, f)) {
2197 		unmap(pool, p, psz, 0);
2198 		errno = ENOMEM;
2199 		return NULL;
2200 	}
2201 
2202 	if (mopts.malloc_guard) {
2203 		if (mprotect((char *)p + psz - mopts.malloc_guard,
2204 		    mopts.malloc_guard, PROT_NONE))
2205 			wrterror(pool, "mprotect");
2206 		STATS_ADD(pool->malloc_guarded, mopts.malloc_guard);
2207 	}
2208 
2209 	if (pool->malloc_junk == 2) {
2210 		if (zero_fill)
2211 			memset((char *)p + sz - mopts.malloc_guard,
2212 			    SOME_JUNK, psz - sz);
2213 		else
2214 			memset(p, SOME_JUNK, psz - mopts.malloc_guard);
2215 	} else if (mopts.chunk_canaries)
2216 		fill_canary(p, sz - mopts.malloc_guard,
2217 		    psz - mopts.malloc_guard);
2218 
2219 	return p;
2220 }
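
/*
 * Editor's note: a stand-alone sketch (under #if 0) of the rounding
 * step in the small-alignment fast path of omemalign() above;
 * roundup_pow2() is a hypothetical helper.  It relies on what the
 * comment in that path asserts: an allocation of power-of-2 size is
 * already aligned to that size.
 */
#if 0
#include <stddef.h>

static size_t
roundup_pow2(size_t sz, size_t minsize)
{
	size_t pof2 = minsize;	/* minsize is assumed a power of 2 */

	while (pof2 < sz)
		pof2 <<= 1;
	return pof2;		/* e.g. sz = 48 rounds up to 64 */
}
#endif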
2221 
2222 int
2223 posix_memalign(void **memptr, size_t alignment, size_t size)
2224 {
2225 	struct dir_info *d;
2226 	int res, saved_errno = errno;
2227 	void *r;
2228 
2229 	/* Make sure that alignment is a large enough power of 2. */
2230 	if (((alignment - 1) & alignment) != 0 || alignment < sizeof(void *))
2231 		return EINVAL;
2232 
2233 	d = getpool();
2234 	if (d == NULL) {
2235 		_malloc_init(0);
2236 		d = getpool();
2237 	}
2238 	_MALLOC_LOCK(d->mutex);
2239 	d->func = "posix_memalign";
2240 	if (d->active++) {
2241 		malloc_recurse(d);
2242 		goto err;
2243 	}
2244 	r = omemalign(d, alignment, size, 0, caller());
2245 	d->active--;
2246 	_MALLOC_UNLOCK(d->mutex);
2247 	if (r == NULL) {
2248 		if (mopts.malloc_xmalloc)
2249 			wrterror(d, "out of memory");
2250 		goto err;
2251 	}
2252 	errno = saved_errno;
2253 	*memptr = r;
2254 	return 0;
2255 
2256 err:
2257 	res = errno;
2258 	errno = saved_errno;
2259 	return res;
2260 }
2261 DEF_STRONG(posix_memalign);
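
/*
 * Editor's note: ((a - 1) & a) is zero exactly when a is 0 or a power
 * of two, since subtracting 1 flips the lowest set bit and everything
 * below it.  A usage sketch for posix_memalign(3) follows (under
 * #if 0); alloc_cacheline() is a hypothetical helper.
 */
#if 0
#include <stdlib.h>

static void *
alloc_cacheline(size_t len)
{
	void *p;

	/* 64 is a power of 2 and >= sizeof(void *), so this is valid. */
	if (posix_memalign(&p, 64, len) != 0)
		return NULL;	/* failure is the return value, not errno */
	return p;
}
#endif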
2262 
2263 void *
2264 aligned_alloc(size_t alignment, size_t size)
2265 {
2266 	struct dir_info *d;
2267 	int saved_errno = errno;
2268 	void *r;
2269 
2270 	/* Make sure that alignment is a positive power of 2. */
2271 	if (((alignment - 1) & alignment) != 0 || alignment == 0) {
2272 		errno = EINVAL;
2273 		return NULL;
2274 	}
2275 	/* Per spec, size should be a multiple of alignment */
2276 	if ((size & (alignment - 1)) != 0) {
2277 		errno = EINVAL;
2278 		return NULL;
2279 	}
2280 
2281 	PROLOGUE(getpool(), "aligned_alloc")
2282 	r = omemalign(d, alignment, size, 0, caller());
2283 	EPILOGUE()
2284 	return r;
2285 }
2286 DEF_STRONG(aligned_alloc);
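
/*
 * Editor's note: a usage sketch for aligned_alloc(3) (under #if 0);
 * aligned_buf() is a hypothetical helper.  Unlike posix_memalign(),
 * errors are reported via errno, and this implementation rejects a
 * size that is not a multiple of the alignment, so the caller rounds
 * up first.
 */
#if 0
#include <stdlib.h>

static void *
aligned_buf(size_t align, size_t len)
{
	/* Round len up to a multiple of the power-of-2 alignment. */
	len = (len + align - 1) & ~(align - 1);
	return aligned_alloc(align, len);
}
#endif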
2287 
2288 #ifdef MALLOC_STATS
2289 
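/*
 * Editor's note: ulog() below accumulates output in a buffer obtained
 * directly with MMAP (malloc itself cannot be used here) and flushes
 * it to ktrace via utrace(2) once a line ends in '\n' or
 * KTR_USER_MAXLEN bytes have been collected.
 */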
2290 static void
2291 ulog(const char *format, ...)
2292 {
2293 	va_list ap;
2294 	static char *buf;
2295 	static size_t filled;
2296 	int len;
2297 
2298 	if (buf == NULL)
2299 		buf = MMAP(KTR_USER_MAXLEN, 0);
2300 	if (buf == MAP_FAILED)
2301 		return;
2302 
2303 	va_start(ap, format);
2304 	len = vsnprintf(buf + filled, KTR_USER_MAXLEN - filled, format, ap);
2305 	va_end(ap);
2306 	if (len < 0)
2307 		return;
2308 	if (len > KTR_USER_MAXLEN - filled)
2309 		len = KTR_USER_MAXLEN - filled;
2310 	filled += len;
2311 	if (filled > 0) {
2312 		if (filled == KTR_USER_MAXLEN || buf[filled - 1] == '\n') {
2313 			utrace("malloc", buf, filled);
2314 			filled = 0;
2315 		}
2316 	}
2317 }
2318 
2319 struct malloc_leak {
2320 	void *f;
2321 	size_t total_size;
2322 	int count;
2323 };
2324 
2325 struct leaknode {
2326 	RBT_ENTRY(leaknode) entry;
2327 	struct malloc_leak d;
2328 };
2329 
2330 static inline int
2331 leakcmp(const struct leaknode *e1, const struct leaknode *e2)
2332 {
2333 	return e1->d.f < e2->d.f ? -1 : e1->d.f > e2->d.f;
2334 }
2335 
2336 RBT_HEAD(leaktree, leaknode);
2337 RBT_PROTOTYPE(leaktree, leaknode, entry, leakcmp);
2338 RBT_GENERATE(leaktree, leaknode, entry, leakcmp);
2339 
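/*
 * Editor's note: putleakinfo() below cannot call malloc() while the
 * allocator is being dumped, so leaknodes are bump-allocated from
 * pages obtained directly with MMAP and are never freed.
 */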
2340 static void
2341 putleakinfo(struct leaktree *leaks, void *f, size_t sz, int cnt)
2342 {
2343 	struct leaknode key, *p;
2344 	static struct leaknode *page;
2345 	static unsigned int used;
2346 
2347 	if (cnt == 0 || page == MAP_FAILED)
2348 		return;
2349 
2350 	key.d.f = f;
2351 	p = RBT_FIND(leaktree, leaks, &key);
2352 	if (p == NULL) {
2353 		if (page == NULL ||
2354 		    used >= MALLOC_PAGESIZE / sizeof(struct leaknode)) {
2355 			page = MMAP(MALLOC_PAGESIZE, 0);
2356 			if (page == MAP_FAILED)
2357 				return;
2358 			used = 0;
2359 		}
2360 		p = &page[used++];
2361 		p->d.f = f;
2362 		p->d.total_size = sz * cnt;
2363 		p->d.count = cnt;
2364 		RBT_INSERT(leaktree, leaks, p);
2365 	} else {
2366 		p->d.total_size += sz * cnt;
2367 		p->d.count += cnt;
2368 	}
2369 }
2370 
2371 static void
2372 dump_leaks(struct leaktree *leaks)
2373 {
2374 	struct leaknode *p;
2375 
2376 	ulog("Leak report:\n");
2377 	ulog("                 f     sum      #    avg\n");
2378 
2379 	RBT_FOREACH(p, leaktree, leaks) {
2380 		Dl_info info;
2381 		const char *caller = p->d.f;
2382 		const char *object = ".";
2383 
2384 		if (caller != NULL) {
2385 			if (dladdr(p->d.f, &info) != 0) {
2386 				caller -= (uintptr_t)info.dli_fbase;
2387 				object = info.dli_fname;
2388 			}
2389 		}
2390 		ulog("%18p %7zu %6u %6zu addr2line -e %s %p\n",
2391 		    p->d.f, p->d.total_size, p->d.count,
2392 		    p->d.total_size / p->d.count,
2393 		    object, caller);
2394 	}
2395 }
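
/*
 * Editor's note: each leak line above ends in a ready-to-run
 * addr2line(1) command; when dladdr(3) succeeds, the caller address
 * has already been rebased by dli_fbase, so it can be resolved
 * against the named object file directly.
 */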
2396 
2397 static void
2398 dump_chunk(struct leaktree *leaks, struct chunk_info *p, void *f,
2399     int fromfreelist)
2400 {
2401 	while (p != NULL) {
2402 		if (mopts.malloc_verbose)
2403 			ulog("chunk %18p %18p %4zu %d/%d\n",
2404 			    p->page, ((p->bits[0] & 1) ? NULL : f),
2405 			    B2SIZE(p->bucket), p->free, p->total);
2406 		if (!fromfreelist) {
2407 			size_t sz = B2SIZE(p->bucket);
2408 			if (p->bits[0] & 1)
2409 				putleakinfo(leaks, NULL, sz, p->total -
2410 				    p->free);
2411 			else {
2412 				putleakinfo(leaks, f, sz, 1);
2413 				putleakinfo(leaks, NULL, sz,
2414 				    p->total - p->free - 1);
2415 			}
2416 			break;
2417 		}
2418 		p = LIST_NEXT(p, entries);
2419 		if (mopts.malloc_verbose && p != NULL)
2420 			ulog("       ->");
2421 	}
2422 }
2423 
2424 static void
2425 dump_free_chunk_info(struct dir_info *d, struct leaktree *leaks)
2426 {
2427 	int i, j, count;
2428 	struct chunk_info *p;
2429 
2430 	ulog("Free chunk structs:\n");
2431 	ulog("Bkt) #CI                     page"
2432 	    "                  f size free/n\n");
2433 	for (i = 0; i <= BUCKETS; i++) {
2434 		count = 0;
2435 		LIST_FOREACH(p, &d->chunk_info_list[i], entries)
2436 			count++;
2437 		for (j = 0; j < MALLOC_CHUNK_LISTS; j++) {
2438 			p = LIST_FIRST(&d->chunk_dir[i][j]);
2439 			if (p == NULL && count == 0)
2440 				continue;
2441 			if (j == 0)
2442 				ulog("%3d) %3d ", i, count);
2443 			else
2444 				ulog("         ");
2445 			if (p != NULL)
2446 				dump_chunk(leaks, p, NULL, 1);
2447 			else
2448 				ulog(".\n");
2449 		}
2450 	}
2452 }
2453 
2454 static void
2455 dump_free_page_info(struct dir_info *d)
2456 {
2457 	struct smallcache *cache;
2458 	size_t i, total = 0;
2459 
2460 	ulog("Cached in small cache:\n");
2461 	for (i = 0; i < MAX_SMALLCACHEABLE_SIZE; i++) {
2462 		cache = &d->smallcache[i];
2463 		if (cache->length != 0)
2464 			ulog("%zu(%u): %u = %zu\n", i + 1, cache->max,
2465 			    cache->length, cache->length * (i + 1));
2466 		total += cache->length * (i + 1);
2467 	}
2468 
2469 	ulog("Cached in big cache: %zu/%zu\n", d->bigcache_used,
2470 	    d->bigcache_size);
2471 	for (i = 0; i < d->bigcache_size; i++) {
2472 		if (d->bigcache[i].psize != 0)
2473 			ulog("%zu: %zu\n", i, d->bigcache[i].psize);
2474 		total += d->bigcache[i].psize;
2475 	}
2476 	ulog("Free pages cached: %zu\n", total);
2477 }
2478 
2479 static void
2480 malloc_dump1(int poolno, struct dir_info *d, struct leaktree *leaks)
2481 {
2482 	size_t i, realsize;
2483 
2484 	if (mopts.malloc_verbose) {
2485 		ulog("Malloc dir of %s pool %d at %p\n", __progname, poolno, d);
2486 		ulog("MT=%d J=%d Fl=%x\n", d->malloc_mt, d->malloc_junk,
2487 		    d->mmap_flag);
2488 		ulog("Region slots free %zu/%zu\n",
2489 			d->regions_free, d->regions_total);
2490 		ulog("Finds %zu/%zu\n", d->finds, d->find_collisions);
2491 		ulog("Inserts %zu/%zu\n", d->inserts, d->insert_collisions);
2492 		ulog("Deletes %zu/%zu\n", d->deletes, d->delete_moves);
2493 		ulog("Cheap reallocs %zu/%zu\n",
2494 		    d->cheap_reallocs, d->cheap_realloc_tries);
2495 		ulog("Other pool searches %zu/%zu\n",
2496 		    d->other_pool, d->pool_searches);
2497 		ulog("In use %zu\n", d->malloc_used);
2498 		ulog("Guarded %zu\n", d->malloc_guarded);
2499 		dump_free_chunk_info(d, leaks);
2500 		dump_free_page_info(d);
2501 		ulog("Hash table:\n");
2502 		ulog("slot)  hash d  type               page                  "
2503 		    "f size [free/n]\n");
2504 	}
2505 	for (i = 0; i < d->regions_total; i++) {
2506 		if (d->r[i].p != NULL) {
2507 			size_t h = hash(d->r[i].p) &
2508 			    (d->regions_total - 1);
2509 			if (mopts.malloc_verbose)
2510 				ulog("%4zx) #%4zx %zd ",
2511 				    i, h, h - i);
2512 			REALSIZE(realsize, &d->r[i]);
2513 			if (realsize > MALLOC_MAXCHUNK) {
2514 				putleakinfo(leaks, d->r[i].f, realsize, 1);
2515 				if (mopts.malloc_verbose)
2516 					ulog("pages %18p %18p %zu\n", d->r[i].p,
2517 					    d->r[i].f, realsize);
2518 			} else
2519 				dump_chunk(leaks,
2520 				    (struct chunk_info *)d->r[i].size,
2521 				    d->r[i].f, 0);
2522 		}
2523 	}
2524 	if (mopts.malloc_verbose)
2525 		ulog("\n");
2526 }
2527 
2528 static void
2529 malloc_dump0(int poolno, struct dir_info *pool, struct leaktree *leaks)
2530 {
2531 	int i;
2532 	void *p;
2533 	struct region_info *r;
2534 
2535 	if (pool == NULL || pool->r == NULL)
2536 		return;
2537 	for (i = 0; i < MALLOC_DELAYED_CHUNK_MASK + 1; i++) {
2538 		p = pool->delayed_chunks[i];
2539 		if (p == NULL)
2540 			continue;
2541 		r = find(pool, p);
2542 		if (r == NULL)
2543 			wrterror(pool, "bogus pointer in malloc_dump %p", p);
2544 		free_bytes(pool, r, p);
2545 		pool->delayed_chunks[i] = NULL;
2546 	}
2547 	malloc_dump1(poolno, pool, leaks);
2548 }
2549 
2550 void
2551 malloc_dump(void)
2552 {
2553 	int i;
2554 	int saved_errno = errno;
2555 
2556 	/* XXX leak when run multiple times */
2557 	struct leaktree leaks = RBT_INITIALIZER(&leaks);
2558 
2559 	for (i = 0; i < mopts.malloc_mutexes; i++)
2560 		malloc_dump0(i, mopts.malloc_pool[i], &leaks);
2561 
2562 	dump_leaks(&leaks);
2563 	ulog("\n");
2564 	errno = saved_errno;
2565 }
2566 DEF_WEAK(malloc_dump);
2567 
2568 static void
2569 malloc_exit(void)
2570 {
2571 	int saved_errno = errno;
2572 
2573 	ulog("******** Start dump %s *******\n", __progname);
2574 	ulog("M=%u I=%d F=%d U=%d J=%d R=%d X=%d C=%d cache=%u "
2575 	    "G=%zu\n",
2576 	    mopts.malloc_mutexes,
2577 	    mopts.internal_funcs, mopts.malloc_freecheck,
2578 	    mopts.malloc_freeunmap, mopts.def_malloc_junk,
2579 	    mopts.malloc_realloc, mopts.malloc_xmalloc,
2580 	    mopts.chunk_canaries, mopts.def_maxcache,
2581 	    mopts.malloc_guard);
2582 
2583 	malloc_dump();
2584 	ulog("******** End dump %s *******\n", __progname);
2585 	errno = saved_errno;
2586 }
2587 
2588 #endif /* MALLOC_STATS */
2589