1 /*	$OpenBSD: malloc.c,v 1.239 2018/01/08 12:20:23 otto Exp $	*/
2 /*
3  * Copyright (c) 2008, 2010, 2011, 2016 Otto Moerbeek <otto@drijf.net>
4  * Copyright (c) 2012 Matthew Dempsky <matthew@openbsd.org>
5  * Copyright (c) 2008 Damien Miller <djm@openbsd.org>
6  * Copyright (c) 2000 Poul-Henning Kamp <phk@FreeBSD.org>
7  *
8  * Permission to use, copy, modify, and distribute this software for any
9  * purpose with or without fee is hereby granted, provided that the above
10  * copyright notice and this permission notice appear in all copies.
11  *
12  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19  */
20 
21 /*
22  * If we meet some day, and you think this stuff is worth it, you
23  * can buy me a beer in return. Poul-Henning Kamp
24  */
25 
26 /* #define MALLOC_STATS */
27 
28 #include <sys/types.h>
29 #include <sys/param.h>	/* PAGE_SHIFT ALIGN */
30 #include <sys/queue.h>
31 #include <sys/mman.h>
32 #include <sys/uio.h>
33 #include <errno.h>
34 #include <stdarg.h>
35 #include <stdint.h>
36 #include <stdlib.h>
37 #include <string.h>
38 #include <stdio.h>
39 #include <unistd.h>
40 
41 #ifdef MALLOC_STATS
42 #include <sys/tree.h>
43 #include <fcntl.h>
44 #endif
45 
46 #include "thread_private.h"
47 #include <tib.h>
48 
49 #if defined(__mips64__)
50 #define MALLOC_PAGESHIFT	(14U)
51 #else
52 #define MALLOC_PAGESHIFT	(PAGE_SHIFT)
53 #endif
54 
55 #define MALLOC_MINSHIFT		4
56 #define MALLOC_MAXSHIFT		(MALLOC_PAGESHIFT - 1)
57 #define MALLOC_PAGESIZE		(1UL << MALLOC_PAGESHIFT)
58 #define MALLOC_MINSIZE		(1UL << MALLOC_MINSHIFT)
59 #define MALLOC_PAGEMASK		(MALLOC_PAGESIZE - 1)
60 #define MASK_POINTER(p)		((void *)(((uintptr_t)(p)) & ~MALLOC_PAGEMASK))
61 
62 #define MALLOC_MAXCHUNK		(1 << MALLOC_MAXSHIFT)
63 #define MALLOC_MAXCACHE		256
64 #define MALLOC_DELAYED_CHUNK_MASK	15
65 #define MALLOC_INITIAL_REGIONS	512
66 #define MALLOC_DEFAULT_CACHE	64
67 #define MALLOC_CHUNK_LISTS	4
68 #define CHUNK_CHECK_LENGTH	32
69 
70 /*
71  * We move allocations between half a page and a whole page towards the end,
72  * subject to alignment constraints. This is the extra headroom we allow.
73  * Set to zero to be the most strict.
74  */
75 #define MALLOC_LEEWAY		0
76 #define MALLOC_MOVE_COND(sz)	((sz) - mopts.malloc_guard < 		\
77 				    MALLOC_PAGESIZE - MALLOC_LEEWAY)
78 #define MALLOC_MOVE(p, sz)  	(((char *)(p)) +			\
79 				    ((MALLOC_PAGESIZE - MALLOC_LEEWAY -	\
80 			    	    ((sz) - mopts.malloc_guard)) & 	\
81 				    ~(MALLOC_MINSIZE - 1)))
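/*
 * Illustrative example, assuming a 4096-byte page, no guard page and
 * MALLOC_LEEWAY == 0: a 100-byte allocation is moved to page offset
 * (4096 - 100) & ~15 == 3984, ending 12 bytes short of the page end
 * only because of the 16-byte alignment rounding.
 */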
82 
83 #define PAGEROUND(x)  (((x) + (MALLOC_PAGEMASK)) & ~MALLOC_PAGEMASK)
84 
85 /*
86  * What to use for Junk.  This is the byte value we use to fill with
87  * when the 'J' option is enabled. Use SOME_JUNK right after alloc,
88  * and SOME_FREEJUNK right before free.
89  */
90 #define SOME_JUNK		0xdb	/* deadbeef */
91 #define SOME_FREEJUNK		0xdf	/* dead, free */
92 
93 #define MMAP(sz)	mmap(NULL, (sz), PROT_READ | PROT_WRITE, \
94     MAP_ANON | MAP_PRIVATE, -1, 0)
95 
96 #define MMAPA(a,sz)	mmap((a), (sz), PROT_READ | PROT_WRITE, \
97     MAP_ANON | MAP_PRIVATE, -1, 0)
98 
99 #define MQUERY(a, sz)	mquery((a), (sz), PROT_READ | PROT_WRITE, \
100     MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0)
101 
102 struct region_info {
103 	void *p;		/* page; low bits used to mark chunks */
104 	uintptr_t size;		/* size for pages, or chunk_info pointer */
105 #ifdef MALLOC_STATS
106 	void *f;		/* where allocated from */
107 #endif
108 };
109 
110 LIST_HEAD(chunk_head, chunk_info);
111 
112 struct dir_info {
113 	u_int32_t canary1;
114 	int active;			/* status of malloc */
115 	struct region_info *r;		/* region slots */
116 	size_t regions_total;		/* number of region slots */
117 	size_t regions_free;		/* number of free slots */
118 					/* lists of free chunk info structs */
119 	struct chunk_head chunk_info_list[MALLOC_MAXSHIFT + 1];
120 					/* lists of chunks with free slots */
121 	struct chunk_head chunk_dir[MALLOC_MAXSHIFT + 1][MALLOC_CHUNK_LISTS];
122 	size_t free_regions_size;	/* free pages cached */
123 					/* free pages cache */
124 	struct region_info free_regions[MALLOC_MAXCACHE];
125 					/* delayed free chunk slots */
126 	u_int rotor;
127 	void *delayed_chunks[MALLOC_DELAYED_CHUNK_MASK + 1];
128 	size_t rbytesused;		/* random bytes used */
129 	char *func;			/* current function */
130 	int mutex;
131 	u_char rbytes[32];		/* random bytes */
132 #ifdef MALLOC_STATS
133 	size_t inserts;
134 	size_t insert_collisions;
135 	size_t finds;
136 	size_t find_collisions;
137 	size_t deletes;
138 	size_t delete_moves;
139 	size_t cheap_realloc_tries;
140 	size_t cheap_reallocs;
141 	size_t malloc_used;		/* bytes allocated */
142 	size_t malloc_guarded;		/* bytes used for guards */
143 #define STATS_ADD(x,y)	((x) += (y))
144 #define STATS_SUB(x,y)	((x) -= (y))
145 #define STATS_INC(x)	((x)++)
146 #define STATS_ZERO(x)	((x) = 0)
147 #define STATS_SETF(x,y)	((x)->f = (y))
148 #else
149 #define STATS_ADD(x,y)	/* nothing */
150 #define STATS_SUB(x,y)	/* nothing */
151 #define STATS_INC(x)	/* nothing */
152 #define STATS_ZERO(x)	/* nothing */
153 #define STATS_SETF(x,y)	/* nothing */
154 #endif /* MALLOC_STATS */
155 	u_int32_t canary2;
156 };
157 #define DIR_INFO_RSZ	((sizeof(struct dir_info) + MALLOC_PAGEMASK) & \
158 			~MALLOC_PAGEMASK)
159 
160 /*
161  * This structure describes a page worth of chunks.
162  *
163  * How many bits per u_short in the bitmap
164  */
165 #define MALLOC_BITS		(NBBY * sizeof(u_short))
166 struct chunk_info {
167 	LIST_ENTRY(chunk_info) entries;
168 	void *page;			/* pointer to the page */
169 	u_short canary;
170 	u_short size;			/* size of this page's chunks */
171 	u_short shift;			/* how far to shift for this size */
172 	u_short free;			/* how many free chunks */
173 	u_short total;			/* how many chunks */
174 	u_short offset;			/* requested size table offset */
175 	u_short rotor;			/* randomization rotor */
176 	u_short bits[1];		/* which chunks are free */
177 };
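/*
 * bits[] is a variable-length tail: the first howmany(total, MALLOC_BITS)
 * shorts form the free bitmap, and when chunk canaries are enabled a
 * per-chunk table of requested sizes follows, indexed via 'offset'.
 */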
178 
179 struct malloc_readonly {
180 	struct dir_info *malloc_pool[_MALLOC_MUTEXES];	/* Main bookkeeping information */
181 	int	malloc_mt;		/* multi-threaded mode? */
182 	int	malloc_freecheck;	/* Extensive double free check */
183 	int	malloc_freeunmap;	/* mprotect free pages PROT_NONE? */
184 	int	malloc_junk;		/* junk fill? */
185 	int	malloc_realloc;		/* always realloc? */
186 	int	malloc_xmalloc;		/* xmalloc behaviour? */
187 	int	chunk_canaries;		/* use canaries after chunks? */
188 	int	internal_funcs;		/* use better recallocarray/freezero? */
189 	u_int	malloc_cache;		/* free pages we cache */
190 	size_t	malloc_guard;		/* use guard pages after allocations? */
191 #ifdef MALLOC_STATS
192 	int	malloc_stats;		/* dump statistics at end */
193 #endif
194 	u_int32_t malloc_canary;	/* Matched against ones in malloc_pool */
195 };
196 
197 /* This object is mapped PROT_READ after initialisation to prevent tampering */
198 static union {
199 	struct malloc_readonly mopts;
200 	u_char _pad[MALLOC_PAGESIZE];
201 } malloc_readonly __attribute__((aligned(MALLOC_PAGESIZE)));
202 #define mopts	malloc_readonly.mopts
203 
204 char		*malloc_options;	/* compile-time options */
205 
206 static __dead void wrterror(struct dir_info *d, char *msg, ...)
207     __attribute__((__format__ (printf, 2, 3)));
208 
209 #ifdef MALLOC_STATS
210 void malloc_dump(int, int, struct dir_info *);
211 PROTO_NORMAL(malloc_dump);
212 void malloc_gdump(int);
213 PROTO_NORMAL(malloc_gdump);
214 static void malloc_exit(void);
215 #define CALLER	__builtin_return_address(0)
216 #else
217 #define CALLER	NULL
218 #endif
219 
220 /* low bits of r->p determine size: 0 means >= page size with r->size
221  * holding the real size; otherwise the low bits are the chunk shift
222  * count plus one, with the special value 1 marking malloc(0) */
223 #define REALSIZE(sz, r)						\
224 	(sz) = (uintptr_t)(r)->p & MALLOC_PAGEMASK,		\
225 	(sz) = ((sz) == 0 ? (r)->size : ((sz) == 1 ? 0 : (1 << ((sz)-1))))
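/*
 * Example: a chunk page holding 64-byte chunks is inserted with low bits
 * 7 (shift 6 plus one), so REALSIZE computes 1 << (7 - 1) == 64; a large
 * region has low bits 0 and REALSIZE just reads r->size.
 */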
226 
227 static inline void
228 _MALLOC_LEAVE(struct dir_info *d)
229 {
230 	if (mopts.malloc_mt) {
231 		d->active--;
232 		_MALLOC_UNLOCK(d->mutex);
233 	}
234 }
235 
236 static inline void
237 _MALLOC_ENTER(struct dir_info *d)
238 {
239 	if (mopts.malloc_mt) {
240 		_MALLOC_LOCK(d->mutex);
241 		d->active++;
242 	}
243 }
244 
245 static inline size_t
246 hash(void *p)
247 {
248 	size_t sum;
249 	uintptr_t u;
250 
251 	u = (uintptr_t)p >> MALLOC_PAGESHIFT;
252 	sum = u;
253 	sum = (sum << 7) - sum + (u >> 16);
254 #ifdef __LP64__
255 	sum = (sum << 7) - sum + (u >> 32);
256 	sum = (sum << 7) - sum + (u >> 48);
257 #endif
258 	return sum;
259 }
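/*
 * (sum << 7) - sum is sum * 127, so this folds the page number into a
 * small multiplicative hash, mixing in higher address bits 16 at a time;
 * callers reduce the result with a power-of-two table mask.
 */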
260 
261 static inline
262 struct dir_info *getpool(void)
263 {
264 	if (!mopts.malloc_mt)
265 		return mopts.malloc_pool[0];
266 	else
267 		return mopts.malloc_pool[TIB_GET()->tib_tid &
268 		    (_MALLOC_MUTEXES - 1)];
269 }
270 
271 static __dead void
272 wrterror(struct dir_info *d, char *msg, ...)
273 {
274 	int		saved_errno = errno;
275 	va_list		ap;
276 
277 	dprintf(STDERR_FILENO, "%s(%d) in %s(): ", __progname,
278 	    getpid(), (d != NULL && d->func) ? d->func : "unknown");
279 	va_start(ap, msg);
280 	vdprintf(STDERR_FILENO, msg, ap);
281 	va_end(ap);
282 	dprintf(STDERR_FILENO, "\n");
283 
284 #ifdef MALLOC_STATS
285 	if (mopts.malloc_stats)
286 		malloc_gdump(STDERR_FILENO);
287 #endif /* MALLOC_STATS */
288 
289 	errno = saved_errno;
290 
291 	abort();
292 }
293 
294 static void
295 rbytes_init(struct dir_info *d)
296 {
297 	arc4random_buf(d->rbytes, sizeof(d->rbytes));
298 	/* add 1 to account for using d->rbytes[0] */
299 	d->rbytesused = 1 + d->rbytes[0] % (sizeof(d->rbytes) / 2);
300 }
301 
302 static inline u_char
303 getrbyte(struct dir_info *d)
304 {
305 	u_char x;
306 
307 	if (d->rbytesused >= sizeof(d->rbytes))
308 		rbytes_init(d);
309 	x = d->rbytes[d->rbytesused++];
310 	return x;
311 }
312 
313 static void
314 omalloc_parseopt(char opt)
315 {
316 	switch (opt) {
317 	case '>':
318 		mopts.malloc_cache <<= 1;
319 		if (mopts.malloc_cache > MALLOC_MAXCACHE)
320 			mopts.malloc_cache = MALLOC_MAXCACHE;
321 		break;
322 	case '<':
323 		mopts.malloc_cache >>= 1;
324 		break;
325 	case 'c':
326 		mopts.chunk_canaries = 0;
327 		break;
328 	case 'C':
329 		mopts.chunk_canaries = 1;
330 		break;
331 #ifdef MALLOC_STATS
332 	case 'd':
333 		mopts.malloc_stats = 0;
334 		break;
335 	case 'D':
336 		mopts.malloc_stats = 1;
337 		break;
338 #endif /* MALLOC_STATS */
339 	case 'f':
340 		mopts.malloc_freecheck = 0;
341 		mopts.malloc_freeunmap = 0;
342 		break;
343 	case 'F':
344 		mopts.malloc_freecheck = 1;
345 		mopts.malloc_freeunmap = 1;
346 		break;
347 	case 'g':
348 		mopts.malloc_guard = 0;
349 		break;
350 	case 'G':
351 		mopts.malloc_guard = MALLOC_PAGESIZE;
352 		break;
353 	case 'j':
354 		if (mopts.malloc_junk > 0)
355 			mopts.malloc_junk--;
356 		break;
357 	case 'J':
358 		if (mopts.malloc_junk < 2)
359 			mopts.malloc_junk++;
360 		break;
361 	case 'r':
362 		mopts.malloc_realloc = 0;
363 		break;
364 	case 'R':
365 		mopts.malloc_realloc = 1;
366 		break;
367 	case 'u':
368 		mopts.malloc_freeunmap = 0;
369 		break;
370 	case 'U':
371 		mopts.malloc_freeunmap = 1;
372 		break;
373 	case 'x':
374 		mopts.malloc_xmalloc = 0;
375 		break;
376 	case 'X':
377 		mopts.malloc_xmalloc = 1;
378 		break;
379 	default:
380 		dprintf(STDERR_FILENO, "malloc() warning: "
381 		    "unknown char in MALLOC_OPTIONS\n");
382 		break;
383 	}
384 }
385 
386 static void
387 omalloc_init(void)
388 {
389 	char *p, *q, b[64];
390 	int i, j;
391 
392 	/*
393 	 * Default options
394 	 */
395 	mopts.malloc_junk = 1;
396 	mopts.malloc_cache = MALLOC_DEFAULT_CACHE;
397 
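/*
 * Options are gathered from three sources, in order: the target name of
 * the /etc/malloc.conf symlink, the MALLOC_OPTIONS environment variable
 * (skipped for set[ug]id programs), and the global malloc_options.
 */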
398 	for (i = 0; i < 3; i++) {
399 		switch (i) {
400 		case 0:
401 			j = readlink("/etc/malloc.conf", b, sizeof b - 1);
402 			if (j <= 0)
403 				continue;
404 			b[j] = '\0';
405 			p = b;
406 			break;
407 		case 1:
408 			if (issetugid() == 0)
409 				p = getenv("MALLOC_OPTIONS");
410 			else
411 				continue;
412 			break;
413 		case 2:
414 			p = malloc_options;
415 			break;
416 		default:
417 			p = NULL;
418 		}
419 
420 		for (; p != NULL && *p != '\0'; p++) {
421 			switch (*p) {
422 			case 'S':
423 				for (q = "CFGJ"; *q != '\0'; q++)
424 					omalloc_parseopt(*q);
425 				mopts.malloc_cache = 0;
426 				break;
427 			case 's':
428 				for (q = "cfgj"; *q != '\0'; q++)
429 					omalloc_parseopt(*q);
430 				mopts.malloc_cache = MALLOC_DEFAULT_CACHE;
431 				break;
432 			default:
433 				omalloc_parseopt(*p);
434 				break;
435 			}
436 		}
437 	}
438 
439 #ifdef MALLOC_STATS
440 	if (mopts.malloc_stats && (atexit(malloc_exit) == -1)) {
441 		dprintf(STDERR_FILENO, "malloc() warning: atexit(2) failed."
442 		    " Will not be able to dump stats on exit\n");
443 	}
444 #endif /* MALLOC_STATS */
445 
446 	while ((mopts.malloc_canary = arc4random()) == 0)
447 		;
448 }
449 
450 static void
451 omalloc_poolinit(struct dir_info **dp)
452 {
453 	void *p;
454 	size_t d_avail, regioninfo_size;
455 	struct dir_info *d;
456 	int i, j;
457 
458 	/*
459 	 * Allocate dir_info with a guard page on either side. Also
460 	 * randomise the offset inside the page at which the dir_info
461 	 * lies (subject to alignment by 1 << MALLOC_MINSHIFT).
462 	 */
463 	if ((p = MMAP(DIR_INFO_RSZ + (MALLOC_PAGESIZE * 2))) == MAP_FAILED)
464 		wrterror(NULL, "malloc init mmap failed");
465 	mprotect(p, MALLOC_PAGESIZE, PROT_NONE);
466 	mprotect((char *)p + MALLOC_PAGESIZE + DIR_INFO_RSZ,
467 	    MALLOC_PAGESIZE, PROT_NONE);
468 	d_avail = (DIR_INFO_RSZ - sizeof(*d)) >> MALLOC_MINSHIFT;
469 	d = (struct dir_info *)((char *)p + MALLOC_PAGESIZE +
470 	    (arc4random_uniform(d_avail) << MALLOC_MINSHIFT));
471 
472 	rbytes_init(d);
473 	d->regions_free = d->regions_total = MALLOC_INITIAL_REGIONS;
474 	regioninfo_size = d->regions_total * sizeof(struct region_info);
475 	d->r = MMAP(regioninfo_size);
476 	if (d->r == MAP_FAILED) {
477 		d->regions_total = 0;
478 		wrterror(NULL, "malloc init mmap failed");
479 	}
480 	for (i = 0; i <= MALLOC_MAXSHIFT; i++) {
481 		LIST_INIT(&d->chunk_info_list[i]);
482 		for (j = 0; j < MALLOC_CHUNK_LISTS; j++)
483 			LIST_INIT(&d->chunk_dir[i][j]);
484 	}
485 	STATS_ADD(d->malloc_used, regioninfo_size);
486 	d->canary1 = mopts.malloc_canary ^ (u_int32_t)(uintptr_t)d;
487 	d->canary2 = ~d->canary1;
488 
489 	*dp = d;
490 }
491 
492 static int
493 omalloc_grow(struct dir_info *d)
494 {
495 	size_t newtotal;
496 	size_t newsize;
497 	size_t mask;
498 	size_t i;
499 	struct region_info *p;
500 
501 	if (d->regions_total > SIZE_MAX / sizeof(struct region_info) / 2)
502 		return 1;
503 
504 	newtotal = d->regions_total * 2;
505 	newsize = newtotal * sizeof(struct region_info);
506 	mask = newtotal - 1;
507 
508 	p = MMAP(newsize);
509 	if (p == MAP_FAILED)
510 		return 1;
511 
512 	STATS_ADD(d->malloc_used, newsize);
513 	STATS_ZERO(d->inserts);
514 	STATS_ZERO(d->insert_collisions);
515 	for (i = 0; i < d->regions_total; i++) {
516 		void *q = d->r[i].p;
517 		if (q != NULL) {
518 			size_t index = hash(q) & mask;
519 			STATS_INC(d->inserts);
520 			while (p[index].p != NULL) {
521 				index = (index - 1) & mask;
522 				STATS_INC(d->insert_collisions);
523 			}
524 			p[index] = d->r[i];
525 		}
526 	}
527 	/* keep pages containing meta info from ending up in the cache */
528 	if (munmap(d->r, d->regions_total * sizeof(struct region_info)))
529 		wrterror(d, "munmap %p", (void *)d->r);
530 	else
531 		STATS_SUB(d->malloc_used,
532 		    d->regions_total * sizeof(struct region_info));
533 	d->regions_free = d->regions_free + d->regions_total;
534 	d->regions_total = newtotal;
535 	d->r = p;
536 	return 0;
537 }
538 
539 /*
540  * The hashtable uses the assumption that p is never NULL. This holds since
541  * non-MAP_FIXED mappings with hint 0 start at BRKSIZ.
542  */
543 static int
544 insert(struct dir_info *d, void *p, size_t sz, void *f)
545 {
546 	size_t index;
547 	size_t mask;
548 	void *q;
549 
550 	if (d->regions_free * 4 < d->regions_total) {
551 		if (omalloc_grow(d))
552 			return 1;
553 	}
554 	mask = d->regions_total - 1;
555 	index = hash(p) & mask;
556 	q = d->r[index].p;
557 	STATS_INC(d->inserts);
558 	while (q != NULL) {
559 		index = (index - 1) & mask;
560 		q = d->r[index].p;
561 		STATS_INC(d->insert_collisions);
562 	}
563 	d->r[index].p = p;
564 	d->r[index].size = sz;
565 #ifdef MALLOC_STATS
566 	d->r[index].f = f;
567 #endif
568 	d->regions_free--;
569 	return 0;
570 }
571 
572 static struct region_info *
573 find(struct dir_info *d, void *p)
574 {
575 	size_t index;
576 	size_t mask = d->regions_total - 1;
577 	void *q, *r;
578 
579 	if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)(uintptr_t)d) ||
580 	    d->canary1 != ~d->canary2)
581 		wrterror(d, "internal struct corrupt");
582 	p = MASK_POINTER(p);
583 	index = hash(p) & mask;
584 	r = d->r[index].p;
585 	q = MASK_POINTER(r);
586 	STATS_INC(d->finds);
587 	while (q != p && r != NULL) {
588 		index = (index - 1) & mask;
589 		r = d->r[index].p;
590 		q = MASK_POINTER(r);
591 		STATS_INC(d->find_collisions);
592 	}
593 	return (q == p && r != NULL) ? &d->r[index] : NULL;
594 }
595 
596 static void
597 delete(struct dir_info *d, struct region_info *ri)
598 {
599 	/* algorithm R, Knuth Vol III section 6.4 */
600 	size_t mask = d->regions_total - 1;
601 	size_t i, j, r;
602 
603 	if (d->regions_total & (d->regions_total - 1))
604 		wrterror(d, "regions_total not 2^x");
605 	d->regions_free++;
606 	STATS_INC(d->deletes);
607 
608 	i = ri - d->r;
609 	for (;;) {
610 		d->r[i].p = NULL;
611 		d->r[i].size = 0;
612 		j = i;
613 		for (;;) {
614 			i = (i - 1) & mask;
615 			if (d->r[i].p == NULL)
616 				return;
617 			r = hash(d->r[i].p) & mask;
618 			if ((i <= r && r < j) || (r < j && j < i) ||
619 			    (j < i && i <= r))
620 				continue;
621 			d->r[j] = d->r[i];
622 			STATS_INC(d->delete_moves);
623 			break;
624 		}
625 
626 	}
627 }
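/*
 * The three-way test above asks, modulo table size, whether the emptied
 * slot j lies on entry i's probe path from its home slot r down to i; if
 * it does, the entry is moved into j so later lookups still find it.
 */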
628 
629 /*
630  * Cache maintenance. We keep at most malloc_cache pages cached.
631  * If the cache is becoming full, unmap some cached pages for real,
632  * and then add the region to the cache.
633  * Unlike the regular region data structure, the sizes in the
634  * cache are in MALLOC_PAGESIZE units.
635  */
636 static void
637 unmap(struct dir_info *d, void *p, size_t sz, int clear)
638 {
639 	size_t psz = sz >> MALLOC_PAGESHIFT;
640 	size_t rsz;
641 	struct region_info *r;
642 	u_int i, offset, mask;
643 
644 	if (sz != PAGEROUND(sz))
645 		wrterror(d, "munmap round");
646 
647 	rsz = mopts.malloc_cache - d->free_regions_size;
648 
649 	/*
650 	 * normally the cache holds recently freed regions, but if the region
651 	 * to unmap is larger than the cache size or we're clearing and the
652 	 * cache is full, just munmap
653 	 */
654 	if (psz > mopts.malloc_cache || (clear && rsz == 0)) {
655 		i = munmap(p, sz);
656 		if (i)
657 			wrterror(d, "munmap %p", p);
658 		STATS_SUB(d->malloc_used, sz);
659 		return;
660 	}
661 	offset = getrbyte(d);
662 	mask = mopts.malloc_cache - 1;
663 	if (psz > rsz) {
664 		size_t tounmap = psz - rsz;
665 		i = 0;
666 		for (;;) {
667 			r = &d->free_regions[(i + offset) & mask];
668 			if (r->p != NULL) {
669 				rsz = r->size << MALLOC_PAGESHIFT;
670 				if (munmap(r->p, rsz))
671 					wrterror(d, "munmap %p", r->p);
672 				r->p = NULL;
673 				if (tounmap > r->size)
674 					tounmap -= r->size;
675 				else
676 					tounmap = 0;
677 				d->free_regions_size -= r->size;
678 				STATS_SUB(d->malloc_used, rsz);
679 				if (tounmap == 0) {
680 					offset = i;
681 					break;
682 				}
683 			}
684 			i++;
685 		}
686 	}
687 	for (i = 0; ; i++) {
688 		r = &d->free_regions[(i + offset) & mask];
689 		if (r->p == NULL) {
690 			if (clear)
691 				memset(p, 0, sz - mopts.malloc_guard);
692 			if (mopts.malloc_junk && !mopts.malloc_freeunmap) {
693 				size_t amt = mopts.malloc_junk == 1 ?
694 				    MALLOC_MAXCHUNK : sz;
695 				memset(p, SOME_FREEJUNK, amt);
696 			}
697 			if (mopts.malloc_freeunmap)
698 				mprotect(p, sz, PROT_NONE);
699 			r->p = p;
700 			r->size = psz;
701 			d->free_regions_size += psz;
702 			break;
703 		}
704 	}
705 	if (d->free_regions_size > mopts.malloc_cache)
706 		wrterror(d, "malloc cache overflow");
707 }
708 
709 static void
710 zapcacheregion(struct dir_info *d, void *p, size_t len)
711 {
712 	u_int i;
713 	struct region_info *r;
714 	size_t rsz;
715 
716 	for (i = 0; i < mopts.malloc_cache; i++) {
717 		r = &d->free_regions[i];
718 		if (r->p >= p && r->p <= (void *)((char *)p + len)) {
719 			rsz = r->size << MALLOC_PAGESHIFT;
720 			if (munmap(r->p, rsz))
721 				wrterror(d, "munmap %p", r->p);
722 			r->p = NULL;
723 			d->free_regions_size -= r->size;
724 			STATS_SUB(d->malloc_used, rsz);
725 		}
726 	}
727 }
728 
729 static void *
730 map(struct dir_info *d, void *hint, size_t sz, int zero_fill)
731 {
732 	size_t psz = sz >> MALLOC_PAGESHIFT;
733 	struct region_info *r, *big = NULL;
734 	u_int i;
735 	void *p;
736 
737 	if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)(uintptr_t)d) ||
738 	    d->canary1 != ~d->canary2)
739 		wrterror(d, "internal struct corrupt");
740 	if (sz != PAGEROUND(sz))
741 		wrterror(d, "map round");
742 
743 	if (hint == NULL && psz > d->free_regions_size) {
744 		_MALLOC_LEAVE(d);
745 		p = MMAP(sz);
746 		_MALLOC_ENTER(d);
747 		if (p != MAP_FAILED)
748 			STATS_ADD(d->malloc_used, sz);
749 		/* zero fill not needed */
750 		return p;
751 	}
752 	for (i = 0; i < mopts.malloc_cache; i++) {
753 		r = &d->free_regions[(i + d->rotor) & (mopts.malloc_cache - 1)];
754 		if (r->p != NULL) {
755 			if (hint != NULL && r->p != hint)
756 				continue;
757 			if (r->size == psz) {
758 				p = r->p;
759 				r->p = NULL;
760 				d->free_regions_size -= psz;
761 				if (mopts.malloc_freeunmap)
762 					mprotect(p, sz, PROT_READ | PROT_WRITE);
763 				if (zero_fill)
764 					memset(p, 0, sz);
765 				else if (mopts.malloc_junk == 2 &&
766 				    mopts.malloc_freeunmap)
767 					memset(p, SOME_FREEJUNK, sz);
768 				d->rotor += i + 1;
769 				return p;
770 			} else if (r->size > psz)
771 				big = r;
772 		}
773 	}
774 	if (big != NULL) {
775 		r = big;
776 		p = r->p;
777 		r->p = (char *)r->p + (psz << MALLOC_PAGESHIFT);
778 		if (mopts.malloc_freeunmap)
779 			mprotect(p, sz, PROT_READ | PROT_WRITE);
780 		r->size -= psz;
781 		d->free_regions_size -= psz;
782 		if (zero_fill)
783 			memset(p, 0, sz);
784 		else if (mopts.malloc_junk == 2 && mopts.malloc_freeunmap)
785 			memset(p, SOME_FREEJUNK, sz);
786 		return p;
787 	}
788 	if (hint != NULL)
789 		return MAP_FAILED;
790 	if (d->free_regions_size > mopts.malloc_cache)
791 		wrterror(d, "malloc cache");
792 	_MALLOC_LEAVE(d);
793 	p = MMAP(sz);
794 	_MALLOC_ENTER(d);
795 	if (p != MAP_FAILED)
796 		STATS_ADD(d->malloc_used, sz);
797 	/* zero fill not needed */
798 	return p;
799 }
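/*
 * Summary of the strategy above: prefer an exactly sized region from the
 * free-page cache, else carve the front of a larger cached region, and
 * only then fall back to a fresh mmap(); with a hint, a request that the
 * cache cannot satisfy returns MAP_FAILED so the caller can decide.
 */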
800 
801 static void
802 init_chunk_info(struct dir_info *d, struct chunk_info *p, int bits)
803 {
804 	int i;
805 
806 	if (bits == 0) {
807 		p->shift = MALLOC_MINSHIFT;
808 		p->total = p->free = MALLOC_PAGESIZE >> p->shift;
809 		p->size = 0;
810 		p->offset = 0xdead;
811 	} else {
812 		p->shift = bits;
813 		p->total = p->free = MALLOC_PAGESIZE >> p->shift;
814 		p->size = 1U << bits;
815 		p->offset = howmany(p->total, MALLOC_BITS);
816 	}
817 	p->canary = (u_short)d->canary1;
818 
819 	/* set all valid bits in the bitmap */
820 	i = p->total - 1;
821 	memset(p->bits, 0xff, sizeof(p->bits[0]) * (i / MALLOC_BITS));
822 	p->bits[i / MALLOC_BITS] = (2U << (i % MALLOC_BITS)) - 1;
823 }
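/*
 * Bitmap example, assuming 16-bit shorts: with total == 24 chunks, the
 * memset fills bits[0] with 0xffff and the final line sets bits[1] to
 * (2U << 7) - 1 == 0x00ff, marking exactly 24 chunks free.
 */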
824 
825 static struct chunk_info *
826 alloc_chunk_info(struct dir_info *d, int bits)
827 {
828 	struct chunk_info *p;
829 
830 	if (LIST_EMPTY(&d->chunk_info_list[bits])) {
831 		size_t size, count, i;
832 		char *q;
833 
834 		if (bits == 0)
835 			count = MALLOC_PAGESIZE / MALLOC_MINSIZE;
836 		else
837 			count = MALLOC_PAGESIZE >> bits;
838 
839 		size = howmany(count, MALLOC_BITS);
840 		size = sizeof(struct chunk_info) + (size - 1) * sizeof(u_short);
841 		if (mopts.chunk_canaries)
842 			size += count * sizeof(u_short);
843 		size = ALIGN(size);
844 
845 		q = MMAP(MALLOC_PAGESIZE);
846 		if (q == MAP_FAILED)
847 			return NULL;
848 		STATS_ADD(d->malloc_used, MALLOC_PAGESIZE);
849 		count = MALLOC_PAGESIZE / size;
850 
851 		for (i = 0; i < count; i++, q += size) {
852 			p = (struct chunk_info *)q;
853 			LIST_INSERT_HEAD(&d->chunk_info_list[bits], p, entries);
854 		}
855 	}
856 	p = LIST_FIRST(&d->chunk_info_list[bits]);
857 	LIST_REMOVE(p, entries);
858 	if (p->shift == 0)
859 		init_chunk_info(d, p, bits);
860 	return p;
861 }
862 
863 /*
864  * Allocate a page of chunks
865  */
866 static struct chunk_info *
867 omalloc_make_chunks(struct dir_info *d, int bits, int listnum)
868 {
869 	struct chunk_info *bp;
870 	void *pp;
871 
872 	/* Allocate a new bucket */
873 	pp = map(d, NULL, MALLOC_PAGESIZE, 0);
874 	if (pp == MAP_FAILED)
875 		return NULL;
876 
877 	/* memory protect the page allocated in the malloc(0) case */
878 	if (bits == 0 && mprotect(pp, MALLOC_PAGESIZE, PROT_NONE) < 0)
879 		goto err;
880 
881 	bp = alloc_chunk_info(d, bits);
882 	if (bp == NULL)
883 		goto err;
884 	bp->page = pp;
885 
886 	if (insert(d, (void *)((uintptr_t)pp | (bits + 1)), (uintptr_t)bp, NULL))
887 		goto err;
888 	LIST_INSERT_HEAD(&d->chunk_dir[bits][listnum], bp, entries);
889 	return bp;
890 
891 err:
892 	unmap(d, pp, MALLOC_PAGESIZE, 0);
893 	return NULL;
894 }
895 
896 static int
897 find_chunksize(size_t size)
898 {
899 	int r;
900 
901 	/* malloc(0) is special */
902 	if (size == 0)
903 		return 0;
904 
905 	if (size < MALLOC_MINSIZE)
906 		size = MALLOC_MINSIZE;
907 	size--;
908 
909 	r = MALLOC_MINSHIFT;
910 	while (size >> r)
911 		r++;
912 	return r;
913 }
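/*
 * Example: find_chunksize(64) returns 6 (a 64-byte chunk, since 63 >> 6
 * == 0), while find_chunksize(65) returns 7, rounding up to 128 bytes.
 */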
914 
915 static void
916 fill_canary(char *ptr, size_t sz, size_t allocated)
917 {
918 	size_t check_sz = allocated - sz;
919 
920 	if (check_sz > CHUNK_CHECK_LENGTH)
921 		check_sz = CHUNK_CHECK_LENGTH;
922 	memset(ptr + sz, SOME_JUNK, check_sz);
923 }
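/*
 * Example: a malloc(13) served from a 16-byte chunk gets bytes 13..15
 * filled with SOME_JUNK; validate_canary() later re-reads them to detect
 * writes past the requested size.
 */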
924 
925 /*
926  * Allocate a chunk
927  */
928 static void *
929 malloc_bytes(struct dir_info *d, size_t size, void *f)
930 {
931 	u_int i, r;
932 	int j, listnum;
933 	size_t k;
934 	u_short	u, b, *lp;
935 	struct chunk_info *bp;
936 	void *p;
937 
938 	if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)(uintptr_t)d) ||
939 	    d->canary1 != ~d->canary2)
940 		wrterror(d, "internal struct corrupt");
941 
942 	j = find_chunksize(size);
943 
944 	r = getrbyte(d);
945 	listnum = r % MALLOC_CHUNK_LISTS;
946 	/* If the list is empty, allocate another page of chunks of that size */
947 	if ((bp = LIST_FIRST(&d->chunk_dir[j][listnum])) == NULL) {
948 		bp = omalloc_make_chunks(d, j, listnum);
949 		if (bp == NULL)
950 			return NULL;
951 	}
952 
953 	if (bp->canary != (u_short)d->canary1)
954 		wrterror(d, "chunk info corrupted");
955 
956 	if (bp->free > 1)
957 		bp->rotor += r;
958 	i = bp->rotor++ & (bp->total - 1);
959 
960 	/* start somewhere in a short */
961 	lp = &bp->bits[i / MALLOC_BITS];
962 	if (*lp) {
963 		b = *lp;
964 		k = i % MALLOC_BITS;
965 		u = 1 << k;
966 		while (k < MALLOC_BITS) {
967 			if (b & u)
968 				goto found;
969 			k++;
970 			u <<= 1;
971 		}
972 	}
973 	/* no free bit in the rest of this short; move on to the next shorts */
974 	i /= MALLOC_BITS;
975 	for (;;) {
976 		if (++i >= bp->total / MALLOC_BITS)
977 			i = 0;
978 		lp = &bp->bits[i];
979 		if (*lp) {
980 			b = *lp;
981 			k = 0;
982 			u = 1;
983 			for (;;) {
984 				if (b & u)
985 					goto found;
986 				k++;
987 				u <<= 1;
988 			}
989 		}
990 	}
991 found:
992 #ifdef MALLOC_STATS
993 	if (i == 0 && k == 0) {
994 		struct region_info *r = find(d, bp->page);
995 		r->f = f;
996 	}
997 #endif
998 
999 	*lp ^= u;
1000 
1001 	/* If there are no more free chunks, remove it from the free list */
1002 	if (--bp->free == 0)
1003 		LIST_REMOVE(bp, entries);
1004 
1005 	/* Adjust to the real offset of that chunk */
1006 	k += (lp - bp->bits) * MALLOC_BITS;
1007 
1008 	if (mopts.chunk_canaries && size > 0)
1009 		bp->bits[bp->offset + k] = size;
1010 
1011 	k <<= bp->shift;
1012 
1013 	p = (char *)bp->page + k;
1014 	if (bp->size > 0) {
1015 		if (mopts.malloc_junk == 2)
1016 			memset(p, SOME_JUNK, bp->size);
1017 		else if (mopts.chunk_canaries)
1018 			fill_canary(p, size, bp->size);
1019 	}
1020 	return p;
1021 }
1022 
1023 static void
1024 validate_canary(struct dir_info *d, u_char *ptr, size_t sz, size_t allocated)
1025 {
1026 	size_t check_sz = allocated - sz;
1027 	u_char *p, *q;
1028 
1029 	if (check_sz > CHUNK_CHECK_LENGTH)
1030 		check_sz = CHUNK_CHECK_LENGTH;
1031 	p = ptr + sz;
1032 	q = p + check_sz;
1033 
1034 	while (p < q) {
1035 		if (*p != SOME_JUNK) {
1036 			wrterror(d, "chunk canary corrupted %p %#tx@%#zx%s",
1037 			    ptr, p - ptr, sz,
1038 			    *p == SOME_FREEJUNK ? " (double free?)" : "");
1039 		}
1040 		p++;
1041 	}
1042 }
1043 
1044 static uint32_t
1045 find_chunknum(struct dir_info *d, struct chunk_info *info, void *ptr, int check)
1046 {
1047 	uint32_t chunknum;
1048 
1049 	if (info->canary != (u_short)d->canary1)
1050 		wrterror(d, "chunk info corrupted");
1051 
1052 	/* Find the chunk number on the page */
1053 	chunknum = ((uintptr_t)ptr & MALLOC_PAGEMASK) >> info->shift;
1054 
1055 	if ((uintptr_t)ptr & ((1U << (info->shift)) - 1))
1056 		wrterror(d, "modified chunk-pointer %p", ptr);
1057 	if (info->bits[chunknum / MALLOC_BITS] &
1058 	    (1U << (chunknum % MALLOC_BITS)))
1059 		wrterror(d, "chunk is already free %p", ptr);
1060 	if (check && info->size > 0) {
1061 		validate_canary(d, ptr, info->bits[info->offset + chunknum],
1062 		    info->size);
1063 	}
1064 	return chunknum;
1065 }
1066 
1067 /*
1068  * Free a chunk, and possibly the page it's on, if the page becomes empty.
1069  */
1070 static void
1071 free_bytes(struct dir_info *d, struct region_info *r, void *ptr)
1072 {
1073 	struct chunk_head *mp;
1074 	struct chunk_info *info;
1075 	uint32_t chunknum;
1076 	int listnum;
1077 
1078 	info = (struct chunk_info *)r->size;
1079 	chunknum = find_chunknum(d, info, ptr, 0);
1080 
1081 	info->bits[chunknum / MALLOC_BITS] |= 1U << (chunknum % MALLOC_BITS);
1082 	info->free++;
1083 
1084 	if (info->free == 1) {
1085 		/* Page became non-full */
1086 		listnum = getrbyte(d) % MALLOC_CHUNK_LISTS;
1087 		if (info->size != 0)
1088 			mp = &d->chunk_dir[info->shift][listnum];
1089 		else
1090 			mp = &d->chunk_dir[0][listnum];
1091 
1092 		LIST_INSERT_HEAD(mp, info, entries);
1093 		return;
1094 	}
1095 
1096 	if (info->free != info->total)
1097 		return;
1098 
1099 	LIST_REMOVE(info, entries);
1100 
1101 	if (info->size == 0 && !mopts.malloc_freeunmap)
1102 		mprotect(info->page, MALLOC_PAGESIZE, PROT_READ | PROT_WRITE);
1103 	unmap(d, info->page, MALLOC_PAGESIZE, 0);
1104 
1105 	delete(d, r);
1106 	if (info->size != 0)
1107 		mp = &d->chunk_info_list[info->shift];
1108 	else
1109 		mp = &d->chunk_info_list[0];
1110 	LIST_INSERT_HEAD(mp, info, entries);
1111 }
1112 
1113 
1114 
1115 static void *
1116 omalloc(struct dir_info *pool, size_t sz, int zero_fill, void *f)
1117 {
1118 	void *p;
1119 	size_t psz;
1120 
1121 	if (sz > MALLOC_MAXCHUNK) {
1122 		if (sz >= SIZE_MAX - mopts.malloc_guard - MALLOC_PAGESIZE) {
1123 			errno = ENOMEM;
1124 			return NULL;
1125 		}
1126 		sz += mopts.malloc_guard;
1127 		psz = PAGEROUND(sz);
1128 		p = map(pool, NULL, psz, zero_fill);
1129 		if (p == MAP_FAILED) {
1130 			errno = ENOMEM;
1131 			return NULL;
1132 		}
1133 		if (insert(pool, p, sz, f)) {
1134 			unmap(pool, p, psz, 0);
1135 			errno = ENOMEM;
1136 			return NULL;
1137 		}
1138 		if (mopts.malloc_guard) {
1139 			if (mprotect((char *)p + psz - mopts.malloc_guard,
1140 			    mopts.malloc_guard, PROT_NONE))
1141 				wrterror(pool, "mprotect");
1142 			STATS_ADD(pool->malloc_guarded, mopts.malloc_guard);
1143 		}
1144 
1145 		if (MALLOC_MOVE_COND(sz)) {
1146 			/* fill whole allocation */
1147 			if (mopts.malloc_junk == 2)
1148 				memset(p, SOME_JUNK, psz - mopts.malloc_guard);
1149 			/* shift towards the end */
1150 			p = MALLOC_MOVE(p, sz);
1151 			/* zero-fill if requested, since the junk fill above overwrote it */
1152 			if (zero_fill && mopts.malloc_junk == 2)
1153 				memset(p, 0, sz - mopts.malloc_guard);
1154 		} else {
1155 			if (mopts.malloc_junk == 2) {
1156 				if (zero_fill)
1157 					memset((char *)p + sz - mopts.malloc_guard,
1158 					    SOME_JUNK, psz - sz);
1159 				else
1160 					memset(p, SOME_JUNK,
1161 					    psz - mopts.malloc_guard);
1162 			} else if (mopts.chunk_canaries)
1163 				fill_canary(p, sz - mopts.malloc_guard,
1164 				    psz - mopts.malloc_guard);
1165 		}
1166 
1167 	} else {
1168 		/* takes care of SOME_JUNK */
1169 		p = malloc_bytes(pool, sz, f);
1170 		if (zero_fill && p != NULL && sz > 0)
1171 			memset(p, 0, sz);
1172 	}
1173 
1174 	return p;
1175 }
1176 
1177 /*
1178  * Common function for handling recursion.  Only
1179  * print the error message once, to avoid making the problem
1180  * potentially worse.
1181  */
1182 static void
1183 malloc_recurse(struct dir_info *d)
1184 {
1185 	static int noprint;
1186 
1187 	if (noprint == 0) {
1188 		noprint = 1;
1189 		wrterror(d, "recursive call");
1190 	}
1191 	d->active--;
1192 	_MALLOC_UNLOCK(d->mutex);
1193 	errno = EDEADLK;
1194 }
1195 
1196 void
1197 _malloc_init(int from_rthreads)
1198 {
1199 	int i, max;
1200 	struct dir_info *d;
1201 
1202 	_MALLOC_LOCK(0);
1203 	if (!from_rthreads && mopts.malloc_pool[0]) {
1204 		_MALLOC_UNLOCK(0);
1205 		return;
1206 	}
1207 	if (!mopts.malloc_canary)
1208 		omalloc_init();
1209 
1210 	max = from_rthreads ? _MALLOC_MUTEXES : 1;
1211 	if (((uintptr_t)&malloc_readonly & MALLOC_PAGEMASK) == 0)
1212 		mprotect(&malloc_readonly, sizeof(malloc_readonly),
1213 		    PROT_READ | PROT_WRITE);
1214 	for (i = 0; i < max; i++) {
1215 		if (mopts.malloc_pool[i])
1216 			continue;
1217 		omalloc_poolinit(&d);
1218 		d->mutex = i;
1219 		mopts.malloc_pool[i] = d;
1220 	}
1221 
1222 	if (from_rthreads)
1223 		mopts.malloc_mt = 1;
1224 	else
1225 		mopts.internal_funcs = 1;
1226 
1227 	/*
1228 	 * Options have been set and will never be reset.
1229 	 * Prevent further tampering with them.
1230 	 */
1231 	if (((uintptr_t)&malloc_readonly & MALLOC_PAGEMASK) == 0)
1232 		mprotect(&malloc_readonly, sizeof(malloc_readonly), PROT_READ);
1233 	_MALLOC_UNLOCK(0);
1234 }
1235 DEF_STRONG(_malloc_init);
1236 
1237 void *
1238 malloc(size_t size)
1239 {
1240 	void *r;
1241 	struct dir_info *d;
1242 	int saved_errno = errno;
1243 
1244 	d = getpool();
1245 	if (d == NULL) {
1246 		_malloc_init(0);
1247 		d = getpool();
1248 	}
1249 	_MALLOC_LOCK(d->mutex);
1250 	d->func = "malloc";
1251 
1252 	if (d->active++) {
1253 		malloc_recurse(d);
1254 		return NULL;
1255 	}
1256 	r = omalloc(d, size, 0, CALLER);
1257 	d->active--;
1258 	_MALLOC_UNLOCK(d->mutex);
1259 	if (r == NULL && mopts.malloc_xmalloc)
1260 		wrterror(d, "out of memory");
1261 	if (r != NULL)
1262 		errno = saved_errno;
1263 	return r;
1264 }
1265 /*DEF_STRONG(malloc);*/
1266 
1267 static void
1268 validate_junk(struct dir_info *pool, void *p)
1269 {
1270 	struct region_info *r;
1271 	size_t byte, sz;
1272 
1273 	if (p == NULL)
1274 		return;
1275 	r = find(pool, p);
1276 	if (r == NULL)
1277 		wrterror(pool, "bogus pointer in validate_junk %p", p);
1278 	REALSIZE(sz, r);
1279 	if (sz > CHUNK_CHECK_LENGTH)
1280 		sz = CHUNK_CHECK_LENGTH;
1281 	for (byte = 0; byte < sz; byte++) {
1282 		if (((unsigned char *)p)[byte] != SOME_FREEJUNK)
1283 			wrterror(pool, "use after free %p", p);
1284 	}
1285 }
1286 
1287 static void
1288 ofree(struct dir_info *argpool, void *p, int clear, int check, size_t argsz)
1289 {
1290 	struct dir_info *pool;
1291 	struct region_info *r;
1292 	size_t sz;
1293 	int i;
1294 
1295 	pool = argpool;
1296 	r = find(pool, p);
1297 	if (r == NULL) {
1298 		if (mopts.malloc_mt)  {
1299 			for (i = 0; i < _MALLOC_MUTEXES; i++) {
1300 				if (i == argpool->mutex)
1301 					continue;
1302 				pool->active--;
1303 				_MALLOC_UNLOCK(pool->mutex);
1304 				pool = mopts.malloc_pool[i];
1305 				_MALLOC_LOCK(pool->mutex);
1306 				pool->active++;
1307 				r = find(pool, p);
1308 				if (r != NULL)
1309 					break;
1310 			}
1311 		}
1312 		if (r == NULL)
1313 			wrterror(pool, "bogus pointer (double free?) %p", p);
1314 	}
1315 
1316 	REALSIZE(sz, r);
1317 	if (check) {
1318 		if (sz <= MALLOC_MAXCHUNK) {
1319 			if (mopts.chunk_canaries && sz > 0) {
1320 				struct chunk_info *info =
1321 				    (struct chunk_info *)r->size;
1322 				uint32_t chunknum =
1323 				    find_chunknum(pool, info, p, 0);
1324 
1325 				if (info->bits[info->offset + chunknum] <
1326 				    argsz)
1327 					wrterror(pool, "recorded size %hu"
1328 					    " < %zu",
1329 					    info->bits[info->offset + chunknum],
1330 					    argsz);
1331 			} else {
1332 				if (sz < argsz)
1333 					wrterror(pool, "chunk size %zu < %zu",
1334 					    sz, argsz);
1335 			}
1336 		} else if (sz - mopts.malloc_guard < argsz) {
1337 			wrterror(pool, "recorded size %zu < %zu",
1338 			    sz - mopts.malloc_guard, argsz);
1339 		}
1340 	}
1341 	if (sz > MALLOC_MAXCHUNK) {
1342 		if (!MALLOC_MOVE_COND(sz)) {
1343 			if (r->p != p)
1344 				wrterror(pool, "bogus pointer %p", p);
1345 			if (mopts.chunk_canaries)
1346 				validate_canary(pool, p,
1347 				    sz - mopts.malloc_guard,
1348 				    PAGEROUND(sz - mopts.malloc_guard));
1349 		} else {
1350 			/* shifted towards the end */
1351 			if (p != MALLOC_MOVE(r->p, sz))
1352 				wrterror(pool, "bogus moved pointer %p", p);
1353 			p = r->p;
1354 		}
1355 		if (mopts.malloc_guard) {
1356 			if (sz < mopts.malloc_guard)
1357 				wrterror(pool, "guard size");
1358 			if (!mopts.malloc_freeunmap) {
1359 				if (mprotect((char *)p + PAGEROUND(sz) -
1360 				    mopts.malloc_guard, mopts.malloc_guard,
1361 				    PROT_READ | PROT_WRITE))
1362 					wrterror(pool, "mprotect");
1363 			}
1364 			STATS_SUB(pool->malloc_guarded, mopts.malloc_guard);
1365 		}
1366 		unmap(pool, p, PAGEROUND(sz), clear);
1367 		delete(pool, r);
1368 	} else {
1369 		/* Validate the pointer and optionally check the chunk canary */
1370 		struct chunk_info *info = (struct chunk_info *)r->size;
1371 		find_chunknum(pool, info, p, mopts.chunk_canaries);
1372 		if (!clear) {
1373 			void *tmp;
1374 			int i;
1375 
1376 			if (mopts.malloc_freecheck) {
1377 				for (i = 0; i <= MALLOC_DELAYED_CHUNK_MASK; i++)
1378 					if (p == pool->delayed_chunks[i])
1379 						wrterror(pool,
1380 						    "double free %p", p);
1381 			}
1382 			if (mopts.malloc_junk && sz > 0)
1383 				memset(p, SOME_FREEJUNK, sz);
1384 			i = getrbyte(pool) & MALLOC_DELAYED_CHUNK_MASK;
1385 			tmp = p;
1386 			p = pool->delayed_chunks[i];
1387 			if (tmp == p)
1388 				wrterror(pool, "double free %p", tmp);
1389 			pool->delayed_chunks[i] = tmp;
1390 			if (mopts.malloc_junk)
1391 				validate_junk(pool, p);
1392 		} else if (sz > 0)
1393 			memset(p, 0, sz);
1394 		if (p != NULL) {
1395 			r = find(pool, p);
1396 			if (r == NULL)
1397 				wrterror(pool,
1398 				    "bogus pointer (double free?) %p", p);
1399 			free_bytes(pool, r, p);
1400 		}
1401 	}
1402 
1403 	if (argpool != pool) {
1404 		pool->active--;
1405 		_MALLOC_UNLOCK(pool->mutex);
1406 		_MALLOC_LOCK(argpool->mutex);
1407 		argpool->active++;
1408 	}
1409 }
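/*
 * Note on the chunk path above: a freed chunk is junk-filled and swapped
 * into a random slot of delayed_chunks[]; the chunk actually released is
 * the slot's previous occupant, whose junk pattern is re-validated to
 * catch use after free.
 */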
1410 
1411 void
1412 free(void *ptr)
1413 {
1414 	struct dir_info *d;
1415 	int saved_errno = errno;
1416 
1417 	/* This is legal. */
1418 	if (ptr == NULL)
1419 		return;
1420 
1421 	d = getpool();
1422 	if (d == NULL)
1423 		wrterror(d, "free() called before allocation");
1424 	_MALLOC_LOCK(d->mutex);
1425 	d->func = "free";
1426 	if (d->active++) {
1427 		malloc_recurse(d);
1428 		return;
1429 	}
1430 	ofree(d, ptr, 0, 0, 0);
1431 	d->active--;
1432 	_MALLOC_UNLOCK(d->mutex);
1433 	errno = saved_errno;
1434 }
1435 /*DEF_STRONG(free);*/
1436 
1437 static void
1438 freezero_p(void *ptr, size_t sz)
1439 {
1440 	explicit_bzero(ptr, sz);
1441 	free(ptr);
1442 }
1443 
1444 void
1445 freezero(void *ptr, size_t sz)
1446 {
1447 	struct dir_info *d;
1448 	int saved_errno = errno;
1449 
1450 	/* This is legal. */
1451 	if (ptr == NULL)
1452 		return;
1453 
1454 	if (!mopts.internal_funcs) {
1455 		freezero_p(ptr, sz);
1456 		return;
1457 	}
1458 
1459 	d = getpool();
1460 	if (d == NULL)
1461 		wrterror(d, "freezero() called before allocation");
1462 	_MALLOC_LOCK(d->mutex);
1463 	d->func = "freezero";
1464 	if (d->active++) {
1465 		malloc_recurse(d);
1466 		return;
1467 	}
1468 	ofree(d, ptr, 1, 1, sz);
1469 	d->active--;
1470 	_MALLOC_UNLOCK(d->mutex);
1471 	errno = saved_errno;
1472 }
1473 DEF_WEAK(freezero);
1474 
1475 static void *
1476 orealloc(struct dir_info *argpool, void *p, size_t newsz, void *f)
1477 {
1478 	struct dir_info *pool;
1479 	struct region_info *r;
1480 	struct chunk_info *info;
1481 	size_t oldsz, goldsz, gnewsz;
1482 	void *q, *ret;
1483 	int i;
1484 	uint32_t chunknum;
1485 
1486 	pool = argpool;
1487 
1488 	if (p == NULL)
1489 		return omalloc(pool, newsz, 0, f);
1490 
1491 	r = find(pool, p);
1492 	if (r == NULL) {
1493 		if (mopts.malloc_mt) {
1494 			for (i = 0; i < _MALLOC_MUTEXES; i++) {
1495 				if (i == argpool->mutex)
1496 					continue;
1497 				pool->active--;
1498 				_MALLOC_UNLOCK(pool->mutex);
1499 				pool = mopts.malloc_pool[i];
1500 				_MALLOC_LOCK(pool->mutex);
1501 				pool->active++;
1502 				r = find(pool, p);
1503 				if (r != NULL)
1504 					break;
1505 			}
1506 		}
1507 		if (r == NULL)
1508 			wrterror(pool, "bogus pointer (double free?) %p", p);
1509 	}
1510 	if (newsz >= SIZE_MAX - mopts.malloc_guard - MALLOC_PAGESIZE) {
1511 		errno = ENOMEM;
1512 		ret = NULL;
1513 		goto done;
1514 	}
1515 
1516 	REALSIZE(oldsz, r);
1517 	if (mopts.chunk_canaries && oldsz <= MALLOC_MAXCHUNK) {
1518 		info = (struct chunk_info *)r->size;
1519 		chunknum = find_chunknum(pool, info, p, 0);
1520 	}
1521 
1522 	goldsz = oldsz;
1523 	if (oldsz > MALLOC_MAXCHUNK) {
1524 		if (oldsz < mopts.malloc_guard)
1525 			wrterror(pool, "guard size");
1526 		oldsz -= mopts.malloc_guard;
1527 	}
1528 
1529 	gnewsz = newsz;
1530 	if (gnewsz > MALLOC_MAXCHUNK)
1531 		gnewsz += mopts.malloc_guard;
1532 
1533 	if (newsz > MALLOC_MAXCHUNK && oldsz > MALLOC_MAXCHUNK &&
1534 	    !mopts.malloc_realloc) {
1535 		/* First case: growing from an n-page sized allocation to an
1536 		   m-page sized allocation, m > n */
1537 		size_t roldsz = PAGEROUND(goldsz);
1538 		size_t rnewsz = PAGEROUND(gnewsz);
1539 
1540 		if (rnewsz > roldsz) {
1541 			/* try to extend existing region */
1542 			if (!mopts.malloc_guard) {
1543 				void *hint = (char *)r->p + roldsz;
1544 				size_t needed = rnewsz - roldsz;
1545 
1546 				STATS_INC(pool->cheap_realloc_tries);
1547 				q = map(pool, hint, needed, 0);
1548 				if (q == hint)
1549 					goto gotit;
1550 				zapcacheregion(pool, hint, needed);
1551 				q = MQUERY(hint, needed);
1552 				if (q == hint)
1553 					q = MMAPA(hint, needed);
1554 				else
1555 					q = MAP_FAILED;
1556 				if (q == hint) {
1557 gotit:
1558 					STATS_ADD(pool->malloc_used, needed);
1559 					if (mopts.malloc_junk == 2)
1560 						memset(q, SOME_JUNK, needed);
1561 					r->size = gnewsz;
1562 					if (r->p != p) {
1563 						/* old pointer is moved */
1564 						memmove(r->p, p, oldsz);
1565 						p = r->p;
1566 					}
1567 					if (mopts.chunk_canaries)
1568 						fill_canary(p, newsz,
1569 						    PAGEROUND(newsz));
1570 					STATS_SETF(r, f);
1571 					STATS_INC(pool->cheap_reallocs);
1572 					ret = p;
1573 					goto done;
1574 				} else if (q != MAP_FAILED) {
1575 					if (munmap(q, needed))
1576 						wrterror(pool, "munmap %p", q);
1577 				}
1578 			}
1579 		} else if (rnewsz < roldsz) {
1580 			/* shrink number of pages */
1581 			if (mopts.malloc_guard) {
1582 				if (mprotect((char *)r->p + roldsz -
1583 				    mopts.malloc_guard, mopts.malloc_guard,
1584 				    PROT_READ | PROT_WRITE))
1585 					wrterror(pool, "mprotect");
1586 				if (mprotect((char *)r->p + rnewsz -
1587 				    mopts.malloc_guard, mopts.malloc_guard,
1588 				    PROT_NONE))
1589 					wrterror(pool, "mprotect");
1590 			}
1591 			unmap(pool, (char *)r->p + rnewsz, roldsz - rnewsz, 0);
1592 			r->size = gnewsz;
1593 			if (MALLOC_MOVE_COND(gnewsz)) {
1594 				void *pp = MALLOC_MOVE(r->p, gnewsz);
1595 				memmove(pp, p, newsz);
1596 				p = pp;
1597 			} else if (mopts.chunk_canaries)
1598 				fill_canary(p, newsz, PAGEROUND(newsz));
1599 			STATS_SETF(r, f);
1600 			ret = p;
1601 			goto done;
1602 		} else {
1603 			/* number of pages remains the same */
1604 			void *pp = r->p;
1605 
1606 			r->size = gnewsz;
1607 			if (MALLOC_MOVE_COND(gnewsz))
1608 				pp = MALLOC_MOVE(r->p, gnewsz);
1609 			if (p != pp) {
1610 				memmove(pp, p, oldsz < newsz ? oldsz : newsz);
1611 				p = pp;
1612 			}
1613 			if (p == r->p) {
1614 				if (newsz > oldsz && mopts.malloc_junk == 2)
1615 					memset((char *)p + newsz, SOME_JUNK,
1616 					    rnewsz - mopts.malloc_guard -
1617 					    newsz);
1618 				if (mopts.chunk_canaries)
1619 					fill_canary(p, newsz, PAGEROUND(newsz));
1620 			}
1621 			STATS_SETF(r, f);
1622 			ret = p;
1623 			goto done;
1624 		}
1625 	}
1626 	if (oldsz <= MALLOC_MAXCHUNK && oldsz > 0 &&
1627 	    newsz <= MALLOC_MAXCHUNK && newsz > 0 &&
1628 	    1 << find_chunksize(newsz) == oldsz && !mopts.malloc_realloc) {
1629 		/* do not reallocate if the new size fits well in the existing chunk */
1630 		if (mopts.malloc_junk == 2)
1631 			memset((char *)p + newsz, SOME_JUNK, oldsz - newsz);
1632 		if (mopts.chunk_canaries) {
1633 			info->bits[info->offset + chunknum] = newsz;
1634 			fill_canary(p, newsz, info->size);
1635 		}
1636 		STATS_SETF(r, f);
1637 		ret = p;
1638 	} else if (newsz != oldsz || mopts.malloc_realloc) {
1639 		/* create new allocation */
1640 		q = omalloc(pool, newsz, 0, f);
1641 		if (q == NULL) {
1642 			ret = NULL;
1643 			goto done;
1644 		}
1645 		if (newsz != 0 && oldsz != 0)
1646 			memcpy(q, p, oldsz < newsz ? oldsz : newsz);
1647 		ofree(pool, p, 0, 0, 0);
1648 		ret = q;
1649 	} else {
1650 		/* oldsz == newsz */
1651 		if (newsz != 0)
1652 			wrterror(pool, "realloc internal inconsistency");
1653 		STATS_SETF(r, f);
1654 		ret = p;
1655 	}
1656 done:
1657 	if (argpool != pool) {
1658 		pool->active--;
1659 		_MALLOC_UNLOCK(pool->mutex);
1660 		_MALLOC_LOCK(argpool->mutex);
1661 		argpool->active++;
1662 	}
1663 	return ret;
1664 }
1665 
1666 void *
1667 realloc(void *ptr, size_t size)
1668 {
1669 	struct dir_info *d;
1670 	void *r;
1671 	int saved_errno = errno;
1672 
1673 	d = getpool();
1674 	if (d == NULL) {
1675 		_malloc_init(0);
1676 		d = getpool();
1677 	}
1678 	_MALLOC_LOCK(d->mutex);
1679 	d->func = "realloc";
1680 	if (d->active++) {
1681 		malloc_recurse(d);
1682 		return NULL;
1683 	}
1684 	r = orealloc(d, ptr, size, CALLER);
1685 
1686 	d->active--;
1687 	_MALLOC_UNLOCK(d->mutex);
1688 	if (r == NULL && mopts.malloc_xmalloc)
1689 		wrterror(d, "out of memory");
1690 	if (r != NULL)
1691 		errno = saved_errno;
1692 	return r;
1693 }
1694 /*DEF_STRONG(realloc);*/
1695 
1696 
1697 /*
1698  * This is sqrt(SIZE_MAX+1), as s1*s2 <= SIZE_MAX
1699  * if both s1 < MUL_NO_OVERFLOW and s2 < MUL_NO_OVERFLOW
1700  */
1701 #define MUL_NO_OVERFLOW	(1UL << (sizeof(size_t) * 4))
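/*
 * With an 8-byte size_t, MUL_NO_OVERFLOW is 2^32: nmemb * size can only
 * overflow when at least one factor is >= 2^32, and only then is the
 * costlier SIZE_MAX / nmemb < size division check performed.
 */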
1702 
1703 void *
1704 calloc(size_t nmemb, size_t size)
1705 {
1706 	struct dir_info *d;
1707 	void *r;
1708 	int saved_errno = errno;
1709 
1710 	d = getpool();
1711 	if (d == NULL) {
1712 		_malloc_init(0);
1713 		d = getpool();
1714 	}
1715 	_MALLOC_LOCK(d->mutex);
1716 	d->func = "calloc";
1717 	if ((nmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
1718 	    nmemb > 0 && SIZE_MAX / nmemb < size) {
1719 		_MALLOC_UNLOCK(d->mutex);
1720 		if (mopts.malloc_xmalloc)
1721 			wrterror(d, "out of memory");
1722 		errno = ENOMEM;
1723 		return NULL;
1724 	}
1725 
1726 	if (d->active++) {
1727 		malloc_recurse(d);
1728 		return NULL;
1729 	}
1730 
1731 	size *= nmemb;
1732 	r = omalloc(d, size, 1, CALLER);
1733 
1734 	d->active--;
1735 	_MALLOC_UNLOCK(d->mutex);
1736 	if (r == NULL && mopts.malloc_xmalloc)
1737 		wrterror(d, "out of memory");
1738 	if (r != NULL)
1739 		errno = saved_errno;
1740 	return r;
1741 }
1742 /*DEF_STRONG(calloc);*/
1743 
1744 static void *
1745 orecallocarray(struct dir_info *argpool, void *p, size_t oldsize,
1746     size_t newsize, void *f)
1747 {
1748 	struct dir_info *pool;
1749 	struct region_info *r;
1750 	void *newptr;
1751 	size_t sz;
1752 	int i;
1753 
1754 	pool = argpool;
1755 
1756 	if (p == NULL)
1757 		return omalloc(pool, newsize, 1, f);
1758 
1759 	if (oldsize == newsize)
1760 		return p;
1761 
1762 	r = find(pool, p);
1763 	if (r == NULL) {
1764 		if (mopts.malloc_mt) {
1765 			for (i = 0; i < _MALLOC_MUTEXES; i++) {
1766 				if (i == argpool->mutex)
1767 					continue;
1768 				pool->active--;
1769 				_MALLOC_UNLOCK(pool->mutex);
1770 				pool = mopts.malloc_pool[i];
1771 				_MALLOC_LOCK(pool->mutex);
1772 				pool->active++;
1773 				r = find(pool, p);
1774 				if (r != NULL)
1775 					break;
1776 			}
1777 		}
1778 		if (r == NULL)
1779 			wrterror(pool, "bogus pointer (double free?) %p", p);
1780 	}
1781 
1782 	REALSIZE(sz, r);
1783 	if (sz <= MALLOC_MAXCHUNK) {
1784 		if (mopts.chunk_canaries && sz > 0) {
1785 			struct chunk_info *info = (struct chunk_info *)r->size;
1786 			uint32_t chunknum = find_chunknum(pool, info, p, 0);
1787 
1788 			if (info->bits[info->offset + chunknum] != oldsize)
1789 				wrterror(pool, "recorded old size %hu != %zu",
1790 				    info->bits[info->offset + chunknum],
1791 				    oldsize);
1792 		}
1793 	} else if (oldsize != sz - mopts.malloc_guard)
1794 		wrterror(pool, "recorded old size %zu != %zu",
1795 		    sz - mopts.malloc_guard, oldsize);
1796 
1797 	newptr = omalloc(pool, newsize, 0, f);
1798 	if (newptr == NULL)
1799 		goto done;
1800 
1801 	if (newsize > oldsize) {
1802 		memcpy(newptr, p, oldsize);
1803 		memset((char *)newptr + oldsize, 0, newsize - oldsize);
1804 	} else
1805 		memcpy(newptr, p, newsize);
1806 
1807 	ofree(pool, p, 1, 0, 0);
1808 
1809 done:
1810 	if (argpool != pool) {
1811 		pool->active--;
1812 		_MALLOC_UNLOCK(pool->mutex);
1813 		_MALLOC_LOCK(argpool->mutex);
1814 		argpool->active++;
1815 	}
1816 
1817 	return newptr;
1818 }
1819 
1820 static void *
1821 recallocarray_p(void *ptr, size_t oldnmemb, size_t newnmemb, size_t size)
1822 {
1823 	size_t oldsize, newsize;
1824 	void *newptr;
1825 
1826 	if (ptr == NULL)
1827 		return calloc(newnmemb, size);
1828 
1829 	if ((newnmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
1830 	    newnmemb > 0 && SIZE_MAX / newnmemb < size) {
1831 		errno = ENOMEM;
1832 		return NULL;
1833 	}
1834 	newsize = newnmemb * size;
1835 
1836 	if ((oldnmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
1837 	    oldnmemb > 0 && SIZE_MAX / oldnmemb < size) {
1838 		errno = EINVAL;
1839 		return NULL;
1840 	}
1841 	oldsize = oldnmemb * size;
1842 
1843 	/*
1844  * Don't bother too much if we're shrinking just a bit; we do not
1845  * shrink for a series of small steps, oh well.
1846 	 */
1847 	if (newsize <= oldsize) {
1848 		size_t d = oldsize - newsize;
1849 
1850 		if (d < oldsize / 2 && d < MALLOC_PAGESIZE) {
1851 			memset((char *)ptr + newsize, 0, d);
1852 			return ptr;
1853 		}
1854 	}
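	/*
	 * Example: shrinking from 1000 to 900 bytes just zeroes the
	 * trailing 100 bytes and returns ptr unchanged; shrinking 1000
	 * to 400 loses more than half and takes the copy path below.
	 */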
1855 
1856 	newptr = malloc(newsize);
1857 	if (newptr == NULL)
1858 		return NULL;
1859 
1860 	if (newsize > oldsize) {
1861 		memcpy(newptr, ptr, oldsize);
1862 		memset((char *)newptr + oldsize, 0, newsize - oldsize);
1863 	} else
1864 		memcpy(newptr, ptr, newsize);
1865 
1866 	explicit_bzero(ptr, oldsize);
1867 	free(ptr);
1868 
1869 	return newptr;
1870 }
1871 
1872 void *
1873 recallocarray(void *ptr, size_t oldnmemb, size_t newnmemb, size_t size)
1874 {
1875 	struct dir_info *d;
1876 	size_t oldsize = 0, newsize;
1877 	void *r;
1878 	int saved_errno = errno;
1879 
1880 	if (!mopts.internal_funcs)
1881 		return recallocarray_p(ptr, oldnmemb, newnmemb, size);
1882 
1883 	d = getpool();
1884 	if (d == NULL) {
1885 		_malloc_init(0);
1886 		d = getpool();
1887 	}
1888 
1889 	_MALLOC_LOCK(d->mutex);
1890 	d->func = "recallocarray";
1891 
1892 	if ((newnmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
1893 	    newnmemb > 0 && SIZE_MAX / newnmemb < size) {
1894 		_MALLOC_UNLOCK(d->mutex);
1895 		if (mopts.malloc_xmalloc)
1896 			wrterror(d, "out of memory");
1897 		errno = ENOMEM;
1898 		return NULL;
1899 	}
1900 	newsize = newnmemb * size;
1901 
1902 	if (ptr != NULL) {
1903 		if ((oldnmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
1904 		    oldnmemb > 0 && SIZE_MAX / oldnmemb < size) {
1905 			_MALLOC_UNLOCK(d->mutex);
1906 			errno = EINVAL;
1907 			return NULL;
1908 		}
1909 		oldsize = oldnmemb * size;
1910 	}
1911 
1912 	if (d->active++) {
1913 		malloc_recurse(d);
1914 		return NULL;
1915 	}
1916 
1917 	r = orecallocarray(d, ptr, oldsize, newsize, CALLER);
1918 
1919 	d->active--;
1920 	_MALLOC_UNLOCK(d->mutex);
1921 	if (r == NULL && mopts.malloc_xmalloc)
1922 		wrterror(d, "out of memory");
1923 	if (r != NULL)
1924 		errno = saved_errno;
1925 	return r;
1926 }
1927 DEF_WEAK(recallocarray);
1928 
1929 
1930 static void *
1931 mapalign(struct dir_info *d, size_t alignment, size_t sz, int zero_fill)
1932 {
1933 	char *p, *q;
1934 
1935 	if (alignment < MALLOC_PAGESIZE || ((alignment - 1) & alignment) != 0)
1936 		wrterror(d, "mapalign bad alignment");
1937 	if (sz != PAGEROUND(sz))
1938 		wrterror(d, "mapalign round");
1939 
1940 	/* Allocate sz + alignment bytes of memory, which must include a
1941  * subrange of sz bytes that is properly aligned.  Unmap the
1942 	 * other bytes, and then return that subrange.
1943 	 */
1944 
1945 	/* We need sz + alignment to fit into a size_t. */
1946 	if (alignment > SIZE_MAX - sz)
1947 		return MAP_FAILED;
1948 
1949 	p = map(d, NULL, sz + alignment, zero_fill);
1950 	if (p == MAP_FAILED)
1951 		return MAP_FAILED;
1952 	q = (char *)(((uintptr_t)p + alignment - 1) & ~(alignment - 1));
1953 	if (q != p) {
1954 		if (munmap(p, q - p))
1955 			wrterror(d, "munmap %p", p);
1956 	}
1957 	if (munmap(q + sz, alignment - (q - p)))
1958 		wrterror(d, "munmap %p", q + sz);
1959 	STATS_SUB(d->malloc_used, alignment);
1960 
1961 	return q;
1962 }
1963 
1964 static void *
1965 omemalign(struct dir_info *pool, size_t alignment, size_t sz, int zero_fill,
1966     void *f)
1967 {
1968 	size_t psz;
1969 	void *p;
1970 
1971 	/* If between half a page and a page, avoid MALLOC_MOVE. */
1972 	if (sz > MALLOC_MAXCHUNK && sz < MALLOC_PAGESIZE)
1973 		sz = MALLOC_PAGESIZE;
1974 	if (alignment <= MALLOC_PAGESIZE) {
1975 		/*
1976 		 * max(sz, alignment) is enough to ensure the requested
1977 		 * alignment, since the allocator always allocates
1978 		 * power-of-two blocks.
1979 		 */
1980 		if (sz < alignment)
1981 			sz = alignment;
1982 		return omalloc(pool, sz, zero_fill, f);
1983 	}
1984 
1985 	if (sz >= SIZE_MAX - mopts.malloc_guard - MALLOC_PAGESIZE) {
1986 		errno = ENOMEM;
1987 		return NULL;
1988 	}
1989 
1990 	sz += mopts.malloc_guard;
1991 	psz = PAGEROUND(sz);
1992 
1993 	p = mapalign(pool, alignment, psz, zero_fill);
1994 	if (p == MAP_FAILED) {
1995 		errno = ENOMEM;
1996 		return NULL;
1997 	}
1998 
1999 	if (insert(pool, p, sz, f)) {
2000 		unmap(pool, p, psz, 0);
2001 		errno = ENOMEM;
2002 		return NULL;
2003 	}
2004 
2005 	if (mopts.malloc_guard) {
2006 		if (mprotect((char *)p + psz - mopts.malloc_guard,
2007 		    mopts.malloc_guard, PROT_NONE))
2008 			wrterror(pool, "mprotect");
2009 		STATS_ADD(pool->malloc_guarded, mopts.malloc_guard);
2010 	}
2011 
2012 	if (mopts.malloc_junk == 2) {
2013 		if (zero_fill)
2014 			memset((char *)p + sz - mopts.malloc_guard,
2015 			    SOME_JUNK, psz - sz);
2016 		else
2017 			memset(p, SOME_JUNK, psz - mopts.malloc_guard);
2018 	} else if (mopts.chunk_canaries)
2019 		fill_canary(p, sz - mopts.malloc_guard,
2020 		    psz - mopts.malloc_guard);
2021 
2022 	return p;
2023 }
2024 
2025 int
2026 posix_memalign(void **memptr, size_t alignment, size_t size)
2027 {
2028 	struct dir_info *d;
2029 	int res, saved_errno = errno;
2030 	void *r;
2031 
2032 	/* Make sure that alignment is a large enough power of 2. */
2033 	if (((alignment - 1) & alignment) != 0 || alignment < sizeof(void *))
2034 		return EINVAL;
2035 
2036 	d = getpool();
2037 	if (d == NULL) {
2038 		_malloc_init(0);
2039 		d = getpool();
2040 	}
2041 	_MALLOC_LOCK(d->mutex);
2042 	d->func = "posix_memalign";
2043 	if (d->active++) {
2044 		malloc_recurse(d);
2045 		goto err;
2046 	}
2047 	r = omemalign(d, alignment, size, 0, CALLER);
2048 	d->active--;
2049 	_MALLOC_UNLOCK(d->mutex);
2050 	if (r == NULL) {
2051 		if (mopts.malloc_xmalloc)
2052 			wrterror(d, "out of memory");
2053 		goto err;
2054 	}
2055 	errno = saved_errno;
2056 	*memptr = r;
2057 	return 0;
2058 
2059 err:
2060 	res = errno;
2061 	errno = saved_errno;
2062 	return res;
2063 }
2064 /*DEF_STRONG(posix_memalign);*/
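/*
 * Typical caller usage, for illustration only:
 *
 *	void *buf;
 *	int error;
 *
 *	error = posix_memalign(&buf, 64, 4096);
 *	if (error != 0)
 *		errx(1, "posix_memalign: %s", strerror(error));
 *
 * Unlike malloc(3), the error is the return value and errno is left
 * alone on success (restored from saved_errno above).
 */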
2065 
2066 #ifdef MALLOC_STATS
2067 
2068 struct malloc_leak {
2069 	void *f;
2070 	size_t total_size;
2071 	int count;
2072 };
2073 
2074 struct leaknode {
2075 	RBT_ENTRY(leaknode) entry;
2076 	struct malloc_leak d;
2077 };
2078 
2079 static inline int
2080 leakcmp(const struct leaknode *e1, const struct leaknode *e2)
2081 {
2082 	return e1->d.f < e2->d.f ? -1 : e1->d.f > e2->d.f;
2083 }
2084 
2085 static RBT_HEAD(leaktree, leaknode) leakhead;
2086 RBT_PROTOTYPE(leaktree, leaknode, entry, leakcmp);
2087 RBT_GENERATE(leaktree, leaknode, entry, leakcmp);
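/*
 * The leak tree is keyed solely on the recorded caller address f;
 * leakcmp() returns -1, 0 or 1 from the pointer comparison, so all
 * allocations made from the same call site aggregate into a single
 * leaknode.  RBT_* are the red-black tree macros from <sys/tree.h>.
 */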
2088 
2089 static void
2090 putleakinfo(void *f, size_t sz, int cnt)
2091 {
2092 	struct leaknode key, *p;
2093 	static struct leaknode *page;
2094 	static int used;
2095 
2096 	if (cnt == 0 || page == MAP_FAILED)
2097 		return;
2098 
2099 	key.d.f = f;
2100 	p = RBT_FIND(leaktree, &leakhead, &key);
2101 	if (p == NULL) {
2102 		if (page == NULL ||
2103 		    used >= MALLOC_PAGESIZE / sizeof(struct leaknode)) {
2104 			page = MMAP(MALLOC_PAGESIZE);
2105 			if (page == MAP_FAILED)
2106 				return;
2107 			used = 0;
2108 		}
2109 		p = &page[used++];
2110 		p->d.f = f;
2111 		p->d.total_size = sz * cnt;
2112 		p->d.count = cnt;
2113 		RBT_INSERT(leaktree, &leakhead, p);
2114 	} else {
2115 		p->d.total_size += sz * cnt;
2116 		p->d.count += cnt;
2117 	}
2118 }
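/*
 * leaknodes are bump-allocated from a page obtained directly via
 * MMAP() instead of from malloc itself: this code runs inside the
 * allocator and must not recurse or disturb the statistics it is
 * reporting on.  Pages are never unmapped (the tree keeps pointing
 * into them), and once MMAP() fails, page == MAP_FAILED makes all
 * later calls return early.
 */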
2119 
2120 static struct malloc_leak *malloc_leaks;
2121 
2122 static void
2123 dump_leaks(int fd)
2124 {
2125 	struct leaknode *p;
2126 	int i = 0;
2127 
2128 	dprintf(fd, "Leak report\n");
2129 	dprintf(fd, "                 f     sum      #    avg\n");
2130 	/* XXX only one page of summary */
2131 	if (malloc_leaks == NULL)
2132 		malloc_leaks = MMAP(MALLOC_PAGESIZE);
2133 	if (malloc_leaks != MAP_FAILED)
2134 		memset(malloc_leaks, 0, MALLOC_PAGESIZE);
2135 	RBT_FOREACH(p, leaktree, &leakhead) {
2136 		dprintf(fd, "%18p %7zu %6u %6zu\n", p->d.f,
2137 		    p->d.total_size, p->d.count, p->d.total_size / p->d.count);
2138 		if (malloc_leaks == MAP_FAILED ||
2139 		    i >= MALLOC_PAGESIZE / sizeof(struct malloc_leak))
2140 			continue;
2141 		malloc_leaks[i].f = p->d.f;
2142 		malloc_leaks[i].total_size = p->d.total_size;
2143 		malloc_leaks[i].count = p->d.count;
2144 		i++;
2145 	}
2146 }
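/*
 * Besides the textual report, the first page worth of records is
 * mirrored into the static malloc_leaks array so a debugger can
 * inspect the aggregates after the dump, e.g. in a hypothetical
 * session:
 *
 *	(gdb) print malloc_leaks[0]
 *
 * Records beyond MALLOC_PAGESIZE / sizeof(struct malloc_leak) are
 * still printed but not mirrored; see the XXX above.
 */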
2147 
2148 static void
2149 dump_chunk(int fd, struct chunk_info *p, void *f, int fromfreelist)
2150 {
2151 	while (p != NULL) {
2152 		dprintf(fd, "chunk %18p %18p %4d %d/%d\n",
2153 		    p->page, ((p->bits[0] & 1) ? NULL : f),
2154 		    p->size, p->free, p->total);
2155 		if (!fromfreelist) {
2156 			if (p->bits[0] & 1)
2157 				putleakinfo(NULL, p->size, p->total - p->free);
2158 			else {
2159 				putleakinfo(f, p->size, 1);
2160 				putleakinfo(NULL, p->size,
2161 				    p->total - p->free - 1);
2162 			}
2163 			break;
2164 		}
2165 		p = LIST_NEXT(p, entries);
2166 		if (p != NULL)
2167 			dprintf(fd, "        ");
2168 	}
2169 }
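/*
 * Leak accounting for a chunk page is necessarily approximate: only
 * one caller address is recorded per page, so a single in-use chunk
 * is charged to f and the remaining total - free - 1 in-use chunks go
 * to the NULL (unknown) call site; when p->bits[0] & 1 is set the
 * recorded caller is not used at all.
 */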
2170 
2171 static void
2172 dump_free_chunk_info(int fd, struct dir_info *d)
2173 {
2174 	int i, j, count;
2175 	struct chunk_info *p;
2176 
2177 	dprintf(fd, "Free chunk structs:\n");
2178 	for (i = 0; i <= MALLOC_MAXSHIFT; i++) {
2179 		count = 0;
2180 		LIST_FOREACH(p, &d->chunk_info_list[i], entries)
2181 			count++;
2182 		for (j = 0; j < MALLOC_CHUNK_LISTS; j++) {
2183 			p = LIST_FIRST(&d->chunk_dir[i][j]);
2184 			if (p == NULL && count == 0)
2185 				continue;
2186 			dprintf(fd, "%2d) %3d ", i, count);
2187 			if (p != NULL)
2188 				dump_chunk(fd, p, NULL, 1);
2189 			else
2190 				dprintf(fd, "\n");
2191 		}
2192 	}
2193 }
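/*
 * An illustrative line of the resulting output (values invented):
 *
 *	 4)   2 chunk     0xb5e1c2a3f000                0x0   16 3/256
 *
 * i.e. size class 4 (16-byte chunks) with two cached chunk_info
 * structs, followed by one "chunk" entry per struct on that class's
 * free lists.
 */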
2195 
2196 static void
2197 dump_free_page_info(int fd, struct dir_info *d)
2198 {
2199 	int i;
2200 
2201 	dprintf(fd, "Free pages cached: %zu\n", d->free_regions_size);
2202 	for (i = 0; i < mopts.malloc_cache; i++) {
2203 		if (d->free_regions[i].p != NULL) {
2204 			dprintf(fd, "%2d) ", i);
2205 			dprintf(fd, "free at %p: %zu\n",
2206 			    d->free_regions[i].p, d->free_regions[i].size);
2207 		}
2208 	}
2209 }
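/*
 * These regions are the pages held back from munmap(2) for reuse; at
 * most mopts.malloc_cache slots exist, matching the loop bound above,
 * and free_regions_size reports their combined total.
 */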
2210 
2211 static void
2212 malloc_dump1(int fd, int poolno, struct dir_info *d)
2213 {
2214 	size_t i, realsize;
2215 
2216 	dprintf(fd, "Malloc dir of %s pool %d at %p\n", __progname, poolno, d);
2217 	if (d == NULL)
2218 		return;
2219 	dprintf(fd, "Region slots free %zu/%zu\n",
2220 		d->regions_free, d->regions_total);
2221 	dprintf(fd, "Finds %zu/%zu\n", d->finds,
2222 	    d->find_collisions);
2223 	dprintf(fd, "Inserts %zu/%zu\n", d->inserts,
2224 	    d->insert_collisions);
2225 	dprintf(fd, "Deletes %zu/%zu\n", d->deletes,
2226 	    d->delete_moves);
2227 	dprintf(fd, "Cheap reallocs %zu/%zu\n",
2228 	    d->cheap_reallocs, d->cheap_realloc_tries);
2229 	dprintf(fd, "In use %zu\n", d->malloc_used);
2230 	dprintf(fd, "Guarded %zu\n", d->malloc_guarded);
2231 	dump_free_chunk_info(fd, d);
2232 	dump_free_page_info(fd, d);
2233 	dprintf(fd,
2234 	    "slot)  hash d  type               page                  f size [free/n]\n");
2235 	for (i = 0; i < d->regions_total; i++) {
2236 		if (d->r[i].p != NULL) {
2237 			size_t h = hash(d->r[i].p) &
2238 			    (d->regions_total - 1);
2239 			dprintf(fd, "%4zx) #%4zx %zd ",
2240 			    i, h, h - i);
2241 			REALSIZE(realsize, &d->r[i]);
2242 			if (realsize > MALLOC_MAXCHUNK) {
2243 				putleakinfo(d->r[i].f, realsize, 1);
2244 				dprintf(fd,
2245 				    "pages %18p %18p %zu\n", d->r[i].p,
2246 				    d->r[i].f, realsize);
2247 			} else
2248 				dump_chunk(fd,
2249 				    (struct chunk_info *)d->r[i].size,
2250 				    d->r[i].f, 0);
2251 		}
2252 	}
2253 	dump_leaks(fd);
2254 	dprintf(fd, "\n");
2255 }
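/*
 * In the region table above, `slot' is the index where the entry
 * actually resides, `hash' is its home bucket, and `d' is h - i,
 * i.e. how far collisions displaced the entry from its home bucket
 * on insertion.
 */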
2256 
2257 void
2258 malloc_dump(int fd, int poolno, struct dir_info *pool)
2259 {
2260 	int i;
2261 	void *p;
2262 	struct region_info *r;
2263 	int saved_errno = errno;
2264 
2265 	if (pool == NULL)
2266 		return;
2267 	for (i = 0; i < MALLOC_DELAYED_CHUNK_MASK + 1; i++) {
2268 		p = pool->delayed_chunks[i];
2269 		if (p == NULL)
2270 			continue;
2271 		r = find(pool, p);
2272 		if (r == NULL)
2273 			wrterror(pool, "bogus pointer in malloc_dump %p", p);
2274 		free_bytes(pool, r, p);
2275 		pool->delayed_chunks[i] = NULL;
2276 	}
2277 	/* XXX leak when run multiple times */
2278 	RBT_INIT(leaktree, &leakhead);
2279 	malloc_dump1(fd, poolno, pool);
2280 	errno = saved_errno;
2281 }
2282 DEF_WEAK(malloc_dump);
2283 
2284 void
2285 malloc_gdump(int fd)
2286 {
2287 	int i;
2288 	int saved_errno = errno;
2289 
2290 	for (i = 0; i < _MALLOC_MUTEXES; i++)
2291 		malloc_dump(fd, i, mopts.malloc_pool[i]);
2292 
2293 	errno = saved_errno;
2294 }
2295 DEF_WEAK(malloc_gdump);
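/*
 * malloc_gdump() walks every pool and is handy for ad-hoc inspection,
 * e.g. writing the report to stderr from a debugger (hypothetical
 * session):
 *
 *	(gdb) call malloc_gdump(2)
 */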
2296 
2297 static void
2298 malloc_exit(void)
2299 {
2300 	int save_errno = errno, fd, i;
2301 
2302 	fd = open("malloc.out", O_RDWR|O_APPEND);
2303 	if (fd != -1) {
2304 		dprintf(fd, "******** Start dump %s *******\n", __progname);
2305 		dprintf(fd,
2306 		    "MT=%d I=%d F=%d U=%d J=%d R=%d X=%d C=%d cache=%u G=%zu\n",
2307 		    mopts.malloc_mt, mopts.internal_funcs,
2308 		    mopts.malloc_freecheck,
2309 		    mopts.malloc_freeunmap, mopts.malloc_junk,
2310 		    mopts.malloc_realloc, mopts.malloc_xmalloc,
2311 		    mopts.chunk_canaries, mopts.malloc_cache,
2312 		    mopts.malloc_guard);
2313 
2314 		for (i = 0; i < _MALLOC_MUTEXES; i++)
2315 			malloc_dump(fd, i, mopts.malloc_pool[i]);
2316 		dprintf(fd, "******** End dump %s *******\n", __progname);
2317 		close(fd);
2318 	} else
2319 		dprintf(STDERR_FILENO,
2320 		    "malloc() warning: Couldn't dump stats\n");
2321 	errno = save_errno;
2322 }
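/*
 * malloc_exit() only does something useful when registered as an
 * atexit(3) handler, which happens elsewhere in this file when
 * statistics dumping (the 'D' option) is enabled in a MALLOC_STATS
 * build.  Since the open(2) above lacks O_CREAT, the dump is written
 * only if the output file already exists in the current directory,
 * e.g.:
 *
 *	$ touch malloc.out
 *	$ MALLOC_OPTIONS=D ./prog
 */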
2323 
2324 #endif /* MALLOC_STATS */
2325