xref: /openbsd-src/lib/libc/stdlib/malloc.c (revision aa997e528a848ca5596493c2a801bdd6fb26ae61)
/*	$OpenBSD: malloc.c,v 1.248 2018/03/30 07:23:15 otto Exp $	*/
/*
 * Copyright (c) 2008, 2010, 2011, 2016 Otto Moerbeek <otto@drijf.net>
 * Copyright (c) 2012 Matthew Dempsky <matthew@openbsd.org>
 * Copyright (c) 2008 Damien Miller <djm@openbsd.org>
 * Copyright (c) 2000 Poul-Henning Kamp <phk@FreeBSD.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * If we meet some day, and you think this stuff is worth it, you
 * can buy me a beer in return. Poul-Henning Kamp
 */

/* #define MALLOC_STATS */

#include <sys/types.h>
#include <sys/queue.h>
#include <sys/mman.h>
#include <sys/uio.h>
#include <errno.h>
#include <stdarg.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <unistd.h>

#ifdef MALLOC_STATS
#include <sys/tree.h>
#include <fcntl.h>
#endif

#include "thread_private.h"
#include <tib.h>

#define MALLOC_PAGESHIFT	_MAX_PAGE_SHIFT

#define MALLOC_MINSHIFT		4
#define MALLOC_MAXSHIFT		(MALLOC_PAGESHIFT - 1)
#define MALLOC_PAGESIZE		(1UL << MALLOC_PAGESHIFT)
#define MALLOC_MINSIZE		(1UL << MALLOC_MINSHIFT)
#define MALLOC_PAGEMASK		(MALLOC_PAGESIZE - 1)
#define MASK_POINTER(p)		((void *)(((uintptr_t)(p)) & ~MALLOC_PAGEMASK))

#define MALLOC_MAXCHUNK		(1 << MALLOC_MAXSHIFT)
#define MALLOC_MAXCACHE		256
#define MALLOC_DELAYED_CHUNK_MASK	15
#ifdef MALLOC_STATS
#define MALLOC_INITIAL_REGIONS	512
#else
#define MALLOC_INITIAL_REGIONS	(MALLOC_PAGESIZE / sizeof(struct region_info))
#endif
#define MALLOC_DEFAULT_CACHE	64
#define MALLOC_CHUNK_LISTS	4
#define CHUNK_CHECK_LENGTH	32

/*
 * We move allocations between half a page and a whole page towards the end,
 * subject to alignment constraints. This is the extra headroom we allow.
 * Set to zero to be the most strict.
 */
#define MALLOC_LEEWAY		0
#define MALLOC_MOVE_COND(sz)	((sz) - mopts.malloc_guard <		\
				    MALLOC_PAGESIZE - MALLOC_LEEWAY)
#define MALLOC_MOVE(p, sz)	(((char *)(p)) +			\
				    ((MALLOC_PAGESIZE - MALLOC_LEEWAY -	\
				    ((sz) - mopts.malloc_guard)) &	\
				    ~(MALLOC_MINSIZE - 1)))
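/*
 * Worked example (illustrative, not from the original source): with a
 * 4096-byte page, MALLOC_LEEWAY 0 and no guard page, a 100-byte
 * allocation is placed at page + ((4096 - 100) & ~15) = page + 3984,
 * so it ends 12 bytes short of the page end (the remainder of the
 * 16-byte alignment), putting the page boundary right after the data.
 */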

#define PAGEROUND(x)  (((x) + (MALLOC_PAGEMASK)) & ~MALLOC_PAGEMASK)

/*
 * What to use for Junk.  This is the byte value we use to fill with
 * when the 'J' option is enabled. Use SOME_JUNK right after alloc,
 * and SOME_FREEJUNK right before free.
 */
#define SOME_JUNK		0xdb	/* deadbeef */
#define SOME_FREEJUNK		0xdf	/* dead, free */

#define MMAP(sz)	mmap(NULL, (sz), PROT_READ | PROT_WRITE, \
    MAP_ANON | MAP_PRIVATE, -1, 0)

#define MMAPNONE(sz)	mmap(NULL, (sz), PROT_NONE, \
    MAP_ANON | MAP_PRIVATE, -1, 0)

#define MMAPA(a,sz)	mmap((a), (sz), PROT_READ | PROT_WRITE, \
    MAP_ANON | MAP_PRIVATE, -1, 0)

#define MQUERY(a, sz)	mquery((a), (sz), PROT_READ | PROT_WRITE, \
    MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0)

struct region_info {
	void *p;		/* page; low bits used to mark chunks */
	uintptr_t size;		/* size for pages, or chunk_info pointer */
#ifdef MALLOC_STATS
	void *f;		/* where allocated from */
#endif
};

LIST_HEAD(chunk_head, chunk_info);

struct dir_info {
	u_int32_t canary1;
	int active;			/* status of malloc */
	struct region_info *r;		/* region slots */
	size_t regions_total;		/* number of region slots */
	size_t regions_free;		/* number of free slots */
					/* lists of free chunk info structs */
	struct chunk_head chunk_info_list[MALLOC_MAXSHIFT + 1];
					/* lists of chunks with free slots */
	struct chunk_head chunk_dir[MALLOC_MAXSHIFT + 1][MALLOC_CHUNK_LISTS];
	size_t free_regions_size;	/* free pages cached */
					/* free pages cache */
	struct region_info free_regions[MALLOC_MAXCACHE];
					/* delayed free chunk slots */
	u_int rotor;
	void *delayed_chunks[MALLOC_DELAYED_CHUNK_MASK + 1];
	size_t rbytesused;		/* random bytes used */
	char *func;			/* current function */
	int mutex;
	u_char rbytes[32];		/* random bytes */
#ifdef MALLOC_STATS
	size_t inserts;
	size_t insert_collisions;
	size_t finds;
	size_t find_collisions;
	size_t deletes;
	size_t delete_moves;
	size_t cheap_realloc_tries;
	size_t cheap_reallocs;
	size_t malloc_used;		/* bytes allocated */
	size_t malloc_guarded;		/* bytes used for guards */
#define STATS_ADD(x,y)	((x) += (y))
#define STATS_SUB(x,y)	((x) -= (y))
#define STATS_INC(x)	((x)++)
#define STATS_ZERO(x)	((x) = 0)
#define STATS_SETF(x,y)	((x)->f = (y))
#else
#define STATS_ADD(x,y)	/* nothing */
#define STATS_SUB(x,y)	/* nothing */
#define STATS_INC(x)	/* nothing */
#define STATS_ZERO(x)	/* nothing */
#define STATS_SETF(x,y)	/* nothing */
#endif /* MALLOC_STATS */
	u_int32_t canary2;
};
#define DIR_INFO_RSZ	((sizeof(struct dir_info) + MALLOC_PAGEMASK) & \
			~MALLOC_PAGEMASK)

/*
 * This structure describes a page worth of chunks.
 */

/* How many bits per u_short in the bitmap */
#define MALLOC_BITS		(NBBY * sizeof(u_short))
struct chunk_info {
	LIST_ENTRY(chunk_info) entries;
	void *page;			/* pointer to the page */
	u_short canary;
	u_short size;			/* size of this page's chunks */
	u_short shift;			/* how far to shift for this size */
	u_short free;			/* how many free chunks */
	u_short total;			/* how many chunks */
	u_short offset;			/* requested size table offset */
	u_short bits[1];		/* which chunks are free */
};

struct malloc_readonly {
	struct dir_info *malloc_pool[_MALLOC_MUTEXES];	/* Main bookkeeping information */
	int	malloc_mt;		/* multi-threaded mode? */
	int	malloc_freecheck;	/* Extensive double free check */
	int	malloc_freeunmap;	/* mprotect free pages PROT_NONE? */
	int	malloc_junk;		/* junk fill? */
	int	malloc_realloc;		/* always realloc? */
	int	malloc_xmalloc;		/* xmalloc behaviour? */
	int	chunk_canaries;		/* use canaries after chunks? */
	int	internal_funcs;		/* use better recallocarray/freezero? */
	u_int	malloc_cache;		/* free pages we cache */
	size_t	malloc_guard;		/* use guard pages after allocations? */
#ifdef MALLOC_STATS
	int	malloc_stats;		/* dump statistics at end */
#endif
	u_int32_t malloc_canary;	/* Matched against ones in malloc_pool */
};

/* This object is mapped PROT_READ after initialisation to prevent tampering */
static union {
	struct malloc_readonly mopts;
	u_char _pad[MALLOC_PAGESIZE];
} malloc_readonly __attribute__((aligned(MALLOC_PAGESIZE)));
#define mopts	malloc_readonly.mopts

char		*malloc_options;	/* compile-time options */

static __dead void wrterror(struct dir_info *d, char *msg, ...)
    __attribute__((__format__ (printf, 2, 3)));

#ifdef MALLOC_STATS
void malloc_dump(int, int, struct dir_info *);
PROTO_NORMAL(malloc_dump);
void malloc_gdump(int);
PROTO_NORMAL(malloc_gdump);
static void malloc_exit(void);
#define CALLER	__builtin_return_address(0)
#else
#define CALLER	NULL
#endif

/* The low bits of r->p encode the region type: 0 means a page-sized or
 * larger region whose real size is held in r->size; 1 marks a malloc(0)
 * chunk page; any other value is a chunk size shift count plus one.
 */
#define REALSIZE(sz, r)						\
	(sz) = (uintptr_t)(r)->p & MALLOC_PAGEMASK,		\
	(sz) = ((sz) == 0 ? (r)->size : ((sz) == 1 ? 0 : (1 << ((sz)-1))))
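/*
 * Examples (illustrative): low bits 0 give sz = r->size (a page-sized
 * or larger region), low bits 1 give sz = 0 (a malloc(0) chunk), and
 * low bits k > 1 give sz = 1 << (k - 1), a page of 2^(k-1) byte chunks.
 */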

static inline void
_MALLOC_LEAVE(struct dir_info *d)
{
	if (mopts.malloc_mt) {
		d->active--;
		_MALLOC_UNLOCK(d->mutex);
	}
}

static inline void
_MALLOC_ENTER(struct dir_info *d)
{
	if (mopts.malloc_mt) {
		_MALLOC_LOCK(d->mutex);
		d->active++;
	}
}

static inline size_t
hash(void *p)
{
	size_t sum;
	uintptr_t u;

	u = (uintptr_t)p >> MALLOC_PAGESHIFT;
	sum = u;
	sum = (sum << 7) - sum + (u >> 16);
#ifdef __LP64__
	sum = (sum << 7) - sum + (u >> 32);
	sum = (sum << 7) - sum + (u >> 48);
#endif
	return sum;
}
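/*
 * Note (an interpretation, not from the source): each step is a
 * multiply by 127 ((sum << 7) - sum) folding in another 16 bits of the
 * page number, and shifting by MALLOC_PAGESHIFT first means a region's
 * key hashes the same with or without the low marker bits used for
 * chunk pages.
 */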

static inline
struct dir_info *getpool(void)
{
	if (!mopts.malloc_mt)
		return mopts.malloc_pool[0];
	else
		return mopts.malloc_pool[TIB_GET()->tib_tid &
		    (_MALLOC_MUTEXES - 1)];
}

static __dead void
wrterror(struct dir_info *d, char *msg, ...)
{
	int		saved_errno = errno;
	va_list		ap;

	dprintf(STDERR_FILENO, "%s(%d) in %s(): ", __progname,
	    getpid(), (d != NULL && d->func) ? d->func : "unknown");
	va_start(ap, msg);
	vdprintf(STDERR_FILENO, msg, ap);
	va_end(ap);
	dprintf(STDERR_FILENO, "\n");

#ifdef MALLOC_STATS
	if (mopts.malloc_stats)
		malloc_gdump(STDERR_FILENO);
#endif /* MALLOC_STATS */

	errno = saved_errno;

	abort();
}

static void
rbytes_init(struct dir_info *d)
{
	arc4random_buf(d->rbytes, sizeof(d->rbytes));
	/* add 1 to account for using d->rbytes[0] */
	d->rbytesused = 1 + d->rbytes[0] % (sizeof(d->rbytes) / 2);
}

static inline u_char
getrbyte(struct dir_info *d)
{
	u_char x;

	if (d->rbytesused >= sizeof(d->rbytes))
		rbytes_init(d);
	x = d->rbytes[d->rbytesused++];
	return x;
}

static void
omalloc_parseopt(char opt)
{
	switch (opt) {
	case '>':
		mopts.malloc_cache <<= 1;
		if (mopts.malloc_cache > MALLOC_MAXCACHE)
			mopts.malloc_cache = MALLOC_MAXCACHE;
		break;
	case '<':
		mopts.malloc_cache >>= 1;
		break;
	case 'c':
		mopts.chunk_canaries = 0;
		break;
	case 'C':
		mopts.chunk_canaries = 1;
		break;
#ifdef MALLOC_STATS
	case 'd':
		mopts.malloc_stats = 0;
		break;
	case 'D':
		mopts.malloc_stats = 1;
		break;
#endif /* MALLOC_STATS */
	case 'f':
		mopts.malloc_freecheck = 0;
		mopts.malloc_freeunmap = 0;
		break;
	case 'F':
		mopts.malloc_freecheck = 1;
		mopts.malloc_freeunmap = 1;
		break;
	case 'g':
		mopts.malloc_guard = 0;
		break;
	case 'G':
		mopts.malloc_guard = MALLOC_PAGESIZE;
		break;
	case 'j':
		if (mopts.malloc_junk > 0)
			mopts.malloc_junk--;
		break;
	case 'J':
		if (mopts.malloc_junk < 2)
			mopts.malloc_junk++;
		break;
	case 'r':
		mopts.malloc_realloc = 0;
		break;
	case 'R':
		mopts.malloc_realloc = 1;
		break;
	case 'u':
		mopts.malloc_freeunmap = 0;
		break;
	case 'U':
		mopts.malloc_freeunmap = 1;
		break;
	case 'x':
		mopts.malloc_xmalloc = 0;
		break;
	case 'X':
		mopts.malloc_xmalloc = 1;
		break;
	default:
		dprintf(STDERR_FILENO, "malloc() warning: "
		    "unknown char in MALLOC_OPTIONS\n");
		break;
	}
}

static void
omalloc_init(void)
{
	char *p, *q, b[64];
	int i, j;

	/*
	 * Default options
	 */
	mopts.malloc_junk = 1;
	mopts.malloc_cache = MALLOC_DEFAULT_CACHE;

	for (i = 0; i < 3; i++) {
		switch (i) {
		case 0:
			j = readlink("/etc/malloc.conf", b, sizeof b - 1);
			if (j <= 0)
				continue;
			b[j] = '\0';
			p = b;
			break;
		case 1:
			if (issetugid() == 0)
				p = getenv("MALLOC_OPTIONS");
			else
				continue;
			break;
		case 2:
			p = malloc_options;
			break;
		default:
			p = NULL;
		}

		for (; p != NULL && *p != '\0'; p++) {
			switch (*p) {
			case 'S':
				for (q = "CFGJ"; *q != '\0'; q++)
					omalloc_parseopt(*q);
				mopts.malloc_cache = 0;
				break;
			case 's':
				for (q = "cfgj"; *q != '\0'; q++)
					omalloc_parseopt(*q);
				mopts.malloc_cache = MALLOC_DEFAULT_CACHE;
				break;
			default:
				omalloc_parseopt(*p);
				break;
			}
		}
	}

#ifdef MALLOC_STATS
	if (mopts.malloc_stats && (atexit(malloc_exit) == -1)) {
		dprintf(STDERR_FILENO, "malloc() warning: atexit(2) failed."
		    " Will not be able to dump stats on exit\n");
	}
#endif /* MALLOC_STATS */

	while ((mopts.malloc_canary = arc4random()) == 0)
		;
}
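/*
 * Usage sketch (illustrative): options can be supplied per process via
 * the environment, e.g.
 *	$ MALLOC_OPTIONS=S ./prog	(canaries, freecheck, guards, junk)
 * or system wide through the symlink read above:
 *	# ln -sf CJ /etc/malloc.conf
 * Later sources override earlier ones: malloc.conf first, then the
 * environment, then the global malloc_options variable.
 */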

static void
omalloc_poolinit(struct dir_info **dp)
{
	char *p;
	size_t d_avail, regioninfo_size;
	struct dir_info *d;
	int i, j;

	/*
	 * Allocate dir_info with a guard page on either side. Also
	 * randomise offset inside the page at which the dir_info
	 * lies (subject to alignment by 1 << MALLOC_MINSHIFT)
	 */
	if ((p = MMAPNONE(DIR_INFO_RSZ + (MALLOC_PAGESIZE * 2))) == MAP_FAILED)
		wrterror(NULL, "malloc init mmap failed");
	mprotect(p + MALLOC_PAGESIZE, DIR_INFO_RSZ, PROT_READ | PROT_WRITE);
	d_avail = (DIR_INFO_RSZ - sizeof(*d)) >> MALLOC_MINSHIFT;
	d = (struct dir_info *)(p + MALLOC_PAGESIZE +
	    (arc4random_uniform(d_avail) << MALLOC_MINSHIFT));

	rbytes_init(d);
	d->regions_free = d->regions_total = MALLOC_INITIAL_REGIONS;
	regioninfo_size = d->regions_total * sizeof(struct region_info);
	d->r = MMAP(regioninfo_size);
	if (d->r == MAP_FAILED) {
		d->regions_total = 0;
		wrterror(NULL, "malloc init mmap failed");
	}
	for (i = 0; i <= MALLOC_MAXSHIFT; i++) {
		LIST_INIT(&d->chunk_info_list[i]);
		for (j = 0; j < MALLOC_CHUNK_LISTS; j++)
			LIST_INIT(&d->chunk_dir[i][j]);
	}
	STATS_ADD(d->malloc_used, regioninfo_size);
	d->canary1 = mopts.malloc_canary ^ (u_int32_t)(uintptr_t)d;
	d->canary2 = ~d->canary1;

	*dp = d;
}

static int
omalloc_grow(struct dir_info *d)
{
	size_t newtotal;
	size_t newsize;
	size_t mask;
	size_t i;
	struct region_info *p;

	if (d->regions_total > SIZE_MAX / sizeof(struct region_info) / 2)
		return 1;

	newtotal = d->regions_total * 2;
	newsize = newtotal * sizeof(struct region_info);
	mask = newtotal - 1;

	p = MMAP(newsize);
	if (p == MAP_FAILED)
		return 1;

	STATS_ADD(d->malloc_used, newsize);
	STATS_ZERO(d->inserts);
	STATS_ZERO(d->insert_collisions);
	for (i = 0; i < d->regions_total; i++) {
		void *q = d->r[i].p;
		if (q != NULL) {
			size_t index = hash(q) & mask;
			STATS_INC(d->inserts);
			while (p[index].p != NULL) {
				index = (index - 1) & mask;
				STATS_INC(d->insert_collisions);
			}
			p[index] = d->r[i];
		}
	}
	/* avoid letting pages that contain meta info end up in the cache */
	if (munmap(d->r, d->regions_total * sizeof(struct region_info)))
		wrterror(d, "munmap %p", (void *)d->r);
	else
		STATS_SUB(d->malloc_used,
		    d->regions_total * sizeof(struct region_info));
	d->regions_free = d->regions_free + d->regions_total;
	d->regions_total = newtotal;
	d->r = p;
	return 0;
}

/*
 * The hashtable uses the assumption that p is never NULL. This holds since
 * non-MAP_FIXED mappings with hint 0 start at BRKSIZ.
 */
static int
insert(struct dir_info *d, void *p, size_t sz, void *f)
{
	size_t index;
	size_t mask;
	void *q;

	if (d->regions_free * 4 < d->regions_total) {
		if (omalloc_grow(d))
			return 1;
	}
	mask = d->regions_total - 1;
	index = hash(p) & mask;
	q = d->r[index].p;
	STATS_INC(d->inserts);
	while (q != NULL) {
		index = (index - 1) & mask;
		q = d->r[index].p;
		STATS_INC(d->insert_collisions);
	}
	d->r[index].p = p;
	d->r[index].size = sz;
#ifdef MALLOC_STATS
	d->r[index].f = f;
#endif
	d->regions_free--;
	return 0;
}

static struct region_info *
find(struct dir_info *d, void *p)
{
	size_t index;
	size_t mask = d->regions_total - 1;
	void *q, *r;

	if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)(uintptr_t)d) ||
	    d->canary1 != ~d->canary2)
		wrterror(d, "internal struct corrupt");
	p = MASK_POINTER(p);
	index = hash(p) & mask;
	r = d->r[index].p;
	q = MASK_POINTER(r);
	STATS_INC(d->finds);
	while (q != p && r != NULL) {
		index = (index - 1) & mask;
		r = d->r[index].p;
		q = MASK_POINTER(r);
		STATS_INC(d->find_collisions);
	}
	return (q == p && r != NULL) ? &d->r[index] : NULL;
}

static void
delete(struct dir_info *d, struct region_info *ri)
{
	/* algorithm R, Knuth Vol III section 6.4 */
	size_t mask = d->regions_total - 1;
	size_t i, j, r;

	if (d->regions_total & (d->regions_total - 1))
		wrterror(d, "regions_total not 2^x");
	d->regions_free++;
	STATS_INC(d->deletes);

	i = ri - d->r;
	for (;;) {
		d->r[i].p = NULL;
		d->r[i].size = 0;
		j = i;
		for (;;) {
			i = (i - 1) & mask;
			if (d->r[i].p == NULL)
				return;
			r = hash(d->r[i].p) & mask;
			if ((i <= r && r < j) || (r < j && j < i) ||
			    (j < i && i <= r))
				continue;
			d->r[j] = d->r[i];
			STATS_INC(d->delete_moves);
			break;
		}
	}
}
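/*
 * Note: this is the standard open-addressing deletion; after emptying
 * a slot, walk the rest of the probe cluster (probing downward, like
 * insert and find above) and pull back any entry whose home slot no
 * longer reaches it past the new hole.
 */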

/*
 * Cache maintenance.  We keep at most malloc_cache pages cached.
 * If the cache is becoming full, unmap pages in the cache for real,
 * and then add the region to the cache.
 * As opposed to the regular region data structure, the sizes in the
 * cache are in MALLOC_PAGESIZE units.
 */
static void
unmap(struct dir_info *d, void *p, size_t sz, size_t clear, int junk)
{
	size_t psz = sz >> MALLOC_PAGESHIFT;
	size_t rsz;
	struct region_info *r;
	u_int i, offset, mask;

	if (sz != PAGEROUND(sz))
		wrterror(d, "munmap round");

	rsz = mopts.malloc_cache - d->free_regions_size;

	/*
	 * normally the cache holds recently freed regions, but if the region
	 * to unmap is larger than the cache size or we're clearing and the
	 * cache is full, just munmap
	 */
	if (psz > mopts.malloc_cache || (clear > 0 && rsz == 0)) {
		i = munmap(p, sz);
		if (i)
			wrterror(d, "munmap %p", p);
		STATS_SUB(d->malloc_used, sz);
		return;
	}
	offset = getrbyte(d);
	mask = mopts.malloc_cache - 1;
	if (psz > rsz) {
		size_t tounmap = psz - rsz;
		for (i = 0; ; i++) {
			r = &d->free_regions[(i + offset) & mask];
			if (r->p != NULL) {
				rsz = r->size << MALLOC_PAGESHIFT;
				if (munmap(r->p, rsz))
					wrterror(d, "munmap %p", r->p);
				r->p = NULL;
				if (tounmap > r->size)
					tounmap -= r->size;
				else
					tounmap = 0;
				d->free_regions_size -= r->size;
				STATS_SUB(d->malloc_used, rsz);
				if (tounmap == 0) {
					offset = i;
					break;
				}
			}
		}
	}
	for (i = 0; ; i++) {
		r = &d->free_regions[(i + offset) & mask];
		if (r->p == NULL) {
			if (clear > 0)
				memset(p, 0, clear);
			if (junk && !mopts.malloc_freeunmap) {
				size_t amt = junk == 1 ?  MALLOC_MAXCHUNK : sz;
				memset(p, SOME_FREEJUNK, amt);
			}
			if (mopts.malloc_freeunmap)
				mprotect(p, sz, PROT_NONE);
			r->p = p;
			r->size = psz;
			d->free_regions_size += psz;
			break;
		}
	}
	if (d->free_regions_size > mopts.malloc_cache)
		wrterror(d, "malloc cache overflow");
}

static void
zapcacheregion(struct dir_info *d, void *p, size_t len)
{
	u_int i;
	struct region_info *r;
	size_t rsz;

	for (i = 0; i < mopts.malloc_cache; i++) {
		r = &d->free_regions[i];
		if (r->p >= p && r->p <= (void *)((char *)p + len)) {
			rsz = r->size << MALLOC_PAGESHIFT;
			if (munmap(r->p, rsz))
				wrterror(d, "munmap %p", r->p);
			r->p = NULL;
			d->free_regions_size -= r->size;
			STATS_SUB(d->malloc_used, rsz);
		}
	}
}

static void *
map(struct dir_info *d, void *hint, size_t sz, int zero_fill)
{
	size_t psz = sz >> MALLOC_PAGESHIFT;
	struct region_info *r, *big = NULL;
	u_int i;
	void *p;

	if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)(uintptr_t)d) ||
	    d->canary1 != ~d->canary2)
		wrterror(d, "internal struct corrupt");
	if (sz != PAGEROUND(sz))
		wrterror(d, "map round");

	if (hint == NULL && psz > d->free_regions_size) {
		_MALLOC_LEAVE(d);
		p = MMAP(sz);
		_MALLOC_ENTER(d);
		if (p != MAP_FAILED)
			STATS_ADD(d->malloc_used, sz);
		/* zero fill not needed */
		return p;
	}
	for (i = 0; i < mopts.malloc_cache; i++) {
		r = &d->free_regions[(i + d->rotor) & (mopts.malloc_cache - 1)];
		if (r->p != NULL) {
			if (hint != NULL && r->p != hint)
				continue;
			if (r->size == psz) {
				p = r->p;
				r->p = NULL;
				d->free_regions_size -= psz;
				if (mopts.malloc_freeunmap)
					mprotect(p, sz, PROT_READ | PROT_WRITE);
				if (zero_fill)
					memset(p, 0, sz);
				else if (mopts.malloc_junk == 2 &&
				    mopts.malloc_freeunmap)
					memset(p, SOME_FREEJUNK, sz);
				d->rotor += i + 1;
				return p;
			} else if (r->size > psz)
				big = r;
		}
	}
	if (big != NULL) {
		r = big;
		p = r->p;
		r->p = (char *)r->p + (psz << MALLOC_PAGESHIFT);
		if (mopts.malloc_freeunmap)
			mprotect(p, sz, PROT_READ | PROT_WRITE);
		r->size -= psz;
		d->free_regions_size -= psz;
		if (zero_fill)
			memset(p, 0, sz);
		else if (mopts.malloc_junk == 2 && mopts.malloc_freeunmap)
			memset(p, SOME_FREEJUNK, sz);
		return p;
	}
	if (hint != NULL)
		return MAP_FAILED;
	if (d->free_regions_size > mopts.malloc_cache)
		wrterror(d, "malloc cache");
	_MALLOC_LEAVE(d);
	p = MMAP(sz);
	_MALLOC_ENTER(d);
	if (p != MAP_FAILED)
		STATS_ADD(d->malloc_used, sz);
	/* zero fill not needed */
	return p;
}

static void
init_chunk_info(struct dir_info *d, struct chunk_info *p, int bits)
{
	int i;

	if (bits == 0) {
		p->shift = MALLOC_MINSHIFT;
		p->total = p->free = MALLOC_PAGESIZE >> p->shift;
		p->size = 0;
		p->offset = 0xdead;
	} else {
		p->shift = bits;
		p->total = p->free = MALLOC_PAGESIZE >> p->shift;
		p->size = 1U << bits;
		p->offset = howmany(p->total, MALLOC_BITS);
	}
	p->canary = (u_short)d->canary1;

	/* set all valid bits in the bitmap */
	i = p->total - 1;
	memset(p->bits, 0xff, sizeof(p->bits[0]) * (i / MALLOC_BITS));
	p->bits[i / MALLOC_BITS] = (2U << (i % MALLOC_BITS)) - 1;
}
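/*
 * Example (illustrative): with 4096-byte pages and 16-byte chunks,
 * total = 256, so the memset fills bits[0..14] and the last short gets
 * (2U << (255 % 16)) - 1 = 0xffff; for 1024-byte chunks total = 4 and
 * the single short becomes (2U << 3) - 1 = 0xf, leaving the bits of
 * nonexistent chunks clear.
 */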

static struct chunk_info *
alloc_chunk_info(struct dir_info *d, int bits)
{
	struct chunk_info *p;

	if (LIST_EMPTY(&d->chunk_info_list[bits])) {
		size_t size, count, i;
		char *q;

		if (bits == 0)
			count = MALLOC_PAGESIZE / MALLOC_MINSIZE;
		else
			count = MALLOC_PAGESIZE >> bits;

		size = howmany(count, MALLOC_BITS);
		size = sizeof(struct chunk_info) + (size - 1) * sizeof(u_short);
		if (mopts.chunk_canaries)
			size += count * sizeof(u_short);
		size = _ALIGN(size);

		q = MMAP(MALLOC_PAGESIZE);
		if (q == MAP_FAILED)
			return NULL;
		STATS_ADD(d->malloc_used, MALLOC_PAGESIZE);
		count = MALLOC_PAGESIZE / size;

		for (i = 0; i < count; i++, q += size) {
			p = (struct chunk_info *)q;
			LIST_INSERT_HEAD(&d->chunk_info_list[bits], p, entries);
		}
	}
	p = LIST_FIRST(&d->chunk_info_list[bits]);
	LIST_REMOVE(p, entries);
	if (p->shift == 0)
		init_chunk_info(d, p, bits);
	return p;
}

/*
 * Allocate a page of chunks
 */
static struct chunk_info *
omalloc_make_chunks(struct dir_info *d, int bits, int listnum)
{
	struct chunk_info *bp;
	void *pp;

	/* Allocate a new bucket */
	pp = map(d, NULL, MALLOC_PAGESIZE, 0);
	if (pp == MAP_FAILED)
		return NULL;

	/* memory protect the page allocated in the malloc(0) case */
	if (bits == 0 && mprotect(pp, MALLOC_PAGESIZE, PROT_NONE) < 0)
		goto err;

	bp = alloc_chunk_info(d, bits);
	if (bp == NULL)
		goto err;
	bp->page = pp;

	if (insert(d, (void *)((uintptr_t)pp | (bits + 1)), (uintptr_t)bp,
	    NULL))
		goto err;
	LIST_INSERT_HEAD(&d->chunk_dir[bits][listnum], bp, entries);
	return bp;

err:
	unmap(d, pp, MALLOC_PAGESIZE, 0, mopts.malloc_junk);
	return NULL;
}

static int
find_chunksize(size_t size)
{
	int r;

	/* malloc(0) is special */
	if (size == 0)
		return 0;

	if (size < MALLOC_MINSIZE)
		size = MALLOC_MINSIZE;
	size--;

	r = MALLOC_MINSHIFT;
	while (size >> r)
		r++;
	return r;
}
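/*
 * Example (illustrative): find_chunksize(100) returns 7, so the
 * request is served from a page of 128-byte (1 << 7) chunks; requests
 * of 1 to 16 bytes all land in the MALLOC_MINSHIFT bucket, and
 * malloc(0) gets its own bucket 0.
 */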

static void
fill_canary(char *ptr, size_t sz, size_t allocated)
{
	size_t check_sz = allocated - sz;

	if (check_sz > CHUNK_CHECK_LENGTH)
		check_sz = CHUNK_CHECK_LENGTH;
	memset(ptr + sz, SOME_JUNK, check_sz);
}
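/*
 * Example (illustrative): malloc(100) served from a 128-byte chunk
 * writes SOME_JUNK over bytes 100..127; validate_canary() re-checks
 * exactly those bytes on free, bounded by CHUNK_CHECK_LENGTH.
 */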

/*
 * Allocate a chunk
 */
static void *
malloc_bytes(struct dir_info *d, size_t size, void *f)
{
	u_int i, r;
	int j, listnum;
	size_t k;
	u_short	*lp;
	struct chunk_info *bp;
	void *p;

	if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)(uintptr_t)d) ||
	    d->canary1 != ~d->canary2)
		wrterror(d, "internal struct corrupt");

	j = find_chunksize(size);

	r = ((u_int)getrbyte(d) << 8) | getrbyte(d);
	listnum = r % MALLOC_CHUNK_LISTS;
	/* If the list is empty, make a page more of chunks of this size */
	if ((bp = LIST_FIRST(&d->chunk_dir[j][listnum])) == NULL) {
		bp = omalloc_make_chunks(d, j, listnum);
		if (bp == NULL)
			return NULL;
	}

	if (bp->canary != (u_short)d->canary1)
		wrterror(d, "chunk info corrupted");

	i = (r / MALLOC_CHUNK_LISTS) & (bp->total - 1);

	/* start somewhere in a short */
	lp = &bp->bits[i / MALLOC_BITS];
	if (*lp) {
		j = i % MALLOC_BITS;
		k = ffs(*lp >> j);
		if (k != 0) {
			k += j - 1;
			goto found;
		}
	}
	/* no free bit in the rest of that short; move on to whole shorts */
	i /= MALLOC_BITS;
	for (;;) {
		if (++i >= bp->total / MALLOC_BITS)
			i = 0;
		lp = &bp->bits[i];
		if (*lp) {
			k = ffs(*lp) - 1;
			break;
		}
	}
found:
#ifdef MALLOC_STATS
	if (i == 0 && k == 0) {
		struct region_info *r = find(d, bp->page);
		r->f = f;
	}
#endif

	*lp ^= 1 << k;

	/* If there are no more free, remove from free-list */
	if (--bp->free == 0)
		LIST_REMOVE(bp, entries);

	/* Adjust to the real offset of that chunk */
	k += (lp - bp->bits) * MALLOC_BITS;

	if (mopts.chunk_canaries && size > 0)
		bp->bits[bp->offset + k] = size;

	k <<= bp->shift;

	p = (char *)bp->page + k;
	if (bp->size > 0) {
		if (mopts.malloc_junk == 2)
			memset(p, SOME_JUNK, bp->size);
		else if (mopts.chunk_canaries)
			fill_canary(p, size, bp->size);
	}
	return p;
}

static void
validate_canary(struct dir_info *d, u_char *ptr, size_t sz, size_t allocated)
{
	size_t check_sz = allocated - sz;
	u_char *p, *q;

	if (check_sz > CHUNK_CHECK_LENGTH)
		check_sz = CHUNK_CHECK_LENGTH;
	p = ptr + sz;
	q = p + check_sz;

	while (p < q) {
		if (*p != SOME_JUNK) {
			wrterror(d, "chunk canary corrupted %p %#tx@%#zx%s",
			    ptr, p - ptr, sz,
			    *p == SOME_FREEJUNK ? " (double free?)" : "");
		}
		p++;
	}
}

static uint32_t
find_chunknum(struct dir_info *d, struct chunk_info *info, void *ptr, int check)
{
	uint32_t chunknum;

	if (info->canary != (u_short)d->canary1)
		wrterror(d, "chunk info corrupted");

	/* Find the chunk number on the page */
	chunknum = ((uintptr_t)ptr & MALLOC_PAGEMASK) >> info->shift;

	if ((uintptr_t)ptr & ((1U << (info->shift)) - 1))
		wrterror(d, "modified chunk-pointer %p", ptr);
	if (info->bits[chunknum / MALLOC_BITS] &
	    (1U << (chunknum % MALLOC_BITS)))
		wrterror(d, "chunk is already free %p", ptr);
	if (check && info->size > 0) {
		validate_canary(d, ptr, info->bits[info->offset + chunknum],
		    info->size);
	}
	return chunknum;
}
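/*
 * Note: with chunk canaries enabled, the requested size recorded in
 * bits[offset + chunknum] (just past the free bitmap) tells
 * validate_canary() where the caller's data was supposed to end
 * inside the chunk.
 */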

/*
 * Free a chunk, and possibly the page it's on, if the page becomes empty.
 */
static void
free_bytes(struct dir_info *d, struct region_info *r, void *ptr)
{
	struct chunk_head *mp;
	struct chunk_info *info;
	uint32_t chunknum;
	int listnum;

	info = (struct chunk_info *)r->size;
	chunknum = find_chunknum(d, info, ptr, 0);

	info->bits[chunknum / MALLOC_BITS] |= 1U << (chunknum % MALLOC_BITS);
	info->free++;

	if (info->free == 1) {
		/* Page became non-full */
		listnum = getrbyte(d) % MALLOC_CHUNK_LISTS;
		if (info->size != 0)
			mp = &d->chunk_dir[info->shift][listnum];
		else
			mp = &d->chunk_dir[0][listnum];

		LIST_INSERT_HEAD(mp, info, entries);
		return;
	}

	if (info->free != info->total)
		return;

	LIST_REMOVE(info, entries);

	if (info->size == 0 && !mopts.malloc_freeunmap)
		mprotect(info->page, MALLOC_PAGESIZE, PROT_READ | PROT_WRITE);
	unmap(d, info->page, MALLOC_PAGESIZE, 0, 0);

	delete(d, r);
	if (info->size != 0)
		mp = &d->chunk_info_list[info->shift];
	else
		mp = &d->chunk_info_list[0];
	LIST_INSERT_HEAD(mp, info, entries);
}

static void *
omalloc(struct dir_info *pool, size_t sz, int zero_fill, void *f)
{
	void *p;
	size_t psz;

	if (sz > MALLOC_MAXCHUNK) {
		if (sz >= SIZE_MAX - mopts.malloc_guard - MALLOC_PAGESIZE) {
			errno = ENOMEM;
			return NULL;
		}
		sz += mopts.malloc_guard;
		psz = PAGEROUND(sz);
		p = map(pool, NULL, psz, zero_fill);
		if (p == MAP_FAILED) {
			errno = ENOMEM;
			return NULL;
		}
		if (insert(pool, p, sz, f)) {
			unmap(pool, p, psz, 0, 0);
			errno = ENOMEM;
			return NULL;
		}
		if (mopts.malloc_guard) {
			if (mprotect((char *)p + psz - mopts.malloc_guard,
			    mopts.malloc_guard, PROT_NONE))
				wrterror(pool, "mprotect");
			STATS_ADD(pool->malloc_guarded, mopts.malloc_guard);
		}

		if (MALLOC_MOVE_COND(sz)) {
			/* fill whole allocation */
			if (mopts.malloc_junk == 2)
				memset(p, SOME_JUNK, psz - mopts.malloc_guard);
			/* shift towards the end */
			p = MALLOC_MOVE(p, sz);
			/* fill zeros if needed and overwritten above */
			if (zero_fill && mopts.malloc_junk == 2)
				memset(p, 0, sz - mopts.malloc_guard);
		} else {
			if (mopts.malloc_junk == 2) {
				if (zero_fill)
					memset((char *)p + sz - mopts.malloc_guard,
					    SOME_JUNK, psz - sz);
				else
					memset(p, SOME_JUNK,
					    psz - mopts.malloc_guard);
			} else if (mopts.chunk_canaries)
				fill_canary(p, sz - mopts.malloc_guard,
				    psz - mopts.malloc_guard);
		}

	} else {
		/* takes care of SOME_JUNK */
		p = malloc_bytes(pool, sz, f);
		if (zero_fill && p != NULL && sz > 0)
			memset(p, 0, sz);
	}

	return p;
}

/*
 * Common function for handling recursion.  Print the error message
 * only once, to avoid making the problem potentially worse.
 */
static void
malloc_recurse(struct dir_info *d)
{
	static int noprint;

	if (noprint == 0) {
		noprint = 1;
		wrterror(d, "recursive call");
	}
	d->active--;
	_MALLOC_UNLOCK(d->mutex);
	errno = EDEADLK;
}

void
_malloc_init(int from_rthreads)
{
	int i, max;
	struct dir_info *d;

	_MALLOC_LOCK(0);
	if (!from_rthreads && mopts.malloc_pool[0]) {
		_MALLOC_UNLOCK(0);
		return;
	}
	if (!mopts.malloc_canary)
		omalloc_init();

	max = from_rthreads ? _MALLOC_MUTEXES : 1;
	if (((uintptr_t)&malloc_readonly & MALLOC_PAGEMASK) == 0)
		mprotect(&malloc_readonly, sizeof(malloc_readonly),
		    PROT_READ | PROT_WRITE);
	for (i = 0; i < max; i++) {
		if (mopts.malloc_pool[i])
			continue;
		omalloc_poolinit(&d);
		d->mutex = i;
		mopts.malloc_pool[i] = d;
	}

	if (from_rthreads)
		mopts.malloc_mt = 1;
	else
		mopts.internal_funcs = 1;

	/*
	 * Options have been set and will never be reset.
	 * Prevent further tampering with them.
	 */
	if (((uintptr_t)&malloc_readonly & MALLOC_PAGEMASK) == 0)
		mprotect(&malloc_readonly, sizeof(malloc_readonly), PROT_READ);
	_MALLOC_UNLOCK(0);
}
DEF_STRONG(_malloc_init);

void *
malloc(size_t size)
{
	void *r;
	struct dir_info *d;
	int saved_errno = errno;

	d = getpool();
	if (d == NULL) {
		_malloc_init(0);
		d = getpool();
	}
	_MALLOC_LOCK(d->mutex);
	d->func = "malloc";

	if (d->active++) {
		malloc_recurse(d);
		return NULL;
	}
	r = omalloc(d, size, 0, CALLER);
	d->active--;
	_MALLOC_UNLOCK(d->mutex);
	if (r == NULL && mopts.malloc_xmalloc)
		wrterror(d, "out of memory");
	if (r != NULL)
		errno = saved_errno;
	return r;
}
/*DEF_STRONG(malloc);*/

static void
validate_junk(struct dir_info *pool, void *p)
{
	struct region_info *r;
	size_t byte, sz;

	if (p == NULL)
		return;
	r = find(pool, p);
	if (r == NULL)
		wrterror(pool, "bogus pointer in validate_junk %p", p);
	REALSIZE(sz, r);
	if (sz > CHUNK_CHECK_LENGTH)
		sz = CHUNK_CHECK_LENGTH;
	for (byte = 0; byte < sz; byte++) {
		if (((unsigned char *)p)[byte] != SOME_FREEJUNK)
			wrterror(pool, "use after free %p", p);
	}
}

static void
ofree(struct dir_info *argpool, void *p, int clear, int check, size_t argsz)
{
	struct dir_info *pool;
	struct region_info *r;
	char *saved_function;
	size_t sz;
	int i;

	pool = argpool;
	r = find(pool, p);
	if (r == NULL) {
		if (mopts.malloc_mt)  {
			for (i = 0; i < _MALLOC_MUTEXES; i++) {
				if (i == argpool->mutex)
					continue;
				pool->active--;
				_MALLOC_UNLOCK(pool->mutex);
				pool = mopts.malloc_pool[i];
				_MALLOC_LOCK(pool->mutex);
				pool->active++;
				r = find(pool, p);
				if (r != NULL) {
					saved_function = pool->func;
					pool->func = argpool->func;
					break;
				}
			}
		}
		if (r == NULL)
			wrterror(argpool, "bogus pointer (double free?) %p", p);
	}

	REALSIZE(sz, r);
	if (check) {
		if (sz <= MALLOC_MAXCHUNK) {
			if (mopts.chunk_canaries && sz > 0) {
				struct chunk_info *info =
				    (struct chunk_info *)r->size;
				uint32_t chunknum =
				    find_chunknum(pool, info, p, 0);

				if (info->bits[info->offset + chunknum] < argsz)
					wrterror(pool, "recorded size %hu"
					    " < %zu",
					    info->bits[info->offset + chunknum],
					    argsz);
			} else {
				if (sz < argsz)
					wrterror(pool, "chunk size %zu < %zu",
					    sz, argsz);
			}
		} else if (sz - mopts.malloc_guard < argsz) {
			wrterror(pool, "recorded size %zu < %zu",
			    sz - mopts.malloc_guard, argsz);
		}
	}
	if (sz > MALLOC_MAXCHUNK) {
		if (!MALLOC_MOVE_COND(sz)) {
			if (r->p != p)
				wrterror(pool, "bogus pointer %p", p);
			if (mopts.chunk_canaries)
				validate_canary(pool, p,
				    sz - mopts.malloc_guard,
				    PAGEROUND(sz - mopts.malloc_guard));
		} else {
			/* shifted towards the end */
			if (p != MALLOC_MOVE(r->p, sz))
				wrterror(pool, "bogus moved pointer %p", p);
			p = r->p;
		}
		if (mopts.malloc_guard) {
			if (sz < mopts.malloc_guard)
				wrterror(pool, "guard size");
			if (!mopts.malloc_freeunmap) {
				if (mprotect((char *)p + PAGEROUND(sz) -
				    mopts.malloc_guard, mopts.malloc_guard,
				    PROT_READ | PROT_WRITE))
					wrterror(pool, "mprotect");
			}
			STATS_SUB(pool->malloc_guarded, mopts.malloc_guard);
		}
		unmap(pool, p, PAGEROUND(sz), clear ? argsz : 0,
		    mopts.malloc_junk);
		delete(pool, r);
	} else {
		/* Validate and optionally canary check */
		struct chunk_info *info = (struct chunk_info *)r->size;
		find_chunknum(pool, info, p, mopts.chunk_canaries);
		if (!clear) {
			void *tmp;
			int i;

			if (mopts.malloc_freecheck) {
				for (i = 0; i <= MALLOC_DELAYED_CHUNK_MASK; i++)
					if (p == pool->delayed_chunks[i])
						wrterror(pool,
						    "double free %p", p);
			}
			if (mopts.malloc_junk && sz > 0)
				memset(p, SOME_FREEJUNK, sz);
			i = getrbyte(pool) & MALLOC_DELAYED_CHUNK_MASK;
			tmp = p;
			p = pool->delayed_chunks[i];
			if (tmp == p)
				wrterror(pool, "double free %p", tmp);
			pool->delayed_chunks[i] = tmp;
			if (mopts.malloc_junk)
				validate_junk(pool, p);
		} else if (argsz > 0)
			memset(p, 0, argsz);
		if (p != NULL) {
			r = find(pool, p);
			if (r == NULL)
				wrterror(pool,
				    "bogus pointer (double free?) %p", p);
			free_bytes(pool, r, p);
		}
	}

	if (argpool != pool) {
		pool->active--;
		pool->func = saved_function;
		_MALLOC_UNLOCK(pool->mutex);
		_MALLOC_LOCK(argpool->mutex);
		argpool->active++;
	}
}

void
free(void *ptr)
{
	struct dir_info *d;
	int saved_errno = errno;

	/* This is legal. */
	if (ptr == NULL)
		return;

	d = getpool();
	if (d == NULL)
		wrterror(d, "free() called before allocation");
	_MALLOC_LOCK(d->mutex);
	d->func = "free";
	if (d->active++) {
		malloc_recurse(d);
		return;
	}
	ofree(d, ptr, 0, 0, 0);
	d->active--;
	_MALLOC_UNLOCK(d->mutex);
	errno = saved_errno;
}
/*DEF_STRONG(free);*/

static void
freezero_p(void *ptr, size_t sz)
{
	explicit_bzero(ptr, sz);
	free(ptr);
}

void
freezero(void *ptr, size_t sz)
{
	struct dir_info *d;
	int saved_errno = errno;

	/* This is legal. */
	if (ptr == NULL)
		return;

	if (!mopts.internal_funcs) {
		freezero_p(ptr, sz);
		return;
	}

	d = getpool();
	if (d == NULL)
		wrterror(d, "freezero() called before allocation");
	_MALLOC_LOCK(d->mutex);
	d->func = "freezero";
	if (d->active++) {
		malloc_recurse(d);
		return;
	}
	ofree(d, ptr, 1, 1, sz);
	d->active--;
	_MALLOC_UNLOCK(d->mutex);
	errno = saved_errno;
}
DEF_WEAK(freezero);

static void *
orealloc(struct dir_info *argpool, void *p, size_t newsz, void *f)
{
	struct dir_info *pool;
	struct region_info *r;
	struct chunk_info *info;
	size_t oldsz, goldsz, gnewsz;
	void *q, *ret;
	char *saved_function;
	int i;
	uint32_t chunknum;

	pool = argpool;

	if (p == NULL)
		return omalloc(pool, newsz, 0, f);

	r = find(pool, p);
	if (r == NULL) {
		if (mopts.malloc_mt) {
			for (i = 0; i < _MALLOC_MUTEXES; i++) {
				if (i == argpool->mutex)
					continue;
				pool->active--;
				_MALLOC_UNLOCK(pool->mutex);
				pool = mopts.malloc_pool[i];
				_MALLOC_LOCK(pool->mutex);
				pool->active++;
				r = find(pool, p);
				if (r != NULL) {
					saved_function = pool->func;
					pool->func = argpool->func;
					break;
				}
			}
		}
		if (r == NULL)
			wrterror(argpool, "bogus pointer (double free?) %p", p);
	}
	if (newsz >= SIZE_MAX - mopts.malloc_guard - MALLOC_PAGESIZE) {
		errno = ENOMEM;
		ret = NULL;
		goto done;
	}

	REALSIZE(oldsz, r);
	if (mopts.chunk_canaries && oldsz <= MALLOC_MAXCHUNK) {
		info = (struct chunk_info *)r->size;
		chunknum = find_chunknum(pool, info, p, 0);
	}

	goldsz = oldsz;
	if (oldsz > MALLOC_MAXCHUNK) {
		if (oldsz < mopts.malloc_guard)
			wrterror(pool, "guard size");
		oldsz -= mopts.malloc_guard;
	}

	gnewsz = newsz;
	if (gnewsz > MALLOC_MAXCHUNK)
		gnewsz += mopts.malloc_guard;

	if (newsz > MALLOC_MAXCHUNK && oldsz > MALLOC_MAXCHUNK &&
	    !mopts.malloc_realloc) {
		/* First case: growing from an n-page allocation to an
		   m-page allocation, m > n */
		size_t roldsz = PAGEROUND(goldsz);
		size_t rnewsz = PAGEROUND(gnewsz);

		if (rnewsz > roldsz) {
			/* try to extend existing region */
			if (!mopts.malloc_guard) {
				void *hint = (char *)r->p + roldsz;
				size_t needed = rnewsz - roldsz;

				STATS_INC(pool->cheap_realloc_tries);
				q = map(pool, hint, needed, 0);
				if (q == hint)
					goto gotit;
				zapcacheregion(pool, hint, needed);
				q = MQUERY(hint, needed);
				if (q == hint)
					q = MMAPA(hint, needed);
				else
					q = MAP_FAILED;
				if (q == hint) {
gotit:
					STATS_ADD(pool->malloc_used, needed);
					if (mopts.malloc_junk == 2)
						memset(q, SOME_JUNK, needed);
					r->size = gnewsz;
					if (r->p != p) {
						/* old pointer is moved */
						memmove(r->p, p, oldsz);
						p = r->p;
					}
					if (mopts.chunk_canaries)
						fill_canary(p, newsz,
						    PAGEROUND(newsz));
					STATS_SETF(r, f);
					STATS_INC(pool->cheap_reallocs);
					ret = p;
					goto done;
				} else if (q != MAP_FAILED) {
					if (munmap(q, needed))
						wrterror(pool, "munmap %p", q);
				}
			}
		} else if (rnewsz < roldsz) {
			/* shrink number of pages */
			if (mopts.malloc_guard) {
				if (mprotect((char *)r->p + roldsz -
				    mopts.malloc_guard, mopts.malloc_guard,
				    PROT_READ | PROT_WRITE))
					wrterror(pool, "mprotect");
				if (mprotect((char *)r->p + rnewsz -
				    mopts.malloc_guard, mopts.malloc_guard,
				    PROT_NONE))
					wrterror(pool, "mprotect");
			}
			unmap(pool, (char *)r->p + rnewsz, roldsz - rnewsz, 0,
			    mopts.malloc_junk);
			r->size = gnewsz;
			if (MALLOC_MOVE_COND(gnewsz)) {
				void *pp = MALLOC_MOVE(r->p, gnewsz);
				memmove(pp, p, newsz);
				p = pp;
			} else if (mopts.chunk_canaries)
				fill_canary(p, newsz, PAGEROUND(newsz));
			STATS_SETF(r, f);
			ret = p;
			goto done;
		} else {
			/* number of pages remains the same */
			void *pp = r->p;

			r->size = gnewsz;
			if (MALLOC_MOVE_COND(gnewsz))
				pp = MALLOC_MOVE(r->p, gnewsz);
			if (p != pp) {
				memmove(pp, p, oldsz < newsz ? oldsz : newsz);
				p = pp;
			}
			if (p == r->p) {
				if (newsz > oldsz && mopts.malloc_junk == 2)
					memset((char *)p + newsz, SOME_JUNK,
					    rnewsz - mopts.malloc_guard -
					    newsz);
				if (mopts.chunk_canaries)
					fill_canary(p, newsz, PAGEROUND(newsz));
			}
			STATS_SETF(r, f);
			ret = p;
			goto done;
		}
	}
	if (oldsz <= MALLOC_MAXCHUNK && oldsz > 0 &&
	    newsz <= MALLOC_MAXCHUNK && newsz > 0 &&
	    1 << find_chunksize(newsz) == oldsz && !mopts.malloc_realloc) {
		/* do not reallocate if the new size fits well in the existing chunk */
		if (mopts.malloc_junk == 2)
			memset((char *)p + newsz, SOME_JUNK, oldsz - newsz);
		if (mopts.chunk_canaries) {
			info->bits[info->offset + chunknum] = newsz;
			fill_canary(p, newsz, info->size);
		}
		STATS_SETF(r, f);
		ret = p;
	} else if (newsz != oldsz || mopts.malloc_realloc) {
		/* create new allocation */
		q = omalloc(pool, newsz, 0, f);
		if (q == NULL) {
			ret = NULL;
			goto done;
		}
		if (newsz != 0 && oldsz != 0)
			memcpy(q, p, oldsz < newsz ? oldsz : newsz);
		ofree(pool, p, 0, 0, 0);
		ret = q;
	} else {
		/* oldsz == newsz */
		if (newsz != 0)
			wrterror(pool, "realloc internal inconsistency");
		STATS_SETF(r, f);
		ret = p;
	}
done:
	if (argpool != pool) {
		pool->active--;
		pool->func = saved_function;
		_MALLOC_UNLOCK(pool->mutex);
		_MALLOC_LOCK(argpool->mutex);
		argpool->active++;
	}
	return ret;
}

void *
realloc(void *ptr, size_t size)
{
	struct dir_info *d;
	void *r;
	int saved_errno = errno;

	d = getpool();
	if (d == NULL) {
		_malloc_init(0);
		d = getpool();
	}
	_MALLOC_LOCK(d->mutex);
	d->func = "realloc";
	if (d->active++) {
		malloc_recurse(d);
		return NULL;
	}
	r = orealloc(d, ptr, size, CALLER);

	d->active--;
	_MALLOC_UNLOCK(d->mutex);
	if (r == NULL && mopts.malloc_xmalloc)
		wrterror(d, "out of memory");
	if (r != NULL)
		errno = saved_errno;
	return r;
}
/*DEF_STRONG(realloc);*/


/*
 * This is sqrt(SIZE_MAX+1), as s1*s2 <= SIZE_MAX
 * if both s1 < MUL_NO_OVERFLOW and s2 < MUL_NO_OVERFLOW
 */
#define MUL_NO_OVERFLOW	(1UL << (sizeof(size_t) * 4))
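/*
 * Illustrative: with a 64-bit size_t this is 2^32, so nmemb * size can
 * only overflow if one factor is at least 2^32; only then is the exact
 * SIZE_MAX / nmemb < size check (and its division) performed, keeping
 * the common small-value path division free.
 */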

void *
calloc(size_t nmemb, size_t size)
{
	struct dir_info *d;
	void *r;
	int saved_errno = errno;

	d = getpool();
	if (d == NULL) {
		_malloc_init(0);
		d = getpool();
	}
	_MALLOC_LOCK(d->mutex);
	d->func = "calloc";
	if ((nmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
	    nmemb > 0 && SIZE_MAX / nmemb < size) {
		_MALLOC_UNLOCK(d->mutex);
		if (mopts.malloc_xmalloc)
			wrterror(d, "out of memory");
		errno = ENOMEM;
		return NULL;
	}

	if (d->active++) {
		malloc_recurse(d);
		return NULL;
	}

	size *= nmemb;
	r = omalloc(d, size, 1, CALLER);

	d->active--;
	_MALLOC_UNLOCK(d->mutex);
	if (r == NULL && mopts.malloc_xmalloc)
		wrterror(d, "out of memory");
	if (r != NULL)
		errno = saved_errno;
	return r;
}
/*DEF_STRONG(calloc);*/

static void *
orecallocarray(struct dir_info *argpool, void *p, size_t oldsize,
    size_t newsize, void *f)
{
	struct dir_info *pool;
	struct region_info *r;
	void *newptr;
	size_t sz;
	int i;

	pool = argpool;

	if (p == NULL)
		return omalloc(pool, newsize, 1, f);

	if (oldsize == newsize)
		return p;

	r = find(pool, p);
	if (r == NULL) {
		if (mopts.malloc_mt) {
			for (i = 0; i < _MALLOC_MUTEXES; i++) {
				if (i == argpool->mutex)
					continue;
				pool->active--;
				_MALLOC_UNLOCK(pool->mutex);
				pool = mopts.malloc_pool[i];
				_MALLOC_LOCK(pool->mutex);
				pool->active++;
				r = find(pool, p);
				if (r != NULL)
					break;
			}
		}
		if (r == NULL)
			wrterror(pool, "bogus pointer (double free?) %p", p);
	}

	REALSIZE(sz, r);
	if (sz <= MALLOC_MAXCHUNK) {
		if (mopts.chunk_canaries && sz > 0) {
			struct chunk_info *info = (struct chunk_info *)r->size;
			uint32_t chunknum = find_chunknum(pool, info, p, 0);

			if (info->bits[info->offset + chunknum] != oldsize)
				wrterror(pool, "recorded old size %hu != %zu",
				    info->bits[info->offset + chunknum],
				    oldsize);
		}
	} else if (oldsize != sz - mopts.malloc_guard)
		wrterror(pool, "recorded old size %zu != %zu",
		    sz - mopts.malloc_guard, oldsize);

	newptr = omalloc(pool, newsize, 0, f);
	if (newptr == NULL)
		goto done;

	if (newsize > oldsize) {
		memcpy(newptr, p, oldsize);
		memset((char *)newptr + oldsize, 0, newsize - oldsize);
	} else
		memcpy(newptr, p, newsize);

	ofree(pool, p, 1, 0, oldsize);

done:
	if (argpool != pool) {
		pool->active--;
		_MALLOC_UNLOCK(pool->mutex);
		_MALLOC_LOCK(argpool->mutex);
		argpool->active++;
	}

	return newptr;
}

static void *
recallocarray_p(void *ptr, size_t oldnmemb, size_t newnmemb, size_t size)
{
	size_t oldsize, newsize;
	void *newptr;

	if (ptr == NULL)
		return calloc(newnmemb, size);

	if ((newnmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
	    newnmemb > 0 && SIZE_MAX / newnmemb < size) {
		errno = ENOMEM;
		return NULL;
	}
	newsize = newnmemb * size;

	if ((oldnmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
	    oldnmemb > 0 && SIZE_MAX / oldnmemb < size) {
		errno = EINVAL;
		return NULL;
	}
	oldsize = oldnmemb * size;
	/*
	 * Don't bother reallocating if we're only shrinking a little;
	 * note that this means a long series of small shrink steps
	 * never actually shrinks the allocation.
	 */
	if (newsize <= oldsize) {
		size_t d = oldsize - newsize;

		if (d < oldsize / 2 && d < MALLOC_PAGESIZE) {
			memset((char *)ptr + newsize, 0, d);
			return ptr;
		}
	}

	newptr = malloc(newsize);
	if (newptr == NULL)
		return NULL;

	if (newsize > oldsize) {
		memcpy(newptr, ptr, oldsize);
		memset((char *)newptr + oldsize, 0, newsize - oldsize);
	} else
		memcpy(newptr, ptr, newsize);

	explicit_bzero(ptr, oldsize);
	free(ptr);

	return newptr;
}
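/*
 * Usage sketch (illustrative): growing an array while zeroing both the
 * new tail and the old copy of the data, e.g.
 *	tbl = recallocarray(tbl, oldn, newn, sizeof(*tbl));
 * This fallback provides the same semantics for programs that
 * interpose their own malloc and so cannot use the internal fast path.
 */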

void *
recallocarray(void *ptr, size_t oldnmemb, size_t newnmemb, size_t size)
{
	struct dir_info *d;
	size_t oldsize = 0, newsize;
	void *r;
	int saved_errno = errno;

	if (!mopts.internal_funcs)
		return recallocarray_p(ptr, oldnmemb, newnmemb, size);

	d = getpool();
	if (d == NULL) {
		_malloc_init(0);
		d = getpool();
	}

	_MALLOC_LOCK(d->mutex);
	d->func = "recallocarray";

	if ((newnmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
	    newnmemb > 0 && SIZE_MAX / newnmemb < size) {
		_MALLOC_UNLOCK(d->mutex);
		if (mopts.malloc_xmalloc)
			wrterror(d, "out of memory");
		errno = ENOMEM;
		return NULL;
	}
	newsize = newnmemb * size;

	if (ptr != NULL) {
		if ((oldnmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
		    oldnmemb > 0 && SIZE_MAX / oldnmemb < size) {
			_MALLOC_UNLOCK(d->mutex);
			errno = EINVAL;
			return NULL;
		}
		oldsize = oldnmemb * size;
	}

	if (d->active++) {
		malloc_recurse(d);
		return NULL;
	}

	r = orecallocarray(d, ptr, oldsize, newsize, CALLER);

	d->active--;
	_MALLOC_UNLOCK(d->mutex);
	if (r == NULL && mopts.malloc_xmalloc)
		wrterror(d, "out of memory");
	if (r != NULL)
		errno = saved_errno;
	return r;
}
DEF_WEAK(recallocarray);


static void *
mapalign(struct dir_info *d, size_t alignment, size_t sz, int zero_fill)
{
	char *p, *q;

	if (alignment < MALLOC_PAGESIZE || ((alignment - 1) & alignment) != 0)
		wrterror(d, "mapalign bad alignment");
	if (sz != PAGEROUND(sz))
		wrterror(d, "mapalign round");
	/* Allocate sz + alignment bytes of memory, which must include a
	 * subrange of sz bytes that is properly aligned.  Unmap the
	 * other bytes, and then return that subrange.
	 */

	/* We need sz + alignment to fit into a size_t. */
	if (alignment > SIZE_MAX - sz)
		return MAP_FAILED;

	p = map(d, NULL, sz + alignment, zero_fill);
	if (p == MAP_FAILED)
		return MAP_FAILED;
	q = (char *)(((uintptr_t)p + alignment - 1) & ~(alignment - 1));
	if (q != p) {
		if (munmap(p, q - p))
			wrterror(d, "munmap %p", p);
	}
	if (munmap(q + sz, alignment - (q - p)))
		wrterror(d, "munmap %p", q + sz);
	STATS_SUB(d->malloc_used, alignment);

	return q;
}
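/*
 * Example (illustrative): for a 64KB alignment and a one-page request,
 * sz + alignment bytes are mapped, q is p rounded up to the next 64KB
 * boundary, and the [p, q) prefix plus the tail past q + sz are
 * unmapped, leaving exactly sz correctly aligned bytes.
 */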
1959 
1960 static void *
1961 omemalign(struct dir_info *pool, size_t alignment, size_t sz, int zero_fill,
1962     void *f)
1963 {
1964 	size_t psz;
1965 	void *p;
1966 
1967 	/* If between half a page and a page, avoid MALLOC_MOVE. */
1968 	if (sz > MALLOC_MAXCHUNK && sz < MALLOC_PAGESIZE)
1969 		sz = MALLOC_PAGESIZE;
1970 	if (alignment <= MALLOC_PAGESIZE) {
1971 		/*
1972 		 * max(size, alignment) is enough to assure the requested
1973 		 * alignment, since the allocator always allocates
1974 		 * power-of-two blocks.
1975 		 */
1976 		if (sz < alignment)
1977 			sz = alignment;
1978 		return omalloc(pool, sz, zero_fill, f);
1979 	}
1980 
1981 	if (sz >= SIZE_MAX - mopts.malloc_guard - MALLOC_PAGESIZE) {
1982 		errno = ENOMEM;
1983 		return NULL;
1984 	}
1985 
1986 	sz += mopts.malloc_guard;
1987 	psz = PAGEROUND(sz);
1988 
1989 	p = mapalign(pool, alignment, psz, zero_fill);
1990 	if (p == MAP_FAILED) {
1991 		errno = ENOMEM;
1992 		return NULL;
1993 	}
1994 
1995 	if (insert(pool, p, sz, f)) {
1996 		unmap(pool, p, psz, 0, 0);
1997 		errno = ENOMEM;
1998 		return NULL;
1999 	}
2000 
2001 	if (mopts.malloc_guard) {
2002 		if (mprotect((char *)p + psz - mopts.malloc_guard,
2003 		    mopts.malloc_guard, PROT_NONE))
2004 			wrterror(pool, "mprotect");
2005 		STATS_ADD(pool->malloc_guarded, mopts.malloc_guard);
2006 	}
2007 
2008 	if (mopts.malloc_junk == 2) {
2009 		if (zero_fill)
2010 			memset((char *)p + sz - mopts.malloc_guard,
2011 			    SOME_JUNK, psz - sz);
2012 		else
2013 			memset(p, SOME_JUNK, psz - mopts.malloc_guard);
2014 	} else if (mopts.chunk_canaries)
2015 		fill_canary(p, sz - mopts.malloc_guard,
2016 		    psz - mopts.malloc_guard);
2017 
2018 	return p;
2019 }

int
posix_memalign(void **memptr, size_t alignment, size_t size)
{
	struct dir_info *d;
	int res, saved_errno = errno;
	void *r;

	/* Make sure that alignment is a large enough power of 2. */
	if (((alignment - 1) & alignment) != 0 || alignment < sizeof(void *))
		return EINVAL;

	d = getpool();
	if (d == NULL) {
		_malloc_init(0);
		d = getpool();
	}
	_MALLOC_LOCK(d->mutex);
	d->func = "posix_memalign";
	if (d->active++) {
		malloc_recurse(d);
		goto err;
	}
	r = omemalign(d, alignment, size, 0, CALLER);
	d->active--;
	_MALLOC_UNLOCK(d->mutex);
	if (r == NULL) {
		if (mopts.malloc_xmalloc)
			wrterror(d, "out of memory");
		goto err;
	}
	errno = saved_errno;
	*memptr = r;
	return 0;

err:
	res = errno;
	errno = saved_errno;
	return res;
}
/*DEF_STRONG(posix_memalign);*/
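/*
 * A minimal usage sketch: posix_memalign() reports failure via its
 * return value and does not touch errno, so callers should check the
 * result directly.
 *
 *	void *p;
 *	int error;
 *
 *	if ((error = posix_memalign(&p, 4096, len)) != 0)
 *		errx(1, "posix_memalign: %s", strerror(error));
 *	...
 *	free(p);
 */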

#ifdef MALLOC_STATS

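/*
 * Leak reporting: while dumping, outstanding allocations are
 * aggregated per call site (the recorded caller address f) in a
 * red-black tree, so the report can print one line per call site
 * with a count and a running total.
 */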
struct malloc_leak {
	void *f;
	size_t total_size;
	int count;
};

struct leaknode {
	RBT_ENTRY(leaknode) entry;
	struct malloc_leak d;
};

static inline int
leakcmp(const struct leaknode *e1, const struct leaknode *e2)
{
	return e1->d.f < e2->d.f ? -1 : e1->d.f > e2->d.f;
}

static RBT_HEAD(leaktree, leaknode) leakhead;
RBT_PROTOTYPE(leaktree, leaknode, entry, leakcmp);
RBT_GENERATE(leaktree, leaknode, entry, leakcmp);

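/*
 * Record cnt outstanding allocations of sz bytes against call site f.
 * Nodes are carved out of pages obtained directly from mmap so the
 * leak tracker never recurses into malloc itself.
 */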
static void
putleakinfo(void *f, size_t sz, int cnt)
{
	struct leaknode key, *p;
	static struct leaknode *page;
	static int used;

	if (cnt == 0 || page == MAP_FAILED)
		return;

	key.d.f = f;
	p = RBT_FIND(leaktree, &leakhead, &key);
	if (p == NULL) {
		if (page == NULL ||
		    used >= MALLOC_PAGESIZE / sizeof(struct leaknode)) {
			page = MMAP(MALLOC_PAGESIZE);
			if (page == MAP_FAILED)
				return;
			used = 0;
		}
		p = &page[used++];
		p->d.f = f;
		p->d.total_size = sz * cnt;
		p->d.count = cnt;
		RBT_INSERT(leaktree, &leakhead, p);
	} else {
		p->d.total_size += sz * cnt;
		p->d.count += cnt;
	}
}

static struct malloc_leak *malloc_leaks;

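/*
 * Print the per-call-site summary and mirror it into the static
 * malloc_leaks page, where it can also be inspected from a debugger.
 */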
static void
dump_leaks(int fd)
{
	struct leaknode *p;
	int i = 0;

	dprintf(fd, "Leak report\n");
	dprintf(fd, "                 f     sum      #    avg\n");
	/* XXX only one page of summary */
	if (malloc_leaks == NULL)
		malloc_leaks = MMAP(MALLOC_PAGESIZE);
	if (malloc_leaks != MAP_FAILED)
		memset(malloc_leaks, 0, MALLOC_PAGESIZE);
	RBT_FOREACH(p, leaktree, &leakhead) {
		dprintf(fd, "%18p %7zu %6d %6zu\n", p->d.f,
		    p->d.total_size, p->d.count, p->d.total_size / p->d.count);
		if (malloc_leaks == MAP_FAILED ||
		    i >= MALLOC_PAGESIZE / sizeof(struct malloc_leak))
			continue;
		malloc_leaks[i].f = p->d.f;
		malloc_leaks[i].total_size = p->d.total_size;
		malloc_leaks[i].count = p->d.count;
		i++;
	}
}

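/*
 * Print one line per chunk page: page address, recorded caller, chunk
 * size and free/total counts.  When dumping a live region
 * (fromfreelist == 0) the chunks are also added to the leak totals.
 */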
static void
dump_chunk(int fd, struct chunk_info *p, void *f, int fromfreelist)
{
	while (p != NULL) {
		dprintf(fd, "chunk %18p %18p %4d %d/%d\n",
		    p->page, ((p->bits[0] & 1) ? NULL : f),
		    p->size, p->free, p->total);
		if (!fromfreelist) {
			if (p->bits[0] & 1)
				putleakinfo(NULL, p->size, p->total - p->free);
			else {
				putleakinfo(f, p->size, 1);
				putleakinfo(NULL, p->size,
				    p->total - p->free - 1);
			}
			break;
		}
		p = LIST_NEXT(p, entries);
		if (p != NULL)
			dprintf(fd, "        ");
	}
}

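/*
 * Walk the lists of cached chunk_info structs and of partially free
 * chunk pages, printing one line per chunk size class.
 */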
static void
dump_free_chunk_info(int fd, struct dir_info *d)
{
	int i, j, count;
	struct chunk_info *p;

	dprintf(fd, "Free chunk structs:\n");
	for (i = 0; i <= MALLOC_MAXSHIFT; i++) {
		count = 0;
		LIST_FOREACH(p, &d->chunk_info_list[i], entries)
			count++;
		for (j = 0; j < MALLOC_CHUNK_LISTS; j++) {
			p = LIST_FIRST(&d->chunk_dir[i][j]);
			if (p == NULL && count == 0)
				continue;
			dprintf(fd, "%2d) %3d ", i, count);
			if (p != NULL)
				dump_chunk(fd, p, NULL, 1);
			else
				dprintf(fd, "\n");
		}
	}
}

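/*
 * List the regions currently sitting in the free page cache.
 */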
static void
dump_free_page_info(int fd, struct dir_info *d)
{
	int i;

	dprintf(fd, "Free pages cached: %zu\n", d->free_regions_size);
	for (i = 0; i < mopts.malloc_cache; i++) {
		if (d->free_regions[i].p != NULL) {
			dprintf(fd, "%2d) ", i);
			dprintf(fd, "free at %p: %zu\n",
			    d->free_regions[i].p, d->free_regions[i].size);
		}
	}
}

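/*
 * Dump a single pool: hash table statistics, the free caches and
 * every live region, followed by the accumulated leak report.
 */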
static void
malloc_dump1(int fd, int poolno, struct dir_info *d)
{
	size_t i, realsize;

	dprintf(fd, "Malloc dir of %s pool %d at %p\n", __progname, poolno, d);
	if (d == NULL)
		return;
	dprintf(fd, "Region slots free %zu/%zu\n",
	    d->regions_free, d->regions_total);
	dprintf(fd, "Finds %zu/%zu\n", d->finds,
	    d->find_collisions);
	dprintf(fd, "Inserts %zu/%zu\n", d->inserts,
	    d->insert_collisions);
	dprintf(fd, "Deletes %zu/%zu\n", d->deletes,
	    d->delete_moves);
	dprintf(fd, "Cheap reallocs %zu/%zu\n",
	    d->cheap_reallocs, d->cheap_realloc_tries);
	dprintf(fd, "In use %zu\n", d->malloc_used);
	dprintf(fd, "Guarded %zu\n", d->malloc_guarded);
	dump_free_chunk_info(fd, d);
	dump_free_page_info(fd, d);
	dprintf(fd,
	    "slot)  hash d  type               page                  f size [free/n]\n");
	for (i = 0; i < d->regions_total; i++) {
		if (d->r[i].p != NULL) {
			size_t h = hash(d->r[i].p) &
			    (d->regions_total - 1);
			dprintf(fd, "%4zx) #%4zx %zd ",
			    i, h, h - i);
			REALSIZE(realsize, &d->r[i]);
			if (realsize > MALLOC_MAXCHUNK) {
				putleakinfo(d->r[i].f, realsize, 1);
				dprintf(fd,
				    "pages %18p %18p %zu\n", d->r[i].p,
				    d->r[i].f, realsize);
			} else
				dump_chunk(fd,
				    (struct chunk_info *)d->r[i].size,
				    d->r[i].f, 0);
		}
	}
	dump_leaks(fd);
	dprintf(fd, "\n");
}

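/*
 * Dump one pool to fd.  The delayed free list is flushed first so
 * that chunks still queued there do not show up as leaks.
 */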
void
malloc_dump(int fd, int poolno, struct dir_info *pool)
{
	int i;
	void *p;
	struct region_info *r;
	int saved_errno = errno;

	if (pool == NULL)
		return;
	for (i = 0; i < MALLOC_DELAYED_CHUNK_MASK + 1; i++) {
		p = pool->delayed_chunks[i];
		if (p == NULL)
			continue;
		r = find(pool, p);
		if (r == NULL)
			wrterror(pool, "bogus pointer in malloc_dump %p", p);
		free_bytes(pool, r, p);
		pool->delayed_chunks[i] = NULL;
	}
	/* XXX leak when run multiple times */
	RBT_INIT(leaktree, &leakhead);
	malloc_dump1(fd, poolno, pool);
	errno = saved_errno;
}
DEF_WEAK(malloc_dump);

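/*
 * Dump all pools to fd.
 */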
void
malloc_gdump(int fd)
{
	int i;
	int saved_errno = errno;

	for (i = 0; i < _MALLOC_MUTEXES; i++)
		malloc_dump(fd, i, mopts.malloc_pool[i]);

	errno = saved_errno;
}
DEF_WEAK(malloc_gdump);

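/*
 * Registered with atexit() when statistics dumping is enabled.  The
 * output file is deliberately opened without O_CREAT: a dump is only
 * appended if the user created malloc.out beforehand.
 */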
static void
malloc_exit(void)
{
	int save_errno = errno, fd, i;

	fd = open("malloc.out", O_RDWR|O_APPEND);
	if (fd != -1) {
		dprintf(fd, "******** Start dump %s *******\n", __progname);
		dprintf(fd,
		    "MT=%d I=%d F=%d U=%d J=%d R=%d X=%d C=%d cache=%u G=%zu\n",
		    mopts.malloc_mt, mopts.internal_funcs,
		    mopts.malloc_freecheck,
		    mopts.malloc_freeunmap, mopts.malloc_junk,
		    mopts.malloc_realloc, mopts.malloc_xmalloc,
		    mopts.chunk_canaries, mopts.malloc_cache,
		    mopts.malloc_guard);

		for (i = 0; i < _MALLOC_MUTEXES; i++)
			malloc_dump(fd, i, mopts.malloc_pool[i]);
		dprintf(fd, "******** End dump %s *******\n", __progname);
		close(fd);
	} else
		dprintf(STDERR_FILENO,
		    "malloc() warning: Couldn't dump stats\n");
	errno = save_errno;
}

#endif /* MALLOC_STATS */