1 /*	$OpenBSD: malloc.c,v 1.241 2018/01/18 20:06:16 otto Exp $	*/
2 /*
3  * Copyright (c) 2008, 2010, 2011, 2016 Otto Moerbeek <otto@drijf.net>
4  * Copyright (c) 2012 Matthew Dempsky <matthew@openbsd.org>
5  * Copyright (c) 2008 Damien Miller <djm@openbsd.org>
6  * Copyright (c) 2000 Poul-Henning Kamp <phk@FreeBSD.org>
7  *
8  * Permission to use, copy, modify, and distribute this software for any
9  * purpose with or without fee is hereby granted, provided that the above
10  * copyright notice and this permission notice appear in all copies.
11  *
12  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19  */
20 
21 /*
22  * If we meet some day, and you think this stuff is worth it, you
23  * can buy me a beer in return. Poul-Henning Kamp
24  */
25 
26 /* #define MALLOC_STATS */
27 
28 #include <sys/types.h>
29 #include <sys/param.h>	/* PAGE_SHIFT ALIGN */
30 #include <sys/queue.h>
31 #include <sys/mman.h>
32 #include <sys/uio.h>
33 #include <errno.h>
34 #include <stdarg.h>
35 #include <stdint.h>
36 #include <stdlib.h>
37 #include <string.h>
38 #include <stdio.h>
39 #include <unistd.h>
40 
41 #ifdef MALLOC_STATS
42 #include <sys/tree.h>
43 #include <fcntl.h>
44 #endif
45 
46 #include "thread_private.h"
47 #include <tib.h>
48 
49 #if defined(__mips64__)
50 #define MALLOC_PAGESHIFT	(14U)
51 #else
52 #define MALLOC_PAGESHIFT	(PAGE_SHIFT)
53 #endif
54 
55 #define MALLOC_MINSHIFT		4
56 #define MALLOC_MAXSHIFT		(MALLOC_PAGESHIFT - 1)
57 #define MALLOC_PAGESIZE		(1UL << MALLOC_PAGESHIFT)
58 #define MALLOC_MINSIZE		(1UL << MALLOC_MINSHIFT)
59 #define MALLOC_PAGEMASK		(MALLOC_PAGESIZE - 1)
60 #define MASK_POINTER(p)		((void *)(((uintptr_t)(p)) & ~MALLOC_PAGEMASK))
61 
62 #define MALLOC_MAXCHUNK		(1 << MALLOC_MAXSHIFT)
63 #define MALLOC_MAXCACHE		256
64 #define MALLOC_DELAYED_CHUNK_MASK	15
65 #define MALLOC_INITIAL_REGIONS	512
66 #define MALLOC_DEFAULT_CACHE	64
67 #define MALLOC_CHUNK_LISTS	4
68 #define CHUNK_CHECK_LENGTH	32
69 
70 /*
71  * We move allocations between half a page and a whole page towards the end,
72  * subject to alignment constraints. This is the extra headroom we allow.
73  * Set to zero to be the most strict.
74  */
75 #define MALLOC_LEEWAY		0
76 #define MALLOC_MOVE_COND(sz)	((sz) - mopts.malloc_guard < 		\
77 				    MALLOC_PAGESIZE - MALLOC_LEEWAY)
78 #define MALLOC_MOVE(p, sz)  	(((char *)(p)) +			\
79 				    ((MALLOC_PAGESIZE - MALLOC_LEEWAY -	\
80 			    	    ((sz) - mopts.malloc_guard)) & 	\
81 				    ~(MALLOC_MINSIZE - 1)))
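
/*
 * Worked example (illustrative only; assumes 4K pages, MALLOC_LEEWAY 0
 * and no guard page): for malloc(100), MALLOC_MOVE_COND(100) holds and
 * MALLOC_MOVE shifts the allocation to p + ((4096 - 100) & ~15), i.e.
 * p + 3984, leaving 12 bytes of alignment slack before the page end.
 */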
82 
83 #define PAGEROUND(x)  (((x) + (MALLOC_PAGEMASK)) & ~MALLOC_PAGEMASK)
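
/*
 * E.g. with 4K pages: PAGEROUND(1) == 4096, PAGEROUND(4096) == 4096
 * and PAGEROUND(4097) == 8192.
 */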
84 
85 /*
86  * What to use for Junk.  This is the byte value we fill memory with
87  * when the 'J' option is enabled.  Use SOME_JUNK right after alloc,
88  * and SOME_FREEJUNK right before free.
89  */
90 #define SOME_JUNK		0xdb	/* deadbeef */
91 #define SOME_FREEJUNK		0xdf	/* dead, free */
92 
93 #define MMAP(sz)	mmap(NULL, (sz), PROT_READ | PROT_WRITE, \
94     MAP_ANON | MAP_PRIVATE, -1, 0)
95 
96 #define MMAPA(a,sz)	mmap((a), (sz), PROT_READ | PROT_WRITE, \
97     MAP_ANON | MAP_PRIVATE, -1, 0)
98 
99 #define MQUERY(a, sz)	mquery((a), (sz), PROT_READ | PROT_WRITE, \
100     MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0)
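
/*
 * mquery(2) is OpenBSD-specific: with MAP_FIXED it reports whether a
 * mapping at the given address could succeed, without creating one.
 * MQUERY is used by orealloc() below to probe for room to grow a
 * region in place before committing to MMAPA().
 */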
101 
102 struct region_info {
103 	void *p;		/* page; low bits used to mark chunks */
104 	uintptr_t size;		/* size for pages, or chunk_info pointer */
105 #ifdef MALLOC_STATS
106 	void *f;		/* where allocated from */
107 #endif
108 };
109 
110 LIST_HEAD(chunk_head, chunk_info);
111 
112 struct dir_info {
113 	u_int32_t canary1;
114 	int active;			/* status of malloc */
115 	struct region_info *r;		/* region slots */
116 	size_t regions_total;		/* number of region slots */
117 	size_t regions_free;		/* number of free slots */
118 					/* lists of free chunk info structs */
119 	struct chunk_head chunk_info_list[MALLOC_MAXSHIFT + 1];
120 					/* lists of chunks with free slots */
121 	struct chunk_head chunk_dir[MALLOC_MAXSHIFT + 1][MALLOC_CHUNK_LISTS];
122 	size_t free_regions_size;	/* free pages cached */
123 					/* free pages cache */
124 	struct region_info free_regions[MALLOC_MAXCACHE];
125 					/* delayed free chunk slots */
126 	u_int rotor;
127 	void *delayed_chunks[MALLOC_DELAYED_CHUNK_MASK + 1];
128 	size_t rbytesused;		/* random bytes used */
129 	char *func;			/* current function */
130 	int mutex;
131 	u_char rbytes[32];		/* random bytes */
132 #ifdef MALLOC_STATS
133 	size_t inserts;
134 	size_t insert_collisions;
135 	size_t finds;
136 	size_t find_collisions;
137 	size_t deletes;
138 	size_t delete_moves;
139 	size_t cheap_realloc_tries;
140 	size_t cheap_reallocs;
141 	size_t malloc_used;		/* bytes allocated */
142 	size_t malloc_guarded;		/* bytes used for guards */
143 #define STATS_ADD(x,y)	((x) += (y))
144 #define STATS_SUB(x,y)	((x) -= (y))
145 #define STATS_INC(x)	((x)++)
146 #define STATS_ZERO(x)	((x) = 0)
147 #define STATS_SETF(x,y)	((x)->f = (y))
148 #else
149 #define STATS_ADD(x,y)	/* nothing */
150 #define STATS_SUB(x,y)	/* nothing */
151 #define STATS_INC(x)	/* nothing */
152 #define STATS_ZERO(x)	/* nothing */
153 #define STATS_SETF(x,y)	/* nothing */
154 #endif /* MALLOC_STATS */
155 	u_int32_t canary2;
156 };
157 #define DIR_INFO_RSZ	((sizeof(struct dir_info) + MALLOC_PAGEMASK) & \
158 			~MALLOC_PAGEMASK)
159 
160 /* How many bits per u_short in the bitmap */
161 #define MALLOC_BITS		(NBBY * sizeof(u_short))
162 
163 /*
164  * This structure describes a page worth of chunks.
165  */
166 struct chunk_info {
167 	LIST_ENTRY(chunk_info) entries;
168 	void *page;			/* pointer to the page */
169 	u_short canary;
170 	u_short size;			/* size of this page's chunks */
171 	u_short shift;			/* how far to shift for this size */
172 	u_short free;			/* how many free chunks */
173 	u_short total;			/* how many chunks */
174 	u_short offset;			/* requested size table offset */
175 	u_short bits[1];		/* which chunks are free */
176 };
177 
178 struct malloc_readonly {
179 	struct dir_info *malloc_pool[_MALLOC_MUTEXES];	/* Main bookkeeping information */
180 	int	malloc_mt;		/* multi-threaded mode? */
181 	int	malloc_freecheck;	/* Extensive double free check */
182 	int	malloc_freeunmap;	/* mprotect free pages PROT_NONE? */
183 	int	malloc_junk;		/* junk fill? */
184 	int	malloc_realloc;		/* always realloc? */
185 	int	malloc_xmalloc;		/* xmalloc behaviour? */
186 	int	chunk_canaries;		/* use canaries after chunks? */
187 	int	internal_funcs;		/* use better recallocarray/freezero? */
188 	u_int	malloc_cache;		/* free pages we cache */
189 	size_t	malloc_guard;		/* use guard pages after allocations? */
190 #ifdef MALLOC_STATS
191 	int	malloc_stats;		/* dump statistics at end */
192 #endif
193 	u_int32_t malloc_canary;	/* Matched against ones in malloc_pool */
194 };
195 
196 /* This object is mapped PROT_READ after initialisation to prevent tampering */
197 static union {
198 	struct malloc_readonly mopts;
199 	u_char _pad[MALLOC_PAGESIZE];
200 } malloc_readonly __attribute__((aligned(MALLOC_PAGESIZE)));
201 #define mopts	malloc_readonly.mopts
202 
203 char		*malloc_options;	/* compile-time options */
204 
205 static __dead void wrterror(struct dir_info *d, char *msg, ...)
206     __attribute__((__format__ (printf, 2, 3)));
207 
208 #ifdef MALLOC_STATS
209 void malloc_dump(int, int, struct dir_info *);
210 PROTO_NORMAL(malloc_dump);
211 void malloc_gdump(int);
212 PROTO_NORMAL(malloc_gdump);
213 static void malloc_exit(void);
214 #define CALLER	__builtin_return_address(0)
215 #else
216 #define CALLER	NULL
217 #endif
218 
219 /* low bits of r->p determine size: 0 means >= page size and r->size holds
220  * the real size; otherwise the low bits are a shift count plus one, where
221  * 1 means malloc(0) */
222 #define REALSIZE(sz, r)						\
223 	(sz) = (uintptr_t)(r)->p & MALLOC_PAGEMASK,		\
224 	(sz) = ((sz) == 0 ? (r)->size : ((sz) == 1 ? 0 : (1 << ((sz)-1))))
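
/*
 * Encoding example (illustrative): a page of 16-byte chunks (shift 4)
 * is inserted with low bits 4 + 1 == 5, so REALSIZE yields
 * 1 << (5 - 1) == 16; a malloc(0) page is inserted with low bits 1,
 * yielding size 0; a large region has low bits 0 and uses r->size.
 */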
225 
226 static inline void
227 _MALLOC_LEAVE(struct dir_info *d)
228 {
229 	if (mopts.malloc_mt) {
230 		d->active--;
231 		_MALLOC_UNLOCK(d->mutex);
232 	}
233 }
234 
235 static inline void
236 _MALLOC_ENTER(struct dir_info *d)
237 {
238 	if (mopts.malloc_mt) {
239 		_MALLOC_LOCK(d->mutex);
240 		d->active++;
241 	}
242 }
243 
244 static inline size_t
245 hash(void *p)
246 {
247 	size_t sum;
248 	uintptr_t u;
249 
250 	u = (uintptr_t)p >> MALLOC_PAGESHIFT;
251 	sum = u;
252 	sum = (sum << 7) - sum + (u >> 16);
253 #ifdef __LP64__
254 	sum = (sum << 7) - sum + (u >> 32);
255 	sum = (sum << 7) - sum + (u >> 48);
256 #endif
257 	return sum;
258 }
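
/*
 * The hash indexes the open-addressed region table; a minimal sketch
 * of how insert() and find() below use it:
 *
 *	index = hash(p) & (d->regions_total - 1);
 *	while (slot occupied)
 *		index = (index - 1) & mask;	(backward linear probe)
 */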
259 
260 static inline
261 struct dir_info *getpool(void)
262 {
263 	if (!mopts.malloc_mt)
264 		return mopts.malloc_pool[0];
265 	else
266 		return mopts.malloc_pool[TIB_GET()->tib_tid &
267 		    (_MALLOC_MUTEXES - 1)];
268 }
269 
270 static __dead void
271 wrterror(struct dir_info *d, char *msg, ...)
272 {
273 	int		saved_errno = errno;
274 	va_list		ap;
275 
276 	dprintf(STDERR_FILENO, "%s(%d) in %s(): ", __progname,
277 	    getpid(), (d != NULL && d->func) ? d->func : "unknown");
278 	va_start(ap, msg);
279 	vdprintf(STDERR_FILENO, msg, ap);
280 	va_end(ap);
281 	dprintf(STDERR_FILENO, "\n");
282 
283 #ifdef MALLOC_STATS
284 	if (mopts.malloc_stats)
285 		malloc_gdump(STDERR_FILENO);
286 #endif /* MALLOC_STATS */
287 
288 	errno = saved_errno;
289 
290 	abort();
291 }
292 
293 static void
294 rbytes_init(struct dir_info *d)
295 {
296 	arc4random_buf(d->rbytes, sizeof(d->rbytes));
297 	/* add 1 to account for using d->rbytes[0] */
298 	d->rbytesused = 1 + d->rbytes[0] % (sizeof(d->rbytes) / 2);
299 }
300 
301 static inline u_char
302 getrbyte(struct dir_info *d)
303 {
304 	u_char x;
305 
306 	if (d->rbytesused >= sizeof(d->rbytes))
307 		rbytes_init(d);
308 	x = d->rbytes[d->rbytesused++];
309 	return x;
310 }
311 
312 static void
313 omalloc_parseopt(char opt)
314 {
315 	switch (opt) {
316 	case '>':
317 		mopts.malloc_cache <<= 1;
318 		if (mopts.malloc_cache > MALLOC_MAXCACHE)
319 			mopts.malloc_cache = MALLOC_MAXCACHE;
320 		break;
321 	case '<':
322 		mopts.malloc_cache >>= 1;
323 		break;
324 	case 'c':
325 		mopts.chunk_canaries = 0;
326 		break;
327 	case 'C':
328 		mopts.chunk_canaries = 1;
329 		break;
330 #ifdef MALLOC_STATS
331 	case 'd':
332 		mopts.malloc_stats = 0;
333 		break;
334 	case 'D':
335 		mopts.malloc_stats = 1;
336 		break;
337 #endif /* MALLOC_STATS */
338 	case 'f':
339 		mopts.malloc_freecheck = 0;
340 		mopts.malloc_freeunmap = 0;
341 		break;
342 	case 'F':
343 		mopts.malloc_freecheck = 1;
344 		mopts.malloc_freeunmap = 1;
345 		break;
346 	case 'g':
347 		mopts.malloc_guard = 0;
348 		break;
349 	case 'G':
350 		mopts.malloc_guard = MALLOC_PAGESIZE;
351 		break;
352 	case 'j':
353 		if (mopts.malloc_junk > 0)
354 			mopts.malloc_junk--;
355 		break;
356 	case 'J':
357 		if (mopts.malloc_junk < 2)
358 			mopts.malloc_junk++;
359 		break;
360 	case 'r':
361 		mopts.malloc_realloc = 0;
362 		break;
363 	case 'R':
364 		mopts.malloc_realloc = 1;
365 		break;
366 	case 'u':
367 		mopts.malloc_freeunmap = 0;
368 		break;
369 	case 'U':
370 		mopts.malloc_freeunmap = 1;
371 		break;
372 	case 'x':
373 		mopts.malloc_xmalloc = 0;
374 		break;
375 	case 'X':
376 		mopts.malloc_xmalloc = 1;
377 		break;
378 	default:
379 		dprintf(STDERR_FILENO, "malloc() warning: "
380 		    "unknown char in MALLOC_OPTIONS\n");
381 		break;
382 	}
383 }
384 
385 static void
386 omalloc_init(void)
387 {
388 	char *p, *q, b[64];
389 	int i, j;
390 
391 	/*
392 	 * Default options
393 	 */
394 	mopts.malloc_junk = 1;
395 	mopts.malloc_cache = MALLOC_DEFAULT_CACHE;
396 
397 	for (i = 0; i < 3; i++) {
398 		switch (i) {
399 		case 0:
400 			j = readlink("/etc/malloc.conf", b, sizeof b - 1);
401 			if (j <= 0)
402 				continue;
403 			b[j] = '\0';
404 			p = b;
405 			break;
406 		case 1:
407 			if (issetugid() == 0)
408 				p = getenv("MALLOC_OPTIONS");
409 			else
410 				continue;
411 			break;
412 		case 2:
413 			p = malloc_options;
414 			break;
415 		default:
416 			p = NULL;
417 		}
418 
419 		for (; p != NULL && *p != '\0'; p++) {
420 			switch (*p) {
421 			case 'S':
422 				for (q = "CFGJ"; *q != '\0'; q++)
423 					omalloc_parseopt(*q);
424 				mopts.malloc_cache = 0;
425 				break;
426 			case 's':
427 				for (q = "cfgj"; *q != '\0'; q++)
428 					omalloc_parseopt(*q);
429 				mopts.malloc_cache = MALLOC_DEFAULT_CACHE;
430 				break;
431 			default:
432 				omalloc_parseopt(*p);
433 				break;
434 			}
435 		}
436 	}
437 
438 #ifdef MALLOC_STATS
439 	if (mopts.malloc_stats && (atexit(malloc_exit) == -1)) {
440 		dprintf(STDERR_FILENO, "malloc() warning: atexit(2) failed."
441 		    " Will not be able to dump stats on exit\n");
442 	}
443 #endif /* MALLOC_STATS */
444 
445 	while ((mopts.malloc_canary = arc4random()) == 0)
446 		;
447 }
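
/*
 * Usage example (illustrative): options are read from the symlink
 * target of /etc/malloc.conf, from MALLOC_OPTIONS in the environment
 * (unless the process is setugid), and from the malloc_options global:
 *
 *	$ env MALLOC_OPTIONS=S ./prog	('S' turns on C, F, G and J
 *					 and disables the page cache)
 *	# ln -sf CJ /etc/malloc.conf
 */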
448 
449 static void
450 omalloc_poolinit(struct dir_info **dp)
451 {
452 	void *p;
453 	size_t d_avail, regioninfo_size;
454 	struct dir_info *d;
455 	int i, j;
456 
457 	/*
458 	 * Allocate dir_info with a guard page on either side. Also
459 	 * randomise offset inside the page at which the dir_info
460 	 * lies (subject to alignment by 1 << MALLOC_MINSHIFT)
461 	 */
462 	if ((p = MMAP(DIR_INFO_RSZ + (MALLOC_PAGESIZE * 2))) == MAP_FAILED)
463 		wrterror(NULL, "malloc init mmap failed");
464 	mprotect(p, MALLOC_PAGESIZE, PROT_NONE);
465 	mprotect((char *)p + MALLOC_PAGESIZE + DIR_INFO_RSZ,
466 	    MALLOC_PAGESIZE, PROT_NONE);
467 	d_avail = (DIR_INFO_RSZ - sizeof(*d)) >> MALLOC_MINSHIFT;
468 	d = (struct dir_info *)((char *)p + MALLOC_PAGESIZE +
469 	    (arc4random_uniform(d_avail) << MALLOC_MINSHIFT));
470 
471 	rbytes_init(d);
472 	d->regions_free = d->regions_total = MALLOC_INITIAL_REGIONS;
473 	regioninfo_size = d->regions_total * sizeof(struct region_info);
474 	d->r = MMAP(regioninfo_size);
475 	if (d->r == MAP_FAILED) {
476 		d->regions_total = 0;
477 		wrterror(NULL, "malloc init mmap failed");
478 	}
479 	for (i = 0; i <= MALLOC_MAXSHIFT; i++) {
480 		LIST_INIT(&d->chunk_info_list[i]);
481 		for (j = 0; j < MALLOC_CHUNK_LISTS; j++)
482 			LIST_INIT(&d->chunk_dir[i][j]);
483 	}
484 	STATS_ADD(d->malloc_used, regioninfo_size);
485 	d->canary1 = mopts.malloc_canary ^ (u_int32_t)(uintptr_t)d;
486 	d->canary2 = ~d->canary1;
487 
488 	*dp = d;
489 }
490 
491 static int
492 omalloc_grow(struct dir_info *d)
493 {
494 	size_t newtotal;
495 	size_t newsize;
496 	size_t mask;
497 	size_t i;
498 	struct region_info *p;
499 
500 	if (d->regions_total > SIZE_MAX / sizeof(struct region_info) / 2)
501 		return 1;
502 
503 	newtotal = d->regions_total * 2;
504 	newsize = newtotal * sizeof(struct region_info);
505 	mask = newtotal - 1;
506 
507 	p = MMAP(newsize);
508 	if (p == MAP_FAILED)
509 		return 1;
510 
511 	STATS_ADD(d->malloc_used, newsize);
512 	STATS_ZERO(d->inserts);
513 	STATS_ZERO(d->insert_collisions);
514 	for (i = 0; i < d->regions_total; i++) {
515 		void *q = d->r[i].p;
516 		if (q != NULL) {
517 			size_t index = hash(q) & mask;
518 			STATS_INC(d->inserts);
519 			while (p[index].p != NULL) {
520 				index = (index - 1) & mask;
521 				STATS_INC(d->insert_collisions);
522 			}
523 			p[index] = d->r[i];
524 		}
525 	}
526 	/* prevent pages containing meta info from ending up in the cache */
527 	if (munmap(d->r, d->regions_total * sizeof(struct region_info)))
528 		wrterror(d, "munmap %p", (void *)d->r);
529 	else
530 		STATS_SUB(d->malloc_used,
531 		    d->regions_total * sizeof(struct region_info));
532 	d->regions_free = d->regions_free + d->regions_total;
533 	d->regions_total = newtotal;
534 	d->r = p;
535 	return 0;
536 }
537 
538 /*
539  * The hashtable uses the assumption that p is never NULL. This holds since
540  * non-MAP_FIXED mappings with hint 0 start at BRKSIZ.
541  */
542 static int
543 insert(struct dir_info *d, void *p, size_t sz, void *f)
544 {
545 	size_t index;
546 	size_t mask;
547 	void *q;
548 
549 	if (d->regions_free * 4 < d->regions_total) {
550 		if (omalloc_grow(d))
551 			return 1;
552 	}
553 	mask = d->regions_total - 1;
554 	index = hash(p) & mask;
555 	q = d->r[index].p;
556 	STATS_INC(d->inserts);
557 	while (q != NULL) {
558 		index = (index - 1) & mask;
559 		q = d->r[index].p;
560 		STATS_INC(d->insert_collisions);
561 	}
562 	d->r[index].p = p;
563 	d->r[index].size = sz;
564 #ifdef MALLOC_STATS
565 	d->r[index].f = f;
566 #endif
567 	d->regions_free--;
568 	return 0;
569 }
570 
571 static struct region_info *
572 find(struct dir_info *d, void *p)
573 {
574 	size_t index;
575 	size_t mask = d->regions_total - 1;
576 	void *q, *r;
577 
578 	if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)(uintptr_t)d) ||
579 	    d->canary1 != ~d->canary2)
580 		wrterror(d, "internal struct corrupt");
581 	p = MASK_POINTER(p);
582 	index = hash(p) & mask;
583 	r = d->r[index].p;
584 	q = MASK_POINTER(r);
585 	STATS_INC(d->finds);
586 	while (q != p && r != NULL) {
587 		index = (index - 1) & mask;
588 		r = d->r[index].p;
589 		q = MASK_POINTER(r);
590 		STATS_INC(d->find_collisions);
591 	}
592 	return (q == p && r != NULL) ? &d->r[index] : NULL;
593 }
594 
595 static void
596 delete(struct dir_info *d, struct region_info *ri)
597 {
598 	/* algorithm R, Knuth Vol III section 6.4 */
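	/*
	 * Why entries may move (illustrative): with table size 8, suppose
	 * two pointers hash to slot 5; backward probing stores them at
	 * slots 5 and 4.  Deleting the entry in slot 5 must move the one
	 * in slot 4 up into the hole, or find() would stop at the empty
	 * slot 5 and never reach slot 4.
	 */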
599 	size_t mask = d->regions_total - 1;
600 	size_t i, j, r;
601 
602 	if (d->regions_total & (d->regions_total - 1))
603 		wrterror(d, "regions_total not 2^x");
604 	d->regions_free++;
605 	STATS_INC(d->deletes);
606 
607 	i = ri - d->r;
608 	for (;;) {
609 		d->r[i].p = NULL;
610 		d->r[i].size = 0;
611 		j = i;
612 		for (;;) {
613 			i = (i - 1) & mask;
614 			if (d->r[i].p == NULL)
615 				return;
616 			r = hash(d->r[i].p) & mask;
617 			if ((i <= r && r < j) || (r < j && j < i) ||
618 			    (j < i && i <= r))
619 				continue;
620 			d->r[j] = d->r[i];
621 			STATS_INC(d->delete_moves);
622 			break;
623 		}
624 
625 	}
626 }
627 
628 /*
629  * Cache maintenance. We keep at most malloc_cache pages cached.
630  * If the cache is becoming full, unmap pages in the cache for real,
631  * and then add the region to the cache.
632  * Unlike the regular region data structure, the sizes in the
633  * cache are in MALLOC_PAGESIZE units.
634  */
635 static void
636 unmap(struct dir_info *d, void *p, size_t sz, int clear)
637 {
638 	size_t psz = sz >> MALLOC_PAGESHIFT;
639 	size_t rsz;
640 	struct region_info *r;
641 	u_int i, offset, mask;
642 
643 	if (sz != PAGEROUND(sz))
644 		wrterror(d, "munmap round");
645 
646 	rsz = mopts.malloc_cache - d->free_regions_size;
647 
648 	/*
649 	 * normally the cache holds recently freed regions, but if the region
650 	 * to unmap is larger than the cache size or we're clearing and the
651 	 * cache is full, just munmap
652 	 */
653 	if (psz > mopts.malloc_cache || (clear && rsz == 0)) {
654 		i = munmap(p, sz);
655 		if (i)
656 			wrterror(d, "munmap %p", p);
657 		STATS_SUB(d->malloc_used, sz);
658 		return;
659 	}
660 	offset = getrbyte(d);
661 	mask = mopts.malloc_cache - 1;
662 	if (psz > rsz) {
663 		size_t tounmap = psz - rsz;
664 		i = 0;
665 		for (;;) {
666 			r = &d->free_regions[(i + offset) & mask];
667 			if (r->p != NULL) {
668 				rsz = r->size << MALLOC_PAGESHIFT;
669 				if (munmap(r->p, rsz))
670 					wrterror(d, "munmap %p", r->p);
671 				r->p = NULL;
672 				if (tounmap > r->size)
673 					tounmap -= r->size;
674 				else
675 					tounmap = 0;
676 				d->free_regions_size -= r->size;
677 				STATS_SUB(d->malloc_used, rsz);
678 				if (tounmap == 0) {
679 					offset = i;
680 					break;
681 				}
682 			}
683 			i++;
684 		}
685 	}
686 	for (i = 0; ; i++) {
687 		r = &d->free_regions[(i + offset) & mask];
688 		if (r->p == NULL) {
689 			if (clear)
690 				memset(p, 0, sz - mopts.malloc_guard);
691 			if (mopts.malloc_junk && !mopts.malloc_freeunmap) {
692 				size_t amt = mopts.malloc_junk == 1 ?
693 				    MALLOC_MAXCHUNK : sz;
694 				memset(p, SOME_FREEJUNK, amt);
695 			}
696 			if (mopts.malloc_freeunmap)
697 				mprotect(p, sz, PROT_NONE);
698 			r->p = p;
699 			r->size = psz;
700 			d->free_regions_size += psz;
701 			break;
702 		}
703 	}
704 	if (d->free_regions_size > mopts.malloc_cache)
705 		wrterror(d, "malloc cache overflow");
706 }
707 
708 static void
709 zapcacheregion(struct dir_info *d, void *p, size_t len)
710 {
711 	u_int i;
712 	struct region_info *r;
713 	size_t rsz;
714 
715 	for (i = 0; i < mopts.malloc_cache; i++) {
716 		r = &d->free_regions[i];
717 		if (r->p >= p && r->p <= (void *)((char *)p + len)) {
718 			rsz = r->size << MALLOC_PAGESHIFT;
719 			if (munmap(r->p, rsz))
720 				wrterror(d, "munmap %p", r->p);
721 			r->p = NULL;
722 			d->free_regions_size -= r->size;
723 			STATS_SUB(d->malloc_used, rsz);
724 		}
725 	}
726 }
727 
728 static void *
729 map(struct dir_info *d, void *hint, size_t sz, int zero_fill)
730 {
731 	size_t psz = sz >> MALLOC_PAGESHIFT;
732 	struct region_info *r, *big = NULL;
733 	u_int i;
734 	void *p;
735 
736 	if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)(uintptr_t)d) ||
737 	    d->canary1 != ~d->canary2)
738 		wrterror(d, "internal struct corrupt");
739 	if (sz != PAGEROUND(sz))
740 		wrterror(d, "map round");
741 
742 	if (hint == NULL && psz > d->free_regions_size) {
743 		_MALLOC_LEAVE(d);
744 		p = MMAP(sz);
745 		_MALLOC_ENTER(d);
746 		if (p != MAP_FAILED)
747 			STATS_ADD(d->malloc_used, sz);
748 		/* zero fill not needed */
749 		return p;
750 	}
751 	for (i = 0; i < mopts.malloc_cache; i++) {
752 		r = &d->free_regions[(i + d->rotor) & (mopts.malloc_cache - 1)];
753 		if (r->p != NULL) {
754 			if (hint != NULL && r->p != hint)
755 				continue;
756 			if (r->size == psz) {
757 				p = r->p;
758 				r->p = NULL;
759 				d->free_regions_size -= psz;
760 				if (mopts.malloc_freeunmap)
761 					mprotect(p, sz, PROT_READ | PROT_WRITE);
762 				if (zero_fill)
763 					memset(p, 0, sz);
764 				else if (mopts.malloc_junk == 2 &&
765 				    mopts.malloc_freeunmap)
766 					memset(p, SOME_FREEJUNK, sz);
767 				d->rotor += i + 1;
768 				return p;
769 			} else if (r->size > psz)
770 				big = r;
771 		}
772 	}
773 	if (big != NULL) {
774 		r = big;
775 		p = r->p;
776 		r->p = (char *)r->p + (psz << MALLOC_PAGESHIFT);
777 		if (mopts.malloc_freeunmap)
778 			mprotect(p, sz, PROT_READ | PROT_WRITE);
779 		r->size -= psz;
780 		d->free_regions_size -= psz;
781 		if (zero_fill)
782 			memset(p, 0, sz);
783 		else if (mopts.malloc_junk == 2 && mopts.malloc_freeunmap)
784 			memset(p, SOME_FREEJUNK, sz);
785 		return p;
786 	}
787 	if (hint != NULL)
788 		return MAP_FAILED;
789 	if (d->free_regions_size > mopts.malloc_cache)
790 		wrterror(d, "malloc cache");
791 	_MALLOC_LEAVE(d);
792 	p = MMAP(sz);
793 	_MALLOC_ENTER(d);
794 	if (p != MAP_FAILED)
795 		STATS_ADD(d->malloc_used, sz);
796 	/* zero fill not needed */
797 	return p;
798 }
799 
800 static void
801 init_chunk_info(struct dir_info *d, struct chunk_info *p, int bits)
802 {
803 	int i;
804 
805 	if (bits == 0) {
806 		p->shift = MALLOC_MINSHIFT;
807 		p->total = p->free = MALLOC_PAGESIZE >> p->shift;
808 		p->size = 0;
809 		p->offset = 0xdead;
810 	} else {
811 		p->shift = bits;
812 		p->total = p->free = MALLOC_PAGESIZE >> p->shift;
813 		p->size = 1U << bits;
814 		p->offset = howmany(p->total, MALLOC_BITS);
815 	}
816 	p->canary = (u_short)d->canary1;
817 
818 	/* set all valid bits in the bitmap */
819 	i = p->total - 1;
820 	memset(p->bits, 0xff, sizeof(p->bits[0]) * (i / MALLOC_BITS));
821 	p->bits[i / MALLOC_BITS] = (2U << (i % MALLOC_BITS)) - 1;
822 }
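
/*
 * Bitmap example (illustrative; assumes 4K pages): for shift 11
 * (2048-byte chunks) total == 2, so the memset covers no full shorts
 * and bits[0] becomes (2U << 1) - 1 == 0x3, exactly two valid bits.
 * For shift 4, total == 256: bits[0..14] are set by the memset and
 * bits[15] becomes 0xffff.
 */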
823 
824 static struct chunk_info *
825 alloc_chunk_info(struct dir_info *d, int bits)
826 {
827 	struct chunk_info *p;
828 
829 	if (LIST_EMPTY(&d->chunk_info_list[bits])) {
830 		size_t size, count, i;
831 		char *q;
832 
833 		if (bits == 0)
834 			count = MALLOC_PAGESIZE / MALLOC_MINSIZE;
835 		else
836 			count = MALLOC_PAGESIZE >> bits;
837 
838 		size = howmany(count, MALLOC_BITS);
839 		size = sizeof(struct chunk_info) + (size - 1) * sizeof(u_short);
840 		if (mopts.chunk_canaries)
841 			size += count * sizeof(u_short);
842 		size = ALIGN(size);
843 
844 		q = MMAP(MALLOC_PAGESIZE);
845 		if (q == MAP_FAILED)
846 			return NULL;
847 		STATS_ADD(d->malloc_used, MALLOC_PAGESIZE);
848 		count = MALLOC_PAGESIZE / size;
849 
850 		for (i = 0; i < count; i++, q += size) {
851 			p = (struct chunk_info *)q;
852 			LIST_INSERT_HEAD(&d->chunk_info_list[bits], p, entries);
853 		}
854 	}
855 	p = LIST_FIRST(&d->chunk_info_list[bits]);
856 	LIST_REMOVE(p, entries);
857 	if (p->shift == 0)
858 		init_chunk_info(d, p, bits);
859 	return p;
860 }
861 
862 /*
863  * Allocate a page of chunks
864  */
865 static struct chunk_info *
866 omalloc_make_chunks(struct dir_info *d, int bits, int listnum)
867 {
868 	struct chunk_info *bp;
869 	void *pp;
870 
871 	/* Allocate a new bucket */
872 	pp = map(d, NULL, MALLOC_PAGESIZE, 0);
873 	if (pp == MAP_FAILED)
874 		return NULL;
875 
876 	/* memory protect the page allocated in the malloc(0) case */
877 	if (bits == 0 && mprotect(pp, MALLOC_PAGESIZE, PROT_NONE) < 0)
878 		goto err;
879 
880 	bp = alloc_chunk_info(d, bits);
881 	if (bp == NULL)
882 		goto err;
883 	bp->page = pp;
884 
885 	if (insert(d, (void *)((uintptr_t)pp | (bits + 1)), (uintptr_t)bp, NULL))
886 		goto err;
887 	LIST_INSERT_HEAD(&d->chunk_dir[bits][listnum], bp, entries);
888 	return bp;
889 
890 err:
891 	unmap(d, pp, MALLOC_PAGESIZE, 0);
892 	return NULL;
893 }
894 
895 static int
896 find_chunksize(size_t size)
897 {
898 	int r;
899 
900 	/* malloc(0) is special */
901 	if (size == 0)
902 		return 0;
903 
904 	if (size < MALLOC_MINSIZE)
905 		size = MALLOC_MINSIZE;
906 	size--;
907 
908 	r = MALLOC_MINSHIFT;
909 	while (size >> r)
910 		r++;
911 	return r;
912 }
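
/*
 * E.g. find_chunksize(24) == 5 (a 32-byte chunk) and
 * find_chunksize(16) == 4 (an exact fit): sizes round up to the next
 * power of two and never drop below MALLOC_MINSIZE.
 */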
913 
914 static void
915 fill_canary(char *ptr, size_t sz, size_t allocated)
916 {
917 	size_t check_sz = allocated - sz;
918 
919 	if (check_sz > CHUNK_CHECK_LENGTH)
920 		check_sz = CHUNK_CHECK_LENGTH;
921 	memset(ptr + sz, SOME_JUNK, check_sz);
922 }
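
/*
 * E.g. for a malloc(100) served from a 128-byte chunk, bytes 100..127
 * are filled with SOME_JUNK; validate_canary() checks the same (at
 * most CHUNK_CHECK_LENGTH) bytes again on free.
 */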
923 
924 /*
925  * Allocate a chunk
926  */
927 static void *
928 malloc_bytes(struct dir_info *d, size_t size, void *f)
929 {
930 	u_int i, r;
931 	int j, listnum;
932 	size_t k;
933 	u_short	*lp;
934 	struct chunk_info *bp;
935 	void *p;
936 
937 	if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)(uintptr_t)d) ||
938 	    d->canary1 != ~d->canary2)
939 		wrterror(d, "internal struct corrupt");
940 
941 	j = find_chunksize(size);
942 
943 	r = ((u_int)getrbyte(d) << 8) | getrbyte(d);
944 	listnum = r % MALLOC_CHUNK_LISTS;
945 	/* If the list is empty, make one more page of chunks of that size */
946 	if ((bp = LIST_FIRST(&d->chunk_dir[j][listnum])) == NULL) {
947 		bp = omalloc_make_chunks(d, j, listnum);
948 		if (bp == NULL)
949 			return NULL;
950 	}
951 
952 	if (bp->canary != (u_short)d->canary1)
953 		wrterror(d, "chunk info corrupted");
954 
955 	i = (r / MALLOC_CHUNK_LISTS) & (bp->total - 1);
956 
957 	/* start somewhere in a short */
958 	lp = &bp->bits[i / MALLOC_BITS];
959 	if (*lp) {
960 		j = i % MALLOC_BITS;
961 		k = ffs(*lp >> j);
962 		if (k != 0) {
963 			k += j - 1;
964 			goto found;
965 		}
966 	}
967 	/* no bit halfway, go to next full short */
968 	i /= MALLOC_BITS;
969 	for (;;) {
970 		if (++i >= bp->total / MALLOC_BITS)
971 			i = 0;
972 		lp = &bp->bits[i];
973 		if (*lp) {
974 			k = ffs(*lp) - 1;
975 			break;
976 		}
977 	}
978 found:
979 #ifdef MALLOC_STATS
980 	if (i == 0 && k == 0) {
981 		struct region_info *r = find(d, bp->page);
982 		r->f = f;
983 	}
984 #endif
985 
986 	*lp ^= 1 << k;
987 
988 	/* If there are no more free chunks, remove the page from the free-list */
989 	if (--bp->free == 0)
990 		LIST_REMOVE(bp, entries);
991 
992 	/* Adjust to the real offset of that chunk */
993 	k += (lp - bp->bits) * MALLOC_BITS;
994 
995 	if (mopts.chunk_canaries && size > 0)
996 		bp->bits[bp->offset + k] = size;
997 
998 	k <<= bp->shift;
999 
1000 	p = (char *)bp->page + k;
1001 	if (bp->size > 0) {
1002 		if (mopts.malloc_junk == 2)
1003 			memset(p, SOME_JUNK, bp->size);
1004 		else if (mopts.chunk_canaries)
1005 			fill_canary(p, size, bp->size);
1006 	}
1007 	return p;
1008 }
1009 
1010 static void
1011 validate_canary(struct dir_info *d, u_char *ptr, size_t sz, size_t allocated)
1012 {
1013 	size_t check_sz = allocated - sz;
1014 	u_char *p, *q;
1015 
1016 	if (check_sz > CHUNK_CHECK_LENGTH)
1017 		check_sz = CHUNK_CHECK_LENGTH;
1018 	p = ptr + sz;
1019 	q = p + check_sz;
1020 
1021 	while (p < q) {
1022 		if (*p != SOME_JUNK) {
1023 			wrterror(d, "chunk canary corrupted %p %#tx@%#zx%s",
1024 			    ptr, p - ptr, sz,
1025 			    *p == SOME_FREEJUNK ? " (double free?)" : "");
1026 		}
1027 		p++;
1028 	}
1029 }
1030 
1031 static uint32_t
1032 find_chunknum(struct dir_info *d, struct chunk_info *info, void *ptr, int check)
1033 {
1034 	uint32_t chunknum;
1035 
1036 	if (info->canary != (u_short)d->canary1)
1037 		wrterror(d, "chunk info corrupted");
1038 
1039 	/* Find the chunk number on the page */
1040 	chunknum = ((uintptr_t)ptr & MALLOC_PAGEMASK) >> info->shift;
1041 
1042 	if ((uintptr_t)ptr & ((1U << (info->shift)) - 1))
1043 		wrterror(d, "modified chunk-pointer %p", ptr);
1044 	if (info->bits[chunknum / MALLOC_BITS] &
1045 	    (1U << (chunknum % MALLOC_BITS)))
1046 		wrterror(d, "chunk is already free %p", ptr);
1047 	if (check && info->size > 0) {
1048 		validate_canary(d, ptr, info->bits[info->offset + chunknum],
1049 		    info->size);
1050 	}
1051 	return chunknum;
1052 }
1053 
1054 /*
1055  * Free a chunk, and possibly the page it's on, if the page becomes empty.
1056  */
1057 static void
1058 free_bytes(struct dir_info *d, struct region_info *r, void *ptr)
1059 {
1060 	struct chunk_head *mp;
1061 	struct chunk_info *info;
1062 	uint32_t chunknum;
1063 	int listnum;
1064 
1065 	info = (struct chunk_info *)r->size;
1066 	chunknum = find_chunknum(d, info, ptr, 0);
1067 
1068 	info->bits[chunknum / MALLOC_BITS] |= 1U << (chunknum % MALLOC_BITS);
1069 	info->free++;
1070 
1071 	if (info->free == 1) {
1072 		/* Page became non-full */
1073 		listnum = getrbyte(d) % MALLOC_CHUNK_LISTS;
1074 		if (info->size != 0)
1075 			mp = &d->chunk_dir[info->shift][listnum];
1076 		else
1077 			mp = &d->chunk_dir[0][listnum];
1078 
1079 		LIST_INSERT_HEAD(mp, info, entries);
1080 		return;
1081 	}
1082 
1083 	if (info->free != info->total)
1084 		return;
1085 
1086 	LIST_REMOVE(info, entries);
1087 
1088 	if (info->size == 0 && !mopts.malloc_freeunmap)
1089 		mprotect(info->page, MALLOC_PAGESIZE, PROT_READ | PROT_WRITE);
1090 	unmap(d, info->page, MALLOC_PAGESIZE, 0);
1091 
1092 	delete(d, r);
1093 	if (info->size != 0)
1094 		mp = &d->chunk_info_list[info->shift];
1095 	else
1096 		mp = &d->chunk_info_list[0];
1097 	LIST_INSERT_HEAD(mp, info, entries);
1098 }
1099 
1102 static void *
1103 omalloc(struct dir_info *pool, size_t sz, int zero_fill, void *f)
1104 {
1105 	void *p;
1106 	size_t psz;
1107 
1108 	if (sz > MALLOC_MAXCHUNK) {
1109 		if (sz >= SIZE_MAX - mopts.malloc_guard - MALLOC_PAGESIZE) {
1110 			errno = ENOMEM;
1111 			return NULL;
1112 		}
1113 		sz += mopts.malloc_guard;
1114 		psz = PAGEROUND(sz);
1115 		p = map(pool, NULL, psz, zero_fill);
1116 		if (p == MAP_FAILED) {
1117 			errno = ENOMEM;
1118 			return NULL;
1119 		}
1120 		if (insert(pool, p, sz, f)) {
1121 			unmap(pool, p, psz, 0);
1122 			errno = ENOMEM;
1123 			return NULL;
1124 		}
1125 		if (mopts.malloc_guard) {
1126 			if (mprotect((char *)p + psz - mopts.malloc_guard,
1127 			    mopts.malloc_guard, PROT_NONE))
1128 				wrterror(pool, "mprotect");
1129 			STATS_ADD(pool->malloc_guarded, mopts.malloc_guard);
1130 		}
1131 
1132 		if (MALLOC_MOVE_COND(sz)) {
1133 			/* fill whole allocation */
1134 			if (mopts.malloc_junk == 2)
1135 				memset(p, SOME_JUNK, psz - mopts.malloc_guard);
1136 			/* shift towards the end */
1137 			p = MALLOC_MOVE(p, sz);
1138 			/* fill with zeros if needed; junk may have overwritten them above */
1139 			if (zero_fill && mopts.malloc_junk == 2)
1140 				memset(p, 0, sz - mopts.malloc_guard);
1141 		} else {
1142 			if (mopts.malloc_junk == 2) {
1143 				if (zero_fill)
1144 					memset((char *)p + sz - mopts.malloc_guard,
1145 					    SOME_JUNK, psz - sz);
1146 				else
1147 					memset(p, SOME_JUNK,
1148 					    psz - mopts.malloc_guard);
1149 			} else if (mopts.chunk_canaries)
1150 				fill_canary(p, sz - mopts.malloc_guard,
1151 				    psz - mopts.malloc_guard);
1152 		}
1153 
1154 	} else {
1155 		/* takes care of SOME_JUNK */
1156 		p = malloc_bytes(pool, sz, f);
1157 		if (zero_fill && p != NULL && sz > 0)
1158 			memset(p, 0, sz);
1159 	}
1160 
1161 	return p;
1162 }
1163 
1164 /*
1165  * Common function for handling recursion.  Only
1166  * print the error message once, to avoid making the problem
1167  * potentially worse.
1168  */
1169 static void
1170 malloc_recurse(struct dir_info *d)
1171 {
1172 	static int noprint;
1173 
1174 	if (noprint == 0) {
1175 		noprint = 1;
1176 		wrterror(d, "recursive call");
1177 	}
1178 	d->active--;
1179 	_MALLOC_UNLOCK(d->mutex);
1180 	errno = EDEADLK;
1181 }
1182 
1183 void
1184 _malloc_init(int from_rthreads)
1185 {
1186 	int i, max;
1187 	struct dir_info *d;
1188 
1189 	_MALLOC_LOCK(0);
1190 	if (!from_rthreads && mopts.malloc_pool[0]) {
1191 		_MALLOC_UNLOCK(0);
1192 		return;
1193 	}
1194 	if (!mopts.malloc_canary)
1195 		omalloc_init();
1196 
1197 	max = from_rthreads ? _MALLOC_MUTEXES : 1;
1198 	if (((uintptr_t)&malloc_readonly & MALLOC_PAGEMASK) == 0)
1199 		mprotect(&malloc_readonly, sizeof(malloc_readonly),
1200 		    PROT_READ | PROT_WRITE);
1201 	for (i = 0; i < max; i++) {
1202 		if (mopts.malloc_pool[i])
1203 			continue;
1204 		omalloc_poolinit(&d);
1205 		d->mutex = i;
1206 		mopts.malloc_pool[i] = d;
1207 	}
1208 
1209 	if (from_rthreads)
1210 		mopts.malloc_mt = 1;
1211 	else
1212 		mopts.internal_funcs = 1;
1213 
1214 	/*
1215 	 * Options have been set and will never be reset.
1216 	 * Prevent further tampering with them.
1217 	 */
1218 	if (((uintptr_t)&malloc_readonly & MALLOC_PAGEMASK) == 0)
1219 		mprotect(&malloc_readonly, sizeof(malloc_readonly), PROT_READ);
1220 	_MALLOC_UNLOCK(0);
1221 }
1222 DEF_STRONG(_malloc_init);
1223 
1224 void *
1225 malloc(size_t size)
1226 {
1227 	void *r;
1228 	struct dir_info *d;
1229 	int saved_errno = errno;
1230 
1231 	d = getpool();
1232 	if (d == NULL) {
1233 		_malloc_init(0);
1234 		d = getpool();
1235 	}
1236 	_MALLOC_LOCK(d->mutex);
1237 	d->func = "malloc";
1238 
1239 	if (d->active++) {
1240 		malloc_recurse(d);
1241 		return NULL;
1242 	}
1243 	r = omalloc(d, size, 0, CALLER);
1244 	d->active--;
1245 	_MALLOC_UNLOCK(d->mutex);
1246 	if (r == NULL && mopts.malloc_xmalloc)
1247 		wrterror(d, "out of memory");
1248 	if (r != NULL)
1249 		errno = saved_errno;
1250 	return r;
1251 }
1252 /*DEF_STRONG(malloc);*/
1253 
1254 static void
1255 validate_junk(struct dir_info *pool, void *p)
1256 {
1257 	struct region_info *r;
1258 	size_t byte, sz;
1259 
1260 	if (p == NULL)
1261 		return;
1262 	r = find(pool, p);
1263 	if (r == NULL)
1264 		wrterror(pool, "bogus pointer in validate_junk %p", p);
1265 	REALSIZE(sz, r);
1266 	if (sz > CHUNK_CHECK_LENGTH)
1267 		sz = CHUNK_CHECK_LENGTH;
1268 	for (byte = 0; byte < sz; byte++) {
1269 		if (((unsigned char *)p)[byte] != SOME_FREEJUNK)
1270 			wrterror(pool, "use after free %p", p);
1271 	}
1272 }
1273 
1274 static void
1275 ofree(struct dir_info *argpool, void *p, int clear, int check, size_t argsz)
1276 {
1277 	struct dir_info *pool;
1278 	struct region_info *r;
1279 	size_t sz;
1280 	int i;
1281 
1282 	pool = argpool;
1283 	r = find(pool, p);
1284 	if (r == NULL) {
1285 		if (mopts.malloc_mt)  {
1286 			for (i = 0; i < _MALLOC_MUTEXES; i++) {
1287 				if (i == argpool->mutex)
1288 					continue;
1289 				pool->active--;
1290 				_MALLOC_UNLOCK(pool->mutex);
1291 				pool = mopts.malloc_pool[i];
1292 				_MALLOC_LOCK(pool->mutex);
1293 				pool->active++;
1294 				r = find(pool, p);
1295 				if (r != NULL)
1296 					break;
1297 			}
1298 		}
1299 		if (r == NULL)
1300 			wrterror(pool, "bogus pointer (double free?) %p", p);
1301 	}
1302 
1303 	REALSIZE(sz, r);
1304 	if (check) {
1305 		if (sz <= MALLOC_MAXCHUNK) {
1306 			if (mopts.chunk_canaries && sz > 0) {
1307 				struct chunk_info *info =
1308 				    (struct chunk_info *)r->size;
1309 				uint32_t chunknum =
1310 				    find_chunknum(pool, info, p, 0);
1311 
1312 				if (info->bits[info->offset + chunknum] <
1313 				    argsz)
1314 					wrterror(pool, "recorded size %hu"
1315 					    " < %zu",
1316 					    info->bits[info->offset + chunknum],
1317 					    argsz);
1318 			} else {
1319 				if (sz < argsz)
1320 					wrterror(pool, "chunk size %zu < %zu",
1321 					    sz, argsz);
1322 			}
1323 		} else if (sz - mopts.malloc_guard < argsz) {
1324 			wrterror(pool, "recorded size %zu < %zu",
1325 			    sz - mopts.malloc_guard, argsz);
1326 		}
1327 	}
1328 	if (sz > MALLOC_MAXCHUNK) {
1329 		if (!MALLOC_MOVE_COND(sz)) {
1330 			if (r->p != p)
1331 				wrterror(pool, "bogus pointer %p", p);
1332 			if (mopts.chunk_canaries)
1333 				validate_canary(pool, p,
1334 				    sz - mopts.malloc_guard,
1335 				    PAGEROUND(sz - mopts.malloc_guard));
1336 		} else {
1337 			/* shifted towards the end */
1338 			if (p != MALLOC_MOVE(r->p, sz))
1339 				wrterror(pool, "bogus moved pointer %p", p);
1340 			p = r->p;
1341 		}
1342 		if (mopts.malloc_guard) {
1343 			if (sz < mopts.malloc_guard)
1344 				wrterror(pool, "guard size");
1345 			if (!mopts.malloc_freeunmap) {
1346 				if (mprotect((char *)p + PAGEROUND(sz) -
1347 				    mopts.malloc_guard, mopts.malloc_guard,
1348 				    PROT_READ | PROT_WRITE))
1349 					wrterror(pool, "mprotect");
1350 			}
1351 			STATS_SUB(pool->malloc_guarded, mopts.malloc_guard);
1352 		}
1353 		unmap(pool, p, PAGEROUND(sz), clear);
1354 		delete(pool, r);
1355 	} else {
1356 		/* Validate and optionally canary check */
1357 		struct chunk_info *info = (struct chunk_info *)r->size;
1358 		find_chunknum(pool, info, p, mopts.chunk_canaries);
1359 		if (!clear) {
1360 			void *tmp;
1361 			int i;
1362 
1363 			if (mopts.malloc_freecheck) {
1364 				for (i = 0; i <= MALLOC_DELAYED_CHUNK_MASK; i++)
1365 					if (p == pool->delayed_chunks[i])
1366 						wrterror(pool,
1367 						    "double free %p", p);
1368 			}
1369 			if (mopts.malloc_junk && sz > 0)
1370 				memset(p, SOME_FREEJUNK, sz);
1371 			i = getrbyte(pool) & MALLOC_DELAYED_CHUNK_MASK;
1372 			tmp = p;
1373 			p = pool->delayed_chunks[i];
1374 			if (tmp == p)
1375 				wrterror(pool, "double free %p", tmp);
1376 			pool->delayed_chunks[i] = tmp;
1377 			if (mopts.malloc_junk)
1378 				validate_junk(pool, p);
1379 		} else if (sz > 0)
1380 			memset(p, 0, sz);
1381 		if (p != NULL) {
1382 			r = find(pool, p);
1383 			if (r == NULL)
1384 				wrterror(pool,
1385 				    "bogus pointer (double free?) %p", p);
1386 			free_bytes(pool, r, p);
1387 		}
1388 	}
1389 
1390 	if (argpool != pool) {
1391 		pool->active--;
1392 		_MALLOC_UNLOCK(pool->mutex);
1393 		_MALLOC_LOCK(argpool->mutex);
1394 		argpool->active++;
1395 	}
1396 }
1397 
1398 void
1399 free(void *ptr)
1400 {
1401 	struct dir_info *d;
1402 	int saved_errno = errno;
1403 
1404 	/* This is legal. */
1405 	if (ptr == NULL)
1406 		return;
1407 
1408 	d = getpool();
1409 	if (d == NULL)
1410 		wrterror(d, "free() called before allocation");
1411 	_MALLOC_LOCK(d->mutex);
1412 	d->func = "free";
1413 	if (d->active++) {
1414 		malloc_recurse(d);
1415 		return;
1416 	}
1417 	ofree(d, ptr, 0, 0, 0);
1418 	d->active--;
1419 	_MALLOC_UNLOCK(d->mutex);
1420 	errno = saved_errno;
1421 }
1422 /*DEF_STRONG(free);*/
1423 
1424 static void
1425 freezero_p(void *ptr, size_t sz)
1426 {
1427 	explicit_bzero(ptr, sz);
1428 	free(ptr);
1429 }
1430 
1431 void
1432 freezero(void *ptr, size_t sz)
1433 {
1434 	struct dir_info *d;
1435 	int saved_errno = errno;
1436 
1437 	/* This is legal. */
1438 	if (ptr == NULL)
1439 		return;
1440 
1441 	if (!mopts.internal_funcs) {
1442 		freezero_p(ptr, sz);
1443 		return;
1444 	}
1445 
1446 	d = getpool();
1447 	if (d == NULL)
1448 		wrterror(d, "freezero() called before allocation");
1449 	_MALLOC_LOCK(d->mutex);
1450 	d->func = "freezero";
1451 	if (d->active++) {
1452 		malloc_recurse(d);
1453 		return;
1454 	}
1455 	ofree(d, ptr, 1, 1, sz);
1456 	d->active--;
1457 	_MALLOC_UNLOCK(d->mutex);
1458 	errno = saved_errno;
1459 }
1460 DEF_WEAK(freezero);
1461 
1462 static void *
1463 orealloc(struct dir_info *argpool, void *p, size_t newsz, void *f)
1464 {
1465 	struct dir_info *pool;
1466 	struct region_info *r;
1467 	struct chunk_info *info;
1468 	size_t oldsz, goldsz, gnewsz;
1469 	void *q, *ret;
1470 	int i;
1471 	uint32_t chunknum;
1472 
1473 	pool = argpool;
1474 
1475 	if (p == NULL)
1476 		return omalloc(pool, newsz, 0, f);
1477 
1478 	r = find(pool, p);
1479 	if (r == NULL) {
1480 		if (mopts.malloc_mt) {
1481 			for (i = 0; i < _MALLOC_MUTEXES; i++) {
1482 				if (i == argpool->mutex)
1483 					continue;
1484 				pool->active--;
1485 				_MALLOC_UNLOCK(pool->mutex);
1486 				pool = mopts.malloc_pool[i];
1487 				_MALLOC_LOCK(pool->mutex);
1488 				pool->active++;
1489 				r = find(pool, p);
1490 				if (r != NULL)
1491 					break;
1492 			}
1493 		}
1494 		if (r == NULL)
1495 			wrterror(pool, "bogus pointer (double free?) %p", p);
1496 	}
1497 	if (newsz >= SIZE_MAX - mopts.malloc_guard - MALLOC_PAGESIZE) {
1498 		errno = ENOMEM;
1499 		ret = NULL;
1500 		goto done;
1501 	}
1502 
1503 	REALSIZE(oldsz, r);
1504 	if (mopts.chunk_canaries && oldsz <= MALLOC_MAXCHUNK) {
1505 		info = (struct chunk_info *)r->size;
1506 		chunknum = find_chunknum(pool, info, p, 0);
1507 	}
1508 
1509 	goldsz = oldsz;
1510 	if (oldsz > MALLOC_MAXCHUNK) {
1511 		if (oldsz < mopts.malloc_guard)
1512 			wrterror(pool, "guard size");
1513 		oldsz -= mopts.malloc_guard;
1514 	}
1515 
1516 	gnewsz = newsz;
1517 	if (gnewsz > MALLOC_MAXCHUNK)
1518 		gnewsz += mopts.malloc_guard;
1519 
1520 	if (newsz > MALLOC_MAXCHUNK && oldsz > MALLOC_MAXCHUNK &&
1521 	    !mopts.malloc_realloc) {
1522 		/* First case: both the old and the new allocation are
1523 		   page-sized; grow, shrink or stay in place if possible */
1524 		size_t roldsz = PAGEROUND(goldsz);
1525 		size_t rnewsz = PAGEROUND(gnewsz);
1526 
1527 		if (rnewsz > roldsz) {
1528 			/* try to extend existing region */
1529 			if (!mopts.malloc_guard) {
1530 				void *hint = (char *)r->p + roldsz;
1531 				size_t needed = rnewsz - roldsz;
1532 
1533 				STATS_INC(pool->cheap_realloc_tries);
1534 				q = map(pool, hint, needed, 0);
1535 				if (q == hint)
1536 					goto gotit;
1537 				zapcacheregion(pool, hint, needed);
1538 				q = MQUERY(hint, needed);
1539 				if (q == hint)
1540 					q = MMAPA(hint, needed);
1541 				else
1542 					q = MAP_FAILED;
1543 				if (q == hint) {
1544 gotit:
1545 					STATS_ADD(pool->malloc_used, needed);
1546 					if (mopts.malloc_junk == 2)
1547 						memset(q, SOME_JUNK, needed);
1548 					r->size = gnewsz;
1549 					if (r->p != p) {
1550 						/* old pointer is moved */
1551 						memmove(r->p, p, oldsz);
1552 						p = r->p;
1553 					}
1554 					if (mopts.chunk_canaries)
1555 						fill_canary(p, newsz,
1556 						    PAGEROUND(newsz));
1557 					STATS_SETF(r, f);
1558 					STATS_INC(pool->cheap_reallocs);
1559 					ret = p;
1560 					goto done;
1561 				} else if (q != MAP_FAILED) {
1562 					if (munmap(q, needed))
1563 						wrterror(pool, "munmap %p", q);
1564 				}
1565 			}
1566 		} else if (rnewsz < roldsz) {
1567 			/* shrink number of pages */
1568 			if (mopts.malloc_guard) {
1569 				if (mprotect((char *)r->p + roldsz -
1570 				    mopts.malloc_guard, mopts.malloc_guard,
1571 				    PROT_READ | PROT_WRITE))
1572 					wrterror(pool, "mprotect");
1573 				if (mprotect((char *)r->p + rnewsz -
1574 				    mopts.malloc_guard, mopts.malloc_guard,
1575 				    PROT_NONE))
1576 					wrterror(pool, "mprotect");
1577 			}
1578 			unmap(pool, (char *)r->p + rnewsz, roldsz - rnewsz, 0);
1579 			r->size = gnewsz;
1580 			if (MALLOC_MOVE_COND(gnewsz)) {
1581 				void *pp = MALLOC_MOVE(r->p, gnewsz);
1582 				memmove(pp, p, newsz);
1583 				p = pp;
1584 			} else if (mopts.chunk_canaries)
1585 				fill_canary(p, newsz, PAGEROUND(newsz));
1586 			STATS_SETF(r, f);
1587 			ret = p;
1588 			goto done;
1589 		} else {
1590 			/* number of pages remains the same */
1591 			void *pp = r->p;
1592 
1593 			r->size = gnewsz;
1594 			if (MALLOC_MOVE_COND(gnewsz))
1595 				pp = MALLOC_MOVE(r->p, gnewsz);
1596 			if (p != pp) {
1597 				memmove(pp, p, oldsz < newsz ? oldsz : newsz);
1598 				p = pp;
1599 			}
1600 			if (p == r->p) {
1601 				if (newsz > oldsz && mopts.malloc_junk == 2)
1602 					memset((char *)p + newsz, SOME_JUNK,
1603 					    rnewsz - mopts.malloc_guard -
1604 					    newsz);
1605 				if (mopts.chunk_canaries)
1606 					fill_canary(p, newsz, PAGEROUND(newsz));
1607 			}
1608 			STATS_SETF(r, f);
1609 			ret = p;
1610 			goto done;
1611 		}
1612 	}
1613 	if (oldsz <= MALLOC_MAXCHUNK && oldsz > 0 &&
1614 	    newsz <= MALLOC_MAXCHUNK && newsz > 0 &&
1615 	    1 << find_chunksize(newsz) == oldsz && !mopts.malloc_realloc) {
1616 		/* do not reallocate if the new size fits well in the existing chunk */
1617 		if (mopts.malloc_junk == 2)
1618 			memset((char *)p + newsz, SOME_JUNK, oldsz - newsz);
1619 		if (mopts.chunk_canaries) {
1620 			info->bits[info->offset + chunknum] = newsz;
1621 			fill_canary(p, newsz, info->size);
1622 		}
1623 		STATS_SETF(r, f);
1624 		ret = p;
1625 	} else if (newsz != oldsz || mopts.malloc_realloc) {
1626 		/* create new allocation */
1627 		q = omalloc(pool, newsz, 0, f);
1628 		if (q == NULL) {
1629 			ret = NULL;
1630 			goto done;
1631 		}
1632 		if (newsz != 0 && oldsz != 0)
1633 			memcpy(q, p, oldsz < newsz ? oldsz : newsz);
1634 		ofree(pool, p, 0, 0, 0);
1635 		ret = q;
1636 	} else {
1637 		/* oldsz == newsz */
1638 		if (newsz != 0)
1639 			wrterror(pool, "realloc internal inconsistency");
1640 		STATS_SETF(r, f);
1641 		ret = p;
1642 	}
1643 done:
1644 	if (argpool != pool) {
1645 		pool->active--;
1646 		_MALLOC_UNLOCK(pool->mutex);
1647 		_MALLOC_LOCK(argpool->mutex);
1648 		argpool->active++;
1649 	}
1650 	return ret;
1651 }
1652 
1653 void *
1654 realloc(void *ptr, size_t size)
1655 {
1656 	struct dir_info *d;
1657 	void *r;
1658 	int saved_errno = errno;
1659 
1660 	d = getpool();
1661 	if (d == NULL) {
1662 		_malloc_init(0);
1663 		d = getpool();
1664 	}
1665 	_MALLOC_LOCK(d->mutex);
1666 	d->func = "realloc";
1667 	if (d->active++) {
1668 		malloc_recurse(d);
1669 		return NULL;
1670 	}
1671 	r = orealloc(d, ptr, size, CALLER);
1672 
1673 	d->active--;
1674 	_MALLOC_UNLOCK(d->mutex);
1675 	if (r == NULL && mopts.malloc_xmalloc)
1676 		wrterror(d, "out of memory");
1677 	if (r != NULL)
1678 		errno = saved_errno;
1679 	return r;
1680 }
1681 /*DEF_STRONG(realloc);*/
1682 
1684 /*
1685  * This is sqrt(SIZE_MAX+1), as s1*s2 <= SIZE_MAX
1686  * if both s1 < MUL_NO_OVERFLOW and s2 < MUL_NO_OVERFLOW
1687  */
1688 #define MUL_NO_OVERFLOW	(1UL << (sizeof(size_t) * 4))
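
/*
 * E.g. on LP64, MUL_NO_OVERFLOW == 1UL << 32: if both operands are
 * below it, nmemb * size cannot exceed SIZE_MAX and the (slower)
 * division check is skipped.
 */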
1689 
1690 void *
1691 calloc(size_t nmemb, size_t size)
1692 {
1693 	struct dir_info *d;
1694 	void *r;
1695 	int saved_errno = errno;
1696 
1697 	d = getpool();
1698 	if (d == NULL) {
1699 		_malloc_init(0);
1700 		d = getpool();
1701 	}
1702 	_MALLOC_LOCK(d->mutex);
1703 	d->func = "calloc";
1704 	if ((nmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
1705 	    nmemb > 0 && SIZE_MAX / nmemb < size) {
1706 		_MALLOC_UNLOCK(d->mutex);
1707 		if (mopts.malloc_xmalloc)
1708 			wrterror(d, "out of memory");
1709 		errno = ENOMEM;
1710 		return NULL;
1711 	}
1712 
1713 	if (d->active++) {
1714 		malloc_recurse(d);
1715 		return NULL;
1716 	}
1717 
1718 	size *= nmemb;
1719 	r = omalloc(d, size, 1, CALLER);
1720 
1721 	d->active--;
1722 	_MALLOC_UNLOCK(d->mutex);
1723 	if (r == NULL && mopts.malloc_xmalloc)
1724 		wrterror(d, "out of memory");
1725 	if (r != NULL)
1726 		errno = saved_errno;
1727 	return r;
1728 }
1729 /*DEF_STRONG(calloc);*/
1730 
1731 static void *
1732 orecallocarray(struct dir_info *argpool, void *p, size_t oldsize,
1733     size_t newsize, void *f)
1734 {
1735 	struct dir_info *pool;
1736 	struct region_info *r;
1737 	void *newptr;
1738 	size_t sz;
1739 	int i;
1740 
1741 	pool = argpool;
1742 
1743 	if (p == NULL)
1744 		return omalloc(pool, newsize, 1, f);
1745 
1746 	if (oldsize == newsize)
1747 		return p;
1748 
1749 	r = find(pool, p);
1750 	if (r == NULL) {
1751 		if (mopts.malloc_mt) {
1752 			for (i = 0; i < _MALLOC_MUTEXES; i++) {
1753 				if (i == argpool->mutex)
1754 					continue;
1755 				pool->active--;
1756 				_MALLOC_UNLOCK(pool->mutex);
1757 				pool = mopts.malloc_pool[i];
1758 				_MALLOC_LOCK(pool->mutex);
1759 				pool->active++;
1760 				r = find(pool, p);
1761 				if (r != NULL)
1762 					break;
1763 			}
1764 		}
1765 		if (r == NULL)
1766 			wrterror(pool, "bogus pointer (double free?) %p", p);
1767 	}
1768 
1769 	REALSIZE(sz, r);
1770 	if (sz <= MALLOC_MAXCHUNK) {
1771 		if (mopts.chunk_canaries && sz > 0) {
1772 			struct chunk_info *info = (struct chunk_info *)r->size;
1773 			uint32_t chunknum = find_chunknum(pool, info, p, 0);
1774 
1775 			if (info->bits[info->offset + chunknum] != oldsize)
1776 				wrterror(pool, "recorded old size %hu != %zu",
1777 				    info->bits[info->offset + chunknum],
1778 				    oldsize);
1779 		}
1780 	} else if (oldsize != sz - mopts.malloc_guard)
1781 		wrterror(pool, "recorded old size %zu != %zu",
1782 		    sz - mopts.malloc_guard, oldsize);
1783 
1784 	newptr = omalloc(pool, newsize, 0, f);
1785 	if (newptr == NULL)
1786 		goto done;
1787 
1788 	if (newsize > oldsize) {
1789 		memcpy(newptr, p, oldsize);
1790 		memset((char *)newptr + oldsize, 0, newsize - oldsize);
1791 	} else
1792 		memcpy(newptr, p, newsize);
1793 
1794 	ofree(pool, p, 1, 0, 0);
1795 
1796 done:
1797 	if (argpool != pool) {
1798 		pool->active--;
1799 		_MALLOC_UNLOCK(pool->mutex);
1800 		_MALLOC_LOCK(argpool->mutex);
1801 		argpool->active++;
1802 	}
1803 
1804 	return newptr;
1805 }
1806 
1807 static void *
1808 recallocarray_p(void *ptr, size_t oldnmemb, size_t newnmemb, size_t size)
1809 {
1810 	size_t oldsize, newsize;
1811 	void *newptr;
1812 
1813 	if (ptr == NULL)
1814 		return calloc(newnmemb, size);
1815 
1816 	if ((newnmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
1817 	    newnmemb > 0 && SIZE_MAX / newnmemb < size) {
1818 		errno = ENOMEM;
1819 		return NULL;
1820 	}
1821 	newsize = newnmemb * size;
1822 
1823 	if ((oldnmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
1824 	    oldnmemb > 0 && SIZE_MAX / oldnmemb < size) {
1825 		errno = EINVAL;
1826 		return NULL;
1827 	}
1828 	oldsize = oldnmemb * size;
1829 
1830 	/*
1831 	 * Don't bother too much if we're shrinking just a bit;
1832 	 * we do not shrink for a series of small steps, oh well.
1833 	 */
1834 	if (newsize <= oldsize) {
1835 		size_t d = oldsize - newsize;
1836 
1837 		if (d < oldsize / 2 && d < MALLOC_PAGESIZE) {
1838 			memset((char *)ptr + newsize, 0, d);
1839 			return ptr;
1840 		}
1841 	}
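
	/*
	 * E.g. shrinking from 1000 to 900 bytes (d == 100: less than half
	 * the old size and less than a page) just zeroes the 100-byte tail
	 * and returns the same pointer.
	 */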
1842 
1843 	newptr = malloc(newsize);
1844 	if (newptr == NULL)
1845 		return NULL;
1846 
1847 	if (newsize > oldsize) {
1848 		memcpy(newptr, ptr, oldsize);
1849 		memset((char *)newptr + oldsize, 0, newsize - oldsize);
1850 	} else
1851 		memcpy(newptr, ptr, newsize);
1852 
1853 	explicit_bzero(ptr, oldsize);
1854 	free(ptr);
1855 
1856 	return newptr;
1857 }
1858 
1859 void *
1860 recallocarray(void *ptr, size_t oldnmemb, size_t newnmemb, size_t size)
1861 {
1862 	struct dir_info *d;
1863 	size_t oldsize = 0, newsize;
1864 	void *r;
1865 	int saved_errno = errno;
1866 
1867 	if (!mopts.internal_funcs)
1868 		return recallocarray_p(ptr, oldnmemb, newnmemb, size);
1869 
1870 	d = getpool();
1871 	if (d == NULL) {
1872 		_malloc_init(0);
1873 		d = getpool();
1874 	}
1875 
1876 	_MALLOC_LOCK(d->mutex);
1877 	d->func = "recallocarray";
1878 
1879 	if ((newnmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
1880 	    newnmemb > 0 && SIZE_MAX / newnmemb < size) {
1881 		_MALLOC_UNLOCK(d->mutex);
1882 		if (mopts.malloc_xmalloc)
1883 			wrterror(d, "out of memory");
1884 		errno = ENOMEM;
1885 		return NULL;
1886 	}
1887 	newsize = newnmemb * size;
1888 
1889 	if (ptr != NULL) {
1890 		if ((oldnmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
1891 		    oldnmemb > 0 && SIZE_MAX / oldnmemb < size) {
1892 			_MALLOC_UNLOCK(d->mutex);
1893 			errno = EINVAL;
1894 			return NULL;
1895 		}
1896 		oldsize = oldnmemb * size;
1897 	}
1898 
1899 	if (d->active++) {
1900 		malloc_recurse(d);
1901 		return NULL;
1902 	}
1903 
1904 	r = orecallocarray(d, ptr, oldsize, newsize, CALLER);
1905 
1906 	d->active--;
1907 	_MALLOC_UNLOCK(d->mutex);
1908 	if (r == NULL && mopts.malloc_xmalloc)
1909 		wrterror(d, "out of memory");
1910 	if (r != NULL)
1911 		errno = saved_errno;
1912 	return r;
1913 }
1914 DEF_WEAK(recallocarray);
1915 
1917 static void *
1918 mapalign(struct dir_info *d, size_t alignment, size_t sz, int zero_fill)
1919 {
1920 	char *p, *q;
1921 
1922 	if (alignment < MALLOC_PAGESIZE || ((alignment - 1) & alignment) != 0)
1923 		wrterror(d, "mapalign bad alignment");
1924 	if (sz != PAGEROUND(sz))
1925 		wrterror(d, "mapalign round");
1926 
1927 	/* Allocate sz + alignment bytes of memory, which must include a
1928 	 * subrange of sz bytes that is properly aligned.  Unmap the
1929 	 * other bytes, and then return that subrange.
1930 	 */
1931 
1932 	/* We need sz + alignment to fit into a size_t. */
1933 	if (alignment > SIZE_MAX - sz)
1934 		return MAP_FAILED;
1935 
1936 	p = map(d, NULL, sz + alignment, zero_fill);
1937 	if (p == MAP_FAILED)
1938 		return MAP_FAILED;
1939 	q = (char *)(((uintptr_t)p + alignment - 1) & ~(alignment - 1));
1940 	if (q != p) {
1941 		if (munmap(p, q - p))
1942 			wrterror(d, "munmap %p", p);
1943 	}
1944 	if (munmap(q + sz, alignment - (q - p)))
1945 		wrterror(d, "munmap %p", q + sz);
1946 	STATS_SUB(d->malloc_used, alignment);
1947 
1948 	return q;
1949 }
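
/*
 * E.g. (illustrative; 4K pages): mapalign(d, 8192, 4096, 0) maps 12K,
 * rounds the start up to the next 8K boundary, unmaps the head and
 * tail and returns the aligned middle page.
 */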
1950 
1951 static void *
1952 omemalign(struct dir_info *pool, size_t alignment, size_t sz, int zero_fill,
1953     void *f)
1954 {
1955 	size_t psz;
1956 	void *p;
1957 
1958 	/* If between half a page and a page, avoid MALLOC_MOVE. */
1959 	if (sz > MALLOC_MAXCHUNK && sz < MALLOC_PAGESIZE)
1960 		sz = MALLOC_PAGESIZE;
1961 	if (alignment <= MALLOC_PAGESIZE) {
1962 		/*
1963 		 * max(size, alignment) is enough to assure the requested
1964 		 * alignment, since the allocator always allocates
1965 		 * power-of-two blocks.
1966 		 */
1967 		if (sz < alignment)
1968 			sz = alignment;
1969 		return omalloc(pool, sz, zero_fill, f);
1970 	}
1971 
1972 	if (sz >= SIZE_MAX - mopts.malloc_guard - MALLOC_PAGESIZE) {
1973 		errno = ENOMEM;
1974 		return NULL;
1975 	}
1976 
1977 	sz += mopts.malloc_guard;
1978 	psz = PAGEROUND(sz);
1979 
1980 	p = mapalign(pool, alignment, psz, zero_fill);
1981 	if (p == MAP_FAILED) {
1982 		errno = ENOMEM;
1983 		return NULL;
1984 	}
1985 
1986 	if (insert(pool, p, sz, f)) {
1987 		unmap(pool, p, psz, 0);
1988 		errno = ENOMEM;
1989 		return NULL;
1990 	}
1991 
1992 	if (mopts.malloc_guard) {
1993 		if (mprotect((char *)p + psz - mopts.malloc_guard,
1994 		    mopts.malloc_guard, PROT_NONE))
1995 			wrterror(pool, "mprotect");
1996 		STATS_ADD(pool->malloc_guarded, mopts.malloc_guard);
1997 	}
1998 
	if (mopts.malloc_junk == 2) {
		if (zero_fill)
			memset((char *)p + sz - mopts.malloc_guard,
			    SOME_JUNK, psz - sz);
		else
			memset(p, SOME_JUNK, psz - mopts.malloc_guard);
	} else if (mopts.chunk_canaries)
		fill_canary(p, sz - mopts.malloc_guard,
		    psz - mopts.malloc_guard);

	return p;
}

int
posix_memalign(void **memptr, size_t alignment, size_t size)
{
	struct dir_info *d;
	int res, saved_errno = errno;
	void *r;

	/* Make sure that alignment is a large enough power of 2. */
	if (((alignment - 1) & alignment) != 0 || alignment < sizeof(void *))
		return EINVAL;

	d = getpool();
	if (d == NULL) {
		_malloc_init(0);
		d = getpool();
	}
	_MALLOC_LOCK(d->mutex);
	d->func = "posix_memalign";
	if (d->active++) {
		malloc_recurse(d);
		goto err;
	}
	r = omemalign(d, alignment, size, 0, CALLER);
	d->active--;
	_MALLOC_UNLOCK(d->mutex);
	if (r == NULL) {
		if (mopts.malloc_xmalloc)
			wrterror(d, "out of memory");
		goto err;
	}
	errno = saved_errno;
	*memptr = r;
	return 0;

err:
	res = errno;
	errno = saved_errno;
	return res;
}
/*DEF_STRONG(posix_memalign);*/
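
/*
 * Illustrative use per POSIX: posix_memalign() stores the result via
 * memptr and returns 0 or an error number, without touching errno:
 *
 *	void *p;
 *	if (posix_memalign(&p, 64, 1024) == 0) {
 *		... p is 64-byte aligned ...
 *		free(p);
 *	}
 */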

#ifdef MALLOC_STATS

struct malloc_leak {
	void *f;
	size_t total_size;
	int count;
};

struct leaknode {
	RBT_ENTRY(leaknode) entry;
	struct malloc_leak d;
};

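/*
 * Leak records live in a red-black tree keyed on the caller address
 * f; each node aggregates the count and total size of allocations
 * recorded for that caller.
 */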
static inline int
leakcmp(const struct leaknode *e1, const struct leaknode *e2)
{
	return e1->d.f < e2->d.f ? -1 : e1->d.f > e2->d.f;
}

static RBT_HEAD(leaktree, leaknode) leakhead;
RBT_PROTOTYPE(leaktree, leaknode, entry, leakcmp);
RBT_GENERATE(leaktree, leaknode, entry, leakcmp);

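/*
 * Record cnt allocations of sz bytes by caller f.  Nodes are bump
 * allocated from private mmap()ed pages and never freed; if MMAP()
 * ever fails, page stays at MAP_FAILED and subsequent records are
 * silently dropped.
 */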
static void
putleakinfo(void *f, size_t sz, int cnt)
{
	struct leaknode key, *p;
	static struct leaknode *page;
	static int used;

	if (cnt == 0 || page == MAP_FAILED)
		return;

	key.d.f = f;
	p = RBT_FIND(leaktree, &leakhead, &key);
	if (p == NULL) {
		if (page == NULL ||
		    used >= MALLOC_PAGESIZE / sizeof(struct leaknode)) {
			page = MMAP(MALLOC_PAGESIZE);
			if (page == MAP_FAILED)
				return;
			used = 0;
		}
		p = &page[used++];
		p->d.f = f;
		p->d.total_size = sz * cnt;
		p->d.count = cnt;
		RBT_INSERT(leaktree, &leakhead, p);
	} else {
		p->d.total_size += sz * cnt;
		p->d.count += cnt;
	}
}

static struct malloc_leak *malloc_leaks;

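/*
 * Write the leak report to fd, and mirror up to one page of the
 * summary into the malloc_leaks array so it can also be examined out
 * of band, e.g. from a debugger.
 */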
static void
dump_leaks(int fd)
{
	struct leaknode *p;
	int i = 0;

	dprintf(fd, "Leak report\n");
	dprintf(fd, "                 f     sum      #    avg\n");
	/* XXX only one page of summary */
	if (malloc_leaks == NULL)
		malloc_leaks = MMAP(MALLOC_PAGESIZE);
	if (malloc_leaks != MAP_FAILED)
		memset(malloc_leaks, 0, MALLOC_PAGESIZE);
	RBT_FOREACH(p, leaktree, &leakhead) {
		dprintf(fd, "%18p %7zu %6d %6zu\n", p->d.f,
		    p->d.total_size, p->d.count, p->d.total_size / p->d.count);
		if (malloc_leaks == MAP_FAILED ||
		    i >= MALLOC_PAGESIZE / sizeof(struct malloc_leak))
			continue;
		malloc_leaks[i].f = p->d.f;
		malloc_leaks[i].total_size = p->d.total_size;
		malloc_leaks[i].count = p->d.count;
		i++;
	}
}

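/*
 * Dump a chain of chunk_info structs.  The recorded caller f only
 * applies to the first chunk in a page, so when bit 0 of bits[0]
 * indicates that chunk is free, the remaining live chunks are
 * attributed to an unknown (NULL) caller.
 */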
static void
dump_chunk(int fd, struct chunk_info *p, void *f, int fromfreelist)
{
	while (p != NULL) {
		dprintf(fd, "chunk %18p %18p %4d %d/%d\n",
		    p->page, ((p->bits[0] & 1) ? NULL : f),
		    p->size, p->free, p->total);
		if (!fromfreelist) {
			if (p->bits[0] & 1)
				putleakinfo(NULL, p->size, p->total - p->free);
			else {
				putleakinfo(f, p->size, 1);
				putleakinfo(NULL, p->size,
				    p->total - p->free - 1);
			}
			break;
		}
		p = LIST_NEXT(p, entries);
		if (p != NULL)
			dprintf(fd, "        ");
	}
}

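/*
 * For each chunk size class, print the number of spare chunk_info
 * structs and the contents of the MALLOC_CHUNK_LISTS directories of
 * partially free chunk pages.
 */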
static void
dump_free_chunk_info(int fd, struct dir_info *d)
{
	int i, j, count;
	struct chunk_info *p;

	dprintf(fd, "Free chunk structs:\n");
	for (i = 0; i <= MALLOC_MAXSHIFT; i++) {
		count = 0;
		LIST_FOREACH(p, &d->chunk_info_list[i], entries)
			count++;
		for (j = 0; j < MALLOC_CHUNK_LISTS; j++) {
			p = LIST_FIRST(&d->chunk_dir[i][j]);
			if (p == NULL && count == 0)
				continue;
			dprintf(fd, "%2d) %3d ", i, count);
			if (p != NULL)
				dump_chunk(fd, p, NULL, 1);
			else
				dprintf(fd, "\n");
		}
	}
}

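/*
 * Print the free region cache: regions already freed but kept mapped
 * for reuse, one line per occupied cache slot.
 */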
static void
dump_free_page_info(int fd, struct dir_info *d)
{
	int i;

	dprintf(fd, "Free pages cached: %zu\n", d->free_regions_size);
	for (i = 0; i < mopts.malloc_cache; i++) {
		if (d->free_regions[i].p != NULL) {
			dprintf(fd, "%2d) ", i);
			dprintf(fd, "free at %p: %zu\n",
			    d->free_regions[i].p, d->free_regions[i].size);
		}
	}
}

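/*
 * Dump one pool: hash table statistics, the free chunk and page
 * caches, then every live region slot.  Regions bigger than
 * MALLOC_MAXCHUNK are page allocations; for chunk pages the size
 * field doubles as a pointer to the chunk_info struct.
 */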
static void
malloc_dump1(int fd, int poolno, struct dir_info *d)
{
	size_t i, realsize;

	dprintf(fd, "Malloc dir of %s pool %d at %p\n", __progname, poolno, d);
	if (d == NULL)
		return;
	dprintf(fd, "Region slots free %zu/%zu\n",
		d->regions_free, d->regions_total);
	dprintf(fd, "Finds %zu/%zu\n", d->finds,
	    d->find_collisions);
	dprintf(fd, "Inserts %zu/%zu\n", d->inserts,
	    d->insert_collisions);
	dprintf(fd, "Deletes %zu/%zu\n", d->deletes,
	    d->delete_moves);
	dprintf(fd, "Cheap reallocs %zu/%zu\n",
	    d->cheap_reallocs, d->cheap_realloc_tries);
	dprintf(fd, "In use %zu\n", d->malloc_used);
	dprintf(fd, "Guarded %zu\n", d->malloc_guarded);
	dump_free_chunk_info(fd, d);
	dump_free_page_info(fd, d);
	dprintf(fd,
	    "slot)  hash d  type               page                  f size [free/n]\n");
	for (i = 0; i < d->regions_total; i++) {
		if (d->r[i].p != NULL) {
			size_t h = hash(d->r[i].p) &
			    (d->regions_total - 1);
			dprintf(fd, "%4zx) #%4zx %zd ",
			    i, h, h - i);
			REALSIZE(realsize, &d->r[i]);
			if (realsize > MALLOC_MAXCHUNK) {
				putleakinfo(d->r[i].f, realsize, 1);
				dprintf(fd,
				    "pages %18p %18p %zu\n", d->r[i].p,
				    d->r[i].f, realsize);
			} else
				dump_chunk(fd,
				    (struct chunk_info *)d->r[i].size,
				    d->r[i].f, 0);
		}
	}
	dump_leaks(fd);
	dprintf(fd, "\n");
}

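/*
 * Dump a single pool to fd.  Chunks still parked in the delayed free
 * list are flushed first so they are not reported as leaks; errno is
 * preserved across the dump.
 */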
void
malloc_dump(int fd, int poolno, struct dir_info *pool)
{
	int i;
	void *p;
	struct region_info *r;
	int saved_errno = errno;

	if (pool == NULL)
		return;
	for (i = 0; i < MALLOC_DELAYED_CHUNK_MASK + 1; i++) {
		p = pool->delayed_chunks[i];
		if (p == NULL)
			continue;
		r = find(pool, p);
		if (r == NULL)
			wrterror(pool, "bogus pointer in malloc_dump %p", p);
		free_bytes(pool, r, p);
		pool->delayed_chunks[i] = NULL;
	}
	/* XXX leak when run multiple times */
	RBT_INIT(leaktree, &leakhead);
	malloc_dump1(fd, poolno, pool);
	errno = saved_errno;
}
DEF_WEAK(malloc_dump);

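/*
 * Dump every pool to fd.  A hypothetical caller, assuming a build
 * with MALLOC_STATS defined:
 *
 *	malloc_gdump(STDERR_FILENO);
 */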
void
malloc_gdump(int fd)
{
	int i;
	int saved_errno = errno;

	for (i = 0; i < _MALLOC_MUTEXES; i++)
		malloc_dump(fd, i, mopts.malloc_pool[i]);

	errno = saved_errno;
}
DEF_WEAK(malloc_gdump);

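/*
 * Exit-time dump, registered via atexit() when statistics are
 * enabled.  Note open() is not given O_CREAT, so "malloc.out" must
 * already exist in the current directory or the dump is skipped with
 * a warning.
 */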
static void
malloc_exit(void)
{
	int save_errno = errno, fd, i;

	fd = open("malloc.out", O_RDWR|O_APPEND);
	if (fd != -1) {
		dprintf(fd, "******** Start dump %s *******\n", __progname);
		dprintf(fd,
		    "MT=%d I=%d F=%d U=%d J=%d R=%d X=%d C=%d cache=%u G=%zu\n",
		    mopts.malloc_mt, mopts.internal_funcs,
		    mopts.malloc_freecheck,
		    mopts.malloc_freeunmap, mopts.malloc_junk,
		    mopts.malloc_realloc, mopts.malloc_xmalloc,
		    mopts.chunk_canaries, mopts.malloc_cache,
		    mopts.malloc_guard);

		for (i = 0; i < _MALLOC_MUTEXES; i++)
			malloc_dump(fd, i, mopts.malloc_pool[i]);
		dprintf(fd, "******** End dump %s *******\n", __progname);
		close(fd);
	} else
		dprintf(STDERR_FILENO,
		    "malloc() warning: Couldn't dump stats\n");
	errno = save_errno;
}

#endif /* MALLOC_STATS */