/*	$OpenBSD: malloc.c,v 1.232 2017/09/23 15:13:12 otto Exp $	*/
/*
 * Copyright (c) 2008, 2010, 2011, 2016 Otto Moerbeek <otto@drijf.net>
 * Copyright (c) 2012 Matthew Dempsky <matthew@openbsd.org>
 * Copyright (c) 2008 Damien Miller <djm@openbsd.org>
 * Copyright (c) 2000 Poul-Henning Kamp <phk@FreeBSD.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * If we meet some day, and you think this stuff is worth it, you
 * can buy me a beer in return. Poul-Henning Kamp
 */

/* #define MALLOC_STATS */

#include <sys/types.h>
#include <sys/param.h>	/* PAGE_SHIFT ALIGN */
#include <sys/queue.h>
#include <sys/mman.h>
#include <sys/uio.h>
#include <errno.h>
#include <stdarg.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <unistd.h>

#ifdef MALLOC_STATS
#include <sys/tree.h>
#include <fcntl.h>
#endif

#include "thread_private.h"
#include <tib.h>

#if defined(__mips64__)
#define MALLOC_PAGESHIFT	(14U)
#else
#define MALLOC_PAGESHIFT	(PAGE_SHIFT)
#endif

#define MALLOC_MINSHIFT		4
#define MALLOC_MAXSHIFT		(MALLOC_PAGESHIFT - 1)
#define MALLOC_PAGESIZE		(1UL << MALLOC_PAGESHIFT)
#define MALLOC_MINSIZE		(1UL << MALLOC_MINSHIFT)
#define MALLOC_PAGEMASK		(MALLOC_PAGESIZE - 1)
#define MASK_POINTER(p)		((void *)(((uintptr_t)(p)) & ~MALLOC_PAGEMASK))

#define MALLOC_MAXCHUNK		(1 << MALLOC_MAXSHIFT)
#define MALLOC_MAXCACHE		256
#define MALLOC_DELAYED_CHUNK_MASK	15
#define MALLOC_INITIAL_REGIONS	512
#define MALLOC_DEFAULT_CACHE	64
#define MALLOC_CHUNK_LISTS	4
#define CHUNK_CHECK_LENGTH	32

/*
 * We move allocations between half a page and a whole page towards the end,
 * subject to alignment constraints. This is the extra headroom we allow.
 * Set to zero to be the most strict.
 */
#define MALLOC_LEEWAY		0
#define MALLOC_MOVE_COND(sz)	((sz) - mopts.malloc_guard <		\
				    MALLOC_PAGESIZE - MALLOC_LEEWAY)
#define MALLOC_MOVE(p, sz)	(((char *)(p)) +			\
				    ((MALLOC_PAGESIZE - MALLOC_LEEWAY -	\
				    ((sz) - mopts.malloc_guard)) &	\
				    ~(MALLOC_MINSIZE - 1)))

#define PAGEROUND(x)  (((x) + (MALLOC_PAGEMASK)) & ~MALLOC_PAGEMASK)
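
/*
 * Illustrative example of the move logic above (not part of the
 * allocator; assumes a 4096-byte page, MALLOC_LEEWAY 0 and no guard):
 * a request of sz = 100 satisfies MALLOC_MOVE_COND, and
 * MALLOC_MOVE(p, 100) yields p + ((4096 - 100) & ~15) = p + 3984,
 * so the allocation ends within MALLOC_MINSIZE of the page boundary,
 * rounded down to 16-byte alignment.
 */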

/*
 * Byte values used for junk filling.  When the 'J' option is enabled,
 * memory is filled with SOME_JUNK right after allocation and with
 * SOME_FREEJUNK right before it is freed.
 */
#define SOME_JUNK		0xdb	/* deadbeef */
#define SOME_FREEJUNK		0xdf	/* dead, free */

#define MMAP(sz)	mmap(NULL, (sz), PROT_READ | PROT_WRITE, \
    MAP_ANON | MAP_PRIVATE, -1, 0)

#define MMAPA(a,sz)	mmap((a), (sz), PROT_READ | PROT_WRITE, \
    MAP_ANON | MAP_PRIVATE, -1, 0)

#define MQUERY(a, sz)	mquery((a), (sz), PROT_READ | PROT_WRITE, \
    MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0)

struct region_info {
	void *p;		/* page; low bits used to mark chunks */
	uintptr_t size;		/* size for pages, or chunk_info pointer */
#ifdef MALLOC_STATS
	void *f;		/* where allocated from */
#endif
};

LIST_HEAD(chunk_head, chunk_info);

struct dir_info {
	u_int32_t canary1;
	int active;			/* status of malloc */
	struct region_info *r;		/* region slots */
	size_t regions_total;		/* number of region slots */
	size_t regions_free;		/* number of free slots */
					/* lists of free chunk info structs */
	struct chunk_head chunk_info_list[MALLOC_MAXSHIFT + 1];
					/* lists of chunks with free slots */
	struct chunk_head chunk_dir[MALLOC_MAXSHIFT + 1][MALLOC_CHUNK_LISTS];
	size_t free_regions_size;	/* free pages cached */
					/* free pages cache */
	struct region_info free_regions[MALLOC_MAXCACHE];
					/* delayed free chunk slots */
	void *delayed_chunks[MALLOC_DELAYED_CHUNK_MASK + 1];
	size_t rbytesused;		/* random bytes used */
	char *func;			/* current function */
	int mutex;
	u_char rbytes[32];		/* random bytes */
	u_short chunk_start;
#ifdef MALLOC_STATS
	size_t inserts;
	size_t insert_collisions;
	size_t finds;
	size_t find_collisions;
	size_t deletes;
	size_t delete_moves;
	size_t cheap_realloc_tries;
	size_t cheap_reallocs;
	size_t malloc_used;		/* bytes allocated */
	size_t malloc_guarded;		/* bytes used for guards */
#define STATS_ADD(x,y)	((x) += (y))
#define STATS_SUB(x,y)	((x) -= (y))
#define STATS_INC(x)	((x)++)
#define STATS_ZERO(x)	((x) = 0)
#define STATS_SETF(x,y)	((x)->f = (y))
#else
#define STATS_ADD(x,y)	/* nothing */
#define STATS_SUB(x,y)	/* nothing */
#define STATS_INC(x)	/* nothing */
#define STATS_ZERO(x)	/* nothing */
#define STATS_SETF(x,y)	/* nothing */
#endif /* MALLOC_STATS */
	u_int32_t canary2;
};
#define DIR_INFO_RSZ	((sizeof(struct dir_info) + MALLOC_PAGEMASK) & \
			~MALLOC_PAGEMASK)

/*
 * This structure describes a page worth of chunks.
 *
 * How many bits per u_short in the bitmap
 */
#define MALLOC_BITS		(NBBY * sizeof(u_short))
struct chunk_info {
	LIST_ENTRY(chunk_info) entries;
	void *page;			/* pointer to the page */
	u_int32_t canary;
	u_short size;			/* size of this page's chunks */
	u_short shift;			/* how far to shift for this size */
	u_short free;			/* how many free chunks */
	u_short total;			/* how many chunks */
	u_short offset;			/* requested size table offset */
					/* which chunks are free */
	u_short bits[1];
};
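
/*
 * Sizing example for the bitmap above (illustrative, assumes a
 * 4096-byte page): 16-byte chunks give 256 chunks per page, so the
 * free bitmap needs howmany(256, MALLOC_BITS) == 256 / 16 == 16
 * u_shorts; alloc_chunk_info() over-allocates bits[] accordingly,
 * plus one u_short per chunk for requested sizes when canaries are on.
 */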

struct malloc_readonly {
	struct dir_info *malloc_pool[_MALLOC_MUTEXES];	/* Main bookkeeping information */
	int	malloc_mt;		/* multi-threaded mode? */
	int	malloc_freecheck;	/* Extensive double free check */
	int	malloc_freeunmap;	/* mprotect free pages PROT_NONE? */
	int	malloc_junk;		/* junk fill? */
	int	malloc_realloc;		/* always realloc? */
	int	malloc_xmalloc;		/* xmalloc behaviour? */
	int	chunk_canaries;		/* use canaries after chunks? */
	int	internal_funcs;		/* use better recallocarray/freezero? */
	u_int	malloc_cache;		/* free pages we cache */
	size_t	malloc_guard;		/* use guard pages after allocations? */
#ifdef MALLOC_STATS
	int	malloc_stats;		/* dump statistics at end */
#endif
	u_int32_t malloc_canary;	/* Matched against ones in malloc_pool */
};

/* This object is mapped PROT_READ after initialisation to prevent tampering */
static union {
	struct malloc_readonly mopts;
	u_char _pad[MALLOC_PAGESIZE];
} malloc_readonly __attribute__((aligned(MALLOC_PAGESIZE)));
#define mopts	malloc_readonly.mopts

char		*malloc_options;	/* compile-time options */

static u_char getrbyte(struct dir_info *d);
static __dead void wrterror(struct dir_info *d, char *msg, ...)
    __attribute__((__format__ (printf, 2, 3)));
static void fill_canary(char *ptr, size_t sz, size_t allocated);

#ifdef MALLOC_STATS
void malloc_dump(int, int, struct dir_info *);
PROTO_NORMAL(malloc_dump);
void malloc_gdump(int);
PROTO_NORMAL(malloc_gdump);
static void malloc_exit(void);
#define CALLER	__builtin_return_address(0)
#else
#define CALLER	NULL
#endif

/*
 * The low bits of r->p encode the size class: 0 means a region of at
 * least a page whose real size is held in r->size; otherwise the low
 * bits hold the chunk shift plus one, with 1 denoting malloc(0).
 */
#define REALSIZE(sz, r)						\
	(sz) = (uintptr_t)(r)->p & MALLOC_PAGEMASK,		\
	(sz) = ((sz) == 0 ? (r)->size : ((sz) == 1 ? 0 : (1 << ((sz)-1))))
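
/*
 * Worked example of the encoding (illustrative): a page of 2^7 == 128
 * byte chunks is entered into the hash table as pp | (7 + 1) by
 * omalloc_make_chunks(), so REALSIZE() sees low bits 8 and recovers
 * 1 << (8 - 1) == 128; a malloc(0) page is entered with low bits 1
 * and decodes to size 0.
 */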

static inline void
_MALLOC_LEAVE(struct dir_info *d)
{
	if (mopts.malloc_mt) {
		d->active--;
		_MALLOC_UNLOCK(d->mutex);
	}
}

static inline void
_MALLOC_ENTER(struct dir_info *d)
{
	if (mopts.malloc_mt) {
		_MALLOC_LOCK(d->mutex);
		d->active++;
	}
}

static inline size_t
hash(void *p)
{
	size_t sum;
	uintptr_t u;

	u = (uintptr_t)p >> MALLOC_PAGESHIFT;
	sum = u;
	sum = (sum << 7) - sum + (u >> 16);
#ifdef __LP64__
	sum = (sum << 7) - sum + (u >> 32);
	sum = (sum << 7) - sum + (u >> 48);
#endif
	return sum;
}
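
/*
 * Note on the hash (informational): (sum << 7) - sum is sum * 127, so
 * this is a small multiplicative hash folding in 16-bit slices of the
 * page number.  Because the page offset is shifted out first, any two
 * pointers into the same page hash identically, which find() relies
 * on after MASK_POINTER().
 */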

static inline
struct dir_info *getpool(void)
{
	if (!mopts.malloc_mt)
		return mopts.malloc_pool[0];
	else
		return mopts.malloc_pool[TIB_GET()->tib_tid &
		    (_MALLOC_MUTEXES - 1)];
}
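
/*
 * Pool selection example (illustrative; assumes a hypothetical
 * _MALLOC_MUTEXES of 4): threads with tids 5 and 9 would both map to
 * pool 5 & 3 == 9 & 3 == 1.  The pools only partition lock
 * contention, not correctness: ofree() and orealloc() search the
 * other pools when a pointer is not found in the caller's pool.
 */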

static __dead void
wrterror(struct dir_info *d, char *msg, ...)
{
	struct iovec	iov[3];
	char		pidbuf[80];
	char		buf[80];
	int		saved_errno = errno;
	va_list		ap;

	iov[0].iov_base = pidbuf;
	snprintf(pidbuf, sizeof(pidbuf), "%s(%d) in %s(): ", __progname,
	    getpid(), (d != NULL && d->func) ? d->func : "unknown");
	iov[0].iov_len = strlen(pidbuf);
	iov[1].iov_base = buf;
	va_start(ap, msg);
	vsnprintf(buf, sizeof(buf), msg, ap);
	va_end(ap);
	iov[1].iov_len = strlen(buf);
	iov[2].iov_base = "\n";
	iov[2].iov_len = 1;
	writev(STDERR_FILENO, iov, 3);

#ifdef MALLOC_STATS
	if (mopts.malloc_stats)
		malloc_gdump(STDERR_FILENO);
#endif /* MALLOC_STATS */

	errno = saved_errno;

	abort();
}

static void
rbytes_init(struct dir_info *d)
{
	arc4random_buf(d->rbytes, sizeof(d->rbytes));
	/* add 1 to account for using d->rbytes[0] */
	d->rbytesused = 1 + d->rbytes[0] % (sizeof(d->rbytes) / 2);
}

static inline u_char
getrbyte(struct dir_info *d)
{
	u_char x;

	if (d->rbytesused >= sizeof(d->rbytes))
		rbytes_init(d);
	x = d->rbytes[d->rbytesused++];
	return x;
}

/*
 * Cache maintenance.  We keep at most malloc_cache pages cached.
 * If the cache is becoming full, unmap pages in the cache for real,
 * and then add the region to the cache.  Unlike the regular region
 * data structure, the sizes in the cache are in MALLOC_PAGESIZE units.
 */
static void
unmap(struct dir_info *d, void *p, size_t sz, int clear)
{
	size_t psz = sz >> MALLOC_PAGESHIFT;
	size_t rsz, tounmap;
	struct region_info *r;
	u_int i, offset;

	if (sz != PAGEROUND(sz))
		wrterror(d, "munmap round");

	rsz = mopts.malloc_cache - d->free_regions_size;

	/*
	 * normally the cache holds recently freed regions, but if the region
	 * to unmap is larger than the cache size or we're clearing and the
	 * cache is full, just munmap
	 */
	if (psz > mopts.malloc_cache || (clear && rsz == 0)) {
		i = munmap(p, sz);
		if (i)
			wrterror(d, "munmap %p", p);
		STATS_SUB(d->malloc_used, sz);
		return;
	}
	tounmap = 0;
	if (psz > rsz)
		tounmap = psz - rsz;
	offset = getrbyte(d);
	for (i = 0; tounmap > 0 && i < mopts.malloc_cache; i++) {
		r = &d->free_regions[(i + offset) & (mopts.malloc_cache - 1)];
		if (r->p != NULL) {
			rsz = r->size << MALLOC_PAGESHIFT;
			if (munmap(r->p, rsz))
				wrterror(d, "munmap %p", r->p);
			r->p = NULL;
			if (tounmap > r->size)
				tounmap -= r->size;
			else
				tounmap = 0;
			d->free_regions_size -= r->size;
			r->size = 0;
			STATS_SUB(d->malloc_used, rsz);
		}
	}
	if (tounmap > 0)
		wrterror(d, "malloc cache underflow");
	for (i = 0; i < mopts.malloc_cache; i++) {
		r = &d->free_regions[(i + offset) & (mopts.malloc_cache - 1)];
		if (r->p == NULL) {
			if (clear)
				memset(p, 0, sz - mopts.malloc_guard);
			if (mopts.malloc_junk && !mopts.malloc_freeunmap) {
				size_t amt = mopts.malloc_junk == 1 ?
				    MALLOC_MAXCHUNK : sz;
				memset(p, SOME_FREEJUNK, amt);
			}
			if (mopts.malloc_freeunmap)
				mprotect(p, sz, PROT_NONE);
			r->p = p;
			r->size = psz;
			d->free_regions_size += psz;
			break;
		}
	}
	if (i == mopts.malloc_cache)
		wrterror(d, "malloc free slot lost");
	if (d->free_regions_size > mopts.malloc_cache)
		wrterror(d, "malloc cache overflow");
}
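
/*
 * Illustrative walk-through (not part of the allocator): freeing a
 * 3-page region into a 64-page cache that already holds 63 pages
 * first munmap()s cached regions, starting at a random slot, until
 * 2 more pages of room exist, then parks the region in a free slot,
 * junking it or marking it PROT_NONE according to the options.
 */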

static void
zapcacheregion(struct dir_info *d, void *p, size_t len)
{
	u_int i;
	struct region_info *r;
	size_t rsz;

	for (i = 0; i < mopts.malloc_cache; i++) {
		r = &d->free_regions[i];
		if (r->p >= p && r->p <= (void *)((char *)p + len)) {
			rsz = r->size << MALLOC_PAGESHIFT;
			if (munmap(r->p, rsz))
				wrterror(d, "munmap %p", r->p);
			r->p = NULL;
			d->free_regions_size -= r->size;
			r->size = 0;
			STATS_SUB(d->malloc_used, rsz);
		}
	}
}

static void *
map(struct dir_info *d, void *hint, size_t sz, int zero_fill)
{
	size_t psz = sz >> MALLOC_PAGESHIFT;
	struct region_info *r, *big = NULL;
	u_int i, offset;
	void *p;

	if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)(uintptr_t)d) ||
	    d->canary1 != ~d->canary2)
		wrterror(d, "internal struct corrupt");
	if (sz != PAGEROUND(sz))
		wrterror(d, "map round");

	if (!hint && psz > d->free_regions_size) {
		_MALLOC_LEAVE(d);
		p = MMAP(sz);
		_MALLOC_ENTER(d);
		if (p != MAP_FAILED)
			STATS_ADD(d->malloc_used, sz);
		/* zero fill not needed */
		return p;
	}
	offset = getrbyte(d);
	for (i = 0; i < mopts.malloc_cache; i++) {
		r = &d->free_regions[(i + offset) & (mopts.malloc_cache - 1)];
		if (r->p != NULL) {
			if (hint && r->p != hint)
				continue;
			if (r->size == psz) {
				p = r->p;
				r->p = NULL;
				r->size = 0;
				d->free_regions_size -= psz;
				if (mopts.malloc_freeunmap)
					mprotect(p, sz, PROT_READ | PROT_WRITE);
				if (zero_fill)
					memset(p, 0, sz);
				else if (mopts.malloc_junk == 2 &&
				    mopts.malloc_freeunmap)
					memset(p, SOME_FREEJUNK, sz);
				return p;
			} else if (r->size > psz)
				big = r;
		}
	}
	if (big != NULL) {
		r = big;
		p = r->p;
		r->p = (char *)r->p + (psz << MALLOC_PAGESHIFT);
		if (mopts.malloc_freeunmap)
			mprotect(p, sz, PROT_READ | PROT_WRITE);
		r->size -= psz;
		d->free_regions_size -= psz;
		if (zero_fill)
			memset(p, 0, sz);
		else if (mopts.malloc_junk == 2 && mopts.malloc_freeunmap)
			memset(p, SOME_FREEJUNK, sz);
		return p;
	}
	if (hint)
		return MAP_FAILED;
	if (d->free_regions_size > mopts.malloc_cache)
		wrterror(d, "malloc cache");
	_MALLOC_LEAVE(d);
	p = MMAP(sz);
	_MALLOC_ENTER(d);
	if (p != MAP_FAILED)
		STATS_ADD(d->malloc_used, sz);
	/* zero fill not needed */
	return p;
}

static void
omalloc_parseopt(char opt)
{
	switch (opt) {
	case '>':
		mopts.malloc_cache <<= 1;
		if (mopts.malloc_cache > MALLOC_MAXCACHE)
			mopts.malloc_cache = MALLOC_MAXCACHE;
		break;
	case '<':
		mopts.malloc_cache >>= 1;
		break;
	case 'c':
		mopts.chunk_canaries = 0;
		break;
	case 'C':
		mopts.chunk_canaries = 1;
		break;
#ifdef MALLOC_STATS
	case 'd':
		mopts.malloc_stats = 0;
		break;
	case 'D':
		mopts.malloc_stats = 1;
		break;
#endif /* MALLOC_STATS */
	case 'f':
		mopts.malloc_freecheck = 0;
		mopts.malloc_freeunmap = 0;
		break;
	case 'F':
		mopts.malloc_freecheck = 1;
		mopts.malloc_freeunmap = 1;
		break;
	case 'g':
		mopts.malloc_guard = 0;
		break;
	case 'G':
		mopts.malloc_guard = MALLOC_PAGESIZE;
		break;
	case 'j':
		if (mopts.malloc_junk > 0)
			mopts.malloc_junk--;
		break;
	case 'J':
		if (mopts.malloc_junk < 2)
			mopts.malloc_junk++;
		break;
	case 'r':
		mopts.malloc_realloc = 0;
		break;
	case 'R':
		mopts.malloc_realloc = 1;
		break;
	case 'u':
		mopts.malloc_freeunmap = 0;
		break;
	case 'U':
		mopts.malloc_freeunmap = 1;
		break;
	case 'x':
		mopts.malloc_xmalloc = 0;
		break;
	case 'X':
		mopts.malloc_xmalloc = 1;
		break;
	default: {
		static const char q[] = "malloc() warning: "
		    "unknown char in MALLOC_OPTIONS\n";
		write(STDERR_FILENO, q, sizeof(q) - 1);
		break;
	}
	}
}

static void
omalloc_init(void)
{
	char *p, *q, b[64];
	int i, j;

	/*
	 * Default options
	 */
	mopts.malloc_junk = 1;
	mopts.malloc_cache = MALLOC_DEFAULT_CACHE;

	for (i = 0; i < 3; i++) {
		switch (i) {
		case 0:
			j = readlink("/etc/malloc.conf", b, sizeof b - 1);
			if (j <= 0)
				continue;
			b[j] = '\0';
			p = b;
			break;
		case 1:
			if (issetugid() == 0)
				p = getenv("MALLOC_OPTIONS");
			else
				continue;
			break;
		case 2:
			p = malloc_options;
			break;
		default:
			p = NULL;
		}

		for (; p != NULL && *p != '\0'; p++) {
			switch (*p) {
			case 'S':
				for (q = "CFGJ"; *q != '\0'; q++)
					omalloc_parseopt(*q);
				mopts.malloc_cache = 0;
				break;
			case 's':
				for (q = "cgj"; *q != '\0'; q++)
					omalloc_parseopt(*q);
				mopts.malloc_cache = MALLOC_DEFAULT_CACHE;
				break;
			default:
				omalloc_parseopt(*p);
				break;
			}
		}
	}

#ifdef MALLOC_STATS
	if (mopts.malloc_stats && (atexit(malloc_exit) == -1)) {
		static const char q[] = "malloc() warning: atexit(2) failed."
		    " Will not be able to dump stats on exit\n";
		write(STDERR_FILENO, q, sizeof(q) - 1);
	}
#endif /* MALLOC_STATS */

	while ((mopts.malloc_canary = arc4random()) == 0)
		;
}

/*
 * Initialize a dir_info, which should have been cleared by caller
 */
static void
omalloc_poolinit(struct dir_info **dp)
{
	void *p;
	size_t d_avail, regioninfo_size;
	struct dir_info *d;
	int i, j;

	/*
	 * Allocate dir_info with a guard page on either side. Also
	 * randomise offset inside the page at which the dir_info
	 * lies (subject to alignment by 1 << MALLOC_MINSHIFT)
	 */
	if ((p = MMAP(DIR_INFO_RSZ + (MALLOC_PAGESIZE * 2))) == MAP_FAILED)
		wrterror(NULL, "malloc init mmap failed");
	mprotect(p, MALLOC_PAGESIZE, PROT_NONE);
	mprotect((char *)p + MALLOC_PAGESIZE + DIR_INFO_RSZ,
	    MALLOC_PAGESIZE, PROT_NONE);
	d_avail = (DIR_INFO_RSZ - sizeof(*d)) >> MALLOC_MINSHIFT;
	d = (struct dir_info *)((char *)p + MALLOC_PAGESIZE +
	    (arc4random_uniform(d_avail) << MALLOC_MINSHIFT));

	rbytes_init(d);
	d->regions_free = d->regions_total = MALLOC_INITIAL_REGIONS;
	regioninfo_size = d->regions_total * sizeof(struct region_info);
	d->r = MMAP(regioninfo_size);
	if (d->r == MAP_FAILED) {
		d->regions_total = 0;
		wrterror(NULL, "malloc init mmap failed");
	}
	for (i = 0; i <= MALLOC_MAXSHIFT; i++) {
		LIST_INIT(&d->chunk_info_list[i]);
		for (j = 0; j < MALLOC_CHUNK_LISTS; j++)
			LIST_INIT(&d->chunk_dir[i][j]);
	}
	STATS_ADD(d->malloc_used, regioninfo_size);
	d->canary1 = mopts.malloc_canary ^ (u_int32_t)(uintptr_t)d;
	d->canary2 = ~d->canary1;

	*dp = d;
}

static int
omalloc_grow(struct dir_info *d)
{
	size_t newtotal;
	size_t newsize;
	size_t mask;
	size_t i;
	struct region_info *p;

	if (d->regions_total > SIZE_MAX / sizeof(struct region_info) / 2 )
		return 1;

	newtotal = d->regions_total * 2;
	newsize = newtotal * sizeof(struct region_info);
	mask = newtotal - 1;

	p = MMAP(newsize);
	if (p == MAP_FAILED)
		return 1;

	STATS_ADD(d->malloc_used, newsize);
	STATS_ZERO(d->inserts);
	STATS_ZERO(d->insert_collisions);
	for (i = 0; i < d->regions_total; i++) {
		void *q = d->r[i].p;
		if (q != NULL) {
			size_t index = hash(q) & mask;
			STATS_INC(d->inserts);
			while (p[index].p != NULL) {
				index = (index - 1) & mask;
				STATS_INC(d->insert_collisions);
			}
			p[index] = d->r[i];
		}
	}
	/* prevent pages containing meta info from ending up in the cache */
	if (munmap(d->r, d->regions_total * sizeof(struct region_info)))
		wrterror(d, "munmap %p", (void *)d->r);
	else
		STATS_SUB(d->malloc_used,
		    d->regions_total * sizeof(struct region_info));
	d->regions_free = d->regions_free + d->regions_total;
	d->regions_total = newtotal;
	d->r = p;
	return 0;
}

static struct chunk_info *
alloc_chunk_info(struct dir_info *d, int bits)
{
	struct chunk_info *p;
	size_t size, count;

	if (bits == 0)
		count = MALLOC_PAGESIZE / MALLOC_MINSIZE;
	else
		count = MALLOC_PAGESIZE >> bits;

	size = howmany(count, MALLOC_BITS);
	size = sizeof(struct chunk_info) + (size - 1) * sizeof(u_short);
	if (mopts.chunk_canaries)
		size += count * sizeof(u_short);
	size = ALIGN(size);

	if (LIST_EMPTY(&d->chunk_info_list[bits])) {
		char *q;
		int i;

		q = MMAP(MALLOC_PAGESIZE);
		if (q == MAP_FAILED)
			return NULL;
		STATS_ADD(d->malloc_used, MALLOC_PAGESIZE);
		count = MALLOC_PAGESIZE / size;
		for (i = 0; i < count; i++, q += size)
			LIST_INSERT_HEAD(&d->chunk_info_list[bits],
			    (struct chunk_info *)q, entries);
	}
	p = LIST_FIRST(&d->chunk_info_list[bits]);
	LIST_REMOVE(p, entries);
	memset(p, 0, size);
	p->canary = d->canary1;
	return p;
}


/*
 * The hashtable uses the assumption that p is never NULL. This holds since
 * non-MAP_FIXED mappings with hint 0 start at BRKSIZ.
 */
static int
insert(struct dir_info *d, void *p, size_t sz, void *f)
{
	size_t index;
	size_t mask;
	void *q;

	if (d->regions_free * 4 < d->regions_total) {
		if (omalloc_grow(d))
			return 1;
	}
	mask = d->regions_total - 1;
	index = hash(p) & mask;
	q = d->r[index].p;
	STATS_INC(d->inserts);
	while (q != NULL) {
		index = (index - 1) & mask;
		q = d->r[index].p;
		STATS_INC(d->insert_collisions);
	}
	d->r[index].p = p;
	d->r[index].size = sz;
#ifdef MALLOC_STATS
	d->r[index].f = f;
#endif
	d->regions_free--;
	return 0;
}

static struct region_info *
find(struct dir_info *d, void *p)
{
	size_t index;
	size_t mask = d->regions_total - 1;
	void *q, *r;

	if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)(uintptr_t)d) ||
	    d->canary1 != ~d->canary2)
		wrterror(d, "internal struct corrupt");
	p = MASK_POINTER(p);
	index = hash(p) & mask;
	r = d->r[index].p;
	q = MASK_POINTER(r);
	STATS_INC(d->finds);
	while (q != p && r != NULL) {
		index = (index - 1) & mask;
		r = d->r[index].p;
		q = MASK_POINTER(r);
		STATS_INC(d->find_collisions);
	}
	return (q == p && r != NULL) ? &d->r[index] : NULL;
}

static void
delete(struct dir_info *d, struct region_info *ri)
{
	/* algorithm R, Knuth Vol III section 6.4 */
	size_t mask = d->regions_total - 1;
	size_t i, j, r;

	if (d->regions_total & (d->regions_total - 1))
		wrterror(d, "regions_total not 2^x");
	d->regions_free++;
	STATS_INC(d->deletes);

	i = ri - d->r;
	for (;;) {
		d->r[i].p = NULL;
		d->r[i].size = 0;
		j = i;
		for (;;) {
			i = (i - 1) & mask;
			if (d->r[i].p == NULL)
				return;
			r = hash(d->r[i].p) & mask;
			if ((i <= r && r < j) || (r < j && j < i) ||
			    (j < i && i <= r))
				continue;
			d->r[j] = d->r[i];
			STATS_INC(d->delete_moves);
			break;
		}

	}
}
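
/*
 * Informal sketch of the deletion above: with probing that walks
 * downwards ((index - 1) & mask), emptying slot j can strand an entry
 * in a lower slot i whose home slot r lies "between" j and i in probe
 * order; such an entry would no longer be reachable from r, so it is
 * moved up into j and the scan repeats until an empty slot proves no
 * further entries are affected.
 */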

/*
 * Allocate a page of chunks
 */
static struct chunk_info *
omalloc_make_chunks(struct dir_info *d, int bits, int listnum)
{
	struct chunk_info *bp;
	void		*pp;
	int		i, k;

	/* Allocate a new bucket */
	pp = map(d, NULL, MALLOC_PAGESIZE, 0);
	if (pp == MAP_FAILED)
		return NULL;

	bp = alloc_chunk_info(d, bits);
	if (bp == NULL) {
		unmap(d, pp, MALLOC_PAGESIZE, 0);
		return NULL;
	}

	/* memory protect the page allocated in the malloc(0) case */
	if (bits == 0) {
		bp->size = 0;
		bp->shift = 1;
		i = MALLOC_MINSIZE - 1;
		while (i >>= 1)
			bp->shift++;
		bp->total = bp->free = MALLOC_PAGESIZE >> bp->shift;
		bp->offset = 0xdead;
		bp->page = pp;

		k = mprotect(pp, MALLOC_PAGESIZE, PROT_NONE);
		if (k < 0) {
			unmap(d, pp, MALLOC_PAGESIZE, 0);
			LIST_INSERT_HEAD(&d->chunk_info_list[0], bp, entries);
			return NULL;
		}
	} else {
		bp->size = 1U << bits;
		bp->shift = bits;
		bp->total = bp->free = MALLOC_PAGESIZE >> bits;
		bp->offset = howmany(bp->total, MALLOC_BITS);
		bp->page = pp;
	}

	/* set all valid bits in the bitmap */
	k = bp->total;
	i = 0;

	/* Do a bunch at a time */
	for (; (k - i) >= MALLOC_BITS; i += MALLOC_BITS)
		bp->bits[i / MALLOC_BITS] = (u_short)~0U;

	for (; i < k; i++)
		bp->bits[i / MALLOC_BITS] |= (u_short)1U << (i % MALLOC_BITS);

	LIST_INSERT_HEAD(&d->chunk_dir[bits][listnum], bp, entries);

	bits++;
	if ((uintptr_t)pp & bits)
		wrterror(d, "pp & bits %p", pp);

	insert(d, (void *)((uintptr_t)pp | bits), (uintptr_t)bp, NULL);
	return bp;
}

static int
find_chunksize(size_t size)
{
	int		i, j;

	/*
	 * Don't bother with anything less than this,
	 * unless we have a malloc(0) request.
	 */
	if (size != 0 && size < MALLOC_MINSIZE)
		size = MALLOC_MINSIZE;

	/* Find the right bucket */
	if (size == 0)
		j = 0;
	else {
		j = MALLOC_MINSHIFT;
		i = (size - 1) >> (MALLOC_MINSHIFT - 1);
		while (i >>= 1)
			j++;
	}
	return j;
}
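
/*
 * Worked example (illustrative): for size 100, i starts as
 * 99 >> 3 == 12 and is shifted right three times before reaching
 * zero, so j becomes MALLOC_MINSHIFT + 3 == 7 and the request is
 * served from the 1 << 7 == 128 byte bucket.
 */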

/*
 * Allocate a chunk
 */
static void *
malloc_bytes(struct dir_info *d, size_t size, void *f)
{
	int		i, j, listnum;
	size_t		k;
	u_short		u, *lp;
	struct chunk_info *bp;

	if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)(uintptr_t)d) ||
	    d->canary1 != ~d->canary2)
		wrterror(d, "internal struct corrupt");

	j = find_chunksize(size);

	listnum = getrbyte(d) % MALLOC_CHUNK_LISTS;
	/* If it's empty, make a page more of that size chunks */
	if ((bp = LIST_FIRST(&d->chunk_dir[j][listnum])) == NULL) {
		bp = omalloc_make_chunks(d, j, listnum);
		if (bp == NULL)
			return NULL;
	}

	if (bp->canary != d->canary1)
		wrterror(d, "chunk info corrupted");

	i = d->chunk_start;
	if (bp->free > 1)
		i += getrbyte(d);
	if (i >= bp->total)
		i &= bp->total - 1;
	for (;;) {
		for (;;) {
			lp = &bp->bits[i / MALLOC_BITS];
			if (!*lp) {
				i += MALLOC_BITS;
				i &= ~(MALLOC_BITS - 1);
				if (i >= bp->total)
					i = 0;
			} else
				break;
		}
		k = i % MALLOC_BITS;
		u = 1 << k;
		if (*lp & u)
			break;
		if (++i >= bp->total)
			i = 0;
	}
	d->chunk_start += i + 1;
#ifdef MALLOC_STATS
	if (i == 0) {
		struct region_info *r = find(d, bp->page);
		r->f = f;
	}
#endif

	*lp ^= u;

	/* If there are no more free, remove from free-list */
	if (!--bp->free)
		LIST_REMOVE(bp, entries);

	/* Adjust to the real offset of that chunk */
	k += (lp - bp->bits) * MALLOC_BITS;

	if (mopts.chunk_canaries && size > 0)
		bp->bits[bp->offset + k] = size;

	k <<= bp->shift;

	if (bp->size > 0) {
		if (mopts.malloc_junk == 2)
			memset((char *)bp->page + k, SOME_JUNK, bp->size);
		else if (mopts.chunk_canaries)
			fill_canary((char *)bp->page + k, size, bp->size);
	}
	return ((char *)bp->page + k);
}
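
/*
 * Note (informational): the bitmap scan above starts at chunk_start
 * plus a random byte whenever more than one chunk is free, so
 * consecutive same-sized allocations land at unpredictable offsets
 * within the page instead of filling it front to back.
 */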

static void
fill_canary(char *ptr, size_t sz, size_t allocated)
{
	size_t check_sz = allocated - sz;

	if (check_sz > CHUNK_CHECK_LENGTH)
		check_sz = CHUNK_CHECK_LENGTH;
	memset(ptr + sz, SOME_JUNK, check_sz);
}

static void
validate_canary(struct dir_info *d, u_char *ptr, size_t sz, size_t allocated)
{
	size_t check_sz = allocated - sz;
	u_char *p, *q;

	if (check_sz > CHUNK_CHECK_LENGTH)
		check_sz = CHUNK_CHECK_LENGTH;
	p = ptr + sz;
	q = p + check_sz;

	while (p < q) {
		if (*p != SOME_JUNK) {
			wrterror(d, "chunk canary corrupted %p %#tx@%#zx%s",
			    ptr, p - ptr, sz, *p == SOME_FREEJUNK ?
			        " (double free?)" : "");
		}
		p++;
	}
}
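
/*
 * Canary example (illustrative): a malloc(100) served from a 128-byte
 * chunk with 'C' enabled gets bytes 100..127 set to SOME_JUNK by
 * fill_canary(); validate_canary() later re-checks those 28 bytes
 * (capped at CHUNK_CHECK_LENGTH) and aborts on the first mismatch,
 * catching small overflows past the requested size.
 */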

static uint32_t
find_chunknum(struct dir_info *d, struct region_info *r, void *ptr, int check)
{
	struct chunk_info *info;
	uint32_t chunknum;

	info = (struct chunk_info *)r->size;
	if (info->canary != d->canary1)
		wrterror(d, "chunk info corrupted");

	/* Find the chunk number on the page */
	chunknum = ((uintptr_t)ptr & MALLOC_PAGEMASK) >> info->shift;

	if ((uintptr_t)ptr & ((1U << (info->shift)) - 1))
		wrterror(d, "modified chunk-pointer %p", ptr);
	if (info->bits[chunknum / MALLOC_BITS] &
	    (1U << (chunknum % MALLOC_BITS)))
		wrterror(d, "chunk is already free %p", ptr);
	if (check && info->size > 0) {
		validate_canary(d, ptr, info->bits[info->offset + chunknum],
		    info->size);
	}
	return chunknum;
}

/*
 * Free a chunk, and possibly the page it's on, if the page becomes empty.
 */
static void
free_bytes(struct dir_info *d, struct region_info *r, void *ptr)
{
	struct chunk_head *mp;
	struct chunk_info *info;
	uint32_t chunknum;
	int listnum;

	info = (struct chunk_info *)r->size;
	chunknum = find_chunknum(d, r, ptr, 0);

	info->bits[chunknum / MALLOC_BITS] |= 1U << (chunknum % MALLOC_BITS);
	info->free++;

	if (info->free == 1) {
		/* Page became non-full */
		listnum = getrbyte(d) % MALLOC_CHUNK_LISTS;
		if (info->size != 0)
			mp = &d->chunk_dir[info->shift][listnum];
		else
			mp = &d->chunk_dir[0][listnum];

		LIST_INSERT_HEAD(mp, info, entries);
		return;
	}

	if (info->free != info->total)
		return;

	LIST_REMOVE(info, entries);

	if (info->size == 0 && !mopts.malloc_freeunmap)
		mprotect(info->page, MALLOC_PAGESIZE, PROT_READ | PROT_WRITE);
	unmap(d, info->page, MALLOC_PAGESIZE, 0);

	delete(d, r);
	if (info->size != 0)
		mp = &d->chunk_info_list[info->shift];
	else
		mp = &d->chunk_info_list[0];
	LIST_INSERT_HEAD(mp, info, entries);
}



static void *
omalloc(struct dir_info *pool, size_t sz, int zero_fill, void *f)
{
	void *p;
	size_t psz;

	if (sz > MALLOC_MAXCHUNK) {
		if (sz >= SIZE_MAX - mopts.malloc_guard - MALLOC_PAGESIZE) {
			errno = ENOMEM;
			return NULL;
		}
		sz += mopts.malloc_guard;
		psz = PAGEROUND(sz);
		p = map(pool, NULL, psz, zero_fill);
		if (p == MAP_FAILED) {
			errno = ENOMEM;
			return NULL;
		}
		if (insert(pool, p, sz, f)) {
			unmap(pool, p, psz, 0);
			errno = ENOMEM;
			return NULL;
		}
		if (mopts.malloc_guard) {
			if (mprotect((char *)p + psz - mopts.malloc_guard,
			    mopts.malloc_guard, PROT_NONE))
				wrterror(pool, "mprotect");
			STATS_ADD(pool->malloc_guarded, mopts.malloc_guard);
		}

		if (MALLOC_MOVE_COND(sz)) {
			/* fill whole allocation */
			if (mopts.malloc_junk == 2)
				memset(p, SOME_JUNK, psz - mopts.malloc_guard);
			/* shift towards the end */
			p = MALLOC_MOVE(p, sz);
			/* restore the zero fill that the junk fill above overwrote */
			if (zero_fill && mopts.malloc_junk == 2)
				memset(p, 0, sz - mopts.malloc_guard);
		} else {
			if (mopts.malloc_junk == 2) {
				if (zero_fill)
					memset((char *)p + sz - mopts.malloc_guard,
					    SOME_JUNK, psz - sz);
				else
					memset(p, SOME_JUNK,
					    psz - mopts.malloc_guard);
			}
			else if (mopts.chunk_canaries)
				fill_canary(p, sz - mopts.malloc_guard,
				    psz - mopts.malloc_guard);
		}

	} else {
		/* takes care of SOME_JUNK */
		p = malloc_bytes(pool, sz, f);
		if (zero_fill && p != NULL && sz > 0)
			memset(p, 0, sz);
	}

	return p;
}

/*
 * Common function for handling recursion.  Only
 * print the error message once, to avoid making the problem
 * potentially worse.
 */
static void
malloc_recurse(struct dir_info *d)
{
	static int noprint;

	if (noprint == 0) {
		noprint = 1;
		wrterror(d, "recursive call");
	}
	d->active--;
	_MALLOC_UNLOCK(d->mutex);
	errno = EDEADLK;
}

void
_malloc_init(int from_rthreads)
{
	int i, max;
	struct dir_info *d;

	_MALLOC_LOCK(0);
	if (!from_rthreads && mopts.malloc_pool[0]) {
		_MALLOC_UNLOCK(0);
		return;
	}
	if (!mopts.malloc_canary)
		omalloc_init();

	max = from_rthreads ? _MALLOC_MUTEXES : 1;
	if (((uintptr_t)&malloc_readonly & MALLOC_PAGEMASK) == 0)
		mprotect(&malloc_readonly, sizeof(malloc_readonly),
		     PROT_READ | PROT_WRITE);
	for (i = 0; i < max; i++) {
		if (mopts.malloc_pool[i])
			continue;
		omalloc_poolinit(&d);
		d->mutex = i;
		mopts.malloc_pool[i] = d;
	}

	if (from_rthreads)
		mopts.malloc_mt = 1;
	else
		mopts.internal_funcs = 1;

	/*
	 * Options have been set and will never be reset.
	 * Prevent further tampering with them.
	 */
	if (((uintptr_t)&malloc_readonly & MALLOC_PAGEMASK) == 0)
		mprotect(&malloc_readonly, sizeof(malloc_readonly), PROT_READ);
	_MALLOC_UNLOCK(0);
}
DEF_STRONG(_malloc_init);

void *
malloc(size_t size)
{
	void *r;
	struct dir_info *d;
	int saved_errno = errno;

	d = getpool();
	if (d == NULL) {
		_malloc_init(0);
		d = getpool();
	}
	_MALLOC_LOCK(d->mutex);
	d->func = "malloc";

	if (d->active++) {
		malloc_recurse(d);
		return NULL;
	}
	r = omalloc(d, size, 0, CALLER);
	d->active--;
	_MALLOC_UNLOCK(d->mutex);
	if (r == NULL && mopts.malloc_xmalloc)
		wrterror(d, "out of memory");
	if (r != NULL)
		errno = saved_errno;
	return r;
}
/*DEF_STRONG(malloc);*/

static void
validate_junk(struct dir_info *pool, void *p)
{
	struct region_info *r;
	size_t byte, sz;

	if (p == NULL)
		return;
	r = find(pool, p);
	if (r == NULL)
		wrterror(pool, "bogus pointer in validate_junk %p", p);
	REALSIZE(sz, r);
	if (sz > CHUNK_CHECK_LENGTH)
		sz = CHUNK_CHECK_LENGTH;
	for (byte = 0; byte < sz; byte++) {
		if (((unsigned char *)p)[byte] != SOME_FREEJUNK)
			wrterror(pool, "use after free %p", p);
	}
}

static void
ofree(struct dir_info *argpool, void *p, int clear, int check, size_t argsz)
{
	struct dir_info *pool;
	struct region_info *r;
	size_t sz;
	int i;

	pool = argpool;
	r = find(pool, p);
	if (r == NULL) {
		if (mopts.malloc_mt)  {
			for (i = 0; i < _MALLOC_MUTEXES; i++) {
				if (i == argpool->mutex)
					continue;
				pool->active--;
				_MALLOC_UNLOCK(pool->mutex);
				pool = mopts.malloc_pool[i];
				_MALLOC_LOCK(pool->mutex);
				pool->active++;
				r = find(pool, p);
				if (r != NULL)
					break;
			}
		}
		if (r == NULL)
			wrterror(pool, "bogus pointer (double free?) %p", p);
	}

	REALSIZE(sz, r);
	if (check) {
		if (sz <= MALLOC_MAXCHUNK) {
			if (mopts.chunk_canaries && sz > 0) {
				struct chunk_info *info =
				    (struct chunk_info *)r->size;
				uint32_t chunknum =
				    find_chunknum(pool, r, p, 0);

				if (info->bits[info->offset + chunknum] <
				    argsz)
					wrterror(pool, "recorded size %hu"
					    " < %zu",
					    info->bits[info->offset + chunknum],
					    argsz);
			} else {
				if (sz < argsz)
					wrterror(pool, "chunk size %zu < %zu",
					    sz, argsz);
			}
		} else if (sz - mopts.malloc_guard < argsz) {
			wrterror(pool, "recorded size %zu < %zu",
			    sz - mopts.malloc_guard, argsz);
		}
	}
	if (sz > MALLOC_MAXCHUNK) {
		if (!MALLOC_MOVE_COND(sz)) {
			if (r->p != p)
				wrterror(pool, "bogus pointer %p", p);
			if (mopts.chunk_canaries)
				validate_canary(pool, p,
				    sz - mopts.malloc_guard,
				    PAGEROUND(sz - mopts.malloc_guard));
		} else {
			/* shifted towards the end */
			if (p != MALLOC_MOVE(r->p, sz))
				wrterror(pool, "bogus moved pointer %p", p);
			p = r->p;
		}
		if (mopts.malloc_guard) {
			if (sz < mopts.malloc_guard)
				wrterror(pool, "guard size");
			if (!mopts.malloc_freeunmap) {
				if (mprotect((char *)p + PAGEROUND(sz) -
				    mopts.malloc_guard, mopts.malloc_guard,
				    PROT_READ | PROT_WRITE))
					wrterror(pool, "mprotect");
			}
			STATS_SUB(pool->malloc_guarded, mopts.malloc_guard);
		}
		unmap(pool, p, PAGEROUND(sz), clear);
		delete(pool, r);
	} else {
		/* Validate and optionally canary check */
		find_chunknum(pool, r, p, mopts.chunk_canaries);
		if (!clear) {
			void *tmp;
			int i;

			if (mopts.malloc_freecheck) {
				for (i = 0; i <= MALLOC_DELAYED_CHUNK_MASK; i++)
					if (p == pool->delayed_chunks[i])
						wrterror(pool,
						    "double free %p", p);
			}
			if (mopts.malloc_junk && sz > 0)
				memset(p, SOME_FREEJUNK, sz);
			i = getrbyte(pool) & MALLOC_DELAYED_CHUNK_MASK;
			tmp = p;
			p = pool->delayed_chunks[i];
			if (tmp == p)
				wrterror(pool, "double free %p", tmp);
			pool->delayed_chunks[i] = tmp;
			if (mopts.malloc_junk)
				validate_junk(pool, p);
		} else if (sz > 0)
			memset(p, 0, sz);
		if (p != NULL) {
			r = find(pool, p);
			if (r == NULL)
				wrterror(pool,
				    "bogus pointer (double free?) %p", p);
			free_bytes(pool, r, p);
		}
	}

	if (argpool != pool) {
		pool->active--;
		_MALLOC_UNLOCK(pool->mutex);
		_MALLOC_LOCK(argpool->mutex);
		argpool->active++;
	}
}
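
/*
 * Note (informational): on the chunk path above, a freed chunk is not
 * released immediately; it is junked, swapped into a random slot of
 * the 16-entry delayed_chunks ring, and the chunk evicted from that
 * slot is the one actually handed to free_bytes(), which makes
 * use-after-free timing harder to exploit.
 */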

void
free(void *ptr)
{
	struct dir_info *d;
	int saved_errno = errno;

	/* This is legal. */
	if (ptr == NULL)
		return;

	d = getpool();
	if (d == NULL)
		wrterror(d, "free() called before allocation");
	_MALLOC_LOCK(d->mutex);
	d->func = "free";
	if (d->active++) {
		malloc_recurse(d);
		return;
	}
	ofree(d, ptr, 0, 0, 0);
	d->active--;
	_MALLOC_UNLOCK(d->mutex);
	errno = saved_errno;
}
/*DEF_STRONG(free);*/

static void
freezero_p(void *ptr, size_t sz)
{
	explicit_bzero(ptr, sz);
	free(ptr);
}

void
freezero(void *ptr, size_t sz)
{
	struct dir_info *d;
	int saved_errno = errno;

	/* This is legal. */
	if (ptr == NULL)
		return;

	if (!mopts.internal_funcs)
		return freezero_p(ptr, sz);

	d = getpool();
	if (d == NULL)
		wrterror(d, "freezero() called before allocation");
	_MALLOC_LOCK(d->mutex);
	d->func = "freezero";
	if (d->active++) {
		malloc_recurse(d);
		return;
	}
	ofree(d, ptr, 1, 1, sz);
	d->active--;
	_MALLOC_UNLOCK(d->mutex);
	errno = saved_errno;
}
DEF_WEAK(freezero);

static void *
orealloc(struct dir_info *argpool, void *p, size_t newsz, void *f)
{
	struct dir_info *pool;
	struct region_info *r;
	struct chunk_info *info;
	size_t oldsz, goldsz, gnewsz;
	void *q, *ret;
	int i;
	uint32_t chunknum;

	pool = argpool;

	if (p == NULL)
		return omalloc(pool, newsz, 0, f);

	r = find(pool, p);
	if (r == NULL) {
		if (mopts.malloc_mt) {
			for (i = 0; i < _MALLOC_MUTEXES; i++) {
				if (i == argpool->mutex)
					continue;
				pool->active--;
				_MALLOC_UNLOCK(pool->mutex);
				pool = mopts.malloc_pool[i];
				_MALLOC_LOCK(pool->mutex);
				pool->active++;
				r = find(pool, p);
				if (r != NULL)
					break;
			}
		}
		if (r == NULL)
			wrterror(pool, "bogus pointer (double free?) %p", p);
	}
	if (newsz >= SIZE_MAX - mopts.malloc_guard - MALLOC_PAGESIZE) {
		errno = ENOMEM;
		ret = NULL;
		goto done;
	}

	REALSIZE(oldsz, r);
	if (mopts.chunk_canaries && oldsz <= MALLOC_MAXCHUNK) {
		chunknum = find_chunknum(pool, r, p, 0);
		info = (struct chunk_info *)r->size;
	}

	goldsz = oldsz;
	if (oldsz > MALLOC_MAXCHUNK) {
		if (oldsz < mopts.malloc_guard)
			wrterror(pool, "guard size");
		oldsz -= mopts.malloc_guard;
	}

	gnewsz = newsz;
	if (gnewsz > MALLOC_MAXCHUNK)
		gnewsz += mopts.malloc_guard;

	if (newsz > MALLOC_MAXCHUNK && oldsz > MALLOC_MAXCHUNK &&
	    !mopts.malloc_realloc) {
		/* First case: from an n-page-sized allocation to an
		   m-page-sized allocation, m > n */
		size_t roldsz = PAGEROUND(goldsz);
		size_t rnewsz = PAGEROUND(gnewsz);

		if (rnewsz > roldsz) {
			/* try to extend existing region */
			if (!mopts.malloc_guard) {
				void *hint = (char *)r->p + roldsz;
				size_t needed = rnewsz - roldsz;

				STATS_INC(pool->cheap_realloc_tries);
				q = map(pool, hint, needed, 0);
				if (q == hint)
					goto gotit;
				zapcacheregion(pool, hint, needed);
				q = MQUERY(hint, needed);
				if (q == hint)
					q = MMAPA(hint, needed);
				else
					q = MAP_FAILED;
				if (q == hint) {
gotit:
					STATS_ADD(pool->malloc_used, needed);
					if (mopts.malloc_junk == 2)
						memset(q, SOME_JUNK, needed);
					r->size = gnewsz;
					if (r->p != p) {
						/* old pointer is moved */
						memmove(r->p, p, oldsz);
						p = r->p;
					}
					if (mopts.chunk_canaries)
						fill_canary(p, newsz,
						    PAGEROUND(newsz));
					STATS_SETF(r, f);
					STATS_INC(pool->cheap_reallocs);
					ret = p;
					goto done;
				} else if (q != MAP_FAILED) {
					if (munmap(q, needed))
						wrterror(pool, "munmap %p", q);
				}
			}
		} else if (rnewsz < roldsz) {
			/* shrink number of pages */
			if (mopts.malloc_guard) {
				if (mprotect((char *)r->p + roldsz -
				    mopts.malloc_guard, mopts.malloc_guard,
				    PROT_READ | PROT_WRITE))
					wrterror(pool, "mprotect");
				if (mprotect((char *)r->p + rnewsz -
				    mopts.malloc_guard, mopts.malloc_guard,
				    PROT_NONE))
					wrterror(pool, "mprotect");
			}
			unmap(pool, (char *)r->p + rnewsz, roldsz - rnewsz, 0);
			r->size = gnewsz;
			if (MALLOC_MOVE_COND(gnewsz)) {
				void *pp = MALLOC_MOVE(r->p, gnewsz);
				memmove(pp, p, newsz);
				p = pp;
			} else if (mopts.chunk_canaries)
				fill_canary(p, newsz, PAGEROUND(newsz));
			STATS_SETF(r, f);
			ret = p;
			goto done;
		} else {
			/* number of pages remains the same */
			void *pp = r->p;

			r->size = gnewsz;
			if (MALLOC_MOVE_COND(gnewsz))
				pp = MALLOC_MOVE(r->p, gnewsz);
			if (p != pp) {
				memmove(pp, p, oldsz < newsz ? oldsz : newsz);
				p = pp;
			}
			if (p == r->p) {
				if (newsz > oldsz && mopts.malloc_junk == 2)
					memset((char *)p + newsz, SOME_JUNK,
					    rnewsz - mopts.malloc_guard -
					    newsz);
				if (mopts.chunk_canaries)
					fill_canary(p, newsz, PAGEROUND(newsz));
			}
			STATS_SETF(r, f);
			ret = p;
			goto done;
		}
	}
	if (oldsz <= MALLOC_MAXCHUNK && oldsz > 0 &&
	    newsz <= MALLOC_MAXCHUNK && newsz > 0 &&
	    1 << find_chunksize(newsz) == oldsz && !mopts.malloc_realloc) {
		/* do not reallocate if the new size fits well in the existing chunk */
		if (mopts.malloc_junk == 2)
			memset((char *)p + newsz, SOME_JUNK, oldsz - newsz);
		if (mopts.chunk_canaries) {
			info->bits[info->offset + chunknum] = newsz;
			fill_canary(p, newsz, info->size);
		}
		STATS_SETF(r, f);
		ret = p;
	} else if (newsz != oldsz || mopts.malloc_realloc) {
		/* create new allocation */
		q = omalloc(pool, newsz, 0, f);
		if (q == NULL) {
			ret = NULL;
			goto done;
		}
		if (newsz != 0 && oldsz != 0)
			memcpy(q, p, oldsz < newsz ? oldsz : newsz);
		ofree(pool, p, 0, 0, 0);
		ret = q;
	} else {
		/* oldsz == newsz */
		if (newsz != 0)
			wrterror(pool, "realloc internal inconsistency");
		STATS_SETF(r, f);
		ret = p;
	}
done:
	if (argpool != pool) {
		pool->active--;
		_MALLOC_UNLOCK(pool->mutex);
		_MALLOC_LOCK(argpool->mutex);
		argpool->active++;
	}
	return ret;
}

void *
realloc(void *ptr, size_t size)
{
	struct dir_info *d;
	void *r;
	int saved_errno = errno;

	d = getpool();
	if (d == NULL) {
		_malloc_init(0);
		d = getpool();
	}
	_MALLOC_LOCK(d->mutex);
	d->func = "realloc";
	if (d->active++) {
		malloc_recurse(d);
		return NULL;
	}
	r = orealloc(d, ptr, size, CALLER);

	d->active--;
	_MALLOC_UNLOCK(d->mutex);
	if (r == NULL && mopts.malloc_xmalloc)
		wrterror(d, "out of memory");
	if (r != NULL)
		errno = saved_errno;
	return r;
}
/*DEF_STRONG(realloc);*/


/*
 * This is sqrt(SIZE_MAX+1), as s1*s2 <= SIZE_MAX
 * if both s1 < MUL_NO_OVERFLOW and s2 < MUL_NO_OVERFLOW
 */
#define MUL_NO_OVERFLOW	(1UL << (sizeof(size_t) * 4))
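
/*
 * Example (informational): on a 64-bit system MUL_NO_OVERFLOW is
 * 2^32, so when both nmemb and size are below 2^32 their product
 * fits in a size_t and cannot wrap; the SIZE_MAX / nmemb < size
 * division below is only needed when at least one operand is 2^32
 * or larger.
 */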

void *
calloc(size_t nmemb, size_t size)
{
	struct dir_info *d;
	void *r;
	int saved_errno = errno;

	d = getpool();
	if (d == NULL) {
		_malloc_init(0);
		d = getpool();
	}
	_MALLOC_LOCK(d->mutex);
	d->func = "calloc";
	if ((nmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
	    nmemb > 0 && SIZE_MAX / nmemb < size) {
		_MALLOC_UNLOCK(d->mutex);
		if (mopts.malloc_xmalloc)
			wrterror(d, "out of memory");
		errno = ENOMEM;
		return NULL;
	}

	if (d->active++) {
		malloc_recurse(d);
		return NULL;
	}

	size *= nmemb;
	r = omalloc(d, size, 1, CALLER);

	d->active--;
	_MALLOC_UNLOCK(d->mutex);
	if (r == NULL && mopts.malloc_xmalloc)
		wrterror(d, "out of memory");
	if (r != NULL)
		errno = saved_errno;
	return r;
}
/*DEF_STRONG(calloc);*/

static void *
orecallocarray(struct dir_info *argpool, void *p, size_t oldsize,
    size_t newsize, void *f)
{
	struct dir_info *pool;
	struct region_info *r;
	void *newptr;
	size_t sz;
	int i;

	pool = argpool;

	if (p == NULL)
		return omalloc(pool, newsize, 1, f);

	if (oldsize == newsize)
		return p;

	r = find(pool, p);
	if (r == NULL) {
		if (mopts.malloc_mt) {
			for (i = 0; i < _MALLOC_MUTEXES; i++) {
				if (i == argpool->mutex)
					continue;
				pool->active--;
				_MALLOC_UNLOCK(pool->mutex);
				pool = mopts.malloc_pool[i];
				_MALLOC_LOCK(pool->mutex);
				pool->active++;
				r = find(pool, p);
				if (r != NULL)
					break;
			}
		}
		if (r == NULL)
			wrterror(pool, "bogus pointer (double free?) %p", p);
	}

	REALSIZE(sz, r);
	if (sz <= MALLOC_MAXCHUNK) {
		if (mopts.chunk_canaries && sz > 0) {
			struct chunk_info *info = (struct chunk_info *)r->size;
			uint32_t chunknum = find_chunknum(pool, r, p, 0);

			if (info->bits[info->offset + chunknum] != oldsize)
				wrterror(pool, "recorded old size %hu != %zu",
				    info->bits[info->offset + chunknum],
				    oldsize);
		}
	} else if (oldsize != sz - mopts.malloc_guard)
		wrterror(pool, "recorded old size %zu != %zu",
		    sz - mopts.malloc_guard, oldsize);

	newptr = omalloc(pool, newsize, 0, f);
	if (newptr == NULL)
		goto done;

	if (newsize > oldsize) {
		memcpy(newptr, p, oldsize);
		memset((char *)newptr + oldsize, 0, newsize - oldsize);
	} else
		memcpy(newptr, p, newsize);

	ofree(pool, p, 1, 0, 0);

done:
	if (argpool != pool) {
		pool->active--;
		_MALLOC_UNLOCK(pool->mutex);
		_MALLOC_LOCK(argpool->mutex);
		argpool->active++;
	}

	return newptr;
}

static void *
recallocarray_p(void *ptr, size_t oldnmemb, size_t newnmemb, size_t size)
{
	size_t oldsize, newsize;
	void *newptr;

	if (ptr == NULL)
		return calloc(newnmemb, size);

	if ((newnmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
	    newnmemb > 0 && SIZE_MAX / newnmemb < size) {
		errno = ENOMEM;
		return NULL;
	}
	newsize = newnmemb * size;

	if ((oldnmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
	    oldnmemb > 0 && SIZE_MAX / oldnmemb < size) {
		errno = EINVAL;
		return NULL;
	}
	oldsize = oldnmemb * size;

	/*
	 * Don't bother too much if we're shrinking just a bit;
	 * we do not shrink for a series of small steps, oh well.
	 */
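	/*
	 * Example (illustrative): shrinking a 4000-byte array to 3900
	 * bytes leaves d == 100, which is both under oldsize / 2 and
	 * under MALLOC_PAGESIZE, so the tail is zeroed in place and
	 * no reallocation happens.
	 */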
	if (newsize <= oldsize) {
		size_t d = oldsize - newsize;

		if (d < oldsize / 2 && d < MALLOC_PAGESIZE) {
			memset((char *)ptr + newsize, 0, d);
			return ptr;
		}
	}

	newptr = malloc(newsize);
	if (newptr == NULL)
		return NULL;

	if (newsize > oldsize) {
		memcpy(newptr, ptr, oldsize);
		memset((char *)newptr + oldsize, 0, newsize - oldsize);
	} else
		memcpy(newptr, ptr, newsize);

	explicit_bzero(ptr, oldsize);
	free(ptr);

	return newptr;
}

void *
recallocarray(void *ptr, size_t oldnmemb, size_t newnmemb, size_t size)
{
	struct dir_info *d;
	size_t oldsize = 0, newsize;
	void *r;
	int saved_errno = errno;

	if (!mopts.internal_funcs)
		return recallocarray_p(ptr, oldnmemb, newnmemb, size);

	d = getpool();
	if (d == NULL) {
		_malloc_init(0);
		d = getpool();
	}

	_MALLOC_LOCK(d->mutex);
	d->func = "recallocarray";

	if ((newnmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
	    newnmemb > 0 && SIZE_MAX / newnmemb < size) {
		_MALLOC_UNLOCK(d->mutex);
		if (mopts.malloc_xmalloc)
			wrterror(d, "out of memory");
		errno = ENOMEM;
		return NULL;
	}
	newsize = newnmemb * size;

	if (ptr != NULL) {
		if ((oldnmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
		    oldnmemb > 0 && SIZE_MAX / oldnmemb < size) {
			_MALLOC_UNLOCK(d->mutex);
			errno = EINVAL;
			return NULL;
		}
		oldsize = oldnmemb * size;
	}

	if (d->active++) {
		malloc_recurse(d);
		return NULL;
	}

	r = orecallocarray(d, ptr, oldsize, newsize, CALLER);

	d->active--;
	_MALLOC_UNLOCK(d->mutex);
	if (r == NULL && mopts.malloc_xmalloc)
		wrterror(d, "out of memory");
	if (r != NULL)
		errno = saved_errno;
	return r;
}
DEF_WEAK(recallocarray);


static void *
mapalign(struct dir_info *d, size_t alignment, size_t sz, int zero_fill)
{
	char *p, *q;

	if (alignment < MALLOC_PAGESIZE || ((alignment - 1) & alignment) != 0)
		wrterror(d, "mapalign bad alignment");
	if (sz != PAGEROUND(sz))
		wrterror(d, "mapalign round");

	/* Allocate sz + alignment bytes of memory, which must include a
	 * subrange of size bytes that is properly aligned.  Unmap the
	 * other bytes, and then return that subrange.
	 */

	/* We need sz + alignment to fit into a size_t. */
	if (alignment > SIZE_MAX - sz)
		return MAP_FAILED;

	p = map(d, NULL, sz + alignment, zero_fill);
	if (p == MAP_FAILED)
		return MAP_FAILED;
	q = (char *)(((uintptr_t)p + alignment - 1) & ~(alignment - 1));
	if (q != p) {
		if (munmap(p, q - p))
			wrterror(d, "munmap %p", p);
	}
	if (munmap(q + sz, alignment - (q - p)))
		wrterror(d, "munmap %p", q + sz);
	STATS_SUB(d->malloc_used, alignment);

	return q;
}

static void *
omemalign(struct dir_info *pool, size_t alignment, size_t sz, int zero_fill,
    void *f)
{
	size_t psz;
	void *p;

	/* If between half a page and a page, avoid MALLOC_MOVE. */
	if (sz > MALLOC_MAXCHUNK && sz < MALLOC_PAGESIZE)
		sz = MALLOC_PAGESIZE;
	if (alignment <= MALLOC_PAGESIZE) {
		/*
		 * max(size, alignment) is enough to assure the requested
		 * alignment, since the allocator always allocates
		 * power-of-two blocks.
		 */
		if (sz < alignment)
			sz = alignment;
		return omalloc(pool, sz, zero_fill, f);
	}

	if (sz >= SIZE_MAX - mopts.malloc_guard - MALLOC_PAGESIZE) {
		errno = ENOMEM;
		return NULL;
	}

	sz += mopts.malloc_guard;
	psz = PAGEROUND(sz);

	p = mapalign(pool, alignment, psz, zero_fill);
	if (p == MAP_FAILED) {
		errno = ENOMEM;
		return NULL;
	}

	if (insert(pool, p, sz, f)) {
		unmap(pool, p, psz, 0);
		errno = ENOMEM;
		return NULL;
	}

	if (mopts.malloc_guard) {
		if (mprotect((char *)p + psz - mopts.malloc_guard,
		    mopts.malloc_guard, PROT_NONE))
			wrterror(pool, "mprotect");
		STATS_ADD(pool->malloc_guarded, mopts.malloc_guard);
	}

	if (mopts.malloc_junk == 2) {
		if (zero_fill)
			memset((char *)p + sz - mopts.malloc_guard,
			    SOME_JUNK, psz - sz);
		else
			memset(p, SOME_JUNK, psz - mopts.malloc_guard);
	}
	else if (mopts.chunk_canaries)
		fill_canary(p, sz - mopts.malloc_guard,
		    psz - mopts.malloc_guard);

	return p;
}

int
posix_memalign(void **memptr, size_t alignment, size_t size)
{
	struct dir_info *d;
	int res, saved_errno = errno;
	void *r;

	/* Make sure that alignment is a large enough power of 2. */
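	/*
	 * (alignment - 1) & alignment clears the lowest set bit and
	 * is therefore zero only for powers of two: 24 & 23 == 16 is
	 * rejected, 32 & 31 == 0 is accepted.
	 */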
	if (((alignment - 1) & alignment) != 0 || alignment < sizeof(void *))
		return EINVAL;

	d = getpool();
	if (d == NULL) {
		_malloc_init(0);
		d = getpool();
	}
	_MALLOC_LOCK(d->mutex);
	d->func = "posix_memalign";
	if (d->active++) {
		malloc_recurse(d);
		goto err;
	}
	r = omemalign(d, alignment, size, 0, CALLER);
	d->active--;
	_MALLOC_UNLOCK(d->mutex);
	if (r == NULL) {
		if (mopts.malloc_xmalloc)
			wrterror(d, "out of memory");
		goto err;
	}
	errno = saved_errno;
	*memptr = r;
	return 0;

err:
	res = errno;
	errno = saved_errno;
	return res;
}
/*DEF_STRONG(posix_memalign);*/
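
/*
 * Usage sketch (hypothetical caller): request a 64-byte-aligned
 * buffer.  Note the error is returned, not stored in errno:
 *
 *	void *p;
 *	int error;
 *
 *	error = posix_memalign(&p, 64, 4096);
 *	if (error != 0)
 *		errx(1, "posix_memalign: %s", strerror(error));
 */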

#ifdef MALLOC_STATS

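/*
 * Leak reporting: allocations still live at dump time are aggregated
 * per caller address f in a red-black tree and printed as one summary
 * line per caller.
 */
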
struct malloc_leak {
	void *f;
	size_t total_size;
	int count;
};

struct leaknode {
	RBT_ENTRY(leaknode) entry;
	struct malloc_leak d;
};

static inline int
leakcmp(const struct leaknode *e1, const struct leaknode *e2)
{
	return e1->d.f < e2->d.f ? -1 : e1->d.f > e2->d.f;
}

static RBT_HEAD(leaktree, leaknode) leakhead;
RBT_PROTOTYPE(leaktree, leaknode, entry, leakcmp);
RBT_GENERATE(leaktree, leaknode, entry, leakcmp);

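/*
 * Record cnt live allocations of sz bytes by caller f.  Nodes are
 * carved out of privately mmap'd pages in bump-allocator style so
 * that leak accounting never recurses into malloc itself; after a
 * failed mapping (page == MAP_FAILED) further records are dropped.
 */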
static void
putleakinfo(void *f, size_t sz, int cnt)
{
	struct leaknode key, *p;
	static struct leaknode *page;
	static int used;

	if (cnt == 0 || page == MAP_FAILED)
		return;

	key.d.f = f;
	p = RBT_FIND(leaktree, &leakhead, &key);
	if (p == NULL) {
		if (page == NULL ||
		    used >= MALLOC_PAGESIZE / sizeof(struct leaknode)) {
			page = MMAP(MALLOC_PAGESIZE);
			if (page == MAP_FAILED)
				return;
			used = 0;
		}
		p = &page[used++];
		p->d.f = f;
		p->d.total_size = sz * cnt;
		p->d.count = cnt;
		RBT_INSERT(leaktree, &leakhead, p);
	} else {
		p->d.total_size += sz * cnt;
		p->d.count += cnt;
	}
}

static struct malloc_leak *malloc_leaks;

static void
writestr(int fd, const char *p)
{
	write(fd, p, strlen(p));
}

static void
dump_leaks(int fd)
{
	struct leaknode *p;
	char buf[64];
	int i = 0;

	writestr(fd, "Leak report\n");
	writestr(fd, "                 f     sum      #    avg\n");
	/* XXX only one page of summary */
	if (malloc_leaks == NULL)
		malloc_leaks = MMAP(MALLOC_PAGESIZE);
	if (malloc_leaks != MAP_FAILED)
		memset(malloc_leaks, 0, MALLOC_PAGESIZE);
	RBT_FOREACH(p, leaktree, &leakhead) {
		snprintf(buf, sizeof(buf), "%18p %7zu %6d %6zu\n", p->d.f,
		    p->d.total_size, p->d.count, p->d.total_size / p->d.count);
		write(fd, buf, strlen(buf));
		if (malloc_leaks == MAP_FAILED ||
		    i >= MALLOC_PAGESIZE / sizeof(struct malloc_leak))
			continue;
		malloc_leaks[i].f = p->d.f;
		malloc_leaks[i].total_size = p->d.total_size;
		malloc_leaks[i].count = p->d.count;
		i++;
	}
}

static void
dump_chunk(int fd, struct chunk_info *p, void *f, int fromfreelist)
{
	char buf[64];

	while (p != NULL) {
		snprintf(buf, sizeof(buf), "chunk %18p %18p %4d %d/%d\n",
		    p->page, ((p->bits[0] & 1) ? NULL : f),
		    p->size, p->free, p->total);
		write(fd, buf, strlen(buf));
		if (!fromfreelist) {
			if (p->bits[0] & 1)
				putleakinfo(NULL, p->size, p->total - p->free);
			else {
				putleakinfo(f, p->size, 1);
				putleakinfo(NULL, p->size,
				    p->total - p->free - 1);
			}
			break;
		}
		p = LIST_NEXT(p, entries);
		if (p != NULL)
			writestr(fd, "        ");
	}
}

static void
dump_free_chunk_info(int fd, struct dir_info *d)
{
	char buf[64];
	int i, j, count;
	struct chunk_info *p;

	writestr(fd, "Free chunk structs:\n");
	for (i = 0; i <= MALLOC_MAXSHIFT; i++) {
		count = 0;
		LIST_FOREACH(p, &d->chunk_info_list[i], entries)
			count++;
		for (j = 0; j < MALLOC_CHUNK_LISTS; j++) {
			p = LIST_FIRST(&d->chunk_dir[i][j]);
			if (p == NULL && count == 0)
				continue;
			snprintf(buf, sizeof(buf), "%2d) %3d ", i, count);
			write(fd, buf, strlen(buf));
			if (p != NULL)
				dump_chunk(fd, p, NULL, 1);
			else
				write(fd, "\n", 1);
		}
	}
}

static void
dump_free_page_info(int fd, struct dir_info *d)
{
	char buf[64];
	int i;

	snprintf(buf, sizeof(buf), "Free pages cached: %zu\n",
	    d->free_regions_size);
	write(fd, buf, strlen(buf));
	for (i = 0; i < mopts.malloc_cache; i++) {
		if (d->free_regions[i].p != NULL) {
			snprintf(buf, sizeof(buf), "%2d) ", i);
			write(fd, buf, strlen(buf));
			snprintf(buf, sizeof(buf), "free at %p: %zu\n",
			    d->free_regions[i].p, d->free_regions[i].size);
			write(fd, buf, strlen(buf));
		}
	}
}

static void
malloc_dump1(int fd, int poolno, struct dir_info *d)
{
	char buf[100];
	size_t i, realsize;

	snprintf(buf, sizeof(buf), "Malloc dir of %s pool %d at %p\n",
	    __progname, poolno, d);
	write(fd, buf, strlen(buf));
	if (d == NULL)
		return;
	snprintf(buf, sizeof(buf), "Region slots free %zu/%zu\n",
	    d->regions_free, d->regions_total);
	write(fd, buf, strlen(buf));
	snprintf(buf, sizeof(buf), "Finds %zu/%zu\n", d->finds,
	    d->find_collisions);
	write(fd, buf, strlen(buf));
	snprintf(buf, sizeof(buf), "Inserts %zu/%zu\n", d->inserts,
	    d->insert_collisions);
	write(fd, buf, strlen(buf));
	snprintf(buf, sizeof(buf), "Deletes %zu/%zu\n", d->deletes,
	    d->delete_moves);
	write(fd, buf, strlen(buf));
	snprintf(buf, sizeof(buf), "Cheap reallocs %zu/%zu\n",
	    d->cheap_reallocs, d->cheap_realloc_tries);
	write(fd, buf, strlen(buf));
	snprintf(buf, sizeof(buf), "In use %zu\n", d->malloc_used);
	write(fd, buf, strlen(buf));
	snprintf(buf, sizeof(buf), "Guarded %zu\n", d->malloc_guarded);
	write(fd, buf, strlen(buf));
	dump_free_chunk_info(fd, d);
	dump_free_page_info(fd, d);
	writestr(fd,
	    "slot)  hash d  type               page                  f size [free/n]\n");
	for (i = 0; i < d->regions_total; i++) {
		if (d->r[i].p != NULL) {
			size_t h = hash(d->r[i].p) &
			    (d->regions_total - 1);
			snprintf(buf, sizeof(buf), "%4zx) #%4zx %zd ",
			    i, h, h - i);
			write(fd, buf, strlen(buf));
			REALSIZE(realsize, &d->r[i]);
			if (realsize > MALLOC_MAXCHUNK) {
				putleakinfo(d->r[i].f, realsize, 1);
				snprintf(buf, sizeof(buf),
				    "pages %18p %18p %zu\n", d->r[i].p,
				    d->r[i].f, realsize);
				write(fd, buf, strlen(buf));
			} else
				dump_chunk(fd,
				    (struct chunk_info *)d->r[i].size,
				    d->r[i].f, 0);
		}
	}
	dump_leaks(fd);
	write(fd, "\n", 1);
}

void
malloc_dump(int fd, int poolno, struct dir_info *pool)
{
	int i;
	void *p;
	struct region_info *r;
	int saved_errno = errno;

	if (pool == NULL)
		return;
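	/*
	 * Flush the delayed-free queue first so chunks that are
	 * logically freed do not show up as live in the report.
	 */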
	for (i = 0; i < MALLOC_DELAYED_CHUNK_MASK + 1; i++) {
		p = pool->delayed_chunks[i];
		if (p == NULL)
			continue;
		r = find(pool, p);
		if (r == NULL)
			wrterror(pool, "bogus pointer in malloc_dump %p", p);
		free_bytes(pool, r, p);
		pool->delayed_chunks[i] = NULL;
	}
	/* XXX leak when run multiple times */
	RBT_INIT(leaktree, &leakhead);
	malloc_dump1(fd, poolno, pool);
	errno = saved_errno;
}
DEF_WEAK(malloc_dump);

void
malloc_gdump(int fd)
{
	int i;
	int saved_errno = errno;

	for (i = 0; i < _MALLOC_MUTEXES; i++)
		malloc_dump(fd, i, mopts.malloc_pool[i]);

	errno = saved_errno;
}
DEF_WEAK(malloc_gdump);

static void
malloc_exit(void)
{
	static const char q[] = "malloc() warning: Couldn't dump stats\n";
	int save_errno = errno, fd, i;
	char buf[100];
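
	/*
	 * O_CREAT is deliberately absent: a dump is written at exit
	 * only if the user has created malloc.out beforehand.
	 */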
	fd = open("malloc.out", O_RDWR|O_APPEND);
	if (fd != -1) {
		snprintf(buf, sizeof(buf), "******** Start dump %s *******\n",
		    __progname);
		write(fd, buf, strlen(buf));
		snprintf(buf, sizeof(buf),
		    "MT=%d I=%d F=%d U=%d J=%d R=%d X=%d C=%d cache=%u G=%zu\n",
		    mopts.malloc_mt, mopts.internal_funcs,
		    mopts.malloc_freecheck,
		    mopts.malloc_freeunmap, mopts.malloc_junk,
		    mopts.malloc_realloc, mopts.malloc_xmalloc,
		    mopts.chunk_canaries, mopts.malloc_cache,
		    mopts.malloc_guard);
		write(fd, buf, strlen(buf));

		for (i = 0; i < _MALLOC_MUTEXES; i++)
			malloc_dump(fd, i, mopts.malloc_pool[i]);
		snprintf(buf, sizeof(buf), "******** End dump %s *******\n",
		    __progname);
		write(fd, buf, strlen(buf));
		close(fd);
	} else
		write(STDERR_FILENO, q, sizeof(q) - 1);
	errno = save_errno;
}

#endif /* MALLOC_STATS */