xref: /openbsd-src/lib/libc/stdlib/malloc.c (revision 897fc685943471cf985a0fe38ba076ea6fe74fa5)
1 /*	$OpenBSD: malloc.c,v 1.249 2018/04/07 09:57:08 otto Exp $	*/
2 /*
3  * Copyright (c) 2008, 2010, 2011, 2016 Otto Moerbeek <otto@drijf.net>
4  * Copyright (c) 2012 Matthew Dempsky <matthew@openbsd.org>
5  * Copyright (c) 2008 Damien Miller <djm@openbsd.org>
6  * Copyright (c) 2000 Poul-Henning Kamp <phk@FreeBSD.org>
7  *
8  * Permission to use, copy, modify, and distribute this software for any
9  * purpose with or without fee is hereby granted, provided that the above
10  * copyright notice and this permission notice appear in all copies.
11  *
12  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19  */
20 
21 /*
22  * If we meet some day, and you think this stuff is worth it, you
23  * can buy me a beer in return. Poul-Henning Kamp
24  */
25 
26 /* #define MALLOC_STATS */
27 
28 #include <sys/types.h>
29 #include <sys/queue.h>
30 #include <sys/mman.h>
31 #include <errno.h>
32 #include <stdarg.h>
33 #include <stdint.h>
34 #include <stdio.h>
35 #include <stdlib.h>
36 #include <string.h>
37 #include <unistd.h>
38 
39 #ifdef MALLOC_STATS
40 #include <sys/tree.h>
41 #include <fcntl.h>
42 #endif
43 
44 #include "thread_private.h"
45 #include <tib.h>
46 
47 #define MALLOC_PAGESHIFT	_MAX_PAGE_SHIFT
48 
49 #define MALLOC_MINSHIFT		4
50 #define MALLOC_MAXSHIFT		(MALLOC_PAGESHIFT - 1)
51 #define MALLOC_PAGESIZE		(1UL << MALLOC_PAGESHIFT)
52 #define MALLOC_MINSIZE		(1UL << MALLOC_MINSHIFT)
53 #define MALLOC_PAGEMASK		(MALLOC_PAGESIZE - 1)
54 #define MASK_POINTER(p)		((void *)(((uintptr_t)(p)) & ~MALLOC_PAGEMASK))
55 
56 #define MALLOC_MAXCHUNK		(1 << MALLOC_MAXSHIFT)
57 #define MALLOC_MAXCACHE		256
58 #define MALLOC_DELAYED_CHUNK_MASK	15
59 #ifdef MALLOC_STATS
60 #define MALLOC_INITIAL_REGIONS	512
61 #else
62 #define MALLOC_INITIAL_REGIONS	(MALLOC_PAGESIZE / sizeof(struct region_info))
63 #endif
64 #define MALLOC_DEFAULT_CACHE	64
65 #define MALLOC_CHUNK_LISTS	4
66 #define CHUNK_CHECK_LENGTH	32
67 
68 /*
69  * We move allocations of size between half a page and a whole page towards
70  * the end of the page, subject to alignment constraints. This is the extra
71  * headroom we allow.  Set to zero to be the most strict.
72  */
73 #define MALLOC_LEEWAY		0
74 #define MALLOC_MOVE_COND(sz)	((sz) - mopts.malloc_guard < 		\
75 				    MALLOC_PAGESIZE - MALLOC_LEEWAY)
76 #define MALLOC_MOVE(p, sz)  	(((char *)(p)) +			\
77 				    ((MALLOC_PAGESIZE - MALLOC_LEEWAY -	\
78 			    	    ((sz) - mopts.malloc_guard)) & 	\
79 				    ~(MALLOC_MINSIZE - 1)))
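/*
 * A worked example (a sketch, assuming 4 KB pages, MALLOC_LEEWAY 0 and no
 * guard): for sz = 100, MALLOC_MOVE(p, 100) = p + ((4096 - 100) & ~15)
 * = p + 3984, so the allocation ends 12 bytes short of the page end; the
 * slack comes from MALLOC_MINSIZE (16 byte) alignment of the result.
 */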
80 
81 #define PAGEROUND(x)  (((x) + (MALLOC_PAGEMASK)) & ~MALLOC_PAGEMASK)
82 
83 /*
84  * What to use for Junk.  These are the byte values we use to fill
85  * memory when the 'J' option is enabled: SOME_JUNK right after
86  * allocation, and SOME_FREEJUNK right before free.
87  */
88 #define SOME_JUNK		0xdb	/* deadbeef */
89 #define SOME_FREEJUNK		0xdf	/* dead, free */
90 
91 #define MMAP(sz)	mmap(NULL, (sz), PROT_READ | PROT_WRITE, \
92     MAP_ANON | MAP_PRIVATE, -1, 0)
93 
94 #define MMAPNONE(sz)	mmap(NULL, (sz), PROT_NONE, \
95     MAP_ANON | MAP_PRIVATE, -1, 0)
96 
97 #define MMAPA(a,sz)	mmap((a), (sz), PROT_READ | PROT_WRITE, \
98     MAP_ANON | MAP_PRIVATE, -1, 0)
99 
100 #define MQUERY(a, sz)	mquery((a), (sz), PROT_READ | PROT_WRITE, \
101     MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0)
102 
103 struct region_info {
104 	void *p;		/* page; low bits used to mark chunks */
105 	uintptr_t size;		/* size for pages, or chunk_info pointer */
106 #ifdef MALLOC_STATS
107 	void *f;		/* where allocated from */
108 #endif
109 };
110 
111 LIST_HEAD(chunk_head, chunk_info);
112 
113 struct dir_info {
114 	u_int32_t canary1;
115 	int active;			/* status of malloc */
116 	struct region_info *r;		/* region slots */
117 	size_t regions_total;		/* number of region slots */
118 	size_t regions_free;		/* number of free slots */
119 					/* lists of free chunk info structs */
120 	struct chunk_head chunk_info_list[MALLOC_MAXSHIFT + 1];
121 					/* lists of chunks with free slots */
122 	struct chunk_head chunk_dir[MALLOC_MAXSHIFT + 1][MALLOC_CHUNK_LISTS];
123 	size_t free_regions_size;	/* free pages cached */
124 					/* free pages cache */
125 	struct region_info free_regions[MALLOC_MAXCACHE];
126 					/* delayed free chunk slots */
127 	u_int rotor;
128 	void *delayed_chunks[MALLOC_DELAYED_CHUNK_MASK + 1];
129 	size_t rbytesused;		/* random bytes used */
130 	char *func;			/* current function */
131 	int mutex;
132 	u_char rbytes[32];		/* random bytes */
133 #ifdef MALLOC_STATS
134 	size_t inserts;
135 	size_t insert_collisions;
136 	size_t finds;
137 	size_t find_collisions;
138 	size_t deletes;
139 	size_t delete_moves;
140 	size_t cheap_realloc_tries;
141 	size_t cheap_reallocs;
142 	size_t malloc_used;		/* bytes allocated */
143 	size_t malloc_guarded;		/* bytes used for guards */
144 #define STATS_ADD(x,y)	((x) += (y))
145 #define STATS_SUB(x,y)	((x) -= (y))
146 #define STATS_INC(x)	((x)++)
147 #define STATS_ZERO(x)	((x) = 0)
148 #define STATS_SETF(x,y)	((x)->f = (y))
149 #else
150 #define STATS_ADD(x,y)	/* nothing */
151 #define STATS_SUB(x,y)	/* nothing */
152 #define STATS_INC(x)	/* nothing */
153 #define STATS_ZERO(x)	/* nothing */
154 #define STATS_SETF(x,y)	/* nothing */
155 #endif /* MALLOC_STATS */
156 	u_int32_t canary2;
157 };
158 #define DIR_INFO_RSZ	((sizeof(struct dir_info) + MALLOC_PAGEMASK) & \
159 			~MALLOC_PAGEMASK)
160 
161 /*
162  * This structure describes a page's worth of chunks.
163  *
164  * How many bits per u_short in the bitmap
165  */
166 #define MALLOC_BITS		(NBBY * sizeof(u_short))
167 struct chunk_info {
168 	LIST_ENTRY(chunk_info) entries;
169 	void *page;			/* pointer to the page */
170 	u_short canary;
171 	u_short size;			/* size of this page's chunks */
172 	u_short shift;			/* how far to shift for this size */
173 	u_short free;			/* how many free chunks */
174 	u_short total;			/* how many chunks */
175 	u_short offset;			/* requested size table offset */
176 	u_short bits[1];		/* which chunks are free */
177 };
178 
179 struct malloc_readonly {
180 	struct dir_info *malloc_pool[_MALLOC_MUTEXES];	/* Main bookkeeping information */
181 	int	malloc_mt;		/* multi-threaded mode? */
182 	int	malloc_freecheck;	/* Extensive double free check */
183 	int	malloc_freeunmap;	/* mprotect free pages PROT_NONE? */
184 	int	malloc_junk;		/* junk fill? */
185 	int	malloc_realloc;		/* always realloc? */
186 	int	malloc_xmalloc;		/* xmalloc behaviour? */
187 	int	chunk_canaries;		/* use canaries after chunks? */
188 	int	internal_funcs;		/* use better recallocarray/freezero? */
189 	u_int	malloc_cache;		/* free pages we cache */
190 	size_t	malloc_guard;		/* use guard pages after allocations? */
191 #ifdef MALLOC_STATS
192 	int	malloc_stats;		/* dump statistics at end */
193 #endif
194 	u_int32_t malloc_canary;	/* Matched against ones in malloc_pool */
195 };
196 
197 /* This object is mapped PROT_READ after initialisation to prevent tampering */
198 static union {
199 	struct malloc_readonly mopts;
200 	u_char _pad[MALLOC_PAGESIZE];
201 } malloc_readonly __attribute__((aligned(MALLOC_PAGESIZE)));
202 #define mopts	malloc_readonly.mopts
203 
204 char		*malloc_options;	/* compile-time options */
205 
206 static __dead void wrterror(struct dir_info *d, char *msg, ...)
207     __attribute__((__format__ (printf, 2, 3)));
208 
209 #ifdef MALLOC_STATS
210 void malloc_dump(int, int, struct dir_info *);
211 PROTO_NORMAL(malloc_dump);
212 void malloc_gdump(int);
213 PROTO_NORMAL(malloc_gdump);
214 static void malloc_exit(void);
215 #define CALLER	__builtin_return_address(0)
216 #else
217 #define CALLER	NULL
218 #endif
219 
220 /* low bits of r->p determine size: 0 means >= page size and r->size holds
221  * the real size; otherwise the low bits are a shift count, or 1 for malloc(0)
222  */
223 #define REALSIZE(sz, r)						\
224 	(sz) = (uintptr_t)(r)->p & MALLOC_PAGEMASK,		\
225 	(sz) = ((sz) == 0 ? (r)->size : ((sz) == 1 ? 0 : (1 << ((sz)-1))))
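/*
 * Examples of the encoding, as implied by the macro itself: low bits 0
 * means a region of a page or more with the real size in r->size; low
 * bits 1 marks a malloc(0) region, for which REALSIZE yields 0; low bits
 * k > 1 mark a chunk page with chunk size 1 << (k - 1), e.g. k = 5
 * stands for 16-byte chunks.
 */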
226 
227 static inline void
228 _MALLOC_LEAVE(struct dir_info *d)
229 {
230 	if (mopts.malloc_mt) {
231 		d->active--;
232 		_MALLOC_UNLOCK(d->mutex);
233 	}
234 }
235 
236 static inline void
237 _MALLOC_ENTER(struct dir_info *d)
238 {
239 	if (mopts.malloc_mt) {
240 		_MALLOC_LOCK(d->mutex);
241 		d->active++;
242 	}
243 }
244 
245 static inline size_t
246 hash(void *p)
247 {
248 	size_t sum;
249 	uintptr_t u;
250 
251 	u = (uintptr_t)p >> MALLOC_PAGESHIFT;
252 	sum = u;
253 	sum = (sum << 7) - sum + (u >> 16);
254 #ifdef __LP64__
255 	sum = (sum << 7) - sum + (u >> 32);
256 	sum = (sum << 7) - sum + (u >> 48);
257 #endif
258 	return sum;
259 }
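/*
 * Note: (sum << 7) - sum multiplies by 127, so hash() above folds the
 * page number of p into the table index one 16-bit group at a time, in
 * the style of a multiplicative string hash.
 */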
260 
261 static inline
262 struct dir_info *getpool(void)
263 {
264 	if (!mopts.malloc_mt)
265 		return mopts.malloc_pool[0];
266 	else
267 		return mopts.malloc_pool[TIB_GET()->tib_tid &
268 		    (_MALLOC_MUTEXES - 1)];
269 }
270 
271 static __dead void
272 wrterror(struct dir_info *d, char *msg, ...)
273 {
274 	int		saved_errno = errno;
275 	va_list		ap;
276 
277 	dprintf(STDERR_FILENO, "%s(%d) in %s(): ", __progname,
278 	    getpid(), (d != NULL && d->func) ? d->func : "unknown");
279 	va_start(ap, msg);
280 	vdprintf(STDERR_FILENO, msg, ap);
281 	va_end(ap);
282 	dprintf(STDERR_FILENO, "\n");
283 
284 #ifdef MALLOC_STATS
285 	if (mopts.malloc_stats)
286 		malloc_gdump(STDERR_FILENO);
287 #endif /* MALLOC_STATS */
288 
289 	errno = saved_errno;
290 
291 	abort();
292 }
293 
294 static void
295 rbytes_init(struct dir_info *d)
296 {
297 	arc4random_buf(d->rbytes, sizeof(d->rbytes));
298 	/* add 1 to account for using d->rbytes[0] */
299 	d->rbytesused = 1 + d->rbytes[0] % (sizeof(d->rbytes) / 2);
300 }
301 
302 static inline u_char
303 getrbyte(struct dir_info *d)
304 {
305 	u_char x;
306 
307 	if (d->rbytesused >= sizeof(d->rbytes))
308 		rbytes_init(d);
309 	x = d->rbytes[d->rbytesused++];
310 	return x;
311 }
312 
313 static void
314 omalloc_parseopt(char opt)
315 {
316 	switch (opt) {
317 	case '>':
318 		mopts.malloc_cache <<= 1;
319 		if (mopts.malloc_cache > MALLOC_MAXCACHE)
320 			mopts.malloc_cache = MALLOC_MAXCACHE;
321 		break;
322 	case '<':
323 		mopts.malloc_cache >>= 1;
324 		break;
325 	case 'c':
326 		mopts.chunk_canaries = 0;
327 		break;
328 	case 'C':
329 		mopts.chunk_canaries = 1;
330 		break;
331 #ifdef MALLOC_STATS
332 	case 'd':
333 		mopts.malloc_stats = 0;
334 		break;
335 	case 'D':
336 		mopts.malloc_stats = 1;
337 		break;
338 #endif /* MALLOC_STATS */
339 	case 'f':
340 		mopts.malloc_freecheck = 0;
341 		mopts.malloc_freeunmap = 0;
342 		break;
343 	case 'F':
344 		mopts.malloc_freecheck = 1;
345 		mopts.malloc_freeunmap = 1;
346 		break;
347 	case 'g':
348 		mopts.malloc_guard = 0;
349 		break;
350 	case 'G':
351 		mopts.malloc_guard = MALLOC_PAGESIZE;
352 		break;
353 	case 'j':
354 		if (mopts.malloc_junk > 0)
355 			mopts.malloc_junk--;
356 		break;
357 	case 'J':
358 		if (mopts.malloc_junk < 2)
359 			mopts.malloc_junk++;
360 		break;
361 	case 'r':
362 		mopts.malloc_realloc = 0;
363 		break;
364 	case 'R':
365 		mopts.malloc_realloc = 1;
366 		break;
367 	case 'u':
368 		mopts.malloc_freeunmap = 0;
369 		break;
370 	case 'U':
371 		mopts.malloc_freeunmap = 1;
372 		break;
373 	case 'x':
374 		mopts.malloc_xmalloc = 0;
375 		break;
376 	case 'X':
377 		mopts.malloc_xmalloc = 1;
378 		break;
379 	default:
380 		dprintf(STDERR_FILENO, "malloc() warning: "
381                     "unknown char in MALLOC_OPTIONS\n");
382 		break;
383 	}
384 }
385 
386 static void
387 omalloc_init(void)
388 {
389 	char *p, *q, b[64];
390 	int i, j;
391 
392 	/*
393 	 * Default options
394 	 */
395 	mopts.malloc_junk = 1;
396 	mopts.malloc_cache = MALLOC_DEFAULT_CACHE;
397 
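	/*
	 * Options are gathered, in order, from the /etc/malloc.conf
	 * symlink, the MALLOC_OPTIONS environment variable (skipped for
	 * set-ID programs) and the global malloc_options string; each
	 * option is applied as it is seen, so later sources override
	 * earlier ones.
	 */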
398 	for (i = 0; i < 3; i++) {
399 		switch (i) {
400 		case 0:
401 			j = readlink("/etc/malloc.conf", b, sizeof b - 1);
402 			if (j <= 0)
403 				continue;
404 			b[j] = '\0';
405 			p = b;
406 			break;
407 		case 1:
408 			if (issetugid() == 0)
409 				p = getenv("MALLOC_OPTIONS");
410 			else
411 				continue;
412 			break;
413 		case 2:
414 			p = malloc_options;
415 			break;
416 		default:
417 			p = NULL;
418 		}
419 
420 		for (; p != NULL && *p != '\0'; p++) {
421 			switch (*p) {
422 			case 'S':
423 				for (q = "CFGJ"; *q != '\0'; q++)
424 					omalloc_parseopt(*q);
425 				mopts.malloc_cache = 0;
426 				break;
427 			case 's':
428 				for (q = "cfgj"; *q != '\0'; q++)
429 					omalloc_parseopt(*q);
430 				mopts.malloc_cache = MALLOC_DEFAULT_CACHE;
431 				break;
432 			default:
433 				omalloc_parseopt(*p);
434 				break;
435 			}
436 		}
437 	}
438 
439 #ifdef MALLOC_STATS
440 	if (mopts.malloc_stats && (atexit(malloc_exit) == -1)) {
441 		dprintf(STDERR_FILENO, "malloc() warning: atexit(2) failed."
442 		    " Will not be able to dump stats on exit\n");
443 	}
444 #endif /* MALLOC_STATS */
445 
446 	while ((mopts.malloc_canary = arc4random()) == 0)
447 		;
448 }
449 
450 static void
451 omalloc_poolinit(struct dir_info **dp)
452 {
453 	char *p;
454 	size_t d_avail, regioninfo_size;
455 	struct dir_info *d;
456 	int i, j;
457 
458 	/*
459 	 * Allocate dir_info with a guard page on either side. Also
460 	 * randomise the offset inside the page at which the dir_info
461 	 * lies (subject to alignment by 1 << MALLOC_MINSHIFT)
462 	 */
463 	if ((p = MMAPNONE(DIR_INFO_RSZ + (MALLOC_PAGESIZE * 2))) == MAP_FAILED)
464 		wrterror(NULL, "malloc init mmap failed");
465 	mprotect(p + MALLOC_PAGESIZE, DIR_INFO_RSZ, PROT_READ | PROT_WRITE);
466 	d_avail = (DIR_INFO_RSZ - sizeof(*d)) >> MALLOC_MINSHIFT;
467 	d = (struct dir_info *)(p + MALLOC_PAGESIZE +
468 	    (arc4random_uniform(d_avail) << MALLOC_MINSHIFT));
469 
470 	rbytes_init(d);
471 	d->regions_free = d->regions_total = MALLOC_INITIAL_REGIONS;
472 	regioninfo_size = d->regions_total * sizeof(struct region_info);
473 	d->r = MMAP(regioninfo_size);
474 	if (d->r == MAP_FAILED) {
475 		d->regions_total = 0;
476 		wrterror(NULL, "malloc init mmap failed");
477 	}
478 	for (i = 0; i <= MALLOC_MAXSHIFT; i++) {
479 		LIST_INIT(&d->chunk_info_list[i]);
480 		for (j = 0; j < MALLOC_CHUNK_LISTS; j++)
481 			LIST_INIT(&d->chunk_dir[i][j]);
482 	}
483 	STATS_ADD(d->malloc_used, regioninfo_size);
484 	d->canary1 = mopts.malloc_canary ^ (u_int32_t)(uintptr_t)d;
485 	d->canary2 = ~d->canary1;
486 
487 	*dp = d;
488 }
489 
490 static int
491 omalloc_grow(struct dir_info *d)
492 {
493 	size_t newtotal;
494 	size_t newsize;
495 	size_t mask;
496 	size_t i;
497 	struct region_info *p;
498 
499 	if (d->regions_total > SIZE_MAX / sizeof(struct region_info) / 2)
500 		return 1;
501 
502 	newtotal = d->regions_total * 2;
503 	newsize = newtotal * sizeof(struct region_info);
504 	mask = newtotal - 1;
505 
506 	p = MMAP(newsize);
507 	if (p == MAP_FAILED)
508 		return 1;
509 
510 	STATS_ADD(d->malloc_used, newsize);
511 	STATS_ZERO(d->inserts);
512 	STATS_ZERO(d->insert_collisions);
513 	for (i = 0; i < d->regions_total; i++) {
514 		void *q = d->r[i].p;
515 		if (q != NULL) {
516 			size_t index = hash(q) & mask;
517 			STATS_INC(d->inserts);
518 			while (p[index].p != NULL) {
519 				index = (index - 1) & mask;
520 				STATS_INC(d->insert_collisions);
521 			}
522 			p[index] = d->r[i];
523 		}
524 	}
525 	/* prevent pages containing meta info from ending up in the cache */
526 	if (munmap(d->r, d->regions_total * sizeof(struct region_info)))
527 		wrterror(d, "munmap %p", (void *)d->r);
528 	else
529 		STATS_SUB(d->malloc_used,
530 		    d->regions_total * sizeof(struct region_info));
531 	d->regions_free = d->regions_free + d->regions_total;
532 	d->regions_total = newtotal;
533 	d->r = p;
534 	return 0;
535 }
536 
537 /*
538  * The hashtable uses the assumption that p is never NULL. This holds since
539  * non-MAP_FIXED mappings with hint 0 start at BRKSIZ.
540  */
541 static int
542 insert(struct dir_info *d, void *p, size_t sz, void *f)
543 {
544 	size_t index;
545 	size_t mask;
546 	void *q;
547 
548 	if (d->regions_free * 4 < d->regions_total) {
549 		if (omalloc_grow(d))
550 			return 1;
551 	}
552 	mask = d->regions_total - 1;
553 	index = hash(p) & mask;
554 	q = d->r[index].p;
555 	STATS_INC(d->inserts);
556 	while (q != NULL) {
557 		index = (index - 1) & mask;
558 		q = d->r[index].p;
559 		STATS_INC(d->insert_collisions);
560 	}
561 	d->r[index].p = p;
562 	d->r[index].size = sz;
563 #ifdef MALLOC_STATS
564 	d->r[index].f = f;
565 #endif
566 	d->regions_free--;
567 	return 0;
568 }
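/*
 * Note on the table above: probing is linear but runs downward,
 * (index - 1) & mask, and omalloc_grow() is called once fewer than a
 * quarter of the slots are free, keeping the load factor below 75% so
 * probe sequences stay short.
 */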
569 
570 static struct region_info *
571 find(struct dir_info *d, void *p)
572 {
573 	size_t index;
574 	size_t mask = d->regions_total - 1;
575 	void *q, *r;
576 
577 	if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)(uintptr_t)d) ||
578 	    d->canary1 != ~d->canary2)
579 		wrterror(d, "internal struct corrupt");
580 	p = MASK_POINTER(p);
581 	index = hash(p) & mask;
582 	r = d->r[index].p;
583 	q = MASK_POINTER(r);
584 	STATS_INC(d->finds);
585 	while (q != p && r != NULL) {
586 		index = (index - 1) & mask;
587 		r = d->r[index].p;
588 		q = MASK_POINTER(r);
589 		STATS_INC(d->find_collisions);
590 	}
591 	return (q == p && r != NULL) ? &d->r[index] : NULL;
592 }
593 
594 static void
595 delete(struct dir_info *d, struct region_info *ri)
596 {
597 	/* algorithm R, Knuth Vol III section 6.4 */
598 	size_t mask = d->regions_total - 1;
599 	size_t i, j, r;
600 
601 	if (d->regions_total & (d->regions_total - 1))
602 		wrterror(d, "regions_total not 2^x");
603 	d->regions_free++;
604 	STATS_INC(d->deletes);
605 
606 	i = ri - d->r;
607 	for (;;) {
608 		d->r[i].p = NULL;
609 		d->r[i].size = 0;
610 		j = i;
611 		for (;;) {
612 			i = (i - 1) & mask;
613 			if (d->r[i].p == NULL)
614 				return;
615 			r = hash(d->r[i].p) & mask;
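			/*
			 * The entry at i hashes to r. With downward probing
			 * a lookup walks r, r-1, ..., i; if r lies cyclically
			 * in [i, j) that walk never crosses the emptied slot
			 * j and the entry may stay. Otherwise a lookup would
			 * stop early at j, so the entry is moved into j.
			 */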
616 			if ((i <= r && r < j) || (r < j && j < i) ||
617 			    (j < i && i <= r))
618 				continue;
619 			d->r[j] = d->r[i];
620 			STATS_INC(d->delete_moves);
621 			break;
622 		}
623 
624 	}
625 }
626 
627 /*
628  * Cache maintenance. We keep at most malloc_cache pages cached.
629  * If the cache is becoming full, unmap pages in the cache for real,
630  * and then add the region to the cache.
631  * Unlike the regular region data structure, the sizes in the
632  * cache are in MALLOC_PAGESIZE units.
633  */
634 static void
635 unmap(struct dir_info *d, void *p, size_t sz, size_t clear, int junk)
636 {
637 	size_t psz = sz >> MALLOC_PAGESHIFT;
638 	size_t rsz;
639 	struct region_info *r;
640 	u_int i, offset, mask;
641 
642 	if (sz != PAGEROUND(sz))
643 		wrterror(d, "munmap round");
644 
645 	rsz = mopts.malloc_cache - d->free_regions_size;
646 
647 	/*
648 	 * normally the cache holds recently freed regions, but if the region
649 	 * to unmap is larger than the cache size or we're clearing and the
650 	 * cache is full, just munmap
651 	 */
652 	if (psz > mopts.malloc_cache || (clear > 0 && rsz == 0)) {
653 		i = munmap(p, sz);
654 		if (i)
655 			wrterror(d, "munmap %p", p);
656 		STATS_SUB(d->malloc_used, sz);
657 		return;
658 	}
659 	offset = getrbyte(d);
660 	mask = mopts.malloc_cache - 1;
661 	if (psz > rsz) {
662 		size_t tounmap = psz - rsz;
663 		for (i = 0; ; i++) {
664 			r = &d->free_regions[(i + offset) & mask];
665 			if (r->p != NULL) {
666 				rsz = r->size << MALLOC_PAGESHIFT;
667 				if (munmap(r->p, rsz))
668 					wrterror(d, "munmap %p", r->p);
669 				r->p = NULL;
670 				if (tounmap > r->size)
671 					tounmap -= r->size;
672 				else
673 					tounmap = 0;
674 				d->free_regions_size -= r->size;
675 				STATS_SUB(d->malloc_used, rsz);
676 				if (tounmap == 0) {
677 					offset = i;
678 					break;
679 				}
680 			}
681 		}
682 	}
683 	for (i = 0; ; i++) {
684 		r = &d->free_regions[(i + offset) & mask];
685 		if (r->p == NULL) {
686 			if (clear > 0)
687 				memset(p, 0, clear);
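			/*
			 * junk level 1 fills only the first MALLOC_MAXCHUNK
			 * bytes (cheap); level 2 fills the whole region
			 */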
688 			if (junk && !mopts.malloc_freeunmap) {
689 				size_t amt = junk == 1 ?  MALLOC_MAXCHUNK : sz;
690 				memset(p, SOME_FREEJUNK, amt);
691 			}
692 			if (mopts.malloc_freeunmap)
693 				mprotect(p, sz, PROT_NONE);
694 			r->p = p;
695 			r->size = psz;
696 			d->free_regions_size += psz;
697 			break;
698 		}
699 	}
700 	if (d->free_regions_size > mopts.malloc_cache)
701 		wrterror(d, "malloc cache overflow");
702 }
703 
704 static void
705 zapcacheregion(struct dir_info *d, void *p, size_t len)
706 {
707 	u_int i;
708 	struct region_info *r;
709 	size_t rsz;
710 
711 	for (i = 0; i < mopts.malloc_cache; i++) {
712 		r = &d->free_regions[i];
713 		if (r->p >= p && r->p <= (void *)((char *)p + len)) {
714 			rsz = r->size << MALLOC_PAGESHIFT;
715 			if (munmap(r->p, rsz))
716 				wrterror(d, "munmap %p", r->p);
717 			r->p = NULL;
718 			d->free_regions_size -= r->size;
719 			STATS_SUB(d->malloc_used, rsz);
720 		}
721 	}
722 }
723 
724 static void *
725 map(struct dir_info *d, void *hint, size_t sz, int zero_fill)
726 {
727 	size_t psz = sz >> MALLOC_PAGESHIFT;
728 	struct region_info *r, *big = NULL;
729 	u_int i;
730 	void *p;
731 
732 	if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)(uintptr_t)d) ||
733 	    d->canary1 != ~d->canary2)
734 		wrterror(d, "internal struct corrupt");
735 	if (sz != PAGEROUND(sz))
736 		wrterror(d, "map round");
737 
738 	if (hint == NULL && psz > d->free_regions_size) {
739 		_MALLOC_LEAVE(d);
740 		p = MMAP(sz);
741 		_MALLOC_ENTER(d);
742 		if (p != MAP_FAILED)
743 			STATS_ADD(d->malloc_used, sz);
744 		/* zero fill not needed, fresh mmap memory is already zeroed */
745 		return p;
746 	}
747 	for (i = 0; i < mopts.malloc_cache; i++) {
748 		r = &d->free_regions[(i + d->rotor) & (mopts.malloc_cache - 1)];
749 		if (r->p != NULL) {
750 			if (hint != NULL && r->p != hint)
751 				continue;
752 			if (r->size == psz) {
753 				p = r->p;
754 				r->p = NULL;
755 				d->free_regions_size -= psz;
756 				if (mopts.malloc_freeunmap)
757 					mprotect(p, sz, PROT_READ | PROT_WRITE);
758 				if (zero_fill)
759 					memset(p, 0, sz);
760 				else if (mopts.malloc_junk == 2 &&
761 				    mopts.malloc_freeunmap)
762 					memset(p, SOME_FREEJUNK, sz);
763 				d->rotor += i + 1;
764 				return p;
765 			} else if (r->size > psz)
766 				big = r;
767 		}
768 	}
769 	if (big != NULL) {
770 		r = big;
771 		p = r->p;
772 		r->p = (char *)r->p + (psz << MALLOC_PAGESHIFT);
773 		if (mopts.malloc_freeunmap)
774 			mprotect(p, sz, PROT_READ | PROT_WRITE);
775 		r->size -= psz;
776 		d->free_regions_size -= psz;
777 		if (zero_fill)
778 			memset(p, 0, sz);
779 		else if (mopts.malloc_junk == 2 && mopts.malloc_freeunmap)
780 			memset(p, SOME_FREEJUNK, sz);
781 		return p;
782 	}
783 	if (hint != NULL)
784 		return MAP_FAILED;
785 	if (d->free_regions_size > mopts.malloc_cache)
786 		wrterror(d, "malloc cache");
787 	_MALLOC_LEAVE(d);
788 	p = MMAP(sz);
789 	_MALLOC_ENTER(d);
790 	if (p != MAP_FAILED)
791 		STATS_ADD(d->malloc_used, sz);
792 	/* zero fill not needed, fresh mmap memory is already zeroed */
793 	return p;
794 }
795 
796 static void
797 init_chunk_info(struct dir_info *d, struct chunk_info *p, int bits)
798 {
799 	int i;
800 
801 	if (bits == 0) {
802 		p->shift = MALLOC_MINSHIFT;
803 		p->total = p->free = MALLOC_PAGESIZE >> p->shift;
804 		p->size = 0;
805 		p->offset = 0xdead;
806 	} else {
807 		p->shift = bits;
808 		p->total = p->free = MALLOC_PAGESIZE >> p->shift;
809 		p->size = 1U << bits;
810 		p->offset = howmany(p->total, MALLOC_BITS);
811 	}
812 	p->canary = (u_short)d->canary1;
813 
814 	/* set all valid bits in the bitmap */
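	/*
	 * The last u_short may cover fewer than MALLOC_BITS chunks:
	 * (2U << (i % MALLOC_BITS)) - 1 sets exactly the low
	 * (i % MALLOC_BITS) + 1 bits, e.g. a page of 4 chunks (i = 3)
	 * yields bits[0] = 0xf.
	 */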
815 	i = p->total - 1;
816 	memset(p->bits, 0xff, sizeof(p->bits[0]) * (i / MALLOC_BITS));
817 	p->bits[i / MALLOC_BITS] = (2U << (i % MALLOC_BITS)) - 1;
818 }
819 
820 static struct chunk_info *
821 alloc_chunk_info(struct dir_info *d, int bits)
822 {
823 	struct chunk_info *p;
824 
825 	if (LIST_EMPTY(&d->chunk_info_list[bits])) {
826 		size_t size, count, i;
827 		char *q;
828 
829 		if (bits == 0)
830 			count = MALLOC_PAGESIZE / MALLOC_MINSIZE;
831 		else
832 			count = MALLOC_PAGESIZE >> bits;
833 
834 		size = howmany(count, MALLOC_BITS);
835 		size = sizeof(struct chunk_info) + (size - 1) * sizeof(u_short);
836 		if (mopts.chunk_canaries)
837 			size += count * sizeof(u_short);
838 		size = _ALIGN(size);
839 
840 		q = MMAP(MALLOC_PAGESIZE);
841 		if (q == MAP_FAILED)
842 			return NULL;
843 		STATS_ADD(d->malloc_used, MALLOC_PAGESIZE);
844 		count = MALLOC_PAGESIZE / size;
845 
846 		for (i = 0; i < count; i++, q += size) {
847 			p = (struct chunk_info *)q;
848 			LIST_INSERT_HEAD(&d->chunk_info_list[bits], p, entries);
849 		}
850 	}
851 	p = LIST_FIRST(&d->chunk_info_list[bits]);
852 	LIST_REMOVE(p, entries);
853 	if (p->shift == 0)
854 		init_chunk_info(d, p, bits);
855 	return p;
856 }
857 
858 /*
859  * Allocate a page of chunks
860  */
861 static struct chunk_info *
862 omalloc_make_chunks(struct dir_info *d, int bits, int listnum)
863 {
864 	struct chunk_info *bp;
865 	void *pp;
866 
867 	/* Allocate a new bucket */
868 	pp = map(d, NULL, MALLOC_PAGESIZE, 0);
869 	if (pp == MAP_FAILED)
870 		return NULL;
871 
872 	/* memory protect the page allocated in the malloc(0) case */
873 	if (bits == 0 && mprotect(pp, MALLOC_PAGESIZE, PROT_NONE) < 0)
874 		goto err;
875 
876 	bp = alloc_chunk_info(d, bits);
877 	if (bp == NULL)
878 		goto err;
879 	bp->page = pp;
880 
881 	if (insert(d, (void *)((uintptr_t)pp | (bits + 1)), (uintptr_t)bp,
882 	    NULL))
883 		goto err;
884 	LIST_INSERT_HEAD(&d->chunk_dir[bits][listnum], bp, entries);
885 	return bp;
886 
887 err:
888 	unmap(d, pp, MALLOC_PAGESIZE, 0, mopts.malloc_junk);
889 	return NULL;
890 }
891 
892 static int
893 find_chunksize(size_t size)
894 {
895 	int r;
896 
897 	/* malloc(0) is special */
898 	if (size == 0)
899 		return 0;
900 
901 	if (size < MALLOC_MINSIZE)
902 		size = MALLOC_MINSIZE;
903 	size--;
904 
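	/*
	 * Find the smallest shift r with (1 << r) >= the requested size,
	 * e.g. a 100-byte request yields r = 7, i.e. 128-byte chunks.
	 */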
905 	r = MALLOC_MINSHIFT;
906 	while (size >> r)
907 		r++;
908 	return r;
909 }
910 
911 static void
912 fill_canary(char *ptr, size_t sz, size_t allocated)
913 {
914 	size_t check_sz = allocated - sz;
915 
916 	if (check_sz > CHUNK_CHECK_LENGTH)
917 		check_sz = CHUNK_CHECK_LENGTH;
918 	memset(ptr + sz, SOME_JUNK, check_sz);
919 }
920 
921 /*
922  * Allocate a chunk
923  */
924 static void *
925 malloc_bytes(struct dir_info *d, size_t size, void *f)
926 {
927 	u_int i, r;
928 	int j, listnum;
929 	size_t k;
930 	u_short	*lp;
931 	struct chunk_info *bp;
932 	void *p;
933 
934 	if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)(uintptr_t)d) ||
935 	    d->canary1 != ~d->canary2)
936 		wrterror(d, "internal struct corrupt");
937 
938 	j = find_chunksize(size);
939 
940 	r = ((u_int)getrbyte(d) << 8) | getrbyte(d);
941 	listnum = r % MALLOC_CHUNK_LISTS;
942 	/* If the list is empty, make a page more of chunks of that size */
943 	if ((bp = LIST_FIRST(&d->chunk_dir[j][listnum])) == NULL) {
944 		bp = omalloc_make_chunks(d, j, listnum);
945 		if (bp == NULL)
946 			return NULL;
947 	}
948 
949 	if (bp->canary != (u_short)d->canary1)
950 		wrterror(d, "chunk info corrupted");
951 
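	/*
	 * The low bits of r picked the list above; the remaining bits
	 * pick a random starting chunk, bp->total being a power of two.
	 */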
952 	i = (r / MALLOC_CHUNK_LISTS) & (bp->total - 1);
953 
954 	/* start somewhere in a short */
955 	lp = &bp->bits[i / MALLOC_BITS];
956 	if (*lp) {
957 		j = i % MALLOC_BITS;
958 		k = ffs(*lp >> j);
959 		if (k != 0) {
960 			k += j - 1;
961 			goto found;
962 		}
963 	}
964 	/* no free bit in the rest of this short, move on to whole shorts */
965 	i /= MALLOC_BITS;
966 	for (;;) {
967 		if (++i >= bp->total / MALLOC_BITS)
968 			i = 0;
969 		lp = &bp->bits[i];
970 		if (*lp) {
971 			k = ffs(*lp) - 1;
972 			break;
973 		}
974 	}
975 found:
976 #ifdef MALLOC_STATS
977 	if (i == 0 && k == 0) {
978 		struct region_info *r = find(d, bp->page);
979 		r->f = f;
980 	}
981 #endif
982 
983 	*lp ^= 1 << k;
984 
985 	/* If there are no more free chunks, remove the page from the free-list */
986 	if (--bp->free == 0)
987 		LIST_REMOVE(bp, entries);
988 
989 	/* Adjust to the real offset of that chunk */
990 	k += (lp - bp->bits) * MALLOC_BITS;
991 
992 	if (mopts.chunk_canaries && size > 0)
993 		bp->bits[bp->offset + k] = size;
994 
995 	k <<= bp->shift;
996 
997 	p = (char *)bp->page + k;
998 	if (bp->size > 0) {
999 		if (mopts.malloc_junk == 2)
1000 			memset(p, SOME_JUNK, bp->size);
1001 		else if (mopts.chunk_canaries)
1002 			fill_canary(p, size, bp->size);
1003 	}
1004 	return p;
1005 }
1006 
1007 static void
1008 validate_canary(struct dir_info *d, u_char *ptr, size_t sz, size_t allocated)
1009 {
1010 	size_t check_sz = allocated - sz;
1011 	u_char *p, *q;
1012 
1013 	if (check_sz > CHUNK_CHECK_LENGTH)
1014 		check_sz = CHUNK_CHECK_LENGTH;
1015 	p = ptr + sz;
1016 	q = p + check_sz;
1017 
1018 	while (p < q) {
1019 		if (*p != SOME_JUNK) {
1020 			wrterror(d, "chunk canary corrupted %p %#tx@%#zx%s",
1021 			    ptr, p - ptr, sz,
1022 			    *p == SOME_FREEJUNK ? " (double free?)" : "");
1023 		}
1024 		p++;
1025 	}
1026 }
1027 
1028 static uint32_t
1029 find_chunknum(struct dir_info *d, struct chunk_info *info, void *ptr, int check)
1030 {
1031 	uint32_t chunknum;
1032 
1033 	if (info->canary != (u_short)d->canary1)
1034 		wrterror(d, "chunk info corrupted");
1035 
1036 	/* Find the chunk number on the page */
1037 	chunknum = ((uintptr_t)ptr & MALLOC_PAGEMASK) >> info->shift;
1038 
1039 	if ((uintptr_t)ptr & ((1U << (info->shift)) - 1))
1040 		wrterror(d, "modified chunk-pointer %p", ptr);
1041 	if (info->bits[chunknum / MALLOC_BITS] &
1042 	    (1U << (chunknum % MALLOC_BITS)))
1043 		wrterror(d, "chunk is already free %p", ptr);
1044 	if (check && info->size > 0) {
1045 		validate_canary(d, ptr, info->bits[info->offset + chunknum],
1046 		    info->size);
1047 	}
1048 	return chunknum;
1049 }
1050 
1051 /*
1052  * Free a chunk, and possibly the page it's on, if the page becomes empty.
1053  */
1054 static void
1055 free_bytes(struct dir_info *d, struct region_info *r, void *ptr)
1056 {
1057 	struct chunk_head *mp;
1058 	struct chunk_info *info;
1059 	uint32_t chunknum;
1060 	int listnum;
1061 
1062 	info = (struct chunk_info *)r->size;
1063 	chunknum = find_chunknum(d, info, ptr, 0);
1064 
1065 	info->bits[chunknum / MALLOC_BITS] |= 1U << (chunknum % MALLOC_BITS);
1066 	info->free++;
1067 
1068 	if (info->free == 1) {
1069 		/* Page became non-full */
1070 		listnum = getrbyte(d) % MALLOC_CHUNK_LISTS;
1071 		if (info->size != 0)
1072 			mp = &d->chunk_dir[info->shift][listnum];
1073 		else
1074 			mp = &d->chunk_dir[0][listnum];
1075 
1076 		LIST_INSERT_HEAD(mp, info, entries);
1077 		return;
1078 	}
1079 
1080 	if (info->free != info->total)
1081 		return;
1082 
1083 	LIST_REMOVE(info, entries);
1084 
1085 	if (info->size == 0 && !mopts.malloc_freeunmap)
1086 		mprotect(info->page, MALLOC_PAGESIZE, PROT_READ | PROT_WRITE);
1087 	unmap(d, info->page, MALLOC_PAGESIZE, 0, 0);
1088 
1089 	delete(d, r);
1090 	if (info->size != 0)
1091 		mp = &d->chunk_info_list[info->shift];
1092 	else
1093 		mp = &d->chunk_info_list[0];
1094 	LIST_INSERT_HEAD(mp, info, entries);
1095 }
1096 
1097 
1098 
1099 static void *
1100 omalloc(struct dir_info *pool, size_t sz, int zero_fill, void *f)
1101 {
1102 	void *p;
1103 	size_t psz;
1104 
1105 	if (sz > MALLOC_MAXCHUNK) {
1106 		if (sz >= SIZE_MAX - mopts.malloc_guard - MALLOC_PAGESIZE) {
1107 			errno = ENOMEM;
1108 			return NULL;
1109 		}
1110 		sz += mopts.malloc_guard;
1111 		psz = PAGEROUND(sz);
1112 		p = map(pool, NULL, psz, zero_fill);
1113 		if (p == MAP_FAILED) {
1114 			errno = ENOMEM;
1115 			return NULL;
1116 		}
1117 		if (insert(pool, p, sz, f)) {
1118 			unmap(pool, p, psz, 0, 0);
1119 			errno = ENOMEM;
1120 			return NULL;
1121 		}
1122 		if (mopts.malloc_guard) {
1123 			if (mprotect((char *)p + psz - mopts.malloc_guard,
1124 			    mopts.malloc_guard, PROT_NONE))
1125 				wrterror(pool, "mprotect");
1126 			STATS_ADD(pool->malloc_guarded, mopts.malloc_guard);
1127 		}
1128 
1129 		if (MALLOC_MOVE_COND(sz)) {
1130 			/* fill whole allocation */
1131 			if (mopts.malloc_junk == 2)
1132 				memset(p, SOME_JUNK, psz - mopts.malloc_guard);
1133 			/* shift towards the end */
1134 			p = MALLOC_MOVE(p, sz);
1135 			/* fill with zeros if needed; the junk fill above overwrote them */
1136 			if (zero_fill && mopts.malloc_junk == 2)
1137 				memset(p, 0, sz - mopts.malloc_guard);
1138 		} else {
1139 			if (mopts.malloc_junk == 2) {
1140 				if (zero_fill)
1141 					memset((char *)p + sz - mopts.malloc_guard,
1142 					    SOME_JUNK, psz - sz);
1143 				else
1144 					memset(p, SOME_JUNK,
1145 					    psz - mopts.malloc_guard);
1146 			} else if (mopts.chunk_canaries)
1147 				fill_canary(p, sz - mopts.malloc_guard,
1148 				    psz - mopts.malloc_guard);
1149 		}
1150 
1151 	} else {
1152 		/* takes care of SOME_JUNK */
1153 		p = malloc_bytes(pool, sz, f);
1154 		if (zero_fill && p != NULL && sz > 0)
1155 			memset(p, 0, sz);
1156 	}
1157 
1158 	return p;
1159 }
1160 
1161 /*
1162  * Common function for handling recursion.  Only
1163  * print the error message once, to avoid making the problem
1164  * potentially worse.
1165  */
1166 static void
1167 malloc_recurse(struct dir_info *d)
1168 {
1169 	static int noprint;
1170 
1171 	if (noprint == 0) {
1172 		noprint = 1;
1173 		wrterror(d, "recursive call");
1174 	}
1175 	d->active--;
1176 	_MALLOC_UNLOCK(d->mutex);
1177 	errno = EDEADLK;
1178 }
1179 
1180 void
1181 _malloc_init(int from_rthreads)
1182 {
1183 	int i, max;
1184 	struct dir_info *d;
1185 
1186 	_MALLOC_LOCK(0);
1187 	if (!from_rthreads && mopts.malloc_pool[0]) {
1188 		_MALLOC_UNLOCK(0);
1189 		return;
1190 	}
1191 	if (!mopts.malloc_canary)
1192 		omalloc_init();
1193 
1194 	max = from_rthreads ? _MALLOC_MUTEXES : 1;
1195 	if (((uintptr_t)&malloc_readonly & MALLOC_PAGEMASK) == 0)
1196 		mprotect(&malloc_readonly, sizeof(malloc_readonly),
1197 		    PROT_READ | PROT_WRITE);
1198 	for (i = 0; i < max; i++) {
1199 		if (mopts.malloc_pool[i])
1200 			continue;
1201 		omalloc_poolinit(&d);
1202 		d->mutex = i;
1203 		mopts.malloc_pool[i] = d;
1204 	}
1205 
1206 	if (from_rthreads)
1207 		mopts.malloc_mt = 1;
1208 	else
1209 		mopts.internal_funcs = 1;
1210 
1211 	/*
1212 	 * Options have been set and will never be reset.
1213 	 * Prevent further tampering with them.
1214 	 */
1215 	if (((uintptr_t)&malloc_readonly & MALLOC_PAGEMASK) == 0)
1216 		mprotect(&malloc_readonly, sizeof(malloc_readonly), PROT_READ);
1217 	_MALLOC_UNLOCK(0);
1218 }
1219 DEF_STRONG(_malloc_init);
1220 
1221 void *
1222 malloc(size_t size)
1223 {
1224 	void *r;
1225 	struct dir_info *d;
1226 	int saved_errno = errno;
1227 
1228 	d = getpool();
1229 	if (d == NULL) {
1230 		_malloc_init(0);
1231 		d = getpool();
1232 	}
1233 	_MALLOC_LOCK(d->mutex);
1234 	d->func = "malloc";
1235 
1236 	if (d->active++) {
1237 		malloc_recurse(d);
1238 		return NULL;
1239 	}
1240 	r = omalloc(d, size, 0, CALLER);
1241 	d->active--;
1242 	_MALLOC_UNLOCK(d->mutex);
1243 	if (r == NULL && mopts.malloc_xmalloc)
1244 		wrterror(d, "out of memory");
1245 	if (r != NULL)
1246 		errno = saved_errno;
1247 	return r;
1248 }
1249 /*DEF_STRONG(malloc);*/
1250 
1251 static void
1252 validate_junk(struct dir_info *pool, void *p)
1253 {
1254 	struct region_info *r;
1255 	size_t byte, sz;
1256 
1257 	if (p == NULL)
1258 		return;
1259 	r = find(pool, p);
1260 	if (r == NULL)
1261 		wrterror(pool, "bogus pointer in validate_junk %p", p);
1262 	REALSIZE(sz, r);
1263 	if (sz > CHUNK_CHECK_LENGTH)
1264 		sz = CHUNK_CHECK_LENGTH;
1265 	for (byte = 0; byte < sz; byte++) {
1266 		if (((unsigned char *)p)[byte] != SOME_FREEJUNK)
1267 			wrterror(pool, "use after free %p", p);
1268 	}
1269 }
1270 
1271 static void
1272 ofree(struct dir_info *argpool, void *p, int clear, int check, size_t argsz)
1273 {
1274 	struct dir_info *pool;
1275 	struct region_info *r;
1276 	char *saved_function;
1277 	size_t sz;
1278 	int i;
1279 
1280 	pool = argpool;
1281 	r = find(pool, p);
1282 	if (r == NULL) {
1283 		if (mopts.malloc_mt)  {
1284 			for (i = 0; i < _MALLOC_MUTEXES; i++) {
1285 				if (i == argpool->mutex)
1286 					continue;
1287 				pool->active--;
1288 				_MALLOC_UNLOCK(pool->mutex);
1289 				pool = mopts.malloc_pool[i];
1290 				_MALLOC_LOCK(pool->mutex);
1291 				pool->active++;
1292 				r = find(pool, p);
1293 				if (r != NULL) {
1294 					saved_function = pool->func;
1295 					pool->func = argpool->func;
1296 					break;
1297 				}
1298 			}
1299 		}
1300 		if (r == NULL)
1301 			wrterror(argpool, "bogus pointer (double free?) %p", p);
1302 	}
1303 
1304 	REALSIZE(sz, r);
1305 	if (check) {
1306 		if (sz <= MALLOC_MAXCHUNK) {
1307 			if (mopts.chunk_canaries && sz > 0) {
1308 				struct chunk_info *info =
1309 				    (struct chunk_info *)r->size;
1310 				uint32_t chunknum =
1311 				    find_chunknum(pool, info, p, 0);
1312 
1313 				if (info->bits[info->offset + chunknum] < argsz)
1314 					wrterror(pool, "recorded size %hu"
1315 					    " < %zu",
1316 					    info->bits[info->offset + chunknum],
1317 					    argsz);
1318 			} else {
1319 				if (sz < argsz)
1320 					wrterror(pool, "chunk size %zu < %zu",
1321 					    sz, argsz);
1322 			}
1323 		} else if (sz - mopts.malloc_guard < argsz) {
1324 			wrterror(pool, "recorded size %zu < %zu",
1325 			    sz - mopts.malloc_guard, argsz);
1326 		}
1327 	}
1328 	if (sz > MALLOC_MAXCHUNK) {
1329 		if (!MALLOC_MOVE_COND(sz)) {
1330 			if (r->p != p)
1331 				wrterror(pool, "bogus pointer %p", p);
1332 			if (mopts.chunk_canaries)
1333 				validate_canary(pool, p,
1334 				    sz - mopts.malloc_guard,
1335 				    PAGEROUND(sz - mopts.malloc_guard));
1336 		} else {
1337 			/* shifted towards the end */
1338 			if (p != MALLOC_MOVE(r->p, sz))
1339 				wrterror(pool, "bogus moved pointer %p", p);
1340 			p = r->p;
1341 		}
1342 		if (mopts.malloc_guard) {
1343 			if (sz < mopts.malloc_guard)
1344 				wrterror(pool, "guard size");
1345 			if (!mopts.malloc_freeunmap) {
1346 				if (mprotect((char *)p + PAGEROUND(sz) -
1347 				    mopts.malloc_guard, mopts.malloc_guard,
1348 				    PROT_READ | PROT_WRITE))
1349 					wrterror(pool, "mprotect");
1350 			}
1351 			STATS_SUB(pool->malloc_guarded, mopts.malloc_guard);
1352 		}
1353 		unmap(pool, p, PAGEROUND(sz), clear ? argsz : 0,
1354 		    mopts.malloc_junk);
1355 		delete(pool, r);
1356 	} else {
1357 		/* Validate the pointer and optionally check canaries */
1358 		struct chunk_info *info = (struct chunk_info *)r->size;
1359 		find_chunknum(pool, info, p, mopts.chunk_canaries);
1360 		if (!clear) {
1361 			void *tmp;
1362 			int i;
1363 
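			/*
			 * The free is delayed: the pointer is swapped with a
			 * random slot of delayed_chunks[], and the chunk
			 * actually released below is one stashed on some
			 * earlier free(). With junking enabled its contents
			 * are verified to still be SOME_FREEJUNK, catching
			 * writes made after free.
			 */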
1364 			if (mopts.malloc_freecheck) {
1365 				for (i = 0; i <= MALLOC_DELAYED_CHUNK_MASK; i++)
1366 					if (p == pool->delayed_chunks[i])
1367 						wrterror(pool,
1368 						    "double free %p", p);
1369 			}
1370 			if (mopts.malloc_junk && sz > 0)
1371 				memset(p, SOME_FREEJUNK, sz);
1372 			i = getrbyte(pool) & MALLOC_DELAYED_CHUNK_MASK;
1373 			tmp = p;
1374 			p = pool->delayed_chunks[i];
1375 			if (tmp == p)
1376 				wrterror(pool, "double free %p", tmp);
1377 			pool->delayed_chunks[i] = tmp;
1378 			if (mopts.malloc_junk)
1379 				validate_junk(pool, p);
1380 		} else if (argsz > 0)
1381 			memset(p, 0, argsz);
1382 		if (p != NULL) {
1383 			r = find(pool, p);
1384 			if (r == NULL)
1385 				wrterror(pool,
1386 				    "bogus pointer (double free?) %p", p);
1387 			free_bytes(pool, r, p);
1388 		}
1389 	}
1390 
1391 	if (argpool != pool) {
1392 		pool->active--;
1393 		pool->func = saved_function;
1394 		_MALLOC_UNLOCK(pool->mutex);
1395 		_MALLOC_LOCK(argpool->mutex);
1396 		argpool->active++;
1397 	}
1398 }
1399 
1400 void
1401 free(void *ptr)
1402 {
1403 	struct dir_info *d;
1404 	int saved_errno = errno;
1405 
1406 	/* This is legal. */
1407 	if (ptr == NULL)
1408 		return;
1409 
1410 	d = getpool();
1411 	if (d == NULL)
1412 		wrterror(d, "free() called before allocation");
1413 	_MALLOC_LOCK(d->mutex);
1414 	d->func = "free";
1415 	if (d->active++) {
1416 		malloc_recurse(d);
1417 		return;
1418 	}
1419 	ofree(d, ptr, 0, 0, 0);
1420 	d->active--;
1421 	_MALLOC_UNLOCK(d->mutex);
1422 	errno = saved_errno;
1423 }
1424 /*DEF_STRONG(free);*/
1425 
1426 static void
1427 freezero_p(void *ptr, size_t sz)
1428 {
1429 	explicit_bzero(ptr, sz);
1430 	free(ptr);
1431 }
1432 
1433 void
1434 freezero(void *ptr, size_t sz)
1435 {
1436 	struct dir_info *d;
1437 	int saved_errno = errno;
1438 
1439 	/* This is legal. */
1440 	if (ptr == NULL)
1441 		return;
1442 
1443 	if (!mopts.internal_funcs) {
1444 		freezero_p(ptr, sz);
1445 		return;
1446 	}
1447 
1448 	d = getpool();
1449 	if (d == NULL)
1450 		wrterror(d, "freezero() called before allocation");
1451 	_MALLOC_LOCK(d->mutex);
1452 	d->func = "freezero";
1453 	if (d->active++) {
1454 		malloc_recurse(d);
1455 		return;
1456 	}
1457 	ofree(d, ptr, 1, 1, sz);
1458 	d->active--;
1459 	_MALLOC_UNLOCK(d->mutex);
1460 	errno = saved_errno;
1461 }
1462 DEF_WEAK(freezero);
1463 
1464 static void *
1465 orealloc(struct dir_info *argpool, void *p, size_t newsz, void *f)
1466 {
1467 	struct dir_info *pool;
1468 	struct region_info *r;
1469 	struct chunk_info *info;
1470 	size_t oldsz, goldsz, gnewsz;
1471 	void *q, *ret;
1472 	char *saved_function;
1473 	int i;
1474 	uint32_t chunknum;
1475 
1476 	pool = argpool;
1477 
1478 	if (p == NULL)
1479 		return omalloc(pool, newsz, 0, f);
1480 
1481 	r = find(pool, p);
1482 	if (r == NULL) {
1483 		if (mopts.malloc_mt) {
1484 			for (i = 0; i < _MALLOC_MUTEXES; i++) {
1485 				if (i == argpool->mutex)
1486 					continue;
1487 				pool->active--;
1488 				_MALLOC_UNLOCK(pool->mutex);
1489 				pool = mopts.malloc_pool[i];
1490 				_MALLOC_LOCK(pool->mutex);
1491 				pool->active++;
1492 				r = find(pool, p);
1493 				if (r != NULL) {
1494 					saved_function = pool->func;
1495 					pool->func = argpool->func;
1496 					break;
1497 				}
1498 			}
1499 		}
1500 		if (r == NULL)
1501 			wrterror(argpool, "bogus pointer (double free?) %p", p);
1502 	}
1503 	if (newsz >= SIZE_MAX - mopts.malloc_guard - MALLOC_PAGESIZE) {
1504 		errno = ENOMEM;
1505 		ret = NULL;
1506 		goto done;
1507 	}
1508 
1509 	REALSIZE(oldsz, r);
1510 	if (mopts.chunk_canaries && oldsz <= MALLOC_MAXCHUNK) {
1511 		info = (struct chunk_info *)r->size;
1512 		chunknum = find_chunknum(pool, info, p, 0);
1513 	}
1514 
1515 	goldsz = oldsz;
1516 	if (oldsz > MALLOC_MAXCHUNK) {
1517 		if (oldsz < mopts.malloc_guard)
1518 			wrterror(pool, "guard size");
1519 		oldsz -= mopts.malloc_guard;
1520 	}
1521 
1522 	gnewsz = newsz;
1523 	if (gnewsz > MALLOC_MAXCHUNK)
1524 		gnewsz += mopts.malloc_guard;
1525 
1526 	if (newsz > MALLOC_MAXCHUNK && oldsz > MALLOC_MAXCHUNK &&
1527 	    !mopts.malloc_realloc) {
1528 		/* First case: from an n-page-sized allocation to an
1529 		   m-page-sized allocation, m > n */
1530 		size_t roldsz = PAGEROUND(goldsz);
1531 		size_t rnewsz = PAGEROUND(gnewsz);
1532 
1533 		if (rnewsz > roldsz) {
1534 			/* try to extend existing region */
1535 			if (!mopts.malloc_guard) {
1536 				void *hint = (char *)r->p + roldsz;
1537 				size_t needed = rnewsz - roldsz;
1538 
1539 				STATS_INC(pool->cheap_realloc_tries);
1540 				q = map(pool, hint, needed, 0);
1541 				if (q == hint)
1542 					goto gotit;
1543 				zapcacheregion(pool, hint, needed);
1544 				q = MQUERY(hint, needed);
1545 				if (q == hint)
1546 					q = MMAPA(hint, needed);
1547 				else
1548 					q = MAP_FAILED;
1549 				if (q == hint) {
1550 gotit:
1551 					STATS_ADD(pool->malloc_used, needed);
1552 					if (mopts.malloc_junk == 2)
1553 						memset(q, SOME_JUNK, needed);
1554 					r->size = gnewsz;
1555 					if (r->p != p) {
1556 						/* old pointer is moved */
1557 						memmove(r->p, p, oldsz);
1558 						p = r->p;
1559 					}
1560 					if (mopts.chunk_canaries)
1561 						fill_canary(p, newsz,
1562 						    PAGEROUND(newsz));
1563 					STATS_SETF(r, f);
1564 					STATS_INC(pool->cheap_reallocs);
1565 					ret = p;
1566 					goto done;
1567 				} else if (q != MAP_FAILED) {
1568 					if (munmap(q, needed))
1569 						wrterror(pool, "munmap %p", q);
1570 				}
1571 			}
1572 		} else if (rnewsz < roldsz) {
1573 			/* shrink number of pages */
1574 			if (mopts.malloc_guard) {
1575 				if (mprotect((char *)r->p + roldsz -
1576 				    mopts.malloc_guard, mopts.malloc_guard,
1577 				    PROT_READ | PROT_WRITE))
1578 					wrterror(pool, "mprotect");
1579 				if (mprotect((char *)r->p + rnewsz -
1580 				    mopts.malloc_guard, mopts.malloc_guard,
1581 				    PROT_NONE))
1582 					wrterror(pool, "mprotect");
1583 			}
1584 			unmap(pool, (char *)r->p + rnewsz, roldsz - rnewsz, 0,
1585 			    mopts.malloc_junk);
1586 			r->size = gnewsz;
1587 			if (MALLOC_MOVE_COND(gnewsz)) {
1588 				void *pp = MALLOC_MOVE(r->p, gnewsz);
1589 				memmove(pp, p, newsz);
1590 				p = pp;
1591 			} else if (mopts.chunk_canaries)
1592 				fill_canary(p, newsz, PAGEROUND(newsz));
1593 			STATS_SETF(r, f);
1594 			ret = p;
1595 			goto done;
1596 		} else {
1597 			/* number of pages remains the same */
1598 			void *pp = r->p;
1599 
1600 			r->size = gnewsz;
1601 			if (MALLOC_MOVE_COND(gnewsz))
1602 				pp = MALLOC_MOVE(r->p, gnewsz);
1603 			if (p != pp) {
1604 				memmove(pp, p, oldsz < newsz ? oldsz : newsz);
1605 				p = pp;
1606 			}
1607 			if (p == r->p) {
1608 				if (newsz > oldsz && mopts.malloc_junk == 2)
1609 					memset((char *)p + newsz, SOME_JUNK,
1610 					    rnewsz - mopts.malloc_guard -
1611 					    newsz);
1612 				if (mopts.chunk_canaries)
1613 					fill_canary(p, newsz, PAGEROUND(newsz));
1614 			}
1615 			STATS_SETF(r, f);
1616 			ret = p;
1617 			goto done;
1618 		}
1619 	}
1620 	if (oldsz <= MALLOC_MAXCHUNK && oldsz > 0 &&
1621 	    newsz <= MALLOC_MAXCHUNK && newsz > 0 &&
1622 	    1 << find_chunksize(newsz) == oldsz && !mopts.malloc_realloc) {
1623 		/* do not reallocate if the new size fits well in the existing chunk */
1624 		if (mopts.malloc_junk == 2)
1625 			memset((char *)p + newsz, SOME_JUNK, oldsz - newsz);
1626 		if (mopts.chunk_canaries) {
1627 			info->bits[info->offset + chunknum] = newsz;
1628 			fill_canary(p, newsz, info->size);
1629 		}
1630 		STATS_SETF(r, f);
1631 		ret = p;
1632 	} else if (newsz != oldsz || mopts.malloc_realloc) {
1633 		/* create new allocation */
1634 		q = omalloc(pool, newsz, 0, f);
1635 		if (q == NULL) {
1636 			ret = NULL;
1637 			goto done;
1638 		}
1639 		if (newsz != 0 && oldsz != 0)
1640 			memcpy(q, p, oldsz < newsz ? oldsz : newsz);
1641 		ofree(pool, p, 0, 0, 0);
1642 		ret = q;
1643 	} else {
1644 		/* oldsz == newsz */
1645 		if (newsz != 0)
1646 			wrterror(pool, "realloc internal inconsistency");
1647 		STATS_SETF(r, f);
1648 		ret = p;
1649 	}
1650 done:
1651 	if (argpool != pool) {
1652 		pool->active--;
1653 		pool->func = saved_function;
1654 		_MALLOC_UNLOCK(pool->mutex);
1655 		_MALLOC_LOCK(argpool->mutex);
1656 		argpool->active++;
1657 	}
1658 	return ret;
1659 }
1660 
1661 void *
1662 realloc(void *ptr, size_t size)
1663 {
1664 	struct dir_info *d;
1665 	void *r;
1666 	int saved_errno = errno;
1667 
1668 	d = getpool();
1669 	if (d == NULL) {
1670 		_malloc_init(0);
1671 		d = getpool();
1672 	}
1673 	_MALLOC_LOCK(d->mutex);
1674 	d->func = "realloc";
1675 	if (d->active++) {
1676 		malloc_recurse(d);
1677 		return NULL;
1678 	}
1679 	r = orealloc(d, ptr, size, CALLER);
1680 
1681 	d->active--;
1682 	_MALLOC_UNLOCK(d->mutex);
1683 	if (r == NULL && mopts.malloc_xmalloc)
1684 		wrterror(d, "out of memory");
1685 	if (r != NULL)
1686 		errno = saved_errno;
1687 	return r;
1688 }
1689 /*DEF_STRONG(realloc);*/
1690 
1691 
1692 /*
1693  * This is sqrt(SIZE_MAX+1), as s1*s2 <= SIZE_MAX
1694  * if both s1 < MUL_NO_OVERFLOW and s2 < MUL_NO_OVERFLOW
1695  */
1696 #define MUL_NO_OVERFLOW	(1UL << (sizeof(size_t) * 4))
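/*
 * E.g. on 64-bit MUL_NO_OVERFLOW is 2^32: if either factor is at least
 * 2^32 the (slow) division test SIZE_MAX / nmemb < size is performed;
 * if both are below it the product cannot wrap, and the division is
 * skipped entirely.
 */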
1697 
1698 void *
1699 calloc(size_t nmemb, size_t size)
1700 {
1701 	struct dir_info *d;
1702 	void *r;
1703 	int saved_errno = errno;
1704 
1705 	d = getpool();
1706 	if (d == NULL) {
1707 		_malloc_init(0);
1708 		d = getpool();
1709 	}
1710 	_MALLOC_LOCK(d->mutex);
1711 	d->func = "calloc";
1712 	if ((nmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
1713 	    nmemb > 0 && SIZE_MAX / nmemb < size) {
1714 		_MALLOC_UNLOCK(d->mutex);
1715 		if (mopts.malloc_xmalloc)
1716 			wrterror(d, "out of memory");
1717 		errno = ENOMEM;
1718 		return NULL;
1719 	}
1720 
1721 	if (d->active++) {
1722 		malloc_recurse(d);
1723 		return NULL;
1724 	}
1725 
1726 	size *= nmemb;
1727 	r = omalloc(d, size, 1, CALLER);
1728 
1729 	d->active--;
1730 	_MALLOC_UNLOCK(d->mutex);
1731 	if (r == NULL && mopts.malloc_xmalloc)
1732 		wrterror(d, "out of memory");
1733 	if (r != NULL)
1734 		errno = saved_errno;
1735 	return r;
1736 }
1737 /*DEF_STRONG(calloc);*/
1738 
1739 static void *
1740 orecallocarray(struct dir_info *argpool, void *p, size_t oldsize,
1741     size_t newsize, void *f)
1742 {
1743 	struct dir_info *pool;
1744 	struct region_info *r;
1745 	void *newptr;
1746 	size_t sz;
1747 	int i;
1748 
1749 	pool = argpool;
1750 
1751 	if (p == NULL)
1752 		return omalloc(pool, newsize, 1, f);
1753 
1754 	if (oldsize == newsize)
1755 		return p;
1756 
1757 	r = find(pool, p);
1758 	if (r == NULL) {
1759 		if (mopts.malloc_mt) {
1760 			for (i = 0; i < _MALLOC_MUTEXES; i++) {
1761 				if (i == argpool->mutex)
1762 					continue;
1763 				pool->active--;
1764 				_MALLOC_UNLOCK(pool->mutex);
1765 				pool = mopts.malloc_pool[i];
1766 				_MALLOC_LOCK(pool->mutex);
1767 				pool->active++;
1768 				r = find(pool, p);
1769 				if (r != NULL)
1770 					break;
1771 			}
1772 		}
1773 		if (r == NULL)
1774 			wrterror(pool, "bogus pointer (double free?) %p", p);
1775 	}
1776 
1777 	REALSIZE(sz, r);
1778 	if (sz <= MALLOC_MAXCHUNK) {
1779 		if (mopts.chunk_canaries && sz > 0) {
1780 			struct chunk_info *info = (struct chunk_info *)r->size;
1781 			uint32_t chunknum = find_chunknum(pool, info, p, 0);
1782 
1783 			if (info->bits[info->offset + chunknum] != oldsize)
1784 				wrterror(pool, "recorded old size %hu != %zu",
1785 				    info->bits[info->offset + chunknum],
1786 				    oldsize);
1787 		}
1788 	} else if (oldsize != sz - mopts.malloc_guard)
1789 		wrterror(pool, "recorded old size %zu != %zu",
1790 		    sz - mopts.malloc_guard, oldsize);
1791 
1792 	newptr = omalloc(pool, newsize, 0, f);
1793 	if (newptr == NULL)
1794 		goto done;
1795 
1796 	if (newsize > oldsize) {
1797 		memcpy(newptr, p, oldsize);
1798 		memset((char *)newptr + oldsize, 0, newsize - oldsize);
1799 	} else
1800 		memcpy(newptr, p, newsize);
1801 
1802 	ofree(pool, p, 1, 0, oldsize);
1803 
1804 done:
1805 	if (argpool != pool) {
1806 		pool->active--;
1807 		_MALLOC_UNLOCK(pool->mutex);
1808 		_MALLOC_LOCK(argpool->mutex);
1809 		argpool->active++;
1810 	}
1811 
1812 	return newptr;
1813 }
1814 
1815 static void *
1816 recallocarray_p(void *ptr, size_t oldnmemb, size_t newnmemb, size_t size)
1817 {
1818 	size_t oldsize, newsize;
1819 	void *newptr;
1820 
1821 	if (ptr == NULL)
1822 		return calloc(newnmemb, size);
1823 
1824 	if ((newnmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
1825 	    newnmemb > 0 && SIZE_MAX / newnmemb < size) {
1826 		errno = ENOMEM;
1827 		return NULL;
1828 	}
1829 	newsize = newnmemb * size;
1830 
1831 	if ((oldnmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
1832 	    oldnmemb > 0 && SIZE_MAX / oldnmemb < size) {
1833 		errno = EINVAL;
1834 		return NULL;
1835 	}
1836 	oldsize = oldnmemb * size;
1837 
1838 	/*
1839 	 * Don't bother too much if we're shrinking just a bit;
1840 	 * we do not shrink for a series of small steps, oh well.
1841 	 */
1842 	if (newsize <= oldsize) {
1843 		size_t d = oldsize - newsize;
1844 
1845 		if (d < oldsize / 2 && d < MALLOC_PAGESIZE) {
1846 			memset((char *)ptr + newsize, 0, d);
1847 			return ptr;
1848 		}
1849 	}
1850 
1851 	newptr = malloc(newsize);
1852 	if (newptr == NULL)
1853 		return NULL;
1854 
1855 	if (newsize > oldsize) {
1856 		memcpy(newptr, ptr, oldsize);
1857 		memset((char *)newptr + oldsize, 0, newsize - oldsize);
1858 	} else
1859 		memcpy(newptr, ptr, newsize);
1860 
1861 	explicit_bzero(ptr, oldsize);
1862 	free(ptr);
1863 
1864 	return newptr;
1865 }
1866 
1867 void *
1868 recallocarray(void *ptr, size_t oldnmemb, size_t newnmemb, size_t size)
1869 {
1870 	struct dir_info *d;
1871 	size_t oldsize = 0, newsize;
1872 	void *r;
1873 	int saved_errno = errno;
1874 
1875 	if (!mopts.internal_funcs)
1876 		return recallocarray_p(ptr, oldnmemb, newnmemb, size);
1877 
1878 	d = getpool();
1879 	if (d == NULL) {
1880 		_malloc_init(0);
1881 		d = getpool();
1882 	}
1883 
1884 	_MALLOC_LOCK(d->mutex);
1885 	d->func = "recallocarray";
1886 
1887 	if ((newnmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
1888 	    newnmemb > 0 && SIZE_MAX / newnmemb < size) {
1889 		_MALLOC_UNLOCK(d->mutex);
1890 		if (mopts.malloc_xmalloc)
1891 			wrterror(d, "out of memory");
1892 		errno = ENOMEM;
1893 		return NULL;
1894 	}
1895 	newsize = newnmemb * size;
1896 
1897 	if (ptr != NULL) {
1898 		if ((oldnmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
1899 		    oldnmemb > 0 && SIZE_MAX / oldnmemb < size) {
1900 			_MALLOC_UNLOCK(d->mutex);
1901 			errno = EINVAL;
1902 			return NULL;
1903 		}
1904 		oldsize = oldnmemb * size;
1905 	}
1906 
1907 	if (d->active++) {
1908 		malloc_recurse(d);
1909 		return NULL;
1910 	}
1911 
1912 	r = orecallocarray(d, ptr, oldsize, newsize, CALLER);
1913 
1914 	d->active--;
1915 	_MALLOC_UNLOCK(d->mutex);
1916 	if (r == NULL && mopts.malloc_xmalloc)
1917 		wrterror(d, "out of memory");
1918 	if (r != NULL)
1919 		errno = saved_errno;
1920 	return r;
1921 }
1922 DEF_WEAK(recallocarray);
1923 
1924 
1925 static void *
1926 mapalign(struct dir_info *d, size_t alignment, size_t sz, int zero_fill)
1927 {
1928 	char *p, *q;
1929 
1930 	if (alignment < MALLOC_PAGESIZE || ((alignment - 1) & alignment) != 0)
1931 		wrterror(d, "mapalign bad alignment");
1932 	if (sz != PAGEROUND(sz))
1933 		wrterror(d, "mapalign round");
1934 
1935 	/* Allocate sz + alignment bytes of memory, which must include a
1936 	 * subrange of sz bytes that is properly aligned.  Unmap the
1937 	 * other bytes, and then return that subrange.
1938 	 */
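	/*
	 * A worked example (a sketch, assuming 4 KB pages): for sz = 4096
	 * and alignment = 8192, 12288 bytes are mapped; q is p rounded up
	 * to the next 8192 boundary, and [p, q) as well as [q + sz,
	 * p + 12288) are unmapped, leaving exactly sz aligned bytes.
	 */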
1939 
1940 	/* We need sz + alignment to fit into a size_t. */
1941 	if (alignment > SIZE_MAX - sz)
1942 		return MAP_FAILED;
1943 
1944 	p = map(d, NULL, sz + alignment, zero_fill);
1945 	if (p == MAP_FAILED)
1946 		return MAP_FAILED;
1947 	q = (char *)(((uintptr_t)p + alignment - 1) & ~(alignment - 1));
1948 	if (q != p) {
1949 		if (munmap(p, q - p))
1950 			wrterror(d, "munmap %p", p);
1951 	}
1952 	if (munmap(q + sz, alignment - (q - p)))
1953 		wrterror(d, "munmap %p", q + sz);
1954 	STATS_SUB(d->malloc_used, alignment);
1955 
1956 	return q;
1957 }
1958 
1959 static void *
1960 omemalign(struct dir_info *pool, size_t alignment, size_t sz, int zero_fill,
1961     void *f)
1962 {
1963 	size_t psz;
1964 	void *p;
1965 
1966 	/* If between half a page and a page, avoid MALLOC_MOVE. */
1967 	if (sz > MALLOC_MAXCHUNK && sz < MALLOC_PAGESIZE)
1968 		sz = MALLOC_PAGESIZE;
1969 	if (alignment <= MALLOC_PAGESIZE) {
1970 		/*
1971 		 * max(sz, alignment) is enough to ensure the requested
1972 		 * alignment, since the allocator always allocates
1973 		 * power-of-two blocks.
1974 		 */
1975 		if (sz < alignment)
1976 			sz = alignment;
1977 		return omalloc(pool, sz, zero_fill, f);
1978 	}
1979 
1980 	if (sz >= SIZE_MAX - mopts.malloc_guard - MALLOC_PAGESIZE) {
1981 		errno = ENOMEM;
1982 		return NULL;
1983 	}
1984 
1985 	sz += mopts.malloc_guard;
1986 	psz = PAGEROUND(sz);
1987 
1988 	p = mapalign(pool, alignment, psz, zero_fill);
1989 	if (p == MAP_FAILED) {
1990 		errno = ENOMEM;
1991 		return NULL;
1992 	}
1993 
1994 	if (insert(pool, p, sz, f)) {
1995 		unmap(pool, p, psz, 0, 0);
1996 		errno = ENOMEM;
1997 		return NULL;
1998 	}
1999 
2000 	if (mopts.malloc_guard) {
2001 		if (mprotect((char *)p + psz - mopts.malloc_guard,
2002 		    mopts.malloc_guard, PROT_NONE))
2003 			wrterror(pool, "mprotect");
2004 		STATS_ADD(pool->malloc_guarded, mopts.malloc_guard);
2005 	}
2006 
2007 	if (mopts.malloc_junk == 2) {
2008 		if (zero_fill)
2009 			memset((char *)p + sz - mopts.malloc_guard,
2010 			    SOME_JUNK, psz - sz);
2011 		else
2012 			memset(p, SOME_JUNK, psz - mopts.malloc_guard);
2013 	} else if (mopts.chunk_canaries)
2014 		fill_canary(p, sz - mopts.malloc_guard,
2015 		    psz - mopts.malloc_guard);
2016 
2017 	return p;
2018 }
2019 
2020 int
2021 posix_memalign(void **memptr, size_t alignment, size_t size)
2022 {
2023 	struct dir_info *d;
2024 	int res, saved_errno = errno;
2025 	void *r;
2026 
2027 	/* Make sure that alignment is a large enough power of 2. */
2028 	if (((alignment - 1) & alignment) != 0 || alignment < sizeof(void *))
2029 		return EINVAL;
2030 
2031 	d = getpool();
2032 	if (d == NULL) {
2033 		_malloc_init(0);
2034 		d = getpool();
2035 	}
2036 	_MALLOC_LOCK(d->mutex);
2037 	d->func = "posix_memalign";
2038 	if (d->active++) {
2039 		malloc_recurse(d);
2040 		goto err;
2041 	}
2042 	r = omemalign(d, alignment, size, 0, CALLER);
2043 	d->active--;
2044 	_MALLOC_UNLOCK(d->mutex);
2045 	if (r == NULL) {
2046 		if (mopts.malloc_xmalloc)
2047 			wrterror(d, "out of memory");
2048 		goto err;
2049 	}
2050 	errno = saved_errno;
2051 	*memptr = r;
2052 	return 0;
2053 
2054 err:
2055 	res = errno;
2056 	errno = saved_errno;
2057 	return res;
2058 }
2059 /*DEF_STRONG(posix_memalign);*/
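/*
 * Usage sketch (illustrative only, not part of this file): request one
 * page of page-aligned memory and release it with free():
 *
 *	void *p;
 *	if (posix_memalign(&p, 4096, 4096) != 0)
 *		err(1, "posix_memalign");
 *	...
 *	free(p);
 */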
2060 
2061 #ifdef MALLOC_STATS
2062 
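/*
 * Leak tracking: allocations still live at dump time are summarized
 * per caller address f in a red-black tree of leaknodes, recording
 * the aggregate size and the number of allocations per call site.
 */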
2063 struct malloc_leak {
2064 	void *f;
2065 	size_t total_size;
2066 	int count;
2067 };
2068 
2069 struct leaknode {
2070 	RBT_ENTRY(leaknode) entry;
2071 	struct malloc_leak d;
2072 };
2073 
2074 static inline int
2075 leakcmp(const struct leaknode *e1, const struct leaknode *e2)
2076 {
2077 	return e1->d.f < e2->d.f ? -1 : e1->d.f > e2->d.f;
2078 }
2079 
2080 static RBT_HEAD(leaktree, leaknode) leakhead;
2081 RBT_PROTOTYPE(leaktree, leaknode, entry, leakcmp);
2082 RBT_GENERATE(leaktree, leaknode, entry, leakcmp);
2083 
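/*
 * Record cnt live allocations of sz bytes for call site f.  Nodes are
 * carved out of pages obtained directly with MMAP() so that the leak
 * tracker never recurses into malloc itself; if such a page cannot be
 * mapped, leak records are silently dropped.
 */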
2084 static void
2085 putleakinfo(void *f, size_t sz, int cnt)
2086 {
2087 	struct leaknode key, *p;
2088 	static struct leaknode *page;
2089 	static int used;
2090 
2091 	if (cnt == 0 || page == MAP_FAILED)
2092 		return;
2093 
2094 	key.d.f = f;
2095 	p = RBT_FIND(leaktree, &leakhead, &key);
2096 	if (p == NULL) {
2097 		if (page == NULL ||
2098 		    used >= MALLOC_PAGESIZE / sizeof(struct leaknode)) {
2099 			page = MMAP(MALLOC_PAGESIZE);
2100 			if (page == MAP_FAILED)
2101 				return;
2102 			used = 0;
2103 		}
2104 		p = &page[used++];
2105 		p->d.f = f;
2106 		p->d.total_size = sz * cnt;
2107 		p->d.count = cnt;
2108 		RBT_INSERT(leaktree, &leakhead, p);
2109 	} else {
2110 		p->d.total_size += sz * cnt;
2111 		p->d.count += cnt;
2112 	}
2113 }
2114 
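/* flat one-page copy of the leak summary, filled in by dump_leaks() */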
2115 static struct malloc_leak *malloc_leaks;
2116 
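/*
 * Write the leak report to fd and mirror (at most one page of) the
 * per-call-site summaries into the malloc_leaks array.
 */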
2117 static void
2118 dump_leaks(int fd)
2119 {
2120 	struct leaknode *p;
2121 	int i = 0;
2122 
2123 	dprintf(fd, "Leak report\n");
2124 	dprintf(fd, "                 f     sum      #    avg\n");
2125 	/* XXX only one page of summary */
2126 	if (malloc_leaks == NULL)
2127 		malloc_leaks = MMAP(MALLOC_PAGESIZE);
2128 	if (malloc_leaks != MAP_FAILED)
2129 		memset(malloc_leaks, 0, MALLOC_PAGESIZE);
2130 	RBT_FOREACH(p, leaktree, &leakhead) {
2131 		dprintf(fd, "%18p %7zu %6d %6zu\n", p->d.f,
2132 		    p->d.total_size, p->d.count, p->d.total_size / p->d.count);
2133 		if (malloc_leaks == MAP_FAILED ||
2134 		    i >= MALLOC_PAGESIZE / sizeof(struct malloc_leak))
2135 			continue;
2136 		malloc_leaks[i].f = p->d.f;
2137 		malloc_leaks[i].total_size = p->d.total_size;
2138 		malloc_leaks[i].count = p->d.count;
2139 		i++;
2140 	}
2141 }
2142 
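/*
 * Dump a list of chunk_info structs.  When called for an in-use page
 * (fromfreelist == 0), also feed the leak tracker: one allocation is
 * attributed to the recorded caller f and the rest to an unknown
 * (NULL) call site, since only a single caller is stored per page.
 */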
2143 static void
2144 dump_chunk(int fd, struct chunk_info *p, void *f, int fromfreelist)
2145 {
2146 	while (p != NULL) {
2147 		dprintf(fd, "chunk %18p %18p %4d %d/%d\n",
2148 		    p->page, ((p->bits[0] & 1) ? NULL : f),
2149 		    p->size, p->free, p->total);
2150 		if (!fromfreelist) {
2151 			if (p->bits[0] & 1)
2152 				putleakinfo(NULL, p->size, p->total - p->free);
2153 			else {
2154 				putleakinfo(f, p->size, 1);
2155 				putleakinfo(NULL, p->size,
2156 				    p->total - p->free - 1);
2157 			}
2158 			break;
2159 		}
2160 		p = LIST_NEXT(p, entries);
2161 		if (p != NULL)
2162 			dprintf(fd, "        ");
2163 	}
2164 }
2165 
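/*
 * Dump the cached chunk_info structs and, for every chunk size and
 * free list, the partially used chunk pages they reference.
 */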
2166 static void
2167 dump_free_chunk_info(int fd, struct dir_info *d)
2168 {
2169 	int i, j, count;
2170 	struct chunk_info *p;
2171 
2172 	dprintf(fd, "Free chunk structs:\n");
2173 	for (i = 0; i <= MALLOC_MAXSHIFT; i++) {
2174 		count = 0;
2175 		LIST_FOREACH(p, &d->chunk_info_list[i], entries)
2176 			count++;
2177 		for (j = 0; j < MALLOC_CHUNK_LISTS; j++) {
2178 			p = LIST_FIRST(&d->chunk_dir[i][j]);
2179 			if (p == NULL && count == 0)
2180 				continue;
2181 			dprintf(fd, "%2d) %3d ", i, count);
2182 			if (p != NULL)
2183 				dump_chunk(fd, p, NULL, 1);
2184 			else
2185 				dprintf(fd, "\n");
2186 		}
2187 	}
2189 }
2190 
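/* Dump the cache of free page runs kept for reuse. */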
2191 static void
2192 dump_free_page_info(int fd, struct dir_info *d)
2193 {
2194 	int i;
2195 
2196 	dprintf(fd, "Free pages cached: %zu\n", d->free_regions_size);
2197 	for (i = 0; i < mopts.malloc_cache; i++) {
2198 		if (d->free_regions[i].p != NULL) {
2199 			dprintf(fd, "%2d) ", i);
2200 			dprintf(fd, "free at %p: %zu\n",
2201 			    d->free_regions[i].p, d->free_regions[i].size);
2202 		}
2203 	}
2204 }
2205 
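/*
 * Dump the state of a single pool: hash table statistics, the free
 * chunk and page caches, and every live region, feeding sizes into
 * the leak tracker along the way.
 */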
2206 static void
2207 malloc_dump1(int fd, int poolno, struct dir_info *d)
2208 {
2209 	size_t i, realsize;
2210 
2211 	dprintf(fd, "Malloc dir of %s pool %d at %p\n", __progname, poolno, d);
2212 	if (d == NULL)
2213 		return;
2214 	dprintf(fd, "Region slots free %zu/%zu\n",
2215 		d->regions_free, d->regions_total);
2216 	dprintf(fd, "Finds %zu/%zu\n", d->finds,
2217 	    d->find_collisions);
2218 	dprintf(fd, "Inserts %zu/%zu\n", d->inserts,
2219 	    d->insert_collisions);
2220 	dprintf(fd, "Deletes %zu/%zu\n", d->deletes,
2221 	    d->delete_moves);
2222 	dprintf(fd, "Cheap reallocs %zu/%zu\n",
2223 	    d->cheap_reallocs, d->cheap_realloc_tries);
2224 	dprintf(fd, "In use %zu\n", d->malloc_used);
2225 	dprintf(fd, "Guarded %zu\n", d->malloc_guarded);
2226 	dump_free_chunk_info(fd, d);
2227 	dump_free_page_info(fd, d);
2228 	dprintf(fd,
2229 	    "slot)  hash d  type               page                  f size [free/n]\n");
2230 	for (i = 0; i < d->regions_total; i++) {
2231 		if (d->r[i].p != NULL) {
2232 			size_t h = hash(d->r[i].p) &
2233 			    (d->regions_total - 1);
2234 			dprintf(fd, "%4zx) #%4zx %zd ",
2235 			    i, h, h - i);
2236 			REALSIZE(realsize, &d->r[i]);
2237 			if (realsize > MALLOC_MAXCHUNK) {
2238 				putleakinfo(d->r[i].f, realsize, 1);
2239 				dprintf(fd,
2240 				    "pages %18p %18p %zu\n", d->r[i].p,
2241 				    d->r[i].f, realsize);
2242 			} else
2243 				dump_chunk(fd,
2244 				    (struct chunk_info *)d->r[i].size,
2245 				    d->r[i].f, 0);
2246 		}
2247 	}
2248 	dump_leaks(fd);
2249 	dprintf(fd, "\n");
2250 }
2251 
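/*
 * Dump a pool to fd.  Delayed-free chunks are flushed first so they
 * show up as free rather than as leaks in the report.
 */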
2252 void
2253 malloc_dump(int fd, int poolno, struct dir_info *pool)
2254 {
2255 	int i;
2256 	void *p;
2257 	struct region_info *r;
2258 	int saved_errno = errno;
2259 
2260 	if (pool == NULL)
2261 		return;
2262 	for (i = 0; i < MALLOC_DELAYED_CHUNK_MASK + 1; i++) {
2263 		p = pool->delayed_chunks[i];
2264 		if (p == NULL)
2265 			continue;
2266 		r = find(pool, p);
2267 		if (r == NULL)
2268 			wrterror(pool, "bogus pointer in malloc_dump %p", p);
2269 		free_bytes(pool, r, p);
2270 		pool->delayed_chunks[i] = NULL;
2271 	}
2272 	/* XXX leak when run multiple times */
2273 	RBT_INIT(leaktree, &leakhead);
2274 	malloc_dump1(fd, poolno, pool);
2275 	errno = saved_errno;
2276 }
2277 DEF_WEAK(malloc_dump);
2278 
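/* Dump all pools. */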
2279 void
2280 malloc_gdump(int fd)
2281 {
2282 	int i;
2283 	int saved_errno = errno;
2284 
2285 	for (i = 0; i < _MALLOC_MUTEXES; i++)
2286 		malloc_dump(fd, i, mopts.malloc_pool[i]);
2287 
2288 	errno = saved_errno;
2289 }
2290 DEF_WEAK(malloc_gdump);
2291 
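/*
 * Exit handler (installed via atexit() when stats dumping is enabled):
 * append a stats dump to "malloc.out" in the current directory.  Note
 * that the file is opened without O_CREAT, so it must already exist
 * for the dump to be written.
 */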
2292 static void
2293 malloc_exit(void)
2294 {
2295 	int save_errno = errno, fd, i;
2296 
2297 	fd = open("malloc.out", O_RDWR|O_APPEND);
2298 	if (fd != -1) {
2299 		dprintf(fd, "******** Start dump %s *******\n", __progname);
2300 		dprintf(fd,
2301 		    "MT=%d I=%d F=%d U=%d J=%d R=%d X=%d C=%d cache=%u G=%zu\n",
2302 		    mopts.malloc_mt, mopts.internal_funcs,
2303 		    mopts.malloc_freecheck,
2304 		    mopts.malloc_freeunmap, mopts.malloc_junk,
2305 		    mopts.malloc_realloc, mopts.malloc_xmalloc,
2306 		    mopts.chunk_canaries, mopts.malloc_cache,
2307 		    mopts.malloc_guard);
2308 
2309 		for (i = 0; i < _MALLOC_MUTEXES; i++)
2310 			malloc_dump(fd, i, mopts.malloc_pool[i]);
2311 		dprintf(fd, "******** End dump %s *******\n", __progname);
2312 		close(fd);
2313 	} else
2314 		dprintf(STDERR_FILENO,
2315 		    "malloc() warning: Couldn't dump stats\n");
2316 	errno = save_errno;
2317 }
2318 
2319 #endif /* MALLOC_STATS */
2320