xref: /openbsd-src/lib/libc/stdlib/malloc.c (revision ddce81d18ead1cead306cecb446175faf94d13e9)
1 /*	$OpenBSD: malloc.c,v 1.255 2018/11/27 17:29:55 otto Exp $	*/
2 /*
3  * Copyright (c) 2008, 2010, 2011, 2016 Otto Moerbeek <otto@drijf.net>
4  * Copyright (c) 2012 Matthew Dempsky <matthew@openbsd.org>
5  * Copyright (c) 2008 Damien Miller <djm@openbsd.org>
6  * Copyright (c) 2000 Poul-Henning Kamp <phk@FreeBSD.org>
7  *
8  * Permission to use, copy, modify, and distribute this software for any
9  * purpose with or without fee is hereby granted, provided that the above
10  * copyright notice and this permission notice appear in all copies.
11  *
12  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19  */
20 
21 /*
22  * If we meet some day, and you think this stuff is worth it, you
23  * can buy me a beer in return. Poul-Henning Kamp
24  */
25 
26 /* #define MALLOC_STATS */
27 
28 #include <sys/types.h>
29 #include <sys/queue.h>
30 #include <sys/mman.h>
31 #include <sys/sysctl.h>
32 #include <uvm/uvmexp.h>
33 #include <errno.h>
34 #include <stdarg.h>
35 #include <stdint.h>
36 #include <stdio.h>
37 #include <stdlib.h>
38 #include <string.h>
39 #include <unistd.h>
40 
41 #ifdef MALLOC_STATS
42 #include <sys/tree.h>
43 #include <fcntl.h>
44 #endif
45 
46 #include "thread_private.h"
47 #include <tib.h>
48 
49 #define MALLOC_PAGESHIFT	_MAX_PAGE_SHIFT
50 
51 #define MALLOC_MINSHIFT		4
52 #define MALLOC_MAXSHIFT		(MALLOC_PAGESHIFT - 1)
53 #define MALLOC_PAGESIZE		(1UL << MALLOC_PAGESHIFT)
54 #define MALLOC_MINSIZE		(1UL << MALLOC_MINSHIFT)
55 #define MALLOC_PAGEMASK		(MALLOC_PAGESIZE - 1)
56 #define MASK_POINTER(p)		((void *)(((uintptr_t)(p)) & ~MALLOC_PAGEMASK))
57 
58 #define MALLOC_MAXCHUNK		(1 << MALLOC_MAXSHIFT)
59 #define MALLOC_MAXCACHE		256
60 #define MALLOC_DELAYED_CHUNK_MASK	15
61 #ifdef MALLOC_STATS
62 #define MALLOC_INITIAL_REGIONS	512
63 #else
64 #define MALLOC_INITIAL_REGIONS	(MALLOC_PAGESIZE / sizeof(struct region_info))
65 #endif
66 #define MALLOC_DEFAULT_CACHE	64
67 #define MALLOC_CHUNK_LISTS	4
68 #define CHUNK_CHECK_LENGTH	32
69 
70 /*
71  * We move allocations between half a page and a whole page towards the end,
72  * subject to alignment constraints. This is the extra headroom we allow.
73  * Set to zero for the strictest placement.
74  */
75 #define MALLOC_LEEWAY		0
76 #define MALLOC_MOVE_COND(sz)	((sz) - mopts.malloc_guard < 		\
77 				    MALLOC_PAGESIZE - MALLOC_LEEWAY)
78 #define MALLOC_MOVE(p, sz)  	(((char *)(p)) +			\
79 				    ((MALLOC_PAGESIZE - MALLOC_LEEWAY -	\
80 			    	    ((sz) - mopts.malloc_guard)) & 	\
81 				    ~(MALLOC_MINSIZE - 1)))
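/*
 * Worked example (assuming 4 KB pages, MALLOC_LEEWAY 0 and no guard page):
 * for a 100-byte request MALLOC_MOVE_COND holds, and MALLOC_MOVE places the
 * object at page offset (4096 - 100) & ~15 = 3984, near the end of the page
 * while preserving MALLOC_MINSIZE alignment.
 */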
82 
83 #define PAGEROUND(x)  (((x) + (MALLOC_PAGEMASK)) & ~MALLOC_PAGEMASK)
84 
85 /*
86  * What to use for junk.  This is the byte value we fill memory with
87  * when the 'J' option is enabled. Use SOME_JUNK right after alloc,
88  * and SOME_FREEJUNK right before free.
89  */
90 #define SOME_JUNK		0xdb	/* deadbeef */
91 #define SOME_FREEJUNK		0xdf	/* dead, free */
92 
93 #define MMAP(sz)	mmap(NULL, (sz), PROT_READ | PROT_WRITE, \
94     MAP_ANON | MAP_PRIVATE, -1, 0)
95 
96 #define MMAPNONE(sz)	mmap(NULL, (sz), PROT_NONE, \
97     MAP_ANON | MAP_PRIVATE, -1, 0)
98 
99 #define MMAPA(a,sz)	mmap((a), (sz), PROT_READ | PROT_WRITE, \
100     MAP_ANON | MAP_PRIVATE, -1, 0)
101 
102 #define MQUERY(a, sz)	mquery((a), (sz), PROT_READ | PROT_WRITE, \
103     MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0)
104 
105 struct region_info {
106 	void *p;		/* page; low bits used to mark chunks */
107 	uintptr_t size;		/* size for pages, or chunk_info pointer */
108 #ifdef MALLOC_STATS
109 	void *f;		/* where allocated from */
110 #endif
111 };
112 
113 LIST_HEAD(chunk_head, chunk_info);
114 
115 struct dir_info {
116 	u_int32_t canary1;
117 	int active;			/* status of malloc */
118 	struct region_info *r;		/* region slots */
119 	size_t regions_total;		/* number of region slots */
120 	size_t regions_free;		/* number of free slots */
121 					/* lists of free chunk info structs */
122 	struct chunk_head chunk_info_list[MALLOC_MAXSHIFT + 1];
123 					/* lists of chunks with free slots */
124 	struct chunk_head chunk_dir[MALLOC_MAXSHIFT + 1][MALLOC_CHUNK_LISTS];
125 	size_t free_regions_size;	/* free pages cached */
126 					/* free pages cache */
127 	struct region_info free_regions[MALLOC_MAXCACHE];
128 					/* delayed free chunk slots */
129 	u_int rotor;
130 	void *delayed_chunks[MALLOC_DELAYED_CHUNK_MASK + 1];
131 	size_t rbytesused;		/* random bytes used */
132 	char *func;			/* current function */
133 	int mutex;
134 	u_char rbytes[32];		/* random bytes */
135 #ifdef MALLOC_STATS
136 	size_t inserts;
137 	size_t insert_collisions;
138 	size_t finds;
139 	size_t find_collisions;
140 	size_t deletes;
141 	size_t delete_moves;
142 	size_t cheap_realloc_tries;
143 	size_t cheap_reallocs;
144 	size_t malloc_used;		/* bytes allocated */
145 	size_t malloc_guarded;		/* bytes used for guards */
146 #define STATS_ADD(x,y)	((x) += (y))
147 #define STATS_SUB(x,y)	((x) -= (y))
148 #define STATS_INC(x)	((x)++)
149 #define STATS_ZERO(x)	((x) = 0)
150 #define STATS_SETF(x,y)	((x)->f = (y))
151 #else
152 #define STATS_ADD(x,y)	/* nothing */
153 #define STATS_SUB(x,y)	/* nothing */
154 #define STATS_INC(x)	/* nothing */
155 #define STATS_ZERO(x)	/* nothing */
156 #define STATS_SETF(x,y)	/* nothing */
157 #endif /* MALLOC_STATS */
158 	u_int32_t canary2;
159 };
160 #define DIR_INFO_RSZ	((sizeof(struct dir_info) + MALLOC_PAGEMASK) & \
161 			~MALLOC_PAGEMASK)
162 
163 /*
164  * This structure describes a page worth of chunks.
165  *
166  * How many bits per u_short in the bitmap
167  */
168 #define MALLOC_BITS		(NBBY * sizeof(u_short))
169 struct chunk_info {
170 	LIST_ENTRY(chunk_info) entries;
171 	void *page;			/* pointer to the page */
172 	u_short canary;
173 	u_short size;			/* size of this page's chunks */
174 	u_short shift;			/* how far to shift for this size */
175 	u_short free;			/* how many free chunks */
176 	u_short total;			/* how many chunks */
177 	u_short offset;			/* requested size table offset */
178 	u_short bits[1];		/* which chunks are free */
179 };
180 
181 struct malloc_readonly {
182 	struct dir_info *malloc_pool[_MALLOC_MUTEXES];	/* Main bookkeeping information */
183 	int	malloc_mt;		/* multi-threaded mode? */
184 	int	malloc_freecheck;	/* Extensive double free check */
185 	int	malloc_freeunmap;	/* mprotect free pages PROT_NONE? */
186 	int	malloc_junk;		/* junk fill? */
187 	int	malloc_realloc;		/* always realloc? */
188 	int	malloc_xmalloc;		/* xmalloc behaviour? */
189 	int	chunk_canaries;		/* use canaries after chunks? */
190 	int	internal_funcs;		/* use better recallocarray/freezero? */
191 	u_int	malloc_cache;		/* free pages we cache */
192 	size_t	malloc_guard;		/* use guard pages after allocations? */
193 #ifdef MALLOC_STATS
194 	int	malloc_stats;		/* dump statistics at end */
195 #endif
196 	u_int32_t malloc_canary;	/* Matched against ones in malloc_pool */
197 };
198 
199 /* This object is mapped PROT_READ after initialisation to prevent tampering */
200 static union {
201 	struct malloc_readonly mopts;
202 	u_char _pad[MALLOC_PAGESIZE];
203 } malloc_readonly __attribute__((aligned(MALLOC_PAGESIZE)));
204 #define mopts	malloc_readonly.mopts
205 
206 char		*malloc_options;	/* compile-time options */
207 
208 static __dead void wrterror(struct dir_info *d, char *msg, ...)
209     __attribute__((__format__ (printf, 2, 3)));
210 
211 #ifdef MALLOC_STATS
212 void malloc_dump(int, int, struct dir_info *);
213 PROTO_NORMAL(malloc_dump);
214 void malloc_gdump(int);
215 PROTO_NORMAL(malloc_gdump);
216 static void malloc_exit(void);
217 #define CALLER	__builtin_return_address(0)
218 #else
219 #define CALLER	NULL
220 #endif
221 
222 /* low bits of r->p determine size: 0 means >= page size and r->size holds the
223  * real size; otherwise the low bits are a shift count, or 1 for malloc(0)
224  */
225 #define REALSIZE(sz, r)						\
226 	(sz) = (uintptr_t)(r)->p & MALLOC_PAGEMASK,		\
227 	(sz) = ((sz) == 0 ? (r)->size : ((sz) == 1 ? 0 : (1 << ((sz)-1))))
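/*
 * Example: chunk pages are inserted as pp | (bits + 1), so low bits of 5
 * decode to a chunk size of 1 << 4 = 16 bytes, low bits of 1 mean a
 * malloc(0) chunk (size 0), and 0 means r->size holds the page-level size.
 */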
228 
229 static inline void
230 _MALLOC_LEAVE(struct dir_info *d)
231 {
232 	if (mopts.malloc_mt) {
233 		d->active--;
234 		_MALLOC_UNLOCK(d->mutex);
235 	}
236 }
237 
238 static inline void
239 _MALLOC_ENTER(struct dir_info *d)
240 {
241 	if (mopts.malloc_mt) {
242 		_MALLOC_LOCK(d->mutex);
243 		d->active++;
244 	}
245 }
246 
247 static inline size_t
248 hash(void *p)
249 {
250 	size_t sum;
251 	uintptr_t u;
252 
253 	u = (uintptr_t)p >> MALLOC_PAGESHIFT;
254 	sum = u;
255 	sum = (sum << 7) - sum + (u >> 16);
256 #ifdef __LP64__
257 	sum = (sum << 7) - sum + (u >> 32);
258 	sum = (sum << 7) - sum + (u >> 48);
259 #endif
260 	return sum;
261 }
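/*
 * The hash mixes the page number (the pointer with its page offset shifted
 * away) 16 bits at a time; each (sum << 7) - sum step is a multiply by 127,
 * a cheap multiplicative mix for the region table lookups below.
 */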
262 
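/*
 * Each thread is mapped to one of the _MALLOC_MUTEXES pools by its thread
 * id, so unrelated threads rarely contend for the same lock; single-threaded
 * programs always use pool 0.
 */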
263 static inline
264 struct dir_info *getpool(void)
265 {
266 	if (!mopts.malloc_mt)
267 		return mopts.malloc_pool[0];
268 	else
269 		return mopts.malloc_pool[TIB_GET()->tib_tid &
270 		    (_MALLOC_MUTEXES - 1)];
271 }
272 
273 static __dead void
274 wrterror(struct dir_info *d, char *msg, ...)
275 {
276 	int		saved_errno = errno;
277 	va_list		ap;
278 
279 	dprintf(STDERR_FILENO, "%s(%d) in %s(): ", __progname,
280 	    getpid(), (d != NULL && d->func) ? d->func : "unknown");
281 	va_start(ap, msg);
282 	vdprintf(STDERR_FILENO, msg, ap);
283 	va_end(ap);
284 	dprintf(STDERR_FILENO, "\n");
285 
286 #ifdef MALLOC_STATS
287 	if (mopts.malloc_stats)
288 		malloc_gdump(STDERR_FILENO);
289 #endif /* MALLOC_STATS */
290 
291 	errno = saved_errno;
292 
293 	abort();
294 }
295 
296 static void
297 rbytes_init(struct dir_info *d)
298 {
299 	arc4random_buf(d->rbytes, sizeof(d->rbytes));
300 	/* add 1 to account for using d->rbytes[0] */
301 	d->rbytesused = 1 + d->rbytes[0] % (sizeof(d->rbytes) / 2);
302 }
303 
304 static inline u_char
305 getrbyte(struct dir_info *d)
306 {
307 	u_char x;
308 
309 	if (d->rbytesused >= sizeof(d->rbytes))
310 		rbytes_init(d);
311 	x = d->rbytes[d->rbytesused++];
312 	return x;
313 }
314 
315 static void
316 omalloc_parseopt(char opt)
317 {
318 	switch (opt) {
319 	case '>':
320 		mopts.malloc_cache <<= 1;
321 		if (mopts.malloc_cache > MALLOC_MAXCACHE)
322 			mopts.malloc_cache = MALLOC_MAXCACHE;
323 		break;
324 	case '<':
325 		mopts.malloc_cache >>= 1;
326 		break;
327 	case 'c':
328 		mopts.chunk_canaries = 0;
329 		break;
330 	case 'C':
331 		mopts.chunk_canaries = 1;
332 		break;
333 #ifdef MALLOC_STATS
334 	case 'd':
335 		mopts.malloc_stats = 0;
336 		break;
337 	case 'D':
338 		mopts.malloc_stats = 1;
339 		break;
340 #endif /* MALLOC_STATS */
341 	case 'f':
342 		mopts.malloc_freecheck = 0;
343 		mopts.malloc_freeunmap = 0;
344 		break;
345 	case 'F':
346 		mopts.malloc_freecheck = 1;
347 		mopts.malloc_freeunmap = 1;
348 		break;
349 	case 'g':
350 		mopts.malloc_guard = 0;
351 		break;
352 	case 'G':
353 		mopts.malloc_guard = MALLOC_PAGESIZE;
354 		break;
355 	case 'j':
356 		if (mopts.malloc_junk > 0)
357 			mopts.malloc_junk--;
358 		break;
359 	case 'J':
360 		if (mopts.malloc_junk < 2)
361 			mopts.malloc_junk++;
362 		break;
363 	case 'r':
364 		mopts.malloc_realloc = 0;
365 		break;
366 	case 'R':
367 		mopts.malloc_realloc = 1;
368 		break;
369 	case 'u':
370 		mopts.malloc_freeunmap = 0;
371 		break;
372 	case 'U':
373 		mopts.malloc_freeunmap = 1;
374 		break;
375 	case 'x':
376 		mopts.malloc_xmalloc = 0;
377 		break;
378 	case 'X':
379 		mopts.malloc_xmalloc = 1;
380 		break;
381 	default:
382 		dprintf(STDERR_FILENO, "malloc() warning: "
383 		    "unknown char in MALLOC_OPTIONS\n");
384 		break;
385 	}
386 }
387 
388 static void
389 omalloc_init(void)
390 {
391 	char *p, *q, b[16];
392 	int i, j, mib[2];
393 	size_t sb;
394 
395 	/*
396 	 * Default options
397 	 */
398 	mopts.malloc_junk = 1;
399 	mopts.malloc_cache = MALLOC_DEFAULT_CACHE;
400 
401 	for (i = 0; i < 3; i++) {
402 		switch (i) {
403 		case 0:
404 			mib[0] = CTL_VM;
405 			mib[1] = VM_MALLOC_CONF;
406 			sb = sizeof(b);
407 			j = sysctl(mib, 2, b, &sb, NULL, 0);
408 			if (j != 0)
409 				continue;
410 			p = b;
411 			break;
412 		case 1:
413 			if (issetugid() == 0)
414 				p = getenv("MALLOC_OPTIONS");
415 			else
416 				continue;
417 			break;
418 		case 2:
419 			p = malloc_options;
420 			break;
421 		default:
422 			p = NULL;
423 		}
424 
425 		for (; p != NULL && *p != '\0'; p++) {
426 			switch (*p) {
427 			case 'S':
428 				for (q = "CFGJ"; *q != '\0'; q++)
429 					omalloc_parseopt(*q);
430 				mopts.malloc_cache = 0;
431 				break;
432 			case 's':
433 				for (q = "cfgj"; *q != '\0'; q++)
434 					omalloc_parseopt(*q);
435 				mopts.malloc_cache = MALLOC_DEFAULT_CACHE;
436 				break;
437 			default:
438 				omalloc_parseopt(*p);
439 				break;
440 			}
441 		}
442 	}
443 
444 #ifdef MALLOC_STATS
445 	if (mopts.malloc_stats && (atexit(malloc_exit) == -1)) {
446 		dprintf(STDERR_FILENO, "malloc() warning: atexit(2) failed."
447 		    " Will not be able to dump stats on exit\n");
448 	}
449 #endif /* MALLOC_STATS */
450 
451 	while ((mopts.malloc_canary = arc4random()) == 0)
452 		;
453 }
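/*
 * Option sources are parsed in order: the vm.malloc_conf sysctl, then the
 * MALLOC_OPTIONS environment variable (skipped for set[ug]id programs), then
 * the malloc_options global, so later sources override earlier ones.  For
 * example, MALLOC_OPTIONS=S turns on C, F, G and J and disables the page
 * cache; MALLOC_OPTIONS=s reverts those to the defaults.
 */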
454 
455 static void
456 omalloc_poolinit(struct dir_info **dp)
457 {
458 	char *p;
459 	size_t d_avail, regioninfo_size;
460 	struct dir_info *d;
461 	int i, j;
462 
463 	/*
464 	 * Allocate dir_info with a guard page on either side. Also
465 	 * randomise offset inside the page at which the dir_info
466 	 * lies (subject to alignment by 1 << MALLOC_MINSHIFT)
467 	 */
468 	if ((p = MMAPNONE(DIR_INFO_RSZ + (MALLOC_PAGESIZE * 2))) == MAP_FAILED)
469 		wrterror(NULL, "malloc init mmap failed");
470 	mprotect(p + MALLOC_PAGESIZE, DIR_INFO_RSZ, PROT_READ | PROT_WRITE);
471 	d_avail = (DIR_INFO_RSZ - sizeof(*d)) >> MALLOC_MINSHIFT;
472 	d = (struct dir_info *)(p + MALLOC_PAGESIZE +
473 	    (arc4random_uniform(d_avail) << MALLOC_MINSHIFT));
474 
475 	rbytes_init(d);
476 	d->regions_free = d->regions_total = MALLOC_INITIAL_REGIONS;
477 	regioninfo_size = d->regions_total * sizeof(struct region_info);
478 	d->r = MMAP(regioninfo_size);
479 	if (d->r == MAP_FAILED) {
480 		d->regions_total = 0;
481 		wrterror(NULL, "malloc init mmap failed");
482 	}
483 	for (i = 0; i <= MALLOC_MAXSHIFT; i++) {
484 		LIST_INIT(&d->chunk_info_list[i]);
485 		for (j = 0; j < MALLOC_CHUNK_LISTS; j++)
486 			LIST_INIT(&d->chunk_dir[i][j]);
487 	}
488 	STATS_ADD(d->malloc_used, regioninfo_size);
489 	d->canary1 = mopts.malloc_canary ^ (u_int32_t)(uintptr_t)d;
490 	d->canary2 = ~d->canary1;
491 
492 	*dp = d;
493 }
494 
495 static int
496 omalloc_grow(struct dir_info *d)
497 {
498 	size_t newtotal;
499 	size_t newsize;
500 	size_t mask;
501 	size_t i;
502 	struct region_info *p;
503 
504 	if (d->regions_total > SIZE_MAX / sizeof(struct region_info) / 2)
505 		return 1;
506 
507 	newtotal = d->regions_total * 2;
508 	newsize = newtotal * sizeof(struct region_info);
509 	mask = newtotal - 1;
510 
511 	p = MMAP(newsize);
512 	if (p == MAP_FAILED)
513 		return 1;
514 
515 	STATS_ADD(d->malloc_used, newsize);
516 	STATS_ZERO(d->inserts);
517 	STATS_ZERO(d->insert_collisions);
518 	for (i = 0; i < d->regions_total; i++) {
519 		void *q = d->r[i].p;
520 		if (q != NULL) {
521 			size_t index = hash(q) & mask;
522 			STATS_INC(d->inserts);
523 			while (p[index].p != NULL) {
524 				index = (index - 1) & mask;
525 				STATS_INC(d->insert_collisions);
526 			}
527 			p[index] = d->r[i];
528 		}
529 	}
530 	/* avoid having pages that contain meta info end up in the cache */
531 	if (munmap(d->r, d->regions_total * sizeof(struct region_info)))
532 		wrterror(d, "munmap %p", (void *)d->r);
533 	else
534 		STATS_SUB(d->malloc_used,
535 		    d->regions_total * sizeof(struct region_info));
536 	d->regions_free = d->regions_free + d->regions_total;
537 	d->regions_total = newtotal;
538 	d->r = p;
539 	return 0;
540 }
541 
542 /*
543  * The hashtable uses the assumption that p is never NULL. This holds since
544  * non-MAP_FIXED mappings with hint 0 start at BRKSIZ.
545  */
546 static int
547 insert(struct dir_info *d, void *p, size_t sz, void *f)
548 {
549 	size_t index;
550 	size_t mask;
551 	void *q;
552 
553 	if (d->regions_free * 4 < d->regions_total) {
554 		if (omalloc_grow(d))
555 			return 1;
556 	}
557 	mask = d->regions_total - 1;
558 	index = hash(p) & mask;
559 	q = d->r[index].p;
560 	STATS_INC(d->inserts);
561 	while (q != NULL) {
562 		index = (index - 1) & mask;
563 		q = d->r[index].p;
564 		STATS_INC(d->insert_collisions);
565 	}
566 	d->r[index].p = p;
567 	d->r[index].size = sz;
568 #ifdef MALLOC_STATS
569 	d->r[index].f = f;
570 #endif
571 	d->regions_free--;
572 	return 0;
573 }
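/*
 * Regions live in an open-addressed hash table probed downwards (index - 1);
 * the table is grown as soon as fewer than a quarter of the slots are free,
 * so probe chains stay short.  A minimal sketch of the probe, with
 * d->regions_total a power of two:
 *
 *	index = hash(p) & (d->regions_total - 1);
 *	while (d->r[index].p != NULL)
 *		index = (index - 1) & (d->regions_total - 1);
 */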
574 
575 static struct region_info *
576 find(struct dir_info *d, void *p)
577 {
578 	size_t index;
579 	size_t mask = d->regions_total - 1;
580 	void *q, *r;
581 
582 	if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)(uintptr_t)d) ||
583 	    d->canary1 != ~d->canary2)
584 		wrterror(d, "internal struct corrupt");
585 	p = MASK_POINTER(p);
586 	index = hash(p) & mask;
587 	r = d->r[index].p;
588 	q = MASK_POINTER(r);
589 	STATS_INC(d->finds);
590 	while (q != p && r != NULL) {
591 		index = (index - 1) & mask;
592 		r = d->r[index].p;
593 		q = MASK_POINTER(r);
594 		STATS_INC(d->find_collisions);
595 	}
596 	return (q == p && r != NULL) ? &d->r[index] : NULL;
597 }
598 
599 static void
600 delete(struct dir_info *d, struct region_info *ri)
601 {
602 	/* algorithm R, Knuth Vol III section 6.4 */
603 	size_t mask = d->regions_total - 1;
604 	size_t i, j, r;
605 
606 	if (d->regions_total & (d->regions_total - 1))
607 		wrterror(d, "regions_total not 2^x");
608 	d->regions_free++;
609 	STATS_INC(d->deletes);
610 
611 	i = ri - d->r;
612 	for (;;) {
613 		d->r[i].p = NULL;
614 		d->r[i].size = 0;
615 		j = i;
616 		for (;;) {
617 			i = (i - 1) & mask;
618 			if (d->r[i].p == NULL)
619 				return;
620 			r = hash(d->r[i].p) & mask;
621 			if ((i <= r && r < j) || (r < j && j < i) ||
622 			    (j < i && i <= r))
623 				continue;
624 			d->r[j] = d->r[i];
625 			STATS_INC(d->delete_moves);
626 			break;
627 		}
628 
629 	}
630 }
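/*
 * Deleting from a linearly probed table cannot simply empty the slot, or a
 * later find() would stop at the hole before reaching entries that collided
 * past it; the loop above (Knuth's algorithm R) therefore shifts such
 * entries back into the hole until a naturally empty slot is reached.
 */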
631 
632 /*
633  * Cache maintenance. We keep at most malloc_cache pages cached.
634  * If the cache is becoming full, unmap pages in the cache for real,
635  * and then add the region to the cache.
636  * As opposed to the regular region data structure, the sizes in the
637  * cache are in MALLOC_PAGESIZE units.
638  */
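/*
 * For illustration: with the default malloc_cache of 64 and 4 KB pages, at
 * most 256 KB of freed regions stay mapped, and a freed 3-page region is
 * recorded in the cache with size 3, not 3 * MALLOC_PAGESIZE.
 */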
639 static void
640 unmap(struct dir_info *d, void *p, size_t sz, size_t clear, int junk)
641 {
642 	size_t psz = sz >> MALLOC_PAGESHIFT;
643 	size_t rsz;
644 	struct region_info *r;
645 	u_int i, offset, mask;
646 
647 	if (sz != PAGEROUND(sz))
648 		wrterror(d, "munmap round");
649 
650 	rsz = mopts.malloc_cache - d->free_regions_size;
651 
652 	/*
653 	 * normally the cache holds recently freed regions, but if the region
654 	 * to unmap is larger than the cache size or we're clearing and the
655 	 * cache is full, just munmap
656 	 */
657 	if (psz > mopts.malloc_cache || (clear > 0 && rsz == 0)) {
658 		i = munmap(p, sz);
659 		if (i)
660 			wrterror(d, "munmap %p", p);
661 		STATS_SUB(d->malloc_used, sz);
662 		return;
663 	}
664 	offset = getrbyte(d);
665 	mask = mopts.malloc_cache - 1;
666 	if (psz > rsz) {
667 		size_t tounmap = psz - rsz;
668 		for (i = 0; ; i++) {
669 			r = &d->free_regions[(i + offset) & mask];
670 			if (r->p != NULL) {
671 				rsz = r->size << MALLOC_PAGESHIFT;
672 				if (munmap(r->p, rsz))
673 					wrterror(d, "munmap %p", r->p);
674 				r->p = NULL;
675 				if (tounmap > r->size)
676 					tounmap -= r->size;
677 				else
678 					tounmap = 0;
679 				d->free_regions_size -= r->size;
680 				STATS_SUB(d->malloc_used, rsz);
681 				if (tounmap == 0) {
682 					offset = i;
683 					break;
684 				}
685 			}
686 		}
687 	}
688 	for (i = 0; ; i++) {
689 		r = &d->free_regions[(i + offset) & mask];
690 		if (r->p == NULL) {
691 			if (clear > 0)
692 				memset(p, 0, clear);
693 			if (junk && !mopts.malloc_freeunmap) {
694 				size_t amt = junk == 1 ?  MALLOC_MAXCHUNK : sz;
695 				memset(p, SOME_FREEJUNK, amt);
696 			}
697 			if (mopts.malloc_freeunmap)
698 				mprotect(p, sz, PROT_NONE);
699 			r->p = p;
700 			r->size = psz;
701 			d->free_regions_size += psz;
702 			break;
703 		}
704 	}
705 	if (d->free_regions_size > mopts.malloc_cache)
706 		wrterror(d, "malloc cache overflow");
707 }
708 
709 static void
710 zapcacheregion(struct dir_info *d, void *p, size_t len)
711 {
712 	u_int i;
713 	struct region_info *r;
714 	size_t rsz;
715 
716 	for (i = 0; i < mopts.malloc_cache; i++) {
717 		r = &d->free_regions[i];
718 		if (r->p >= p && r->p <= (void *)((char *)p + len)) {
719 			rsz = r->size << MALLOC_PAGESHIFT;
720 			if (munmap(r->p, rsz))
721 				wrterror(d, "munmap %p", r->p);
722 			r->p = NULL;
723 			d->free_regions_size -= r->size;
724 			STATS_SUB(d->malloc_used, rsz);
725 		}
726 	}
727 }
728 
729 static void *
730 map(struct dir_info *d, void *hint, size_t sz, int zero_fill)
731 {
732 	size_t psz = sz >> MALLOC_PAGESHIFT;
733 	struct region_info *r, *big = NULL;
734 	u_int i;
735 	void *p;
736 
737 	if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)(uintptr_t)d) ||
738 	    d->canary1 != ~d->canary2)
739 		wrterror(d, "internal struct corrupt");
740 	if (sz != PAGEROUND(sz))
741 		wrterror(d, "map round");
742 
743 	if (hint == NULL && psz > d->free_regions_size) {
744 		_MALLOC_LEAVE(d);
745 		p = MMAP(sz);
746 		_MALLOC_ENTER(d);
747 		if (p != MAP_FAILED)
748 			STATS_ADD(d->malloc_used, sz);
749 		/* zero fill not needed */
750 		return p;
751 	}
752 	for (i = 0; i < mopts.malloc_cache; i++) {
753 		r = &d->free_regions[(i + d->rotor) & (mopts.malloc_cache - 1)];
754 		if (r->p != NULL) {
755 			if (hint != NULL && r->p != hint)
756 				continue;
757 			if (r->size == psz) {
758 				p = r->p;
759 				r->p = NULL;
760 				d->free_regions_size -= psz;
761 				if (mopts.malloc_freeunmap)
762 					mprotect(p, sz, PROT_READ | PROT_WRITE);
763 				if (zero_fill)
764 					memset(p, 0, sz);
765 				else if (mopts.malloc_junk == 2 &&
766 				    mopts.malloc_freeunmap)
767 					memset(p, SOME_FREEJUNK, sz);
768 				d->rotor += i + 1;
769 				return p;
770 			} else if (r->size > psz)
771 				big = r;
772 		}
773 	}
774 	if (big != NULL) {
775 		r = big;
776 		p = r->p;
777 		r->p = (char *)r->p + (psz << MALLOC_PAGESHIFT);
778 		if (mopts.malloc_freeunmap)
779 			mprotect(p, sz, PROT_READ | PROT_WRITE);
780 		r->size -= psz;
781 		d->free_regions_size -= psz;
782 		if (zero_fill)
783 			memset(p, 0, sz);
784 		else if (mopts.malloc_junk == 2 && mopts.malloc_freeunmap)
785 			memset(p, SOME_FREEJUNK, sz);
786 		return p;
787 	}
788 	if (hint != NULL)
789 		return MAP_FAILED;
790 	if (d->free_regions_size > mopts.malloc_cache)
791 		wrterror(d, "malloc cache");
792 	_MALLOC_LEAVE(d);
793 	p = MMAP(sz);
794 	_MALLOC_ENTER(d);
795 	if (p != MAP_FAILED)
796 		STATS_ADD(d->malloc_used, sz);
797 	/* zero fill not needed */
798 	return p;
799 }
800 
801 static void
802 init_chunk_info(struct dir_info *d, struct chunk_info *p, int bits)
803 {
804 	int i;
805 
806 	if (bits == 0) {
807 		p->shift = MALLOC_MINSHIFT;
808 		p->total = p->free = MALLOC_PAGESIZE >> p->shift;
809 		p->size = 0;
810 		p->offset = 0xdead;
811 	} else {
812 		p->shift = bits;
813 		p->total = p->free = MALLOC_PAGESIZE >> p->shift;
814 		p->size = 1U << bits;
815 		p->offset = howmany(p->total, MALLOC_BITS);
816 	}
817 	p->canary = (u_short)d->canary1;
818 
819 	/* set all valid bits in the bitmap */
820 	i = p->total - 1;
821 	memset(p->bits, 0xff, sizeof(p->bits[0]) * (i / MALLOC_BITS));
822 	p->bits[i / MALLOC_BITS] = (2U << (i % MALLOC_BITS)) - 1;
823 }
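/*
 * Bitmap example: for 64-byte chunks on a 4 KB page, total is 64, so the
 * free bitmap spans 64 / 16 = 4 u_shorts, all initialised to 0xffff here;
 * the (2U << (i % MALLOC_BITS)) - 1 expression masks off any bits beyond
 * p->total when the chunk count is not a multiple of MALLOC_BITS.
 */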
824 
825 static struct chunk_info *
826 alloc_chunk_info(struct dir_info *d, int bits)
827 {
828 	struct chunk_info *p;
829 
830 	if (LIST_EMPTY(&d->chunk_info_list[bits])) {
831 		size_t size, count, i;
832 		char *q;
833 
834 		if (bits == 0)
835 			count = MALLOC_PAGESIZE / MALLOC_MINSIZE;
836 		else
837 			count = MALLOC_PAGESIZE >> bits;
838 
839 		size = howmany(count, MALLOC_BITS);
840 		size = sizeof(struct chunk_info) + (size - 1) * sizeof(u_short);
841 		if (mopts.chunk_canaries)
842 			size += count * sizeof(u_short);
843 		size = _ALIGN(size);
844 
845 		q = MMAP(MALLOC_PAGESIZE);
846 		if (q == MAP_FAILED)
847 			return NULL;
848 		STATS_ADD(d->malloc_used, MALLOC_PAGESIZE);
849 		count = MALLOC_PAGESIZE / size;
850 
851 		for (i = 0; i < count; i++, q += size) {
852 			p = (struct chunk_info *)q;
853 			LIST_INSERT_HEAD(&d->chunk_info_list[bits], p, entries);
854 		}
855 	}
856 	p = LIST_FIRST(&d->chunk_info_list[bits]);
857 	LIST_REMOVE(p, entries);
858 	if (p->shift == 0)
859 		init_chunk_info(d, p, bits);
860 	return p;
861 }
862 
863 /*
864  * Allocate a page of chunks
865  */
866 static struct chunk_info *
867 omalloc_make_chunks(struct dir_info *d, int bits, int listnum)
868 {
869 	struct chunk_info *bp;
870 	void *pp;
871 
872 	/* Allocate a new bucket */
873 	pp = map(d, NULL, MALLOC_PAGESIZE, 0);
874 	if (pp == MAP_FAILED)
875 		return NULL;
876 
877 	/* memory protect the page allocated in the malloc(0) case */
878 	if (bits == 0 && mprotect(pp, MALLOC_PAGESIZE, PROT_NONE) < 0)
879 		goto err;
880 
881 	bp = alloc_chunk_info(d, bits);
882 	if (bp == NULL)
883 		goto err;
884 	bp->page = pp;
885 
886 	if (insert(d, (void *)((uintptr_t)pp | (bits + 1)), (uintptr_t)bp,
887 	    NULL))
888 		goto err;
889 	LIST_INSERT_HEAD(&d->chunk_dir[bits][listnum], bp, entries);
890 	return bp;
891 
892 err:
893 	unmap(d, pp, MALLOC_PAGESIZE, 0, mopts.malloc_junk);
894 	return NULL;
895 }
896 
897 static int
898 find_chunksize(size_t size)
899 {
900 	int r;
901 
902 	/* malloc(0) is special */
903 	if (size == 0)
904 		return 0;
905 
906 	if (size < MALLOC_MINSIZE)
907 		size = MALLOC_MINSIZE;
908 	size--;
909 
910 	r = MALLOC_MINSHIFT;
911 	while (size >> r)
912 		r++;
913 	return r;
914 }
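/*
 * Examples: find_chunksize(24) returns 5 (a 32-byte chunk) and
 * find_chunksize(16) returns 4 (a 16-byte chunk); requests smaller than
 * MALLOC_MINSIZE are rounded up to 16 bytes, and size 0 maps to the
 * special bucket used for malloc(0).
 */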
915 
916 static void
917 fill_canary(char *ptr, size_t sz, size_t allocated)
918 {
919 	size_t check_sz = allocated - sz;
920 
921 	if (check_sz > CHUNK_CHECK_LENGTH)
922 		check_sz = CHUNK_CHECK_LENGTH;
923 	memset(ptr + sz, SOME_JUNK, check_sz);
924 }
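/*
 * With option C, the slack between the requested size and the chunk size is
 * filled with SOME_JUNK (at most CHUNK_CHECK_LENGTH bytes) and re-checked by
 * validate_canary() on free; e.g. a 24-byte request in a 32-byte chunk
 * carries an 8-byte canary at offsets 24..31.
 */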
925 
926 /*
927  * Allocate a chunk
928  */
929 static void *
930 malloc_bytes(struct dir_info *d, size_t size, void *f)
931 {
932 	u_int i, r;
933 	int j, listnum;
934 	size_t k;
935 	u_short	*lp;
936 	struct chunk_info *bp;
937 	void *p;
938 
939 	if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)(uintptr_t)d) ||
940 	    d->canary1 != ~d->canary2)
941 		wrterror(d, "internal struct corrupt");
942 
943 	j = find_chunksize(size);
944 
945 	r = ((u_int)getrbyte(d) << 8) | getrbyte(d);
946 	listnum = r % MALLOC_CHUNK_LISTS;
947 	/* If the list is empty, create one more page of chunks of that size */
948 	if ((bp = LIST_FIRST(&d->chunk_dir[j][listnum])) == NULL) {
949 		bp = omalloc_make_chunks(d, j, listnum);
950 		if (bp == NULL)
951 			return NULL;
952 	}
953 
954 	if (bp->canary != (u_short)d->canary1)
955 		wrterror(d, "chunk info corrupted");
956 
957 	i = (r / MALLOC_CHUNK_LISTS) & (bp->total - 1);
958 
959 	/* start somewhere in a short */
960 	lp = &bp->bits[i / MALLOC_BITS];
961 	if (*lp) {
962 		j = i % MALLOC_BITS;
963 		k = ffs(*lp >> j);
964 		if (k != 0) {
965 			k += j - 1;
966 			goto found;
967 		}
968 	}
969 	/* no free bit in the remainder of this short, go to the next full short */
970 	i /= MALLOC_BITS;
971 	for (;;) {
972 		if (++i >= bp->total / MALLOC_BITS)
973 			i = 0;
974 		lp = &bp->bits[i];
975 		if (*lp) {
976 			k = ffs(*lp) - 1;
977 			break;
978 		}
979 	}
980 found:
981 #ifdef MALLOC_STATS
982 	if (i == 0 && k == 0) {
983 		struct region_info *r = find(d, bp->page);
984 		r->f = f;
985 	}
986 #endif
987 
988 	*lp ^= 1 << k;
989 
990 	/* If there are no more free chunks, remove the page from the free-list */
991 	if (--bp->free == 0)
992 		LIST_REMOVE(bp, entries);
993 
994 	/* Adjust to the real offset of that chunk */
995 	k += (lp - bp->bits) * MALLOC_BITS;
996 
997 	if (mopts.chunk_canaries && size > 0)
998 		bp->bits[bp->offset + k] = size;
999 
1000 	k <<= bp->shift;
1001 
1002 	p = (char *)bp->page + k;
1003 	if (bp->size > 0) {
1004 		if (mopts.malloc_junk == 2)
1005 			memset(p, SOME_JUNK, bp->size);
1006 		else if (mopts.chunk_canaries)
1007 			fill_canary(p, size, bp->size);
1008 	}
1009 	return p;
1010 }
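/*
 * Note that a 16-bit random value picks both one of the MALLOC_CHUNK_LISTS
 * page lists and the bitmap position where the search for a free chunk
 * starts, so consecutive same-sized allocations do not land at predictable
 * offsets within a page.
 */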
1011 
1012 static void
1013 validate_canary(struct dir_info *d, u_char *ptr, size_t sz, size_t allocated)
1014 {
1015 	size_t check_sz = allocated - sz;
1016 	u_char *p, *q;
1017 
1018 	if (check_sz > CHUNK_CHECK_LENGTH)
1019 		check_sz = CHUNK_CHECK_LENGTH;
1020 	p = ptr + sz;
1021 	q = p + check_sz;
1022 
1023 	while (p < q) {
1024 		if (*p != SOME_JUNK) {
1025 			wrterror(d, "chunk canary corrupted %p %#tx@%#zx%s",
1026 			    ptr, p - ptr, sz,
1027 			    *p == SOME_FREEJUNK ? " (double free?)" : "");
1028 		}
1029 		p++;
1030 	}
1031 }
1032 
1033 static uint32_t
1034 find_chunknum(struct dir_info *d, struct chunk_info *info, void *ptr, int check)
1035 {
1036 	uint32_t chunknum;
1037 
1038 	if (info->canary != (u_short)d->canary1)
1039 		wrterror(d, "chunk info corrupted");
1040 
1041 	/* Find the chunk number on the page */
1042 	chunknum = ((uintptr_t)ptr & MALLOC_PAGEMASK) >> info->shift;
1043 
1044 	if ((uintptr_t)ptr & ((1U << (info->shift)) - 1))
1045 		wrterror(d, "modified chunk-pointer %p", ptr);
1046 	if (info->bits[chunknum / MALLOC_BITS] &
1047 	    (1U << (chunknum % MALLOC_BITS)))
1048 		wrterror(d, "chunk is already free %p", ptr);
1049 	if (check && info->size > 0) {
1050 		validate_canary(d, ptr, info->bits[info->offset + chunknum],
1051 		    info->size);
1052 	}
1053 	return chunknum;
1054 }
1055 
1056 /*
1057  * Free a chunk, and possibly the page it's on, if the page becomes empty.
1058  */
1059 static void
1060 free_bytes(struct dir_info *d, struct region_info *r, void *ptr)
1061 {
1062 	struct chunk_head *mp;
1063 	struct chunk_info *info;
1064 	uint32_t chunknum;
1065 	int listnum;
1066 
1067 	info = (struct chunk_info *)r->size;
1068 	chunknum = find_chunknum(d, info, ptr, 0);
1069 
1070 	info->bits[chunknum / MALLOC_BITS] |= 1U << (chunknum % MALLOC_BITS);
1071 	info->free++;
1072 
1073 	if (info->free == 1) {
1074 		/* Page became non-full */
1075 		listnum = getrbyte(d) % MALLOC_CHUNK_LISTS;
1076 		if (info->size != 0)
1077 			mp = &d->chunk_dir[info->shift][listnum];
1078 		else
1079 			mp = &d->chunk_dir[0][listnum];
1080 
1081 		LIST_INSERT_HEAD(mp, info, entries);
1082 		return;
1083 	}
1084 
1085 	if (info->free != info->total)
1086 		return;
1087 
1088 	LIST_REMOVE(info, entries);
1089 
1090 	if (info->size == 0 && !mopts.malloc_freeunmap)
1091 		mprotect(info->page, MALLOC_PAGESIZE, PROT_READ | PROT_WRITE);
1092 	unmap(d, info->page, MALLOC_PAGESIZE, 0, 0);
1093 
1094 	delete(d, r);
1095 	if (info->size != 0)
1096 		mp = &d->chunk_info_list[info->shift];
1097 	else
1098 		mp = &d->chunk_info_list[0];
1099 	LIST_INSERT_HEAD(mp, info, entries);
1100 }
1101 
1102 
1103 
1104 static void *
1105 omalloc(struct dir_info *pool, size_t sz, int zero_fill, void *f)
1106 {
1107 	void *p;
1108 	size_t psz;
1109 
1110 	if (sz > MALLOC_MAXCHUNK) {
1111 		if (sz >= SIZE_MAX - mopts.malloc_guard - MALLOC_PAGESIZE) {
1112 			errno = ENOMEM;
1113 			return NULL;
1114 		}
1115 		sz += mopts.malloc_guard;
1116 		psz = PAGEROUND(sz);
1117 		p = map(pool, NULL, psz, zero_fill);
1118 		if (p == MAP_FAILED) {
1119 			errno = ENOMEM;
1120 			return NULL;
1121 		}
1122 		if (insert(pool, p, sz, f)) {
1123 			unmap(pool, p, psz, 0, 0);
1124 			errno = ENOMEM;
1125 			return NULL;
1126 		}
1127 		if (mopts.malloc_guard) {
1128 			if (mprotect((char *)p + psz - mopts.malloc_guard,
1129 			    mopts.malloc_guard, PROT_NONE))
1130 				wrterror(pool, "mprotect");
1131 			STATS_ADD(pool->malloc_guarded, mopts.malloc_guard);
1132 		}
1133 
1134 		if (MALLOC_MOVE_COND(sz)) {
1135 			/* fill whole allocation */
1136 			if (mopts.malloc_junk == 2)
1137 				memset(p, SOME_JUNK, psz - mopts.malloc_guard);
1138 			/* shift towards the end */
1139 			p = MALLOC_MOVE(p, sz);
1140 			/* restore zero fill if requested; the junk fill above overwrote it */
1141 			if (zero_fill && mopts.malloc_junk == 2)
1142 				memset(p, 0, sz - mopts.malloc_guard);
1143 		} else {
1144 			if (mopts.malloc_junk == 2) {
1145 				if (zero_fill)
1146 					memset((char *)p + sz - mopts.malloc_guard,
1147 					    SOME_JUNK, psz - sz);
1148 				else
1149 					memset(p, SOME_JUNK,
1150 					    psz - mopts.malloc_guard);
1151 			} else if (mopts.chunk_canaries)
1152 				fill_canary(p, sz - mopts.malloc_guard,
1153 				    psz - mopts.malloc_guard);
1154 		}
1155 
1156 	} else {
1157 		/* takes care of SOME_JUNK */
1158 		p = malloc_bytes(pool, sz, f);
1159 		if (zero_fill && p != NULL && sz > 0)
1160 			memset(p, 0, sz);
1161 	}
1162 
1163 	return p;
1164 }
1165 
1166 /*
1167  * Common function for handling recursion.  Only
1168  * print the error message once, to avoid making the problem
1169  * potentially worse.
1170  */
1171 static void
1172 malloc_recurse(struct dir_info *d)
1173 {
1174 	static int noprint;
1175 
1176 	if (noprint == 0) {
1177 		noprint = 1;
1178 		wrterror(d, "recursive call");
1179 	}
1180 	d->active--;
1181 	_MALLOC_UNLOCK(d->mutex);
1182 	errno = EDEADLK;
1183 }
1184 
1185 void
1186 _malloc_init(int from_rthreads)
1187 {
1188 	int i, max;
1189 	struct dir_info *d;
1190 
1191 	_MALLOC_LOCK(0);
1192 	if (!from_rthreads && mopts.malloc_pool[0]) {
1193 		_MALLOC_UNLOCK(0);
1194 		return;
1195 	}
1196 	if (!mopts.malloc_canary)
1197 		omalloc_init();
1198 
1199 	max = from_rthreads ? _MALLOC_MUTEXES : 1;
1200 	if (((uintptr_t)&malloc_readonly & MALLOC_PAGEMASK) == 0)
1201 		mprotect(&malloc_readonly, sizeof(malloc_readonly),
1202 		    PROT_READ | PROT_WRITE);
1203 	for (i = 0; i < max; i++) {
1204 		if (mopts.malloc_pool[i])
1205 			continue;
1206 		omalloc_poolinit(&d);
1207 		d->mutex = i;
1208 		mopts.malloc_pool[i] = d;
1209 	}
1210 
1211 	if (from_rthreads)
1212 		mopts.malloc_mt = 1;
1213 	else
1214 		mopts.internal_funcs = 1;
1215 
1216 	/*
1217 	 * Options have been set and will never be reset.
1218 	 * Prevent further tampering with them.
1219 	 */
1220 	if (((uintptr_t)&malloc_readonly & MALLOC_PAGEMASK) == 0)
1221 		mprotect(&malloc_readonly, sizeof(malloc_readonly), PROT_READ);
1222 	_MALLOC_UNLOCK(0);
1223 }
1224 DEF_STRONG(_malloc_init);
1225 
1226 void *
1227 malloc(size_t size)
1228 {
1229 	void *r;
1230 	struct dir_info *d;
1231 	int saved_errno = errno;
1232 
1233 	d = getpool();
1234 	if (d == NULL) {
1235 		_malloc_init(0);
1236 		d = getpool();
1237 	}
1238 	_MALLOC_LOCK(d->mutex);
1239 	d->func = "malloc";
1240 
1241 	if (d->active++) {
1242 		malloc_recurse(d);
1243 		return NULL;
1244 	}
1245 	r = omalloc(d, size, 0, CALLER);
1246 	d->active--;
1247 	_MALLOC_UNLOCK(d->mutex);
1248 	if (r == NULL && mopts.malloc_xmalloc)
1249 		wrterror(d, "out of memory");
1250 	if (r != NULL)
1251 		errno = saved_errno;
1252 	return r;
1253 }
1254 /*DEF_STRONG(malloc);*/
1255 
1256 static void
1257 validate_junk(struct dir_info *pool, void *p)
1258 {
1259 	struct region_info *r;
1260 	size_t byte, sz;
1261 
1262 	if (p == NULL)
1263 		return;
1264 	r = find(pool, p);
1265 	if (r == NULL)
1266 		wrterror(pool, "bogus pointer in validate_junk %p", p);
1267 	REALSIZE(sz, r);
1268 	if (sz > CHUNK_CHECK_LENGTH)
1269 		sz = CHUNK_CHECK_LENGTH;
1270 	for (byte = 0; byte < sz; byte++) {
1271 		if (((unsigned char *)p)[byte] != SOME_FREEJUNK)
1272 			wrterror(pool, "use after free %p", p);
1273 	}
1274 }
1275 
1276 
1277 static struct region_info *
1278 findpool(void *p, struct dir_info *argpool, struct dir_info **foundpool,
1279     char **saved_function)
1280 {
1281 	struct dir_info *pool = argpool;
1282 	struct region_info *r = find(pool, p);
1283 
1284 	if (r == NULL) {
1285 		if (mopts.malloc_mt)  {
1286 			int i;
1287 
1288 			for (i = 0; i < _MALLOC_MUTEXES; i++) {
1289 				if (i == argpool->mutex)
1290 					continue;
1291 				pool->active--;
1292 				_MALLOC_UNLOCK(pool->mutex);
1293 				pool = mopts.malloc_pool[i];
1294 				_MALLOC_LOCK(pool->mutex);
1295 				pool->active++;
1296 				r = find(pool, p);
1297 				if (r != NULL) {
1298 					*saved_function = pool->func;
1299 					pool->func = argpool->func;
1300 					break;
1301 				}
1302 			}
1303 		}
1304 		if (r == NULL)
1305 			wrterror(argpool, "bogus pointer (double free?) %p", p);
1306 	}
1307 	*foundpool = pool;
1308 	return r;
1309 }
1310 
1311 static void
1312 ofree(struct dir_info *argpool, void *p, int clear, int check, size_t argsz)
1313 {
1314 	struct region_info *r;
1315 	struct dir_info *pool;
1316 	char *saved_function;
1317 	size_t sz;
1318 
1319 	r = findpool(p, argpool, &pool, &saved_function);
1320 
1321 	REALSIZE(sz, r);
1322 	if (check) {
1323 		if (sz <= MALLOC_MAXCHUNK) {
1324 			if (mopts.chunk_canaries && sz > 0) {
1325 				struct chunk_info *info =
1326 				    (struct chunk_info *)r->size;
1327 				uint32_t chunknum =
1328 				    find_chunknum(pool, info, p, 0);
1329 
1330 				if (info->bits[info->offset + chunknum] < argsz)
1331 					wrterror(pool, "recorded size %hu"
1332 					    " < %zu",
1333 					    info->bits[info->offset + chunknum],
1334 					    argsz);
1335 			} else {
1336 				if (sz < argsz)
1337 					wrterror(pool, "chunk size %zu < %zu",
1338 					    sz, argsz);
1339 			}
1340 		} else if (sz - mopts.malloc_guard < argsz) {
1341 			wrterror(pool, "recorded size %zu < %zu",
1342 			    sz - mopts.malloc_guard, argsz);
1343 		}
1344 	}
1345 	if (sz > MALLOC_MAXCHUNK) {
1346 		if (!MALLOC_MOVE_COND(sz)) {
1347 			if (r->p != p)
1348 				wrterror(pool, "bogus pointer %p", p);
1349 			if (mopts.chunk_canaries)
1350 				validate_canary(pool, p,
1351 				    sz - mopts.malloc_guard,
1352 				    PAGEROUND(sz - mopts.malloc_guard));
1353 		} else {
1354 			/* shifted towards the end */
1355 			if (p != MALLOC_MOVE(r->p, sz))
1356 				wrterror(pool, "bogus moved pointer %p", p);
1357 			p = r->p;
1358 		}
1359 		if (mopts.malloc_guard) {
1360 			if (sz < mopts.malloc_guard)
1361 				wrterror(pool, "guard size");
1362 			if (!mopts.malloc_freeunmap) {
1363 				if (mprotect((char *)p + PAGEROUND(sz) -
1364 				    mopts.malloc_guard, mopts.malloc_guard,
1365 				    PROT_READ | PROT_WRITE))
1366 					wrterror(pool, "mprotect");
1367 			}
1368 			STATS_SUB(pool->malloc_guarded, mopts.malloc_guard);
1369 		}
1370 		unmap(pool, p, PAGEROUND(sz), clear ? argsz : 0,
1371 		    mopts.malloc_junk);
1372 		delete(pool, r);
1373 	} else {
1374 		/* Validate the chunk and optionally check its canary */
1375 		struct chunk_info *info = (struct chunk_info *)r->size;
1376 		find_chunknum(pool, info, p, mopts.chunk_canaries);
1377 		if (!clear) {
1378 			void *tmp;
1379 			int i;
1380 
1381 			if (mopts.malloc_freecheck) {
1382 				for (i = 0; i <= MALLOC_DELAYED_CHUNK_MASK; i++)
1383 					if (p == pool->delayed_chunks[i])
1384 						wrterror(pool,
1385 						    "double free %p", p);
1386 			}
1387 			if (mopts.malloc_junk && sz > 0)
1388 				memset(p, SOME_FREEJUNK, sz);
1389 			i = getrbyte(pool) & MALLOC_DELAYED_CHUNK_MASK;
1390 			tmp = p;
1391 			p = pool->delayed_chunks[i];
1392 			if (tmp == p)
1393 				wrterror(pool, "double free %p", tmp);
1394 			pool->delayed_chunks[i] = tmp;
1395 			if (mopts.malloc_junk)
1396 				validate_junk(pool, p);
1397 		} else if (argsz > 0)
1398 			memset(p, 0, argsz);
1399 		if (p != NULL) {
1400 			r = find(pool, p);
1401 			if (r == NULL)
1402 				wrterror(pool,
1403 				    "bogus pointer (double free?) %p", p);
1404 			free_bytes(pool, r, p);
1405 		}
1406 	}
1407 
1408 	if (argpool != pool) {
1409 		pool->active--;
1410 		pool->func = saved_function;
1411 		_MALLOC_UNLOCK(pool->mutex);
1412 		_MALLOC_LOCK(argpool->mutex);
1413 		argpool->active++;
1414 	}
1415 }
1416 
1417 void
1418 free(void *ptr)
1419 {
1420 	struct dir_info *d;
1421 	int saved_errno = errno;
1422 
1423 	/* This is legal. */
1424 	if (ptr == NULL)
1425 		return;
1426 
1427 	d = getpool();
1428 	if (d == NULL)
1429 		wrterror(d, "free() called before allocation");
1430 	_MALLOC_LOCK(d->mutex);
1431 	d->func = "free";
1432 	if (d->active++) {
1433 		malloc_recurse(d);
1434 		return;
1435 	}
1436 	ofree(d, ptr, 0, 0, 0);
1437 	d->active--;
1438 	_MALLOC_UNLOCK(d->mutex);
1439 	errno = saved_errno;
1440 }
1441 /*DEF_STRONG(free);*/
1442 
1443 static void
1444 freezero_p(void *ptr, size_t sz)
1445 {
1446 	explicit_bzero(ptr, sz);
1447 	free(ptr);
1448 }
1449 
1450 void
1451 freezero(void *ptr, size_t sz)
1452 {
1453 	struct dir_info *d;
1454 	int saved_errno = errno;
1455 
1456 	/* This is legal. */
1457 	if (ptr == NULL)
1458 		return;
1459 
1460 	if (!mopts.internal_funcs) {
1461 		freezero_p(ptr, sz);
1462 		return;
1463 	}
1464 
1465 	d = getpool();
1466 	if (d == NULL)
1467 		wrterror(d, "freezero() called before allocation");
1468 	_MALLOC_LOCK(d->mutex);
1469 	d->func = "freezero";
1470 	if (d->active++) {
1471 		malloc_recurse(d);
1472 		return;
1473 	}
1474 	ofree(d, ptr, 1, 1, sz);
1475 	d->active--;
1476 	_MALLOC_UNLOCK(d->mutex);
1477 	errno = saved_errno;
1478 }
1479 DEF_WEAK(freezero);
1480 
1481 static void *
1482 orealloc(struct dir_info *argpool, void *p, size_t newsz, void *f)
1483 {
1484 	struct region_info *r;
1485 	struct dir_info *pool;
1486 	char *saved_function;
1487 	struct chunk_info *info;
1488 	size_t oldsz, goldsz, gnewsz;
1489 	void *q, *ret;
1490 	uint32_t chunknum;
1491 
1492 	if (p == NULL)
1493 		return omalloc(argpool, newsz, 0, f);
1494 
1495 	if (newsz >= SIZE_MAX - mopts.malloc_guard - MALLOC_PAGESIZE) {
1496 		errno = ENOMEM;
1497 		return  NULL;
1498 	}
1499 
1500 	r = findpool(p, argpool, &pool, &saved_function);
1501 
1502 	REALSIZE(oldsz, r);
1503 	if (mopts.chunk_canaries && oldsz <= MALLOC_MAXCHUNK) {
1504 		info = (struct chunk_info *)r->size;
1505 		chunknum = find_chunknum(pool, info, p, 0);
1506 	}
1507 
1508 	goldsz = oldsz;
1509 	if (oldsz > MALLOC_MAXCHUNK) {
1510 		if (oldsz < mopts.malloc_guard)
1511 			wrterror(pool, "guard size");
1512 		oldsz -= mopts.malloc_guard;
1513 	}
1514 
1515 	gnewsz = newsz;
1516 	if (gnewsz > MALLOC_MAXCHUNK)
1517 		gnewsz += mopts.malloc_guard;
1518 
1519 	if (newsz > MALLOC_MAXCHUNK && oldsz > MALLOC_MAXCHUNK &&
1520 	    !mopts.malloc_realloc) {
1521 		/* First case: growing from an n-page allocation to an
1522 		   m-page allocation, m > n */
1523 		size_t roldsz = PAGEROUND(goldsz);
1524 		size_t rnewsz = PAGEROUND(gnewsz);
1525 
1526 		if (rnewsz > roldsz) {
1527 			/* try to extend existing region */
1528 			if (!mopts.malloc_guard) {
1529 				void *hint = (char *)r->p + roldsz;
1530 				size_t needed = rnewsz - roldsz;
1531 
1532 				STATS_INC(pool->cheap_realloc_tries);
1533 				q = map(pool, hint, needed, 0);
1534 				if (q == hint)
1535 					goto gotit;
1536 				zapcacheregion(pool, hint, needed);
1537 				q = MQUERY(hint, needed);
1538 				if (q == hint)
1539 					q = MMAPA(hint, needed);
1540 				else
1541 					q = MAP_FAILED;
1542 				if (q == hint) {
1543 gotit:
1544 					STATS_ADD(pool->malloc_used, needed);
1545 					if (mopts.malloc_junk == 2)
1546 						memset(q, SOME_JUNK, needed);
1547 					r->size = gnewsz;
1548 					if (r->p != p) {
1549 						/* old pointer is moved */
1550 						memmove(r->p, p, oldsz);
1551 						p = r->p;
1552 					}
1553 					if (mopts.chunk_canaries)
1554 						fill_canary(p, newsz,
1555 						    PAGEROUND(newsz));
1556 					STATS_SETF(r, f);
1557 					STATS_INC(pool->cheap_reallocs);
1558 					ret = p;
1559 					goto done;
1560 				} else if (q != MAP_FAILED) {
1561 					if (munmap(q, needed))
1562 						wrterror(pool, "munmap %p", q);
1563 				}
1564 			}
1565 		} else if (rnewsz < roldsz) {
1566 			/* shrink number of pages */
1567 			if (mopts.malloc_guard) {
1568 				if (mprotect((char *)r->p + roldsz -
1569 				    mopts.malloc_guard, mopts.malloc_guard,
1570 				    PROT_READ | PROT_WRITE))
1571 					wrterror(pool, "mprotect");
1572 				if (mprotect((char *)r->p + rnewsz -
1573 				    mopts.malloc_guard, mopts.malloc_guard,
1574 				    PROT_NONE))
1575 					wrterror(pool, "mprotect");
1576 			}
1577 			unmap(pool, (char *)r->p + rnewsz, roldsz - rnewsz, 0,
1578 			    mopts.malloc_junk);
1579 			r->size = gnewsz;
1580 			if (MALLOC_MOVE_COND(gnewsz)) {
1581 				void *pp = MALLOC_MOVE(r->p, gnewsz);
1582 				memmove(pp, p, newsz);
1583 				p = pp;
1584 			} else if (mopts.chunk_canaries)
1585 				fill_canary(p, newsz, PAGEROUND(newsz));
1586 			STATS_SETF(r, f);
1587 			ret = p;
1588 			goto done;
1589 		} else {
1590 			/* number of pages remains the same */
1591 			void *pp = r->p;
1592 
1593 			r->size = gnewsz;
1594 			if (MALLOC_MOVE_COND(gnewsz))
1595 				pp = MALLOC_MOVE(r->p, gnewsz);
1596 			if (p != pp) {
1597 				memmove(pp, p, oldsz < newsz ? oldsz : newsz);
1598 				p = pp;
1599 			}
1600 			if (p == r->p) {
1601 				if (newsz > oldsz && mopts.malloc_junk == 2)
1602 					memset((char *)p + newsz, SOME_JUNK,
1603 					    rnewsz - mopts.malloc_guard -
1604 					    newsz);
1605 				if (mopts.chunk_canaries)
1606 					fill_canary(p, newsz, PAGEROUND(newsz));
1607 			}
1608 			STATS_SETF(r, f);
1609 			ret = p;
1610 			goto done;
1611 		}
1612 	}
1613 	if (oldsz <= MALLOC_MAXCHUNK && oldsz > 0 &&
1614 	    newsz <= MALLOC_MAXCHUNK && newsz > 0 &&
1615 	    1 << find_chunksize(newsz) == oldsz && !mopts.malloc_realloc) {
1616 		/* do not reallocate if the new size fits well in the existing chunk */
1617 		if (mopts.malloc_junk == 2)
1618 			memset((char *)p + newsz, SOME_JUNK, oldsz - newsz);
1619 		if (mopts.chunk_canaries) {
1620 			info->bits[info->offset + chunknum] = newsz;
1621 			fill_canary(p, newsz, info->size);
1622 		}
1623 		STATS_SETF(r, f);
1624 		ret = p;
1625 	} else if (newsz != oldsz || mopts.malloc_realloc) {
1626 		/* create new allocation */
1627 		q = omalloc(pool, newsz, 0, f);
1628 		if (q == NULL) {
1629 			ret = NULL;
1630 			goto done;
1631 		}
1632 		if (newsz != 0 && oldsz != 0)
1633 			memcpy(q, p, oldsz < newsz ? oldsz : newsz);
1634 		ofree(pool, p, 0, 0, 0);
1635 		ret = q;
1636 	} else {
1637 		/* oldsz == newsz */
1638 		if (newsz != 0)
1639 			wrterror(pool, "realloc internal inconsistency");
1640 		STATS_SETF(r, f);
1641 		ret = p;
1642 	}
1643 done:
1644 	if (argpool != pool) {
1645 		pool->active--;
1646 		pool->func = saved_function;
1647 		_MALLOC_UNLOCK(pool->mutex);
1648 		_MALLOC_LOCK(argpool->mutex);
1649 		argpool->active++;
1650 	}
1651 	return ret;
1652 }
1653 
1654 void *
1655 realloc(void *ptr, size_t size)
1656 {
1657 	struct dir_info *d;
1658 	void *r;
1659 	int saved_errno = errno;
1660 
1661 	d = getpool();
1662 	if (d == NULL) {
1663 		_malloc_init(0);
1664 		d = getpool();
1665 	}
1666 	_MALLOC_LOCK(d->mutex);
1667 	d->func = "realloc";
1668 	if (d->active++) {
1669 		malloc_recurse(d);
1670 		return NULL;
1671 	}
1672 	r = orealloc(d, ptr, size, CALLER);
1673 
1674 	d->active--;
1675 	_MALLOC_UNLOCK(d->mutex);
1676 	if (r == NULL && mopts.malloc_xmalloc)
1677 		wrterror(d, "out of memory");
1678 	if (r != NULL)
1679 		errno = saved_errno;
1680 	return r;
1681 }
1682 /*DEF_STRONG(realloc);*/
1683 
1684 
1685 /*
1686  * This is sqrt(SIZE_MAX+1), as s1*s2 <= SIZE_MAX
1687  * if both s1 < MUL_NO_OVERFLOW and s2 < MUL_NO_OVERFLOW
1688  */
1689 #define MUL_NO_OVERFLOW	(1UL << (sizeof(size_t) * 4))
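/*
 * The division check is only needed when either operand is at least
 * MUL_NO_OVERFLOW (2^32 on 64-bit platforms); below that bound the product
 * cannot exceed SIZE_MAX, so the common small-argument case in calloc() and
 * recallocarray() avoids the division entirely.
 */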
1690 
1691 void *
1692 calloc(size_t nmemb, size_t size)
1693 {
1694 	struct dir_info *d;
1695 	void *r;
1696 	int saved_errno = errno;
1697 
1698 	d = getpool();
1699 	if (d == NULL) {
1700 		_malloc_init(0);
1701 		d = getpool();
1702 	}
1703 	_MALLOC_LOCK(d->mutex);
1704 	d->func = "calloc";
1705 	if ((nmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
1706 	    nmemb > 0 && SIZE_MAX / nmemb < size) {
1707 		_MALLOC_UNLOCK(d->mutex);
1708 		if (mopts.malloc_xmalloc)
1709 			wrterror(d, "out of memory");
1710 		errno = ENOMEM;
1711 		return NULL;
1712 	}
1713 
1714 	if (d->active++) {
1715 		malloc_recurse(d);
1716 		return NULL;
1717 	}
1718 
1719 	size *= nmemb;
1720 	r = omalloc(d, size, 1, CALLER);
1721 
1722 	d->active--;
1723 	_MALLOC_UNLOCK(d->mutex);
1724 	if (r == NULL && mopts.malloc_xmalloc)
1725 		wrterror(d, "out of memory");
1726 	if (r != NULL)
1727 		errno = saved_errno;
1728 	return r;
1729 }
1730 /*DEF_STRONG(calloc);*/
1731 
1732 static void *
1733 orecallocarray(struct dir_info *argpool, void *p, size_t oldsize,
1734     size_t newsize, void *f)
1735 {
1736 	struct region_info *r;
1737 	struct dir_info *pool;
1738 	char *saved_function;
1739 	void *newptr;
1740 	size_t sz;
1741 
1742 	if (p == NULL)
1743 		return omalloc(argpool, newsize, 1, f);
1744 
1745 	if (oldsize == newsize)
1746 		return p;
1747 
1748 	r = findpool(p, argpool, &pool, &saved_function);
1749 
1750 	REALSIZE(sz, r);
1751 	if (sz <= MALLOC_MAXCHUNK) {
1752 		if (mopts.chunk_canaries && sz > 0) {
1753 			struct chunk_info *info = (struct chunk_info *)r->size;
1754 			uint32_t chunknum = find_chunknum(pool, info, p, 0);
1755 
1756 			if (info->bits[info->offset + chunknum] != oldsize)
1757 				wrterror(pool, "recorded old size %hu != %zu",
1758 				    info->bits[info->offset + chunknum],
1759 				    oldsize);
1760 		}
1761 	} else if (oldsize != sz - mopts.malloc_guard)
1762 		wrterror(pool, "recorded old size %zu != %zu",
1763 		    sz - mopts.malloc_guard, oldsize);
1764 
1765 	newptr = omalloc(pool, newsize, 0, f);
1766 	if (newptr == NULL)
1767 		goto done;
1768 
1769 	if (newsize > oldsize) {
1770 		memcpy(newptr, p, oldsize);
1771 		memset((char *)newptr + oldsize, 0, newsize - oldsize);
1772 	} else
1773 		memcpy(newptr, p, newsize);
1774 
1775 	ofree(pool, p, 1, 0, oldsize);
1776 
1777 done:
1778 	if (argpool != pool) {
1779 		pool->active--;
1780 		pool->func = saved_function;
1781 		_MALLOC_UNLOCK(pool->mutex);
1782 		_MALLOC_LOCK(argpool->mutex);
1783 		argpool->active++;
1784 	}
1785 
1786 	return newptr;
1787 }
1788 
1789 static void *
1790 recallocarray_p(void *ptr, size_t oldnmemb, size_t newnmemb, size_t size)
1791 {
1792 	size_t oldsize, newsize;
1793 	void *newptr;
1794 
1795 	if (ptr == NULL)
1796 		return calloc(newnmemb, size);
1797 
1798 	if ((newnmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
1799 	    newnmemb > 0 && SIZE_MAX / newnmemb < size) {
1800 		errno = ENOMEM;
1801 		return NULL;
1802 	}
1803 	newsize = newnmemb * size;
1804 
1805 	if ((oldnmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
1806 	    oldnmemb > 0 && SIZE_MAX / oldnmemb < size) {
1807 		errno = EINVAL;
1808 		return NULL;
1809 	}
1810 	oldsize = oldnmemb * size;
1811 
1812 	/*
1813 	 * Don't bother too much if we're shrinking just a bit;
1814 	 * we do not shrink for a series of small steps, oh well.
1815 	 */
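	/*
	 * E.g. shrinking 1000 bytes to 900 just zeroes the 100-byte tail in
	 * place, while shrinking 10000 bytes to 2000 (a drop of more than
	 * half) falls through to a fresh allocation below.
	 */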
1816 	if (newsize <= oldsize) {
1817 		size_t d = oldsize - newsize;
1818 
1819 		if (d < oldsize / 2 && d < MALLOC_PAGESIZE) {
1820 			memset((char *)ptr + newsize, 0, d);
1821 			return ptr;
1822 		}
1823 	}
1824 
1825 	newptr = malloc(newsize);
1826 	if (newptr == NULL)
1827 		return NULL;
1828 
1829 	if (newsize > oldsize) {
1830 		memcpy(newptr, ptr, oldsize);
1831 		memset((char *)newptr + oldsize, 0, newsize - oldsize);
1832 	} else
1833 		memcpy(newptr, ptr, newsize);
1834 
1835 	explicit_bzero(ptr, oldsize);
1836 	free(ptr);
1837 
1838 	return newptr;
1839 }
1840 
1841 void *
1842 recallocarray(void *ptr, size_t oldnmemb, size_t newnmemb, size_t size)
1843 {
1844 	struct dir_info *d;
1845 	size_t oldsize = 0, newsize;
1846 	void *r;
1847 	int saved_errno = errno;
1848 
1849 	if (!mopts.internal_funcs)
1850 		return recallocarray_p(ptr, oldnmemb, newnmemb, size);
1851 
1852 	d = getpool();
1853 	if (d == NULL) {
1854 		_malloc_init(0);
1855 		d = getpool();
1856 	}
1857 
1858 	_MALLOC_LOCK(d->mutex);
1859 	d->func = "recallocarray";
1860 
1861 	if ((newnmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
1862 	    newnmemb > 0 && SIZE_MAX / newnmemb < size) {
1863 		_MALLOC_UNLOCK(d->mutex);
1864 		if (mopts.malloc_xmalloc)
1865 			wrterror(d, "out of memory");
1866 		errno = ENOMEM;
1867 		return NULL;
1868 	}
1869 	newsize = newnmemb * size;
1870 
1871 	if (ptr != NULL) {
1872 		if ((oldnmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
1873 		    oldnmemb > 0 && SIZE_MAX / oldnmemb < size) {
1874 			_MALLOC_UNLOCK(d->mutex);
1875 			errno = EINVAL;
1876 			return NULL;
1877 		}
1878 		oldsize = oldnmemb * size;
1879 	}
1880 
1881 	if (d->active++) {
1882 		malloc_recurse(d);
1883 		return NULL;
1884 	}
1885 
1886 	r = orecallocarray(d, ptr, oldsize, newsize, CALLER);
1887 
1888 	d->active--;
1889 	_MALLOC_UNLOCK(d->mutex);
1890 	if (r == NULL && mopts.malloc_xmalloc)
1891 		wrterror(d, "out of memory");
1892 	if (r != NULL)
1893 		errno = saved_errno;
1894 	return r;
1895 }
1896 DEF_WEAK(recallocarray);
1897 
1898 
1899 static void *
1900 mapalign(struct dir_info *d, size_t alignment, size_t sz, int zero_fill)
1901 {
1902 	char *p, *q;
1903 
1904 	if (alignment < MALLOC_PAGESIZE || ((alignment - 1) & alignment) != 0)
1905 		wrterror(d, "mapalign bad alignment");
1906 	if (sz != PAGEROUND(sz))
1907 		wrterror(d, "mapalign round");
1908 
1909 	/* Allocate sz + alignment bytes of memory, which must include a
1910 	 * subrange of sz bytes that is properly aligned.  Unmap the
1911 	 * other bytes, and then return that subrange.
1912 	 */
1913 
1914 	/* We need sz + alignment to fit into a size_t. */
1915 	if (alignment > SIZE_MAX - sz)
1916 		return MAP_FAILED;
1917 
1918 	p = map(d, NULL, sz + alignment, zero_fill);
1919 	if (p == MAP_FAILED)
1920 		return MAP_FAILED;
1921 	q = (char *)(((uintptr_t)p + alignment - 1) & ~(alignment - 1));
1922 	if (q != p) {
1923 		if (munmap(p, q - p))
1924 			wrterror(d, "munmap %p", p);
1925 	}
1926 	if (munmap(q + sz, alignment - (q - p)))
1927 		wrterror(d, "munmap %p", q + sz);
1928 	STATS_SUB(d->malloc_used, alignment);
1929 
1930 	return q;
1931 }
1932 
1933 static void *
1934 omemalign(struct dir_info *pool, size_t alignment, size_t sz, int zero_fill,
1935     void *f)
1936 {
1937 	size_t psz;
1938 	void *p;
1939 
1940 	/* If between half a page and a page, avoid MALLOC_MOVE. */
1941 	if (sz > MALLOC_MAXCHUNK && sz < MALLOC_PAGESIZE)
1942 		sz = MALLOC_PAGESIZE;
1943 	if (alignment <= MALLOC_PAGESIZE) {
1944 		/*
1945 		 * max(sz, alignment) is enough to ensure the requested
1946 		 * alignment, since the allocator always allocates
1947 		 * power-of-two blocks.
1948 		 */
1949 		if (sz < alignment)
1950 			sz = alignment;
1951 		return omalloc(pool, sz, zero_fill, f);
1952 	}
1953 
1954 	if (sz >= SIZE_MAX - mopts.malloc_guard - MALLOC_PAGESIZE) {
1955 		errno = ENOMEM;
1956 		return NULL;
1957 	}
1958 
1959 	sz += mopts.malloc_guard;
1960 	psz = PAGEROUND(sz);
1961 
1962 	p = mapalign(pool, alignment, psz, zero_fill);
1963 	if (p == MAP_FAILED) {
1964 		errno = ENOMEM;
1965 		return NULL;
1966 	}
1967 
1968 	if (insert(pool, p, sz, f)) {
1969 		unmap(pool, p, psz, 0, 0);
1970 		errno = ENOMEM;
1971 		return NULL;
1972 	}
1973 
1974 	if (mopts.malloc_guard) {
1975 		if (mprotect((char *)p + psz - mopts.malloc_guard,
1976 		    mopts.malloc_guard, PROT_NONE))
1977 			wrterror(pool, "mprotect");
1978 		STATS_ADD(pool->malloc_guarded, mopts.malloc_guard);
1979 	}
1980 
1981 	if (mopts.malloc_junk == 2) {
1982 		if (zero_fill)
1983 			memset((char *)p + sz - mopts.malloc_guard,
1984 			    SOME_JUNK, psz - sz);
1985 		else
1986 			memset(p, SOME_JUNK, psz - mopts.malloc_guard);
1987 	} else if (mopts.chunk_canaries)
1988 		fill_canary(p, sz - mopts.malloc_guard,
1989 		    psz - mopts.malloc_guard);
1990 
1991 	return p;
1992 }
1993 
1994 int
1995 posix_memalign(void **memptr, size_t alignment, size_t size)
1996 {
1997 	struct dir_info *d;
1998 	int res, saved_errno = errno;
1999 	void *r;
2000 
2001 	/* Make sure that alignment is a large enough power of 2. */
2002 	if (((alignment - 1) & alignment) != 0 || alignment < sizeof(void *))
2003 		return EINVAL;
2004 
2005 	d = getpool();
2006 	if (d == NULL) {
2007 		_malloc_init(0);
2008 		d = getpool();
2009 	}
2010 	_MALLOC_LOCK(d->mutex);
2011 	d->func = "posix_memalign";
2012 	if (d->active++) {
2013 		malloc_recurse(d);
2014 		goto err;
2015 	}
2016 	r = omemalign(d, alignment, size, 0, CALLER);
2017 	d->active--;
2018 	_MALLOC_UNLOCK(d->mutex);
2019 	if (r == NULL) {
2020 		if (mopts.malloc_xmalloc)
2021 			wrterror(d, "out of memory");
2022 		goto err;
2023 	}
2024 	errno = saved_errno;
2025 	*memptr = r;
2026 	return 0;
2027 
2028 err:
2029 	res = errno;
2030 	errno = saved_errno;
2031 	return res;
2032 }
2033 /*DEF_STRONG(posix_memalign);*/
2034 
2035 void *
2036 aligned_alloc(size_t alignment, size_t size)
2037 {
2038 	struct dir_info *d;
2039 	int saved_errno = errno;
2040 	void *r;
2041 
2042 	/* Make sure that alignment is a positive power of 2. */
2043 	if (((alignment - 1) & alignment) != 0 || alignment == 0) {
2044 		errno = EINVAL;
2045 		return NULL;
2046 	}
2047 	/* Per spec, size should be a multiple of alignment */
2048 	if ((size & (alignment - 1)) != 0) {
2049 		errno = EINVAL;
2050 		return NULL;
2051 	}
2052 
2053 	d = getpool();
2054 	if (d == NULL) {
2055 		_malloc_init(0);
2056 		d = getpool();
2057 	}
2058 	_MALLOC_LOCK(d->mutex);
2059 	d->func = "aligned_alloc";
2060 	if (d->active++) {
2061 		malloc_recurse(d);
2062 		return NULL;
2063 	}
2064 	r = omemalign(d, alignment, size, 0, CALLER);
2065 	d->active--;
2066 	_MALLOC_UNLOCK(d->mutex);
2067 	if (r == NULL) {
2068 		if (mopts.malloc_xmalloc)
2069 			wrterror(d, "out of memory");
2070 		return NULL;
2071 	}
2072 	errno = saved_errno;
2073 	return r;
2074 }
2075 /*DEF_STRONG(aligned_alloc);*/
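/*
 * Caller-side sketch (illustrative, not part of this file): per the
 * check above, size must be a multiple of alignment, and failure is
 * reported via a NULL return with errno set.
 *
 *	void *p = aligned_alloc(64, 1024);
 *	if (p == NULL)
 *		err(1, "aligned_alloc");
 *	free(p);
 */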
2076 
2077 #ifdef MALLOC_STATS
2078 
2079 struct malloc_leak {
2080 	void *f;
2081 	size_t total_size;
2082 	int count;
2083 };
2084 
2085 struct leaknode {
2086 	RBT_ENTRY(leaknode) entry;
2087 	struct malloc_leak d;
2088 };
2089 
2090 static inline int
2091 leakcmp(const struct leaknode *e1, const struct leaknode *e2)
2092 {
2093 	return e1->d.f < e2->d.f ? -1 : e1->d.f > e2->d.f;
2094 }
2095 
2096 static RBT_HEAD(leaktree, leaknode) leakhead;
2097 RBT_PROTOTYPE(leaktree, leaknode, entry, leakcmp);
2098 RBT_GENERATE(leaktree, leaknode, entry, leakcmp);
2099 
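/*
 * Accumulate sz * cnt bytes of outstanding allocations for caller
 * address f.  Nodes come from a page obtained directly via MMAP() so
 * that building the leak report never calls malloc itself.
 */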
2100 static void
2101 putleakinfo(void *f, size_t sz, int cnt)
2102 {
2103 	struct leaknode key, *p;
2104 	static struct leaknode *page;
2105 	static int used;
2106 
2107 	if (cnt == 0 || page == MAP_FAILED)
2108 		return;
2109 
2110 	key.d.f = f;
2111 	p = RBT_FIND(leaktree, &leakhead, &key);
2112 	if (p == NULL) {
2113 		if (page == NULL ||
2114 		    used >= MALLOC_PAGESIZE / sizeof(struct leaknode)) {
2115 			page = MMAP(MALLOC_PAGESIZE);
2116 			if (page == MAP_FAILED)
2117 				return;
2118 			used = 0;
2119 		}
2120 		p = &page[used++];
2121 		p->d.f = f;
2122 		p->d.total_size = sz * cnt;
2123 		p->d.count = cnt;
2124 		RBT_INSERT(leaktree, &leakhead, p);
2125 	} else {
2126 		p->d.total_size += sz * cnt;
2127 		p->d.count += cnt;
2128 	}
2129 }
2130 
2131 static struct malloc_leak *malloc_leaks;
2132 
2133 static void
2134 dump_leaks(int fd)
2135 {
2136 	struct leaknode *p;
2137 	int i = 0;
2138 
2139 	dprintf(fd, "Leak report\n");
2140 	dprintf(fd, "                 f     sum      #    avg\n");
2141 	/* XXX only one page of summary */
2142 	if (malloc_leaks == NULL)
2143 		malloc_leaks = MMAP(MALLOC_PAGESIZE);
2144 	if (malloc_leaks != MAP_FAILED)
2145 		memset(malloc_leaks, 0, MALLOC_PAGESIZE);
2146 	RBT_FOREACH(p, leaktree, &leakhead) {
2147 		dprintf(fd, "%18p %7zu %6d %6zu\n", p->d.f,
2148 		    p->d.total_size, p->d.count, p->d.total_size / p->d.count);
2149 		if (malloc_leaks == MAP_FAILED ||
2150 		    i >= MALLOC_PAGESIZE / sizeof(struct malloc_leak))
2151 			continue;
2152 		malloc_leaks[i].f = p->d.f;
2153 		malloc_leaks[i].total_size = p->d.total_size;
2154 		malloc_leaks[i].count = p->d.count;
2155 		i++;
2156 	}
2157 }
2158 
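/*
 * Print the header of a chunk page.  When fromfreelist is set, walk
 * and print the whole list; otherwise also feed this page's in-use
 * chunk counts into the leak report.
 */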
2159 static void
2160 dump_chunk(int fd, struct chunk_info *p, void *f, int fromfreelist)
2161 {
2162 	while (p != NULL) {
2163 		dprintf(fd, "chunk %18p %18p %4d %d/%d\n",
2164 		    p->page, ((p->bits[0] & 1) ? NULL : f),
2165 		    p->size, p->free, p->total);
2166 		if (!fromfreelist) {
2167 			if (p->bits[0] & 1)
2168 				putleakinfo(NULL, p->size, p->total - p->free);
2169 			else {
2170 				putleakinfo(f, p->size, 1);
2171 				putleakinfo(NULL, p->size,
2172 				    p->total - p->free - 1);
2173 			}
2174 			break;
2175 		}
2176 		p = LIST_NEXT(p, entries);
2177 		if (p != NULL)
2178 			dprintf(fd, "        ");
2179 	}
2180 }
2181 
2182 static void
2183 dump_free_chunk_info(int fd, struct dir_info *d)
2184 {
2185 	int i, j, count;
2186 	struct chunk_info *p;
2187 
2188 	dprintf(fd, "Free chunk structs:\n");
2189 	for (i = 0; i <= MALLOC_MAXSHIFT; i++) {
2190 		count = 0;
2191 		LIST_FOREACH(p, &d->chunk_info_list[i], entries)
2192 			count++;
2193 		for (j = 0; j < MALLOC_CHUNK_LISTS; j++) {
2194 			p = LIST_FIRST(&d->chunk_dir[i][j]);
2195 			if (p == NULL && count == 0)
2196 				continue;
2197 			dprintf(fd, "%2d) %3d ", i, count);
2198 			if (p != NULL)
2199 				dump_chunk(fd, p, NULL, 1);
2200 			else
2201 				dprintf(fd, "\n");
2202 		}
2203 	}
2204 
2205 }
2206 
2207 static void
2208 dump_free_page_info(int fd, struct dir_info *d)
2209 {
2210 	int i;
2211 
2212 	dprintf(fd, "Free pages cached: %zu\n", d->free_regions_size);
2213 	for (i = 0; i < mopts.malloc_cache; i++) {
2214 		if (d->free_regions[i].p != NULL) {
2215 			dprintf(fd, "%2d) ", i);
2216 			dprintf(fd, "free at %p: %zu\n",
2217 			    d->free_regions[i].p, d->free_regions[i].size);
2218 		}
2219 	}
2220 }
2221 
2222 static void
2223 malloc_dump1(int fd, int poolno, struct dir_info *d)
2224 {
2225 	size_t i, realsize;
2226 
2227 	dprintf(fd, "Malloc dir of %s pool %d at %p\n", __progname, poolno, d);
2228 	if (d == NULL)
2229 		return;
2230 	dprintf(fd, "Region slots free %zu/%zu\n",
2231 		d->regions_free, d->regions_total);
2232 	dprintf(fd, "Finds %zu/%zu\n", d->finds,
2233 	    d->find_collisions);
2234 	dprintf(fd, "Inserts %zu/%zu\n", d->inserts,
2235 	    d->insert_collisions);
2236 	dprintf(fd, "Deletes %zu/%zu\n", d->deletes,
2237 	    d->delete_moves);
2238 	dprintf(fd, "Cheap reallocs %zu/%zu\n",
2239 	    d->cheap_reallocs, d->cheap_realloc_tries);
2240 	dprintf(fd, "In use %zu\n", d->malloc_used);
2241 	dprintf(fd, "Guarded %zu\n", d->malloc_guarded);
2242 	dump_free_chunk_info(fd, d);
2243 	dump_free_page_info(fd, d);
2244 	dprintf(fd,
2245 	    "slot)  hash d  type               page                  f size [free/n]\n");
2246 	for (i = 0; i < d->regions_total; i++) {
2247 		if (d->r[i].p != NULL) {
2248 			size_t h = hash(d->r[i].p) &
2249 			    (d->regions_total - 1);
2250 			dprintf(fd, "%4zx) #%4zx %zd ",
2251 			    i, h, h - i);
2252 			REALSIZE(realsize, &d->r[i]);
2253 			if (realsize > MALLOC_MAXCHUNK) {
2254 				putleakinfo(d->r[i].f, realsize, 1);
2255 				dprintf(fd,
2256 				    "pages %18p %18p %zu\n", d->r[i].p,
2257 				    d->r[i].f, realsize);
2258 			} else
2259 				dump_chunk(fd,
2260 				    (struct chunk_info *)d->r[i].size,
2261 				    d->r[i].f, 0);
2262 		}
2263 	}
2264 	dump_leaks(fd);
2265 	dprintf(fd, "\n");
2266 }
2267 
2268 void
2269 malloc_dump(int fd, int poolno, struct dir_info *pool)
2270 {
2271 	int i;
2272 	void *p;
2273 	struct region_info *r;
2274 	int saved_errno = errno;
2275 
2276 	if (pool == NULL)
2277 		return;
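	/*
	 * Flush the delayed-free list first so its chunks are not
	 * reported as leaks.
	 */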
2278 	for (i = 0; i < MALLOC_DELAYED_CHUNK_MASK + 1; i++) {
2279 		p = pool->delayed_chunks[i];
2280 		if (p == NULL)
2281 			continue;
2282 		r = find(pool, p);
2283 		if (r == NULL)
2284 			wrterror(pool, "bogus pointer in malloc_dump %p", p);
2285 		free_bytes(pool, r, p);
2286 		pool->delayed_chunks[i] = NULL;
2287 	}
2288 	/* XXX leak when run multiple times */
2289 	RBT_INIT(leaktree, &leakhead);
2290 	malloc_dump1(fd, poolno, pool);
2291 	errno = saved_errno;
2292 }
2293 DEF_WEAK(malloc_dump);
2294 
2295 void
2296 malloc_gdump(int fd)
2297 {
2298 	int i;
2299 	int saved_errno = errno;
2300 
2301 	for (i = 0; i < _MALLOC_MUTEXES; i++)
2302 		malloc_dump(fd, i, mopts.malloc_pool[i]);
2303 
2304 	errno = saved_errno;
2305 }
2306 DEF_WEAK(malloc_gdump);
2307 
2308 static void
2309 malloc_exit(void)
2310 {
2311 	int save_errno = errno, fd, i;
2312 
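	/*
	 * The dump is appended to "malloc.out" in the current directory;
	 * since O_CREAT is not passed, the file must already exist.
	 */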
2313 	fd = open("malloc.out", O_RDWR|O_APPEND);
2314 	if (fd != -1) {
2315 		dprintf(fd, "******** Start dump %s *******\n", __progname);
2316 		dprintf(fd,
2317 		    "MT=%d I=%d F=%d U=%d J=%d R=%d X=%d C=%d cache=%u G=%zu\n",
2318 		    mopts.malloc_mt, mopts.internal_funcs,
2319 		    mopts.malloc_freecheck,
2320 		    mopts.malloc_freeunmap, mopts.malloc_junk,
2321 		    mopts.malloc_realloc, mopts.malloc_xmalloc,
2322 		    mopts.chunk_canaries, mopts.malloc_cache,
2323 		    mopts.malloc_guard);
2324 
2325 		for (i = 0; i < _MALLOC_MUTEXES; i++)
2326 			malloc_dump(fd, i, mopts.malloc_pool[i]);
2327 		dprintf(fd, "******** End dump %s *******\n", __progname);
2328 		close(fd);
2329 	} else
2330 		dprintf(STDERR_FILENO,
2331 		    "malloc() warning: Couldn't dump stats\n");
2332 	errno = save_errno;
2333 }
2334 
2335 #endif /* MALLOC_STATS */
2336