/*	$OpenBSD: malloc.c,v 1.273 2022/02/26 16:14:42 otto Exp $	*/
/*
 * Copyright (c) 2008, 2010, 2011, 2016 Otto Moerbeek <otto@drijf.net>
 * Copyright (c) 2012 Matthew Dempsky <matthew@openbsd.org>
 * Copyright (c) 2008 Damien Miller <djm@openbsd.org>
 * Copyright (c) 2000 Poul-Henning Kamp <phk@FreeBSD.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * If we meet some day, and you think this stuff is worth it, you
 * can buy me a beer in return. Poul-Henning Kamp
 */

/* #define MALLOC_STATS */

#include <sys/types.h>
#include <sys/queue.h>
#include <sys/mman.h>
#include <sys/sysctl.h>
#include <uvm/uvmexp.h>
#include <errno.h>
#include <stdarg.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#ifdef MALLOC_STATS
#include <sys/tree.h>
#include <fcntl.h>
#endif

#include "thread_private.h"
#include <tib.h>

#define MALLOC_PAGESHIFT	_MAX_PAGE_SHIFT

#define MALLOC_MINSHIFT		4
#define MALLOC_MAXSHIFT		(MALLOC_PAGESHIFT - 1)
#define MALLOC_PAGESIZE		(1UL << MALLOC_PAGESHIFT)
#define MALLOC_MINSIZE		(1UL << MALLOC_MINSHIFT)
#define MALLOC_PAGEMASK		(MALLOC_PAGESIZE - 1)
#define MASK_POINTER(p)		((void *)(((uintptr_t)(p)) & ~MALLOC_PAGEMASK))

#define MALLOC_MAXCHUNK		(1 << MALLOC_MAXSHIFT)
#define MALLOC_MAXCACHE		256
#define MALLOC_DELAYED_CHUNK_MASK	15
#ifdef MALLOC_STATS
#define MALLOC_INITIAL_REGIONS	512
#else
#define MALLOC_INITIAL_REGIONS	(MALLOC_PAGESIZE / sizeof(struct region_info))
#endif
#define MALLOC_DEFAULT_CACHE	64
#define MALLOC_CHUNK_LISTS	4
#define CHUNK_CHECK_LENGTH	32

/*
 * We move allocations between half a page and a whole page towards the end,
 * subject to alignment constraints. This is the extra headroom we allow.
 * Set to zero to be the most strict.
 */
#define MALLOC_LEEWAY		0
#define MALLOC_MOVE_COND(sz)	((sz) - mopts.malloc_guard < 		\
				    MALLOC_PAGESIZE - MALLOC_LEEWAY)
#define MALLOC_MOVE(p, sz)  	(((char *)(p)) +			\
				    ((MALLOC_PAGESIZE - MALLOC_LEEWAY -	\
			    	    ((sz) - mopts.malloc_guard)) & 	\
				    ~(MALLOC_MINSIZE - 1)))
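
/*
 * A worked example, assuming 4 KB pages, MALLOC_LEEWAY 0 and no guard
 * page: for sz = 100, MALLOC_MOVE places the allocation at page offset
 * (4096 - 100) & ~15 = 3984, so the 100 bytes end 12 bytes short of the
 * page boundary, the closest MALLOC_MINSIZE-aligned position to the end.
 */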

#define PAGEROUND(x)  (((x) + (MALLOC_PAGEMASK)) & ~MALLOC_PAGEMASK)

/*
 * Junk fill values.  These are the byte values we use to fill memory
 * when the 'J' option is enabled: SOME_JUNK right after allocation,
 * and SOME_FREEJUNK right before freeing.
 */
#define SOME_JUNK		0xdb	/* deadbeef */
#define SOME_FREEJUNK		0xdf	/* dead, free */
#define SOME_FREEJUNK_ULL	0xdfdfdfdfdfdfdfdfULL
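
/*
 * SOME_FREEJUNK_ULL is simply SOME_FREEJUNK repeated in every byte of a
 * 64-bit word, so freed memory can be filled and validated one word at a
 * time instead of byte by byte.
 */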

#define MMAP(sz,f)	mmap(NULL, (sz), PROT_READ | PROT_WRITE, \
    MAP_ANON | MAP_PRIVATE | (f), -1, 0)

#define MMAPNONE(sz,f)	mmap(NULL, (sz), PROT_NONE, \
    MAP_ANON | MAP_PRIVATE | (f), -1, 0)

#define MMAPA(a,sz,f)	mmap((a), (sz), PROT_READ | PROT_WRITE, \
    MAP_ANON | MAP_PRIVATE | (f), -1, 0)

#define MQUERY(a,sz,f)	mquery((a), (sz), PROT_READ | PROT_WRITE, \
    MAP_ANON | MAP_PRIVATE | MAP_FIXED | (f), -1, 0)
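
/*
 * MQUERY uses mquery(2) to ask whether a mapping could be placed at the
 * given address without actually creating it; MMAPA then maps with that
 * address as a hint and the caller checks that the kernel honoured it.
 * The cheap-realloc path below uses this pair to try extending an
 * existing region in place.
 */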

struct region_info {
	void *p;		/* page; low bits used to mark chunks */
	uintptr_t size;		/* size for pages, or chunk_info pointer */
#ifdef MALLOC_STATS
	void *f;		/* where allocated from */
#endif
};

LIST_HEAD(chunk_head, chunk_info);

/*
 * Two caches, one for "small" regions and one for "big" ones.
 * The small cache is an array per size; the big cache is a single
 * array holding regions of different sizes.
 */
#define MAX_SMALLCACHEABLE_SIZE	32
#define MAX_BIGCACHEABLE_SIZE	512
/* If the total # of pages is larger than this, evict before inserting */
#define BIGCACHE_FILL(sz)	(MAX_BIGCACHEABLE_SIZE * (sz) / 4)
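
/*
 * Example: with the default def_maxcache of 64 big-cache slots,
 * BIGCACHE_FILL yields 512 * 64 / 4 = 8192 pages, so eviction starts
 * once more than 8192 pages worth of regions sit in the big cache.
 */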

struct smallcache {
	void **pages;
	ushort length;
	ushort max;
};

struct bigcache {
	void *page;
	size_t psize;
};

struct dir_info {
	u_int32_t canary1;
	int active;			/* status of malloc */
	struct region_info *r;		/* region slots */
	size_t regions_total;		/* number of region slots */
	size_t regions_free;		/* number of free slots */
	size_t rbytesused;		/* random bytes used */
	char *func;			/* current function */
	int malloc_junk;		/* junk fill? */
	int mmap_flag;			/* extra flag for mmap */
	int mutex;
					/* lists of free chunk info structs */
	struct chunk_head chunk_info_list[MALLOC_MAXSHIFT + 1];
					/* lists of chunks with free slots */
	struct chunk_head chunk_dir[MALLOC_MAXSHIFT + 1][MALLOC_CHUNK_LISTS];
					/* delayed free chunk slots */
	void *delayed_chunks[MALLOC_DELAYED_CHUNK_MASK + 1];
	u_char rbytes[32];		/* random bytes */
					/* free pages cache */
	struct smallcache smallcache[MAX_SMALLCACHEABLE_SIZE];
	size_t bigcache_used;
	size_t bigcache_size;
	struct bigcache *bigcache;
#ifdef MALLOC_STATS
	size_t inserts;
	size_t insert_collisions;
	size_t finds;
	size_t find_collisions;
	size_t deletes;
	size_t delete_moves;
	size_t cheap_realloc_tries;
	size_t cheap_reallocs;
	size_t malloc_used;		/* bytes allocated */
	size_t malloc_guarded;		/* bytes used for guards */
	size_t pool_searches;		/* searches for pool */
	size_t other_pool;		/* searches in other pool */
#define STATS_ADD(x,y)	((x) += (y))
#define STATS_SUB(x,y)	((x) -= (y))
#define STATS_INC(x)	((x)++)
#define STATS_ZERO(x)	((x) = 0)
#define STATS_SETF(x,y)	((x)->f = (y))
#else
#define STATS_ADD(x,y)	/* nothing */
#define STATS_SUB(x,y)	/* nothing */
#define STATS_INC(x)	/* nothing */
#define STATS_ZERO(x)	/* nothing */
#define STATS_SETF(x,y)	/* nothing */
#endif /* MALLOC_STATS */
	u_int32_t canary2;
};
#define DIR_INFO_RSZ	((sizeof(struct dir_info) + MALLOC_PAGEMASK) & \
			~MALLOC_PAGEMASK)

static void unmap(struct dir_info *d, void *p, size_t sz, size_t clear);

/*
 * This structure describes a page worth of chunks.
 *
 * How many bits per u_short in the bitmap
 */
#define MALLOC_BITS		(NBBY * sizeof(u_short))
struct chunk_info {
	LIST_ENTRY(chunk_info) entries;
	void *page;			/* pointer to the page */
	u_short canary;
	u_short size;			/* size of this page's chunks */
	u_short shift;			/* how far to shift for this size */
	u_short free;			/* how many free chunks */
	u_short total;			/* how many chunks */
	u_short offset;			/* requested size table offset */
	u_short bits[1];		/* which chunks are free */
};

struct malloc_readonly {
					/* Main bookkeeping information */
	struct dir_info *malloc_pool[_MALLOC_MUTEXES];
	u_int	malloc_mutexes;		/* how many are in actual use? */
	int	malloc_mt;		/* multi-threaded mode? */
	int	malloc_freecheck;	/* Extensive double free check */
	int	malloc_freeunmap;	/* mprotect free pages PROT_NONE? */
	int	def_malloc_junk;	/* junk fill? */
	int	malloc_realloc;		/* always realloc? */
	int	malloc_xmalloc;		/* xmalloc behaviour? */
	u_int	chunk_canaries;		/* use canaries after chunks? */
	int	internal_funcs;		/* use better recallocarray/freezero? */
	u_int	def_maxcache;		/* free pages we cache */
	size_t	malloc_guard;		/* use guard pages after allocations? */
#ifdef MALLOC_STATS
	int	malloc_stats;		/* dump statistics at end */
#endif
	u_int32_t malloc_canary;	/* Matched against ones in pool */
};

/* This object is mapped PROT_READ after initialisation to prevent tampering */
static union {
	struct malloc_readonly mopts;
	u_char _pad[MALLOC_PAGESIZE];
} malloc_readonly __attribute__((aligned(MALLOC_PAGESIZE)));
#define mopts	malloc_readonly.mopts

char		*malloc_options;	/* compile-time options */

static __dead void wrterror(struct dir_info *d, char *msg, ...)
    __attribute__((__format__ (printf, 2, 3)));

#ifdef MALLOC_STATS
void malloc_dump(int, int, struct dir_info *);
PROTO_NORMAL(malloc_dump);
void malloc_gdump(int);
PROTO_NORMAL(malloc_gdump);
static void malloc_exit(void);
#define CALLER	__builtin_return_address(0)
#else
#define CALLER	NULL
#endif

/*
 * The low bits of r->p determine the size: 0 means >= page size, with
 * r->size holding the real size; 1 means malloc(0); otherwise the low
 * bits are the chunk shift count plus one.
 */
#define REALSIZE(sz, r)						\
	(sz) = (uintptr_t)(r)->p & MALLOC_PAGEMASK,		\
	(sz) = ((sz) == 0 ? (r)->size : ((sz) == 1 ? 0 : (1 << ((sz)-1))))
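
/*
 * A sketch of the encoding: if r->p is (page | 5), the region holds
 * chunks of size 1 << 4 = 16 bytes; (page | 1) marks a malloc(0) page;
 * an untagged pointer means a page-sized or larger region whose exact
 * size lives in r->size.
 */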

static inline void
_MALLOC_LEAVE(struct dir_info *d)
{
	if (mopts.malloc_mt) {
		d->active--;
		_MALLOC_UNLOCK(d->mutex);
	}
}

static inline void
_MALLOC_ENTER(struct dir_info *d)
{
	if (mopts.malloc_mt) {
		_MALLOC_LOCK(d->mutex);
		d->active++;
	}
}

static inline size_t
hash(void *p)
{
	size_t sum;
	uintptr_t u;

	u = (uintptr_t)p >> MALLOC_PAGESHIFT;
	sum = u;
	sum = (sum << 7) - sum + (u >> 16);
#ifdef __LP64__
	sum = (sum << 7) - sum + (u >> 32);
	sum = (sum << 7) - sum + (u >> 48);
#endif
	return sum;
}
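
/*
 * The hash discards the page-offset bits, then mixes the remaining page
 * number with a multiply-by-127 step ((sum << 7) - sum), folding in the
 * higher 16-bit groups so 64-bit addresses spread across the table.
 */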

static inline struct dir_info *
getpool(void)
{
	if (!mopts.malloc_mt)
		return mopts.malloc_pool[1];
	else	/* first one reserved for special pool */
		return mopts.malloc_pool[1 + TIB_GET()->tib_tid %
		    (mopts.malloc_mutexes - 1)];
}

static __dead void
wrterror(struct dir_info *d, char *msg, ...)
{
	int		saved_errno = errno;
	va_list		ap;

	dprintf(STDERR_FILENO, "%s(%d) in %s(): ", __progname,
	    getpid(), (d != NULL && d->func) ? d->func : "unknown");
	va_start(ap, msg);
	vdprintf(STDERR_FILENO, msg, ap);
	va_end(ap);
	dprintf(STDERR_FILENO, "\n");

#ifdef MALLOC_STATS
	if (mopts.malloc_stats)
		malloc_gdump(STDERR_FILENO);
#endif /* MALLOC_STATS */

	errno = saved_errno;

	abort();
}

static void
rbytes_init(struct dir_info *d)
{
	arc4random_buf(d->rbytes, sizeof(d->rbytes));
	/* add 1 to account for using d->rbytes[0] */
	d->rbytesused = 1 + d->rbytes[0] % (sizeof(d->rbytes) / 2);
}

static inline u_char
getrbyte(struct dir_info *d)
{
	u_char x;

	if (d->rbytesused >= sizeof(d->rbytes))
		rbytes_init(d);
	x = d->rbytes[d->rbytesused++];
	return x;
}

static void
omalloc_parseopt(char opt)
{
	switch (opt) {
	case '+':
		mopts.malloc_mutexes <<= 1;
		if (mopts.malloc_mutexes > _MALLOC_MUTEXES)
			mopts.malloc_mutexes = _MALLOC_MUTEXES;
		break;
	case '-':
		mopts.malloc_mutexes >>= 1;
		if (mopts.malloc_mutexes < 2)
			mopts.malloc_mutexes = 2;
		break;
	case '>':
		mopts.def_maxcache <<= 1;
		if (mopts.def_maxcache > MALLOC_MAXCACHE)
			mopts.def_maxcache = MALLOC_MAXCACHE;
		break;
	case '<':
		mopts.def_maxcache >>= 1;
		break;
	case 'c':
		mopts.chunk_canaries = 0;
		break;
	case 'C':
		mopts.chunk_canaries = 1;
		break;
#ifdef MALLOC_STATS
	case 'd':
		mopts.malloc_stats = 0;
		break;
	case 'D':
		mopts.malloc_stats = 1;
		break;
#endif /* MALLOC_STATS */
	case 'f':
		mopts.malloc_freecheck = 0;
		mopts.malloc_freeunmap = 0;
		break;
	case 'F':
		mopts.malloc_freecheck = 1;
		mopts.malloc_freeunmap = 1;
		break;
	case 'g':
		mopts.malloc_guard = 0;
		break;
	case 'G':
		mopts.malloc_guard = MALLOC_PAGESIZE;
		break;
	case 'j':
		if (mopts.def_malloc_junk > 0)
			mopts.def_malloc_junk--;
		break;
	case 'J':
		if (mopts.def_malloc_junk < 2)
			mopts.def_malloc_junk++;
		break;
	case 'r':
		mopts.malloc_realloc = 0;
		break;
	case 'R':
		mopts.malloc_realloc = 1;
		break;
	case 'u':
		mopts.malloc_freeunmap = 0;
		break;
	case 'U':
		mopts.malloc_freeunmap = 1;
		break;
	case 'x':
		mopts.malloc_xmalloc = 0;
		break;
	case 'X':
		mopts.malloc_xmalloc = 1;
		break;
	default:
		dprintf(STDERR_FILENO, "malloc() warning: "
		    "unknown char in MALLOC_OPTIONS\n");
		break;
	}
}

static void
omalloc_init(void)
{
	char *p, *q, b[16];
	int i, j;
	const int mib[2] = { CTL_VM, VM_MALLOC_CONF };
	size_t sb;

	/*
	 * Default options
	 */
	mopts.malloc_mutexes = 8;
	mopts.def_malloc_junk = 1;
	mopts.def_maxcache = MALLOC_DEFAULT_CACHE;

	for (i = 0; i < 3; i++) {
		switch (i) {
		case 0:
			sb = sizeof(b);
			j = sysctl(mib, 2, b, &sb, NULL, 0);
			if (j != 0)
				continue;
			p = b;
			break;
		case 1:
			if (issetugid() == 0)
				p = getenv("MALLOC_OPTIONS");
			else
				continue;
			break;
		case 2:
			p = malloc_options;
			break;
		default:
			p = NULL;
		}

		for (; p != NULL && *p != '\0'; p++) {
			switch (*p) {
			case 'S':
				for (q = "CFGJ"; *q != '\0'; q++)
					omalloc_parseopt(*q);
				mopts.def_maxcache = 0;
				break;
			case 's':
				for (q = "cfgj"; *q != '\0'; q++)
					omalloc_parseopt(*q);
				mopts.def_maxcache = MALLOC_DEFAULT_CACHE;
				break;
			default:
				omalloc_parseopt(*p);
				break;
			}
		}
	}

#ifdef MALLOC_STATS
	if (mopts.malloc_stats && (atexit(malloc_exit) == -1)) {
		dprintf(STDERR_FILENO, "malloc() warning: atexit(2) failed."
		    " Will not be able to dump stats on exit\n");
	}
#endif /* MALLOC_STATS */

	while ((mopts.malloc_canary = arc4random()) == 0)
		;
	if (mopts.chunk_canaries)
		do {
			mopts.chunk_canaries = arc4random();
		} while ((u_char)mopts.chunk_canaries == 0 ||
		    (u_char)mopts.chunk_canaries == SOME_FREEJUNK);
}
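
/*
 * Options are read, in order, from the vm.malloc_conf sysctl, the
 * MALLOC_OPTIONS environment variable (skipped for set[ug]id programs)
 * and the global malloc_options, with later sources overriding earlier
 * ones.  For example, running a program with MALLOC_OPTIONS=S enables
 * the security-audit combination (C, F, G and J) and disables the page
 * cache.
 */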

static void
omalloc_poolinit(struct dir_info **dp, int mmap_flag)
{
	char *p;
	size_t d_avail, regioninfo_size;
	struct dir_info *d;
	int i, j;

	/*
	 * Allocate dir_info with a guard page on either side. Also
	 * randomise offset inside the page at which the dir_info
	 * lies (subject to alignment by 1 << MALLOC_MINSHIFT)
	 */
	if ((p = MMAPNONE(DIR_INFO_RSZ + (MALLOC_PAGESIZE * 2), mmap_flag)) ==
	    MAP_FAILED)
		wrterror(NULL, "malloc init mmap failed");
	mprotect(p + MALLOC_PAGESIZE, DIR_INFO_RSZ, PROT_READ | PROT_WRITE);
	d_avail = (DIR_INFO_RSZ - sizeof(*d)) >> MALLOC_MINSHIFT;
	d = (struct dir_info *)(p + MALLOC_PAGESIZE +
	    (arc4random_uniform(d_avail) << MALLOC_MINSHIFT));

	rbytes_init(d);
	d->regions_free = d->regions_total = MALLOC_INITIAL_REGIONS;
	regioninfo_size = d->regions_total * sizeof(struct region_info);
	d->r = MMAP(regioninfo_size, mmap_flag);
	if (d->r == MAP_FAILED) {
		d->regions_total = 0;
		wrterror(NULL, "malloc init mmap failed");
	}
	for (i = 0; i <= MALLOC_MAXSHIFT; i++) {
		LIST_INIT(&d->chunk_info_list[i]);
		for (j = 0; j < MALLOC_CHUNK_LISTS; j++)
			LIST_INIT(&d->chunk_dir[i][j]);
	}
	STATS_ADD(d->malloc_used, regioninfo_size + 3 * MALLOC_PAGESIZE);
	d->mmap_flag = mmap_flag;
	d->malloc_junk = mopts.def_malloc_junk;
	d->canary1 = mopts.malloc_canary ^ (u_int32_t)(uintptr_t)d;
	d->canary2 = ~d->canary1;

	*dp = d;
}

static int
omalloc_grow(struct dir_info *d)
{
	size_t newtotal;
	size_t newsize;
	size_t mask;
	size_t i, oldpsz;
	struct region_info *p;

	if (d->regions_total > SIZE_MAX / sizeof(struct region_info) / 2)
		return 1;

	newtotal = d->regions_total * 2;
	newsize = PAGEROUND(newtotal * sizeof(struct region_info));
	mask = newtotal - 1;

	/* Don't use the cache here; we don't want a user use-after-free to touch this */
	p = MMAP(newsize, d->mmap_flag);
	if (p == MAP_FAILED)
		return 1;

	STATS_ADD(d->malloc_used, newsize);
	STATS_ZERO(d->inserts);
	STATS_ZERO(d->insert_collisions);
	for (i = 0; i < d->regions_total; i++) {
		void *q = d->r[i].p;
		if (q != NULL) {
			size_t index = hash(q) & mask;
			STATS_INC(d->inserts);
			while (p[index].p != NULL) {
				index = (index - 1) & mask;
				STATS_INC(d->insert_collisions);
			}
			p[index] = d->r[i];
		}
	}

	oldpsz = PAGEROUND(d->regions_total * sizeof(struct region_info));
	/* clear to avoid meta info ending up in the cache */
	unmap(d, d->r, oldpsz, oldpsz);
	d->regions_free += d->regions_total;
	d->regions_total = newtotal;
	d->r = p;
	return 0;
}
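
/*
 * The region table is an open-addressing hash table probed downwards
 * (index - 1, wrapping), kept at most about 3/4 full: insert() below
 * doubles it once the free slots drop under a quarter of the total.
 */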

/*
 * The hashtable uses the assumption that p is never NULL. This holds since
 * non-MAP_FIXED mappings with hint 0 start at BRKSIZ.
 */
static int
insert(struct dir_info *d, void *p, size_t sz, void *f)
{
	size_t index;
	size_t mask;
	void *q;

	if (d->regions_free * 4 < d->regions_total) {
		if (omalloc_grow(d))
			return 1;
	}
	mask = d->regions_total - 1;
	index = hash(p) & mask;
	q = d->r[index].p;
	STATS_INC(d->inserts);
	while (q != NULL) {
		index = (index - 1) & mask;
		q = d->r[index].p;
		STATS_INC(d->insert_collisions);
	}
	d->r[index].p = p;
	d->r[index].size = sz;
#ifdef MALLOC_STATS
	d->r[index].f = f;
#endif
	d->regions_free--;
	return 0;
}

static struct region_info *
find(struct dir_info *d, void *p)
{
	size_t index;
	size_t mask = d->regions_total - 1;
	void *q, *r;

	if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)(uintptr_t)d) ||
	    d->canary1 != ~d->canary2)
		wrterror(d, "internal struct corrupt");
	p = MASK_POINTER(p);
	index = hash(p) & mask;
	r = d->r[index].p;
	q = MASK_POINTER(r);
	STATS_INC(d->finds);
	while (q != p && r != NULL) {
		index = (index - 1) & mask;
		r = d->r[index].p;
		q = MASK_POINTER(r);
		STATS_INC(d->find_collisions);
	}
	return (q == p && r != NULL) ? &d->r[index] : NULL;
}

static void
delete(struct dir_info *d, struct region_info *ri)
{
	/* algorithm R, Knuth Vol III section 6.4 */
	size_t mask = d->regions_total - 1;
	size_t i, j, r;

	if (d->regions_total & (d->regions_total - 1))
		wrterror(d, "regions_total not 2^x");
	d->regions_free++;
	STATS_INC(d->deletes);

	i = ri - d->r;
	for (;;) {
		d->r[i].p = NULL;
		d->r[i].size = 0;
		j = i;
		for (;;) {
			i = (i - 1) & mask;
			if (d->r[i].p == NULL)
				return;
			r = hash(d->r[i].p) & mask;
			if ((i <= r && r < j) || (r < j && j < i) ||
			    (j < i && i <= r))
				continue;
			d->r[j] = d->r[i];
			STATS_INC(d->delete_moves);
			break;
		}
	}
}

static inline void
junk_free(int junk, void *p, size_t sz)
{
	size_t i, step = 1;
	uint64_t *lp = p;

	if (junk == 0 || sz == 0)
		return;
	sz /= sizeof(uint64_t);
	if (junk == 1) {
		if (sz > MALLOC_PAGESIZE / sizeof(uint64_t))
			sz = MALLOC_PAGESIZE / sizeof(uint64_t);
		step = sz / 4;
		if (step == 0)
			step = 1;
	}
	for (i = 0; i < sz; i += step)
		lp[i] = SOME_FREEJUNK_ULL;
}

static inline void
validate_junk(struct dir_info *pool, void *p, size_t sz)
{
	size_t i, step = 1;
	uint64_t *lp = p;

	if (pool->malloc_junk == 0 || sz == 0)
		return;
	sz /= sizeof(uint64_t);
	if (pool->malloc_junk == 1) {
		if (sz > MALLOC_PAGESIZE / sizeof(uint64_t))
			sz = MALLOC_PAGESIZE / sizeof(uint64_t);
		step = sz / 4;
		if (step == 0)
			step = 1;
	}
	for (i = 0; i < sz; i += step) {
		if (lp[i] != SOME_FREEJUNK_ULL)
			wrterror(pool, "write after free %p", p);
	}
}

/*
 * Cache maintenance.
 * Unlike the regular region data structure, the sizes in the
 * cache are in MALLOC_PAGESIZE units.
 */
static void
unmap(struct dir_info *d, void *p, size_t sz, size_t clear)
{
	size_t psz = sz >> MALLOC_PAGESHIFT;
	void *r;
	u_short i;
	struct smallcache *cache;

	if (sz != PAGEROUND(sz) || psz == 0)
		wrterror(d, "munmap round");

	if (d->bigcache_size > 0 && psz > MAX_SMALLCACHEABLE_SIZE &&
	    psz <= MAX_BIGCACHEABLE_SIZE) {
		u_short base = getrbyte(d);
		u_short j;

		/* don't look through all slots */
		for (j = 0; j < d->bigcache_size / 4; j++) {
			i = (base + j) % d->bigcache_size;
			if (d->bigcache_used <
			    BIGCACHE_FILL(d->bigcache_size))  {
				if (d->bigcache[i].psize == 0)
					break;
			} else {
				if (d->bigcache[i].psize != 0)
					break;
			}
		}
		/* if we didn't find a preferred slot, use a random one */
		if (d->bigcache[i].psize != 0) {
			size_t tmp;

			r = d->bigcache[i].page;
			d->bigcache_used -= d->bigcache[i].psize;
			tmp = d->bigcache[i].psize << MALLOC_PAGESHIFT;
			if (!mopts.malloc_freeunmap)
				validate_junk(d, r, tmp);
			if (munmap(r, tmp))
				wrterror(d, "munmap %p", r);
			STATS_SUB(d->malloc_used, tmp);
		}

		if (clear > 0)
			explicit_bzero(p, clear);
		if (mopts.malloc_freeunmap) {
			if (mprotect(p, sz, PROT_NONE))
				wrterror(d, "mprotect %p", p);
		} else
			junk_free(d->malloc_junk, p, sz);
		d->bigcache[i].page = p;
		d->bigcache[i].psize = psz;
		d->bigcache_used += psz;
		return;
	}
	if (psz > MAX_SMALLCACHEABLE_SIZE || d->smallcache[psz - 1].max == 0) {
		if (munmap(p, sz))
			wrterror(d, "munmap %p", p);
		STATS_SUB(d->malloc_used, sz);
		return;
	}
	cache = &d->smallcache[psz - 1];
	if (cache->length == cache->max) {
		/* use a random slot */
		i = getrbyte(d) % cache->max;
		r = cache->pages[i];
		if (!mopts.malloc_freeunmap)
			validate_junk(d, r, sz);
		if (munmap(r, sz))
			wrterror(d, "munmap %p", r);
		STATS_SUB(d->malloc_used, sz);
		cache->length--;
	} else
		i = cache->length;

	/* fill slot */
	if (clear > 0)
		explicit_bzero(p, clear);
	if (mopts.malloc_freeunmap)
		mprotect(p, sz, PROT_NONE);
	else
		junk_free(d->malloc_junk, p, sz);
	cache->pages[i] = p;
	cache->length++;
}

static void *
map(struct dir_info *d, size_t sz, int zero_fill)
{
	size_t psz = sz >> MALLOC_PAGESHIFT;
	u_short i;
	void *p;
	struct smallcache *cache;

	if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)(uintptr_t)d) ||
	    d->canary1 != ~d->canary2)
		wrterror(d, "internal struct corrupt");
	if (sz != PAGEROUND(sz) || psz == 0)
		wrterror(d, "map round");

	if (d->bigcache_size > 0 && psz > MAX_SMALLCACHEABLE_SIZE &&
	    psz <= MAX_BIGCACHEABLE_SIZE) {
		size_t base = getrbyte(d);
		size_t cached = d->bigcache_used;
		ushort j;

		for (j = 0; j < d->bigcache_size && cached >= psz; j++) {
			i = (j + base) % d->bigcache_size;
			if (d->bigcache[i].psize == psz) {
				p = d->bigcache[i].page;
				d->bigcache_used -= psz;
				d->bigcache[i].page = NULL;
				d->bigcache[i].psize = 0;

				if (!mopts.malloc_freeunmap)
					validate_junk(d, p, sz);
				if (mopts.malloc_freeunmap)
					mprotect(p, sz, PROT_READ | PROT_WRITE);
				if (zero_fill)
					memset(p, 0, sz);
				else if (mopts.malloc_freeunmap)
					junk_free(d->malloc_junk, p, sz);
				return p;
			}
			cached -= d->bigcache[i].psize;
		}
	}
	if (psz <= MAX_SMALLCACHEABLE_SIZE && d->smallcache[psz - 1].max > 0) {
		cache = &d->smallcache[psz - 1];
		if (cache->length > 0) {
			if (cache->length == 1)
				p = cache->pages[--cache->length];
			else {
				i = getrbyte(d) % cache->length;
				p = cache->pages[i];
				cache->pages[i] = cache->pages[--cache->length];
			}
			if (!mopts.malloc_freeunmap)
				validate_junk(d, p, sz);
			if (mopts.malloc_freeunmap)
				mprotect(p, sz, PROT_READ | PROT_WRITE);
			if (zero_fill)
				memset(p, 0, sz);
			else if (mopts.malloc_freeunmap)
				junk_free(d->malloc_junk, p, sz);
			return p;
		}
		if (psz <= 1) {
			_MALLOC_LEAVE(d);
			p = MMAP(cache->max * sz, d->mmap_flag);
			_MALLOC_ENTER(d);
			if (p != MAP_FAILED) {
				STATS_ADD(d->malloc_used, cache->max * sz);
				cache->length = cache->max - 1;
				for (i = 0; i < cache->max - 1; i++) {
					void *q = (char *)p + i * sz;
					cache->pages[i] = q;
					if (!mopts.malloc_freeunmap)
						junk_free(d->malloc_junk, q,
						    sz);
				}
				if (mopts.malloc_freeunmap)
					mprotect(p, (cache->max - 1) * sz,
					    PROT_NONE);
				p = (char *)p + (cache->max - 1) * sz;
				/* zero fill not needed */
				return p;
			}
		}
	}
	_MALLOC_LEAVE(d);
	p = MMAP(sz, d->mmap_flag);
	_MALLOC_ENTER(d);
	if (p != MAP_FAILED)
		STATS_ADD(d->malloc_used, sz);
	/* zero fill not needed */
	return p;
}

static void
init_chunk_info(struct dir_info *d, struct chunk_info *p, int bits)
{
	int i;

	if (bits == 0) {
		p->shift = MALLOC_MINSHIFT;
		p->total = p->free = MALLOC_PAGESIZE >> p->shift;
		p->size = 0;
		p->offset = 0xdead;
	} else {
		p->shift = bits;
		p->total = p->free = MALLOC_PAGESIZE >> p->shift;
		p->size = 1U << bits;
		p->offset = howmany(p->total, MALLOC_BITS);
	}
	p->canary = (u_short)d->canary1;

	/* set all valid bits in the bitmap */
	i = p->total - 1;
	memset(p->bits, 0xff, sizeof(p->bits[0]) * (i / MALLOC_BITS));
	p->bits[i / MALLOC_BITS] = (2U << (i % MALLOC_BITS)) - 1;
}
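
/*
 * Example, assuming 4 KB pages: for bits = 4 the page holds 4096 >> 4 =
 * 256 chunks of 16 bytes, tracked by 256 / 16 = 16 u_short bitmap words;
 * the last word is masked so only valid chunks are marked free.
 */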

static struct chunk_info *
alloc_chunk_info(struct dir_info *d, int bits)
{
	struct chunk_info *p;

	if (LIST_EMPTY(&d->chunk_info_list[bits])) {
		size_t size, count, i;
		char *q;

		if (bits == 0)
			count = MALLOC_PAGESIZE / MALLOC_MINSIZE;
		else
			count = MALLOC_PAGESIZE >> bits;

		size = howmany(count, MALLOC_BITS);
		size = sizeof(struct chunk_info) + (size - 1) * sizeof(u_short);
		if (mopts.chunk_canaries)
			size += count * sizeof(u_short);
		size = _ALIGN(size);

		/* Don't use the cache here; we don't want a user use-after-free to touch this */
		q = MMAP(MALLOC_PAGESIZE, d->mmap_flag);
		if (q == MAP_FAILED)
			return NULL;
		STATS_ADD(d->malloc_used, MALLOC_PAGESIZE);
		count = MALLOC_PAGESIZE / size;

		for (i = 0; i < count; i++, q += size) {
			p = (struct chunk_info *)q;
			LIST_INSERT_HEAD(&d->chunk_info_list[bits], p, entries);
		}
	}
	p = LIST_FIRST(&d->chunk_info_list[bits]);
	LIST_REMOVE(p, entries);
	if (p->shift == 0)
		init_chunk_info(d, p, bits);
	return p;
}

/*
 * Allocate a page of chunks
 */
static struct chunk_info *
omalloc_make_chunks(struct dir_info *d, int bits, int listnum)
{
	struct chunk_info *bp;
	void *pp;

	/* Allocate a new bucket */
	pp = map(d, MALLOC_PAGESIZE, 0);
	if (pp == MAP_FAILED)
		return NULL;

	/* memory protect the page allocated in the malloc(0) case */
	if (bits == 0 && mprotect(pp, MALLOC_PAGESIZE, PROT_NONE) == -1)
		goto err;

	bp = alloc_chunk_info(d, bits);
	if (bp == NULL)
		goto err;
	bp->page = pp;

	if (insert(d, (void *)((uintptr_t)pp | (bits + 1)), (uintptr_t)bp,
	    NULL))
		goto err;
	LIST_INSERT_HEAD(&d->chunk_dir[bits][listnum], bp, entries);
	return bp;

err:
	unmap(d, pp, MALLOC_PAGESIZE, 0);
	return NULL;
}

static int
find_chunksize(size_t size)
{
	int r;

	/* malloc(0) is special */
	if (size == 0)
		return 0;

	if (size < MALLOC_MINSIZE)
		size = MALLOC_MINSIZE;
	size--;

	r = MALLOC_MINSHIFT;
	while (size >> r)
		r++;
	return r;
}
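
/*
 * find_chunksize() returns the bucket shift: e.g. a request for 100
 * bytes yields 7, i.e. a 1 << 7 = 128-byte chunk; requests at or below
 * MALLOC_MINSIZE land in the 16-byte bucket, and malloc(0) gets the
 * special bucket 0.
 */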

static void
fill_canary(char *ptr, size_t sz, size_t allocated)
{
	size_t check_sz = allocated - sz;

	if (check_sz > CHUNK_CHECK_LENGTH)
		check_sz = CHUNK_CHECK_LENGTH;
	memset(ptr + sz, mopts.chunk_canaries, check_sz);
}

/*
 * Allocate a chunk
 */
static void *
malloc_bytes(struct dir_info *d, size_t size, void *f)
{
	u_int i, r;
	int j, listnum;
	size_t k;
	u_short	*lp;
	struct chunk_info *bp;
	void *p;

	if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)(uintptr_t)d) ||
	    d->canary1 != ~d->canary2)
		wrterror(d, "internal struct corrupt");

	j = find_chunksize(size);

	r = ((u_int)getrbyte(d) << 8) | getrbyte(d);
	listnum = r % MALLOC_CHUNK_LISTS;
	/* If the list is empty, allocate another page of chunks of that size */
	if ((bp = LIST_FIRST(&d->chunk_dir[j][listnum])) == NULL) {
		bp = omalloc_make_chunks(d, j, listnum);
		if (bp == NULL)
			return NULL;
	}

	if (bp->canary != (u_short)d->canary1)
		wrterror(d, "chunk info corrupted");

	i = (r / MALLOC_CHUNK_LISTS) & (bp->total - 1);

	/* start somewhere in a short */
	lp = &bp->bits[i / MALLOC_BITS];
	if (*lp) {
		j = i % MALLOC_BITS;
		k = ffs(*lp >> j);
		if (k != 0) {
			k += j - 1;
			goto found;
		}
	}
	/* no bit halfway, go to next full short */
	i /= MALLOC_BITS;
	for (;;) {
		if (++i >= bp->total / MALLOC_BITS)
			i = 0;
		lp = &bp->bits[i];
		if (*lp) {
			k = ffs(*lp) - 1;
			break;
		}
	}
found:
#ifdef MALLOC_STATS
	if (i == 0 && k == 0) {
		struct region_info *r = find(d, bp->page);
		r->f = f;
	}
#endif

	*lp ^= 1 << k;

	/* If there are no more free, remove from free-list */
	if (--bp->free == 0)
		LIST_REMOVE(bp, entries);

	/* Adjust to the real offset of that chunk */
	k += (lp - bp->bits) * MALLOC_BITS;

	if (mopts.chunk_canaries && size > 0)
		bp->bits[bp->offset + k] = size;

	k <<= bp->shift;

	p = (char *)bp->page + k;
	if (bp->size > 0) {
		if (d->malloc_junk == 2)
			memset(p, SOME_JUNK, bp->size);
		else if (mopts.chunk_canaries)
			fill_canary(p, size, bp->size);
	}
	return p;
}

static void
validate_canary(struct dir_info *d, u_char *ptr, size_t sz, size_t allocated)
{
	size_t check_sz = allocated - sz;
	u_char *p, *q;

	if (check_sz > CHUNK_CHECK_LENGTH)
		check_sz = CHUNK_CHECK_LENGTH;
	p = ptr + sz;
	q = p + check_sz;

	while (p < q) {
		if (*p != (u_char)mopts.chunk_canaries && *p != SOME_JUNK) {
			wrterror(d, "chunk canary corrupted %p %#tx@%#zx%s",
			    ptr, p - ptr, sz,
			    *p == SOME_FREEJUNK ? " (double free?)" : "");
		}
		p++;
	}
}

static uint32_t
find_chunknum(struct dir_info *d, struct chunk_info *info, void *ptr, int check)
{
	uint32_t chunknum;

	if (info->canary != (u_short)d->canary1)
		wrterror(d, "chunk info corrupted");

	/* Find the chunk number on the page */
	chunknum = ((uintptr_t)ptr & MALLOC_PAGEMASK) >> info->shift;

	if ((uintptr_t)ptr & ((1U << (info->shift)) - 1))
		wrterror(d, "modified chunk-pointer %p", ptr);
	if (info->bits[chunknum / MALLOC_BITS] &
	    (1U << (chunknum % MALLOC_BITS)))
		wrterror(d, "chunk is already free %p", ptr);
	if (check && info->size > 0) {
		validate_canary(d, ptr, info->bits[info->offset + chunknum],
		    info->size);
	}
	return chunknum;
}

/*
 * Free a chunk, and possibly the page it's on, if the page becomes empty.
 */
static void
free_bytes(struct dir_info *d, struct region_info *r, void *ptr)
{
	struct chunk_head *mp;
	struct chunk_info *info;
	uint32_t chunknum;
	int listnum;

	info = (struct chunk_info *)r->size;
	chunknum = find_chunknum(d, info, ptr, 0);

	info->bits[chunknum / MALLOC_BITS] |= 1U << (chunknum % MALLOC_BITS);
	info->free++;

	if (info->free == 1) {
		/* Page became non-full */
		listnum = getrbyte(d) % MALLOC_CHUNK_LISTS;
		if (info->size != 0)
			mp = &d->chunk_dir[info->shift][listnum];
		else
			mp = &d->chunk_dir[0][listnum];

		LIST_INSERT_HEAD(mp, info, entries);
		return;
	}

	if (info->free != info->total)
		return;

	LIST_REMOVE(info, entries);

	if (info->size == 0 && !mopts.malloc_freeunmap)
		mprotect(info->page, MALLOC_PAGESIZE, PROT_READ | PROT_WRITE);
	unmap(d, info->page, MALLOC_PAGESIZE, 0);

	delete(d, r);
	if (info->size != 0)
		mp = &d->chunk_info_list[info->shift];
	else
		mp = &d->chunk_info_list[0];
	LIST_INSERT_HEAD(mp, info, entries);
}

static void *
omalloc(struct dir_info *pool, size_t sz, int zero_fill, void *f)
{
	void *p;
	size_t psz;

	if (sz > MALLOC_MAXCHUNK) {
		if (sz >= SIZE_MAX - mopts.malloc_guard - MALLOC_PAGESIZE) {
			errno = ENOMEM;
			return NULL;
		}
		sz += mopts.malloc_guard;
		psz = PAGEROUND(sz);
		p = map(pool, psz, zero_fill);
		if (p == MAP_FAILED) {
			errno = ENOMEM;
			return NULL;
		}
		if (insert(pool, p, sz, f)) {
			unmap(pool, p, psz, 0);
			errno = ENOMEM;
			return NULL;
		}
		if (mopts.malloc_guard) {
			if (mprotect((char *)p + psz - mopts.malloc_guard,
			    mopts.malloc_guard, PROT_NONE))
				wrterror(pool, "mprotect");
			STATS_ADD(pool->malloc_guarded, mopts.malloc_guard);
		}

		if (MALLOC_MOVE_COND(sz)) {
			/* fill whole allocation */
			if (pool->malloc_junk == 2)
				memset(p, SOME_JUNK, psz - mopts.malloc_guard);
			/* shift towards the end */
			p = MALLOC_MOVE(p, sz);
			/* restore zeros if needed, since junk may have overwritten them above */
			if (zero_fill && pool->malloc_junk == 2)
				memset(p, 0, sz - mopts.malloc_guard);
		} else {
			if (pool->malloc_junk == 2) {
				if (zero_fill)
					memset((char *)p + sz -
					    mopts.malloc_guard, SOME_JUNK,
					    psz - sz);
				else
					memset(p, SOME_JUNK,
					    psz - mopts.malloc_guard);
			} else if (mopts.chunk_canaries)
				fill_canary(p, sz - mopts.malloc_guard,
				    psz - mopts.malloc_guard);
		}

	} else {
		/* takes care of SOME_JUNK */
		p = malloc_bytes(pool, sz, f);
		if (zero_fill && p != NULL && sz > 0)
			memset(p, 0, sz);
	}

	return p;
}

/*
 * Common function for handling recursion.  Only
 * print the error message once, to avoid making the problem
 * potentially worse.
 */
static void
malloc_recurse(struct dir_info *d)
{
	static int noprint;

	if (noprint == 0) {
		noprint = 1;
		wrterror(d, "recursive call");
	}
	d->active--;
	_MALLOC_UNLOCK(d->mutex);
	errno = EDEADLK;
}

void
_malloc_init(int from_rthreads)
{
	u_int i, j, nmutexes;
	struct dir_info *d;

	_MALLOC_LOCK(1);
	if (!from_rthreads && mopts.malloc_pool[1]) {
		_MALLOC_UNLOCK(1);
		return;
	}
	if (!mopts.malloc_canary)
		omalloc_init();

	nmutexes = from_rthreads ? mopts.malloc_mutexes : 2;
	if (((uintptr_t)&malloc_readonly & MALLOC_PAGEMASK) == 0)
		mprotect(&malloc_readonly, sizeof(malloc_readonly),
		    PROT_READ | PROT_WRITE);
	for (i = 0; i < nmutexes; i++) {
		if (mopts.malloc_pool[i])
			continue;
		if (i == 0) {
			omalloc_poolinit(&d, MAP_CONCEAL);
			d->malloc_junk = 2;
			d->bigcache_size = 0;
			for (j = 0; j < MAX_SMALLCACHEABLE_SIZE; j++)
				d->smallcache[j].max = 0;
		} else {
			size_t sz = 0;

			omalloc_poolinit(&d, 0);
			d->malloc_junk = mopts.def_malloc_junk;
			d->bigcache_size = mopts.def_maxcache;
			for (j = 0; j < MAX_SMALLCACHEABLE_SIZE; j++) {
				d->smallcache[j].max =
				    mopts.def_maxcache >> (j / 8);
				sz += d->smallcache[j].max * sizeof(void *);
			}
			sz += d->bigcache_size * sizeof(struct bigcache);
			if (sz > 0) {
				void *p = MMAP(sz, 0);
				if (p == MAP_FAILED)
					wrterror(NULL,
					    "malloc init mmap failed");
				for (j = 0; j < MAX_SMALLCACHEABLE_SIZE; j++) {
					d->smallcache[j].pages = p;
					p = (char *)p + d->smallcache[j].max *
					    sizeof(void *);
				}
				d->bigcache = p;
			}
		}
		d->mutex = i;
		mopts.malloc_pool[i] = d;
	}

	if (from_rthreads)
		mopts.malloc_mt = 1;
	else
		mopts.internal_funcs = 1;

	/*
	 * Options have been set and will never be reset.
	 * Prevent further tampering with them.
	 */
	if (((uintptr_t)&malloc_readonly & MALLOC_PAGEMASK) == 0)
		mprotect(&malloc_readonly, sizeof(malloc_readonly), PROT_READ);
	_MALLOC_UNLOCK(1);
}
DEF_STRONG(_malloc_init);
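
/*
 * PROLOGUE/EPILOGUE wrap every public entry point: they lazily run
 * _malloc_init(), take the pool lock, record the function name for
 * error reports and use the active counter to catch recursion, e.g.
 * from a signal handler that calls malloc().
 */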

#define PROLOGUE(p, fn)			\
	d = (p); 			\
	if (d == NULL) { 		\
		_malloc_init(0);	\
		d = (p);		\
	}				\
	_MALLOC_LOCK(d->mutex);		\
	d->func = fn;			\
	if (d->active++) {		\
		malloc_recurse(d);	\
		return NULL;		\
	}				\

#define EPILOGUE()				\
	d->active--;				\
	_MALLOC_UNLOCK(d->mutex);		\
	if (r == NULL && mopts.malloc_xmalloc)	\
		wrterror(d, "out of memory");	\
	if (r != NULL)				\
		errno = saved_errno;		\

void *
malloc(size_t size)
{
	void *r;
	struct dir_info *d;
	int saved_errno = errno;

	PROLOGUE(getpool(), "malloc")
	r = omalloc(d, size, 0, CALLER);
	EPILOGUE()
	return r;
}
/*DEF_STRONG(malloc);*/

void *
malloc_conceal(size_t size)
{
	void *r;
	struct dir_info *d;
	int saved_errno = errno;

	PROLOGUE(mopts.malloc_pool[0], "malloc_conceal")
	r = omalloc(d, size, 0, CALLER);
	EPILOGUE()
	return r;
}
DEF_WEAK(malloc_conceal);

static struct region_info *
findpool(void *p, struct dir_info *argpool, struct dir_info **foundpool,
    char **saved_function)
{
	struct dir_info *pool = argpool;
	struct region_info *r = find(pool, p);

	STATS_INC(pool->pool_searches);
	if (r == NULL) {
		u_int i, nmutexes;

		nmutexes = mopts.malloc_mt ? mopts.malloc_mutexes : 2;
		STATS_INC(pool->other_pool);
		for (i = 1; i < nmutexes; i++) {
			u_int j = (argpool->mutex + i) & (nmutexes - 1);

			pool->active--;
			_MALLOC_UNLOCK(pool->mutex);
			pool = mopts.malloc_pool[j];
			_MALLOC_LOCK(pool->mutex);
			pool->active++;
			r = find(pool, p);
			if (r != NULL) {
				*saved_function = pool->func;
				pool->func = argpool->func;
				break;
			}
		}
		if (r == NULL)
			wrterror(argpool, "bogus pointer (double free?) %p", p);
	}
	*foundpool = pool;
	return r;
}

static void
ofree(struct dir_info **argpool, void *p, int clear, int check, size_t argsz)
{
	struct region_info *r;
	struct dir_info *pool;
	char *saved_function;
	size_t sz;

	r = findpool(p, *argpool, &pool, &saved_function);

	REALSIZE(sz, r);
	if (pool->mmap_flag) {
		clear = 1;
		if (!check) {
			argsz = sz;
			if (sz > MALLOC_MAXCHUNK)
				argsz -= mopts.malloc_guard;
		}
	}
	if (check) {
		if (sz <= MALLOC_MAXCHUNK) {
			if (mopts.chunk_canaries && sz > 0) {
				struct chunk_info *info =
				    (struct chunk_info *)r->size;
				uint32_t chunknum =
				    find_chunknum(pool, info, p, 0);

				if (info->bits[info->offset + chunknum] < argsz)
					wrterror(pool, "recorded size %hu"
					    " < %zu",
					    info->bits[info->offset + chunknum],
					    argsz);
			} else {
				if (sz < argsz)
					wrterror(pool, "chunk size %zu < %zu",
					    sz, argsz);
			}
		} else if (sz - mopts.malloc_guard < argsz) {
			wrterror(pool, "recorded size %zu < %zu",
			    sz - mopts.malloc_guard, argsz);
		}
	}
	if (sz > MALLOC_MAXCHUNK) {
		if (!MALLOC_MOVE_COND(sz)) {
			if (r->p != p)
				wrterror(pool, "bogus pointer %p", p);
			if (mopts.chunk_canaries)
				validate_canary(pool, p,
				    sz - mopts.malloc_guard,
				    PAGEROUND(sz - mopts.malloc_guard));
		} else {
			/* shifted towards the end */
			if (p != MALLOC_MOVE(r->p, sz))
				wrterror(pool, "bogus moved pointer %p", p);
			p = r->p;
		}
		if (mopts.malloc_guard) {
			if (sz < mopts.malloc_guard)
				wrterror(pool, "guard size");
			if (!mopts.malloc_freeunmap) {
				if (mprotect((char *)p + PAGEROUND(sz) -
				    mopts.malloc_guard, mopts.malloc_guard,
				    PROT_READ | PROT_WRITE))
					wrterror(pool, "mprotect");
			}
			STATS_SUB(pool->malloc_guarded, mopts.malloc_guard);
		}
		unmap(pool, p, PAGEROUND(sz), clear ? argsz : 0);
		delete(pool, r);
	} else {
		/* Validate, and optionally check canaries */
		struct chunk_info *info = (struct chunk_info *)r->size;
		if (info->size != sz)
			wrterror(pool, "internal struct corrupt");
		find_chunknum(pool, info, p, mopts.chunk_canaries);
		if (!clear) {
			void *tmp;
			int i;

			if (mopts.malloc_freecheck) {
				for (i = 0; i <= MALLOC_DELAYED_CHUNK_MASK; i++)
					if (p == pool->delayed_chunks[i])
						wrterror(pool,
						    "double free %p", p);
			}
			junk_free(pool->malloc_junk, p, sz);
			i = getrbyte(pool) & MALLOC_DELAYED_CHUNK_MASK;
			tmp = p;
			p = pool->delayed_chunks[i];
			if (tmp == p)
				wrterror(pool, "double free %p", tmp);
			pool->delayed_chunks[i] = tmp;
			if (p != NULL) {
				r = find(pool, p);
				REALSIZE(sz, r);
				if (r != NULL)
					validate_junk(pool, p, sz);
			}
		} else if (argsz > 0) {
			r = find(pool, p);
			explicit_bzero(p, argsz);
		}
		if (p != NULL) {
			if (r == NULL)
				wrterror(pool,
				    "bogus pointer (double free?) %p", p);
			free_bytes(pool, r, p);
		}
	}

	if (*argpool != pool) {
		pool->func = saved_function;
		*argpool = pool;
	}
}

void
free(void *ptr)
{
	struct dir_info *d;
	int saved_errno = errno;

	/* This is legal. */
	if (ptr == NULL)
		return;

	d = getpool();
	if (d == NULL)
		wrterror(d, "free() called before allocation");
	_MALLOC_LOCK(d->mutex);
	d->func = "free";
	if (d->active++) {
		malloc_recurse(d);
		return;
	}
	ofree(&d, ptr, 0, 0, 0);
	d->active--;
	_MALLOC_UNLOCK(d->mutex);
	errno = saved_errno;
}
/*DEF_STRONG(free);*/

static void
freezero_p(void *ptr, size_t sz)
{
	explicit_bzero(ptr, sz);
	free(ptr);
}

void
freezero(void *ptr, size_t sz)
{
	struct dir_info *d;
	int saved_errno = errno;

	/* This is legal. */
	if (ptr == NULL)
		return;

	if (!mopts.internal_funcs) {
		freezero_p(ptr, sz);
		return;
	}

	d = getpool();
	if (d == NULL)
		wrterror(d, "freezero() called before allocation");
	_MALLOC_LOCK(d->mutex);
	d->func = "freezero";
	if (d->active++) {
		malloc_recurse(d);
		return;
	}
	ofree(&d, ptr, 1, 1, sz);
	d->active--;
	_MALLOC_UNLOCK(d->mutex);
	errno = saved_errno;
}
DEF_WEAK(freezero);

static void *
orealloc(struct dir_info **argpool, void *p, size_t newsz, void *f)
{
	struct region_info *r;
	struct dir_info *pool;
	char *saved_function;
	struct chunk_info *info;
	size_t oldsz, goldsz, gnewsz;
	void *q, *ret;
	uint32_t chunknum;
	int forced;

	if (p == NULL)
		return omalloc(*argpool, newsz, 0, f);

	if (newsz >= SIZE_MAX - mopts.malloc_guard - MALLOC_PAGESIZE) {
		errno = ENOMEM;
		return NULL;
	}

	r = findpool(p, *argpool, &pool, &saved_function);

	REALSIZE(oldsz, r);
	if (mopts.chunk_canaries && oldsz <= MALLOC_MAXCHUNK) {
		info = (struct chunk_info *)r->size;
		chunknum = find_chunknum(pool, info, p, 0);
	}

	goldsz = oldsz;
	if (oldsz > MALLOC_MAXCHUNK) {
		if (oldsz < mopts.malloc_guard)
			wrterror(pool, "guard size");
		oldsz -= mopts.malloc_guard;
	}

	gnewsz = newsz;
	if (gnewsz > MALLOC_MAXCHUNK)
		gnewsz += mopts.malloc_guard;

	forced = mopts.malloc_realloc || pool->mmap_flag;
	if (newsz > MALLOC_MAXCHUNK && oldsz > MALLOC_MAXCHUNK && !forced) {
		/* First case: from an n-page-sized allocation to an
		   m-page-sized allocation */
		size_t roldsz = PAGEROUND(goldsz);
		size_t rnewsz = PAGEROUND(gnewsz);

		if (rnewsz < roldsz && rnewsz > roldsz / 2 &&
		    roldsz - rnewsz < mopts.def_maxcache * MALLOC_PAGESIZE &&
		    !mopts.malloc_guard) {

			ret = p;
			goto done;
		}

		if (rnewsz > roldsz) {
			/* try to extend existing region */
			if (!mopts.malloc_guard) {
				void *hint = (char *)r->p + roldsz;
				size_t needed = rnewsz - roldsz;

				STATS_INC(pool->cheap_realloc_tries);
				q = MQUERY(hint, needed, pool->mmap_flag);
				if (q == hint)
					q = MMAPA(hint, needed, pool->mmap_flag);
				else
					q = MAP_FAILED;
				if (q == hint) {
					STATS_ADD(pool->malloc_used, needed);
					if (pool->malloc_junk == 2)
						memset(q, SOME_JUNK, needed);
					r->size = gnewsz;
					if (r->p != p) {
						/* old pointer is moved */
						memmove(r->p, p, oldsz);
						p = r->p;
					}
					if (mopts.chunk_canaries)
						fill_canary(p, newsz,
						    PAGEROUND(newsz));
					STATS_SETF(r, f);
					STATS_INC(pool->cheap_reallocs);
					ret = p;
					goto done;
				} else if (q != MAP_FAILED) {
					if (munmap(q, needed))
						wrterror(pool, "munmap %p", q);
				}
			}
		} else if (rnewsz < roldsz) {
			/* shrink number of pages */
			if (mopts.malloc_guard) {
				if (mprotect((char *)r->p + rnewsz -
				    mopts.malloc_guard, mopts.malloc_guard,
				    PROT_NONE))
					wrterror(pool, "mprotect");
			}
			if (munmap((char *)r->p + rnewsz, roldsz - rnewsz))
				wrterror(pool, "munmap %p", (char *)r->p +
				    rnewsz);
			STATS_SUB(pool->malloc_used, roldsz - rnewsz);
			r->size = gnewsz;
			if (MALLOC_MOVE_COND(gnewsz)) {
				void *pp = MALLOC_MOVE(r->p, gnewsz);
				memmove(pp, p, newsz);
				p = pp;
			} else if (mopts.chunk_canaries)
				fill_canary(p, newsz, PAGEROUND(newsz));
			STATS_SETF(r, f);
			ret = p;
			goto done;
		} else {
			/* number of pages remains the same */
			void *pp = r->p;

			r->size = gnewsz;
			if (MALLOC_MOVE_COND(gnewsz))
				pp = MALLOC_MOVE(r->p, gnewsz);
			if (p != pp) {
				memmove(pp, p, oldsz < newsz ? oldsz : newsz);
				p = pp;
			}
			if (p == r->p) {
				if (newsz > oldsz && pool->malloc_junk == 2)
					memset((char *)p + newsz, SOME_JUNK,
					    rnewsz - mopts.malloc_guard -
					    newsz);
				if (mopts.chunk_canaries)
					fill_canary(p, newsz, PAGEROUND(newsz));
			}
			STATS_SETF(r, f);
			ret = p;
			goto done;
		}
	}
	if (oldsz <= MALLOC_MAXCHUNK && oldsz > 0 &&
	    newsz <= MALLOC_MAXCHUNK && newsz > 0 &&
	    1 << find_chunksize(newsz) == oldsz && !forced) {
		/* do not reallocate if the new size fits well in the existing chunk */
		if (pool->malloc_junk == 2)
			memset((char *)p + newsz, SOME_JUNK, oldsz - newsz);
		if (mopts.chunk_canaries) {
			info->bits[info->offset + chunknum] = newsz;
			fill_canary(p, newsz, info->size);
		}
		STATS_SETF(r, f);
		ret = p;
	} else if (newsz != oldsz || forced) {
		/* create new allocation */
		q = omalloc(pool, newsz, 0, f);
		if (q == NULL) {
			ret = NULL;
			goto done;
		}
		if (newsz != 0 && oldsz != 0)
			memcpy(q, p, oldsz < newsz ? oldsz : newsz);
		ofree(&pool, p, 0, 0, 0);
		ret = q;
	} else {
		/* oldsz == newsz */
		if (newsz != 0)
			wrterror(pool, "realloc internal inconsistency");
		STATS_SETF(r, f);
		ret = p;
	}
done:
	if (*argpool != pool) {
		pool->func = saved_function;
		*argpool = pool;
	}
	return ret;
}

void *
realloc(void *ptr, size_t size)
{
	struct dir_info *d;
	void *r;
	int saved_errno = errno;

	PROLOGUE(getpool(), "realloc")
	r = orealloc(&d, ptr, size, CALLER);
	EPILOGUE()
	return r;
}
/*DEF_STRONG(realloc);*/

/*
 * This is sqrt(SIZE_MAX+1), as s1*s2 <= SIZE_MAX
 * if both s1 < MUL_NO_OVERFLOW and s2 < MUL_NO_OVERFLOW
 */
#define MUL_NO_OVERFLOW	(1UL << (sizeof(size_t) * 4))
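
/*
 * On 64-bit platforms MUL_NO_OVERFLOW is 1 << 32, so if both operands
 * are below it their product is below 2^64 and cannot overflow; the
 * SIZE_MAX / nmemb division below only runs when an operand is large
 * enough to make overflow possible.
 */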
1820 
1821 void *
1822 calloc(size_t nmemb, size_t size)
1823 {
1824 	struct dir_info *d;
1825 	void *r;
1826 	int saved_errno = errno;
1827 
1828 	PROLOGUE(getpool(), "calloc")
1829 	if ((nmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
1830 	    nmemb > 0 && SIZE_MAX / nmemb < size) {
1831 		d->active--;
1832 		_MALLOC_UNLOCK(d->mutex);
1833 		if (mopts.malloc_xmalloc)
1834 			wrterror(d, "out of memory");
1835 		errno = ENOMEM;
1836 		return NULL;
1837 	}
1838 
1839 	size *= nmemb;
1840 	r = omalloc(d, size, 1, CALLER);
1841 	EPILOGUE()
1842 	return r;
1843 }
1844 /*DEF_STRONG(calloc);*/
1845 
1846 void *
1847 calloc_conceal(size_t nmemb, size_t size)
1848 {
1849 	struct dir_info *d;
1850 	void *r;
1851 	int saved_errno = errno;
1852 
1853 	PROLOGUE(mopts.malloc_pool[0], "calloc_conceal")
1854 	if ((nmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
1855 	    nmemb > 0 && SIZE_MAX / nmemb < size) {
1856 		d->active--;
1857 		_MALLOC_UNLOCK(d->mutex);
1858 		if (mopts.malloc_xmalloc)
1859 			wrterror(d, "out of memory");
1860 		errno = ENOMEM;
1861 		return NULL;
1862 	}
1863 
1864 	size *= nmemb;
1865 	r = omalloc(d, size, 1, CALLER);
1866 	EPILOGUE()
1867 	return r;
1868 }
1869 DEF_WEAK(calloc_conceal);
1870 
1871 static void *
1872 orecallocarray(struct dir_info **argpool, void *p, size_t oldsize,
1873     size_t newsize, void *f)
1874 {
1875 	struct region_info *r;
1876 	struct dir_info *pool;
1877 	char *saved_function;
1878 	void *newptr;
1879 	size_t sz;
1880 
1881 	if (p == NULL)
1882 		return omalloc(*argpool, newsize, 1, f);
1883 
1884 	if (oldsize == newsize)
1885 		return p;
1886 
1887 	r = findpool(p, *argpool, &pool, &saved_function);
1888 
1889 	REALSIZE(sz, r);
1890 	if (sz <= MALLOC_MAXCHUNK) {
1891 		if (mopts.chunk_canaries && sz > 0) {
1892 			struct chunk_info *info = (struct chunk_info *)r->size;
1893 			uint32_t chunknum = find_chunknum(pool, info, p, 0);
1894 
1895 			if (info->bits[info->offset + chunknum] != oldsize)
1896 				wrterror(pool, "recorded old size %hu != %zu",
1897 				    info->bits[info->offset + chunknum],
1898 				    oldsize);
1899 		}
1900 	} else if (oldsize < (sz - mopts.malloc_guard) / 2)
1901 		wrterror(pool, "recorded old size %zu != %zu",
1902 		    sz - mopts.malloc_guard, oldsize);
1903 
1904 	newptr = omalloc(pool, newsize, 0, f);
1905 	if (newptr == NULL)
1906 		goto done;
1907 
1908 	if (newsize > oldsize) {
1909 		memcpy(newptr, p, oldsize);
1910 		memset((char *)newptr + oldsize, 0, newsize - oldsize);
1911 	} else
1912 		memcpy(newptr, p, newsize);
1913 
1914 	ofree(&pool, p, 1, 0, oldsize);
1915 
1916 done:
1917 	if (*argpool != pool) {
1918 		pool->func = saved_function;
1919 		*argpool = pool;
1920 	}
1921 
1922 	return newptr;
1923 }
1924 
1925 static void *
1926 recallocarray_p(void *ptr, size_t oldnmemb, size_t newnmemb, size_t size)
1927 {
1928 	size_t oldsize, newsize;
1929 	void *newptr;
1930 
1931 	if (ptr == NULL)
1932 		return calloc(newnmemb, size);
1933 
1934 	if ((newnmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
1935 	    newnmemb > 0 && SIZE_MAX / newnmemb < size) {
1936 		errno = ENOMEM;
1937 		return NULL;
1938 	}
1939 	newsize = newnmemb * size;
1940 
1941 	if ((oldnmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
1942 	    oldnmemb > 0 && SIZE_MAX / oldnmemb < size) {
1943 		errno = EINVAL;
1944 		return NULL;
1945 	}
1946 	oldsize = oldnmemb * size;
1947 
1948 	/*
1949 	 * Don't bother too much if we're shrinking just a bit,
1950 	 * we do not shrink for series of small steps, oh well.
1951 	 */
1952 	if (newsize <= oldsize) {
1953 		size_t d = oldsize - newsize;
1954 
1955 		if (d < oldsize / 2 && d < MALLOC_PAGESIZE) {
1956 			memset((char *)ptr + newsize, 0, d);
1957 			return ptr;
1958 		}
1959 	}
1960 
1961 	newptr = malloc(newsize);
1962 	if (newptr == NULL)
1963 		return NULL;
1964 
1965 	if (newsize > oldsize) {
1966 		memcpy(newptr, ptr, oldsize);
1967 		memset((char *)newptr + oldsize, 0, newsize - oldsize);
1968 	} else
1969 		memcpy(newptr, ptr, newsize);
1970 
1971 	explicit_bzero(ptr, oldsize);
1972 	free(ptr);
1973 
1974 	return newptr;
1975 }
1976 
1977 void *
1978 recallocarray(void *ptr, size_t oldnmemb, size_t newnmemb, size_t size)
1979 {
1980 	struct dir_info *d;
1981 	size_t oldsize = 0, newsize;
1982 	void *r;
1983 	int saved_errno = errno;
1984 
1985 	if (!mopts.internal_funcs)
1986 		return recallocarray_p(ptr, oldnmemb, newnmemb, size);
1987 
1988 	PROLOGUE(getpool(), "recallocarray")
1989 
1990 	if ((newnmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
1991 	    newnmemb > 0 && SIZE_MAX / newnmemb < size) {
1992 		d->active--;
1993 		_MALLOC_UNLOCK(d->mutex);
1994 		if (mopts.malloc_xmalloc)
1995 			wrterror(d, "out of memory");
1996 		errno = ENOMEM;
1997 		return NULL;
1998 	}
1999 	newsize = newnmemb * size;
2000 
2001 	if (ptr != NULL) {
2002 		if ((oldnmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
2003 		    oldnmemb > 0 && SIZE_MAX / oldnmemb < size) {
2004 			d->active--;
2005 			_MALLOC_UNLOCK(d->mutex);
2006 			errno = EINVAL;
2007 			return NULL;
2008 		}
2009 		oldsize = oldnmemb * size;
2010 	}
2011 
2012 	r = orecallocarray(&d, ptr, oldsize, newsize, CALLER);
2013 	EPILOGUE()
2014 	return r;
2015 }
2016 DEF_WEAK(recallocarray);
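/*
 * Editor's note: a hedged usage sketch of recallocarray(3).  Growth
 * zero-fills the new tail, and the old region is explicitly cleared
 * before being released, which is why this call suits arrays holding
 * sensitive data (struct rec is a placeholder type; err(3) is from
 * <err.h>):
 *
 *	struct rec *tmp, *v = NULL;
 *	size_t n = 0;
 *
 *	tmp = recallocarray(v, n, n + 1, sizeof(*v));
 *	if (tmp == NULL)
 *		err(1, "recallocarray");
 *	v = tmp;
 *	n++;
 */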
2017 
2018 static void *
2019 mapalign(struct dir_info *d, size_t alignment, size_t sz, int zero_fill)
2020 {
2021 	char *p, *q;
2022 
2023 	if (alignment < MALLOC_PAGESIZE || ((alignment - 1) & alignment) != 0)
2024 		wrterror(d, "mapalign bad alignment");
2025 	if (sz != PAGEROUND(sz))
2026 		wrterror(d, "mapalign round");
2027 
2028 	/* Allocate sz + alignment bytes of memory, which must include a
2029 	 * subrange of sz bytes that is properly aligned.  Unmap the
2030 	 * other bytes, and then return that subrange.
2031 	 */
2032 
2033 	/* We need sz + alignment to fit into a size_t. */
2034 	if (alignment > SIZE_MAX - sz)
2035 		return MAP_FAILED;
2036 
2037 	p = map(d, sz + alignment, zero_fill);
2038 	if (p == MAP_FAILED)
2039 		return MAP_FAILED;
2040 	q = (char *)(((uintptr_t)p + alignment - 1) & ~(alignment - 1));
2041 	if (q != p) {
2042 		if (munmap(p, q - p))
2043 			wrterror(d, "munmap %p", p);
2044 	}
2045 	if (munmap(q + sz, alignment - (q - p)))
2046 		wrterror(d, "munmap %p", q + sz);
2047 	STATS_SUB(d->malloc_used, alignment);
2048 
2049 	return q;
2050 }
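/*
 * Editor's note: the expression for q above is the usual power-of-two
 * align-up.  Worked 32-bit example with alignment = 0x4000 and
 * p = 0x1234d000:
 *
 *	q = (p + alignment - 1) & ~(alignment - 1)
 *	  = (0x1234d000 + 0x3fff) & ~0x3fff
 *	  = 0x12350fff & 0xffffc000
 *	  = 0x12350000
 *
 * The front trim is then q - p = 0x3000 bytes, the tail trim is
 * alignment - (q - p) = 0x1000 bytes, and exactly sz aligned bytes
 * stay mapped.
 */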
2051 
2052 static void *
2053 omemalign(struct dir_info *pool, size_t alignment, size_t sz, int zero_fill,
2054     void *f)
2055 {
2056 	size_t psz;
2057 	void *p;
2058 
2059 	/* If between half a page and a page, avoid MALLOC_MOVE. */
2060 	if (sz > MALLOC_MAXCHUNK && sz < MALLOC_PAGESIZE)
2061 		sz = MALLOC_PAGESIZE;
2062 	if (alignment <= MALLOC_PAGESIZE) {
2063 		/*
2064 		 * max(sz, alignment) is enough to ensure the requested
2065 		 * alignment, since the allocator always allocates
2066 		 * power-of-two blocks.
2067 		 */
2068 		if (sz < alignment)
2069 			sz = alignment;
2070 		return omalloc(pool, sz, zero_fill, f);
2071 	}
2072 
2073 	if (sz >= SIZE_MAX - mopts.malloc_guard - MALLOC_PAGESIZE) {
2074 		errno = ENOMEM;
2075 		return NULL;
2076 	}
2077 
2078 	if (sz < MALLOC_PAGESIZE)
2079 		sz = MALLOC_PAGESIZE;
2080 	sz += mopts.malloc_guard;
2081 	psz = PAGEROUND(sz);
2082 
2083 	p = mapalign(pool, alignment, psz, zero_fill);
2084 	if (p == MAP_FAILED) {
2085 		errno = ENOMEM;
2086 		return NULL;
2087 	}
2088 
2089 	if (insert(pool, p, sz, f)) {
2090 		unmap(pool, p, psz, 0);
2091 		errno = ENOMEM;
2092 		return NULL;
2093 	}
2094 
2095 	if (mopts.malloc_guard) {
2096 		if (mprotect((char *)p + psz - mopts.malloc_guard,
2097 		    mopts.malloc_guard, PROT_NONE))
2098 			wrterror(pool, "mprotect");
2099 		STATS_ADD(pool->malloc_guarded, mopts.malloc_guard);
2100 	}
2101 
2102 	if (pool->malloc_junk == 2) {
2103 		if (zero_fill)
2104 			memset((char *)p + sz - mopts.malloc_guard,
2105 			    SOME_JUNK, psz - sz);
2106 		else
2107 			memset(p, SOME_JUNK, psz - mopts.malloc_guard);
2108 	} else if (mopts.chunk_canaries)
2109 		fill_canary(p, sz - mopts.malloc_guard,
2110 		    psz - mopts.malloc_guard);
2111 
2112 	return p;
2113 }
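/*
 * Editor's note: the alignment <= MALLOC_PAGESIZE fast path in
 * omemalign() relies on chunks being power-of-two sized and packed at
 * multiples of their size within a page-aligned page.  E.g. a request
 * for sz = 24 with alignment = 64 is bumped to a 64-byte chunk, and
 * every 64-byte chunk within a page starts at an offset that is a
 * multiple of 64, so the requested alignment falls out for free.
 */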
2114 
2115 int
2116 posix_memalign(void **memptr, size_t alignment, size_t size)
2117 {
2118 	struct dir_info *d;
2119 	int res, saved_errno = errno;
2120 	void *r;
2121 
2122 	/* Make sure that alignment is a large enough power of 2. */
2123 	if (((alignment - 1) & alignment) != 0 || alignment < sizeof(void *))
2124 		return EINVAL;
2125 
2126 	d = getpool();
2127 	if (d == NULL) {
2128 		_malloc_init(0);
2129 		d = getpool();
2130 	}
2131 	_MALLOC_LOCK(d->mutex);
2132 	d->func = "posix_memalign";
2133 	if (d->active++) {
2134 		malloc_recurse(d);
2135 		goto err;
2136 	}
2137 	r = omemalign(d, alignment, size, 0, CALLER);
2138 	d->active--;
2139 	_MALLOC_UNLOCK(d->mutex);
2140 	if (r == NULL) {
2141 		if (mopts.malloc_xmalloc)
2142 			wrterror(d, "out of memory");
2143 		goto err;
2144 	}
2145 	errno = saved_errno;
2146 	*memptr = r;
2147 	return 0;
2148 
2149 err:
2150 	res = errno;
2151 	errno = saved_errno;
2152 	return res;
2153 }
2154 /*DEF_STRONG(posix_memalign);*/
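/*
 * Editor's note: a hedged usage sketch.  Unlike malloc(3),
 * posix_memalign(3) reports failure through its return value and
 * leaves errno unchanged (hence the saved_errno dance above):
 *
 *	void *p;
 *	int error;
 *
 *	error = posix_memalign(&p, 64, 1024);
 *	if (error != 0)
 *		errx(1, "posix_memalign: %s", strerror(error));
 */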
2155 
2156 void *
2157 aligned_alloc(size_t alignment, size_t size)
2158 {
2159 	struct dir_info *d;
2160 	int saved_errno = errno;
2161 	void *r;
2162 
2163 	/* Make sure that alignment is a positive power of 2. */
2164 	if (((alignment - 1) & alignment) != 0 || alignment == 0) {
2165 		errno = EINVAL;
2166 		return NULL;
2167 	}
2168 	/* Per spec, size should be a multiple of alignment. */
2169 	if ((size & (alignment - 1)) != 0) {
2170 		errno = EINVAL;
2171 		return NULL;
2172 	}
2173 
2174 	PROLOGUE(getpool(), "aligned_alloc")
2175 	r = omemalign(d, alignment, size, 0, CALLER);
2176 	EPILOGUE()
2177 	return r;
2178 }
2179 /*DEF_STRONG(aligned_alloc);*/
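/*
 * Editor's note: per the size check above, aligned_alloc(3) rejects a
 * size that is not a multiple of the alignment.  Hedged sketch:
 *
 *	char *p = aligned_alloc(64, 128);	(ok: 128 = 2 * 64)
 *	char *q = aligned_alloc(64, 100);	(NULL, errno == EINVAL)
 */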
2180 
2181 #ifdef MALLOC_STATS
2182 
2183 struct malloc_leak {
2184 	void *f;
2185 	size_t total_size;
2186 	int count;
2187 };
2188 
2189 struct leaknode {
2190 	RBT_ENTRY(leaknode) entry;
2191 	struct malloc_leak d;
2192 };
2193 
2194 static inline int
2195 leakcmp(const struct leaknode *e1, const struct leaknode *e2)
2196 {
2197 	return e1->d.f < e2->d.f ? -1 : e1->d.f > e2->d.f;
2198 }
2199 
2200 static RBT_HEAD(leaktree, leaknode) leakhead;
2201 RBT_PROTOTYPE(leaktree, leaknode, entry, leakcmp);
2202 RBT_GENERATE(leaktree, leaknode, entry, leakcmp);
2203 
2204 static void
2205 putleakinfo(void *f, size_t sz, int cnt)
2206 {
2207 	struct leaknode key, *p;
2208 	static struct leaknode *page;
2209 	static unsigned int used;
2210 
2211 	if (cnt == 0 || page == MAP_FAILED)
2212 		return;
2213 
2214 	key.d.f = f;
2215 	p = RBT_FIND(leaktree, &leakhead, &key);
2216 	if (p == NULL) {
2217 		if (page == NULL ||
2218 		    used >= MALLOC_PAGESIZE / sizeof(struct leaknode)) {
2219 			page = MMAP(MALLOC_PAGESIZE, 0);
2220 			if (page == MAP_FAILED)
2221 				return;
2222 			used = 0;
2223 		}
2224 		p = &page[used++];
2225 		p->d.f = f;
2226 		p->d.total_size = sz * cnt;
2227 		p->d.count = cnt;
2228 		RBT_INSERT(leaktree, &leakhead, p);
2229 	} else {
2230 		p->d.total_size += sz * cnt;
2231 		p->d.count += cnt;
2232 	}
2233 }
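/*
 * Editor's note: leak nodes are carved from a private mmap'ed page by
 * the simple bump allocator above (page/used) rather than by malloc
 * itself, so the statistics code never recurses into, or distorts,
 * the allocator it is measuring.  Once MMAP fails, page stays
 * MAP_FAILED and further records are silently dropped.
 */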
2234 
2235 static struct malloc_leak *malloc_leaks;
2236 
2237 static void
2238 dump_leaks(int fd)
2239 {
2240 	struct leaknode *p;
2241 	unsigned int i = 0;
2242 
2243 	dprintf(fd, "Leak report\n");
2244 	dprintf(fd, "                 f     sum      #    avg\n");
2245 	/* XXX only one page of summary */
2246 	if (malloc_leaks == NULL)
2247 		malloc_leaks = MMAP(MALLOC_PAGESIZE, 0);
2248 	if (malloc_leaks != MAP_FAILED)
2249 		memset(malloc_leaks, 0, MALLOC_PAGESIZE);
2250 	RBT_FOREACH(p, leaktree, &leakhead) {
2251 		dprintf(fd, "%18p %7zu %6u %6zu\n", p->d.f,
2252 		    p->d.total_size, p->d.count, p->d.total_size / p->d.count);
2253 		if (malloc_leaks == MAP_FAILED ||
2254 		    i >= MALLOC_PAGESIZE / sizeof(struct malloc_leak))
2255 			continue;
2256 		malloc_leaks[i].f = p->d.f;
2257 		malloc_leaks[i].total_size = p->d.total_size;
2258 		malloc_leaks[i].count = p->d.count;
2259 		i++;
2260 	}
2261 }
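/*
 * Editor's note: in the report above, "f" is the recorded caller
 * address, "sum" the outstanding bytes attributed to it, "#" the
 * number of allocations, and "avg" their mean size (sum / count).
 * The malloc_leaks mirror keeps at most one page of these summaries
 * for inspection from a debugger.
 */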
2262 
2263 static void
2264 dump_chunk(int fd, struct chunk_info *p, void *f, int fromfreelist)
2265 {
2266 	while (p != NULL) {
2267 		dprintf(fd, "chunk %18p %18p %4d %d/%d\n",
2268 		    p->page, ((p->bits[0] & 1) ? NULL : f),
2269 		    p->size, p->free, p->total);
2270 		if (!fromfreelist) {
2271 			if (p->bits[0] & 1)
2272 				putleakinfo(NULL, p->size, p->total - p->free);
2273 			else {
2274 				putleakinfo(f, p->size, 1);
2275 				putleakinfo(NULL, p->size,
2276 				    p->total - p->free - 1);
2277 			}
2278 			break;
2279 		}
2280 		p = LIST_NEXT(p, entries);
2281 		if (p != NULL)
2282 			dprintf(fd, "        ");
2283 	}
2284 }
2285 
2286 static void
2287 dump_free_chunk_info(int fd, struct dir_info *d)
2288 {
2289 	int i, j, count;
2290 	struct chunk_info *p;
2291 
2292 	dprintf(fd, "Free chunk structs:\n");
2293 	for (i = 0; i <= MALLOC_MAXSHIFT; i++) {
2294 		count = 0;
2295 		LIST_FOREACH(p, &d->chunk_info_list[i], entries)
2296 			count++;
2297 		for (j = 0; j < MALLOC_CHUNK_LISTS; j++) {
2298 			p = LIST_FIRST(&d->chunk_dir[i][j]);
2299 			if (p == NULL && count == 0)
2300 				continue;
2301 			dprintf(fd, "%2d) %3d ", i, count);
2302 			if (p != NULL)
2303 				dump_chunk(fd, p, NULL, 1);
2304 			else
2305 				dprintf(fd, "\n");
2306 		}
2307 	}
2309 }
2310 
2311 static void
2312 dump_free_page_info(int fd, struct dir_info *d)
2313 {
2314 	struct smallcache *cache;
2315 	size_t i, total = 0;
2316 
2317 	dprintf(fd, "Cached in small cache:\n");
2318 	for (i = 0; i < MAX_SMALLCACHEABLE_SIZE; i++) {
2319 		cache = &d->smallcache[i];
2320 		if (cache->length != 0)
2321 			dprintf(fd, "%zu(%u): %u = %zu\n", i + 1, cache->max,
2322 			    cache->length, cache->length * (i + 1));
2323 		total += cache->length * (i + 1);
2324 	}
2325 
2326 	dprintf(fd, "Cached in big cache: %zu/%zu\n", d->bigcache_used,
2327 	    d->bigcache_size);
2328 	for (i = 0; i < d->bigcache_size; i++) {
2329 		if (d->bigcache[i].psize != 0)
2330 			dprintf(fd, "%zu: %zu\n", i, d->bigcache[i].psize);
2331 		total += d->bigcache[i].psize;
2332 	}
2333 	dprintf(fd, "Free pages cached: %zu\n", total);
2334 }
2335 
2336 static void
2337 malloc_dump1(int fd, int poolno, struct dir_info *d)
2338 {
2339 	size_t i, realsize;
2340 
2341 	dprintf(fd, "Malloc dir of %s pool %d at %p\n", __progname, poolno, d);
2342 	if (d == NULL)
2343 		return;
2344 	dprintf(fd, "J=%d Fl=%x\n", d->malloc_junk, d->mmap_flag);
2345 	dprintf(fd, "Region slots free %zu/%zu\n",
2346 		d->regions_free, d->regions_total);
2347 	dprintf(fd, "Finds %zu/%zu\n", d->finds, d->find_collisions);
2348 	dprintf(fd, "Inserts %zu/%zu\n", d->inserts, d->insert_collisions);
2349 	dprintf(fd, "Deletes %zu/%zu\n", d->deletes, d->delete_moves);
2350 	dprintf(fd, "Cheap reallocs %zu/%zu\n",
2351 	    d->cheap_reallocs, d->cheap_realloc_tries);
2352 	dprintf(fd, "Other pool searches %zu/%zu\n",
2353 	    d->other_pool, d->pool_searches);
2354 	dprintf(fd, "In use %zu\n", d->malloc_used);
2355 	dprintf(fd, "Guarded %zu\n", d->malloc_guarded);
2356 	dump_free_chunk_info(fd, d);
2357 	dump_free_page_info(fd, d);
2358 	dprintf(fd,
2359 	    "slot)  hash d  type               page                  f "
2360 	    "size [free/n]\n");
2361 	for (i = 0; i < d->regions_total; i++) {
2362 		if (d->r[i].p != NULL) {
2363 			size_t h = hash(d->r[i].p) &
2364 			    (d->regions_total - 1);
2365 			dprintf(fd, "%4zx) #%4zx %zd ",
2366 			    i, h, h - i);
2367 			REALSIZE(realsize, &d->r[i]);
2368 			if (realsize > MALLOC_MAXCHUNK) {
2369 				putleakinfo(d->r[i].f, realsize, 1);
2370 				dprintf(fd,
2371 				    "pages %18p %18p %zu\n", d->r[i].p,
2372 				    d->r[i].f, realsize);
2373 			} else
2374 				dump_chunk(fd,
2375 				    (struct chunk_info *)d->r[i].size,
2376 				    d->r[i].f, 0);
2377 		}
2378 	}
2379 	dump_leaks(fd);
2380 	dprintf(fd, "\n");
2381 }
2382 
2383 void
2384 malloc_dump(int fd, int poolno, struct dir_info *pool)
2385 {
2386 	int i;
2387 	void *p;
2388 	struct region_info *r;
2389 	int saved_errno = errno;
2390 
2391 	if (pool == NULL)
2392 		return;
2393 	for (i = 0; i < MALLOC_DELAYED_CHUNK_MASK + 1; i++) {
2394 		p = pool->delayed_chunks[i];
2395 		if (p == NULL)
2396 			continue;
2397 		r = find(pool, p);
2398 		if (r == NULL)
2399 			wrterror(pool, "bogus pointer in malloc_dump %p", p);
2400 		free_bytes(pool, r, p);
2401 		pool->delayed_chunks[i] = NULL;
2402 	}
2403 	/* XXX leak when run multiple times */
2404 	RBT_INIT(leaktree, &leakhead);
2405 	malloc_dump1(fd, poolno, pool);
2406 	errno = saved_errno;
2407 }
2408 DEF_WEAK(malloc_dump);
2409 
2410 void
2411 malloc_gdump(int fd)
2412 {
2413 	int i;
2414 	int saved_errno = errno;
2415 
2416 	for (i = 0; i < mopts.malloc_mutexes; i++)
2417 		malloc_dump(fd, i, mopts.malloc_pool[i]);
2418 
2419 	errno = saved_errno;
2420 }
2421 DEF_WEAK(malloc_gdump);
2422 
2423 static void
2424 malloc_exit(void)
2425 {
2426 	int save_errno = errno, fd;
2427 	unsigned i;
2428 
2429 	fd = open("malloc.out", O_RDWR|O_APPEND);
2430 	if (fd != -1) {
2431 		dprintf(fd, "******** Start dump %s *******\n", __progname);
2432 		dprintf(fd,
2433 		    "MT=%d M=%u I=%d F=%d U=%d J=%d R=%d X=%d C=%d cache=%u "
2434 		    "G=%zu\n",
2435 		    mopts.malloc_mt, mopts.malloc_mutexes,
2436 		    mopts.internal_funcs, mopts.malloc_freecheck,
2437 		    mopts.malloc_freeunmap, mopts.def_malloc_junk,
2438 		    mopts.malloc_realloc, mopts.malloc_xmalloc,
2439 		    mopts.chunk_canaries, mopts.def_maxcache,
2440 		    mopts.malloc_guard);
2441 
2442 		for (i = 0; i < mopts.malloc_mutexes; i++)
2443 			malloc_dump(fd, i, mopts.malloc_pool[i]);
2444 		dprintf(fd, "******** End dump %s *******\n", __progname);
2445 		close(fd);
2446 	} else
2447 		dprintf(STDERR_FILENO,
2448 		    "malloc() warning: Couldn't dump stats\n");
2449 	errno = save_errno;
2450 }
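/*
 * Editor's note: malloc_exit() exists only in libc builds compiled
 * with -DMALLOC_STATS (see the commented-out define near the top of
 * this file); it is registered via atexit(3) when statistics dumping
 * is requested with the 'D' malloc option.  Hedged usage sketch:
 *
 *	$ touch malloc.out	(open(2) above lacks O_CREAT,
 *				 so the file must pre-exist)
 *	$ MALLOC_OPTIONS=D ./prog
 *	$ less malloc.out
 */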
2451 
2452 #endif /* MALLOC_STATS */
2453