1 /*	$OpenBSD: malloc.c,v 1.122 2009/12/07 18:47:38 miod Exp $	*/
2 /*
3  * Copyright (c) 2008 Otto Moerbeek <otto@drijf.net>
4  *
5  * Permission to use, copy, modify, and distribute this software for any
6  * purpose with or without fee is hereby granted, provided that the above
7  * copyright notice and this permission notice appear in all copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16  */
17 
18 /*
19  * Parts of this code, mainly the sub-page-sized chunk management code, are
20  * derived from the malloc implementation with the following license:
21  */
22 /*
23  * ----------------------------------------------------------------------------
24  * "THE BEER-WARE LICENSE" (Revision 42):
25  * <phk@FreeBSD.ORG> wrote this file.  As long as you retain this notice you
26  * can do whatever you want with this stuff. If we meet some day, and you think
27  * this stuff is worth it, you can buy me a beer in return.  Poul-Henning Kamp
28  * ----------------------------------------------------------------------------
29  */
30 
31 /* #define MALLOC_STATS */
32 
33 #include <sys/types.h>
34 #include <sys/param.h>
35 #include <sys/queue.h>
36 #include <sys/mman.h>
37 #include <sys/uio.h>
38 #include <errno.h>
39 #include <stdint.h>
40 #include <stdlib.h>
41 #include <string.h>
42 #include <stdio.h>
43 #include <unistd.h>
44 
45 #ifdef MALLOC_STATS
46 #include <fcntl.h>
47 #endif
48 
49 #include "thread_private.h"
50 
51 #define MALLOC_MINSHIFT		4
52 #define MALLOC_MAXSHIFT		16
53 
54 #if defined(__sparc__) && !defined(__sparcv9__)
55 #define MALLOC_PAGESHIFT	(13U)
56 #elif defined(__mips64__)
57 #define MALLOC_PAGESHIFT	(14U)
58 #else
59 #define MALLOC_PAGESHIFT	(PGSHIFT)
60 #endif
61 
62 #define MALLOC_PAGESIZE		(1UL << MALLOC_PAGESHIFT)
63 #define MALLOC_MINSIZE		(1UL << MALLOC_MINSHIFT)
64 #define MALLOC_PAGEMASK		(MALLOC_PAGESIZE - 1)
65 #define MASK_POINTER(p)		((void *)(((uintptr_t)(p)) & ~MALLOC_PAGEMASK))
66 
67 #define MALLOC_MAXCHUNK		(1 << (MALLOC_PAGESHIFT-1))
68 #define MALLOC_MAXCACHE		256
69 #define MALLOC_DELAYED_CHUNKS	16	/* should be a power of 2 */
70 /*
71  * When the P option is active, we move allocations between half a page
72  * and a whole page towards the end, subject to alignment constraints.
73  * This is the extra headroom we allow. Set to zero for the strictest
74  * placement.
75  */
76 #define MALLOC_LEEWAY		0
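/*
 * Worked example (assuming 4096-byte pages and no guard page): a 3000-byte
 * allocation is shifted to page offset (4096 - 0 - 3000) & ~15 = 1088, so
 * it ends just 8 bytes before the page boundary, pushing small overruns
 * onto the following page.
 */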
77 
78 #define PAGEROUND(x)  (((x) + (MALLOC_PAGEMASK)) & ~MALLOC_PAGEMASK)
79 
80 /*
81  * What to use for Junk.  This is the byte value we use to fill with
82  * when the 'J' option is enabled. Use SOME_JUNK right after alloc,
83  * and SOME_FREEJUNK right before free.
84  */
85 #define SOME_JUNK		0xd0	/* as in "Duh" :-) */
86 #define SOME_FREEJUNK		0xdf
87 
88 #define MMAP(sz)	mmap(NULL, (size_t)(sz), PROT_READ | PROT_WRITE, \
89     MAP_ANON | MAP_PRIVATE, -1, (off_t) 0)
90 
91 #define MMAPA(a,sz)	mmap((a), (size_t)(sz), PROT_READ | PROT_WRITE, \
92     MAP_ANON | MAP_PRIVATE, -1, (off_t) 0)
93 
94 struct region_info {
95 	void *p;		/* page; low bits used to mark chunks */
96 	uintptr_t size;		/* size for pages, or chunk_info pointer */
97 };
98 
99 LIST_HEAD(chunk_head, chunk_info);
100 
101 struct dir_info {
102 	u_int32_t canary1;
103 	struct region_info *r;		/* region slots */
104 	size_t regions_total;		/* number of region slots */
105 	size_t regions_bits;		/* log2 of total */
106 	size_t regions_free;		/* number of free slots */
107 					/* list of free chunk info structs */
108 	struct chunk_head chunk_info_list;
109 					/* lists of chunks with free slots */
110 	struct chunk_head chunk_dir[MALLOC_MAXSHIFT];
111 	size_t free_regions_size;	/* free pages cached */
112 					/* free pages cache */
113 	struct region_info free_regions[MALLOC_MAXCACHE];
114 					/* delayed free chunk slots */
115 	void *delayed_chunks[MALLOC_DELAYED_CHUNKS];
116 #ifdef MALLOC_STATS
117 	size_t inserts;
118 	size_t insert_collisions;
119 	size_t finds;
120 	size_t find_collisions;
121 	size_t deletes;
122 	size_t delete_moves;
123 	size_t cheap_realloc_tries;
124 	size_t cheap_reallocs;
125 #define STATS_INC(x) ((x)++)
126 #define STATS_ZERO(x) ((x) = 0)
127 #else
128 #define STATS_INC(x)	/* nothing */
129 #define STATS_ZERO(x)	/* nothing */
130 #endif /* MALLOC_STATS */
131 	u_int32_t canary2;
132 };
133 #define DIR_INFO_RSZ	((sizeof(struct dir_info) + MALLOC_PAGEMASK) & \
134 			~MALLOC_PAGEMASK)
135 
136 /*
137  * This structure describes a page worth of chunks.
138  *
139  * How many bits per u_long in the bitmap
140  */
141 #define MALLOC_BITS		(NBBY * sizeof(u_long))
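/*
 * Sizing sketch (assuming an LP64 system with 4096-byte pages): MALLOC_BITS
 * is 8 * 8 = 64, the smallest chunk is MALLOC_MINSIZE = 16 bytes, so a page
 * holds at most 4096 / 16 = 256 chunks and bits[] below needs 256 / 64 = 4
 * u_longs to track them all.
 */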
142 struct chunk_info {
143 	LIST_ENTRY(chunk_info) entries;
144 	void *page;			/* pointer to the page */
145 	u_int32_t canary;
146 	u_short size;			/* size of this page's chunks */
147 	u_short shift;			/* how far to shift for this size */
148 	u_short free;			/* how many free chunks */
149 	u_short total;			/* how many chunks */
150 					/* which chunks are free */
151 	u_long bits[(MALLOC_PAGESIZE / MALLOC_MINSIZE) / MALLOC_BITS];
152 };
153 
154 struct malloc_readonly {
155 	struct dir_info *g_pool;	/* Main bookkeeping information */
156 	int	malloc_abort;		/* abort() on error */
157 	int	malloc_freeprot;	/* mprotect free pages PROT_NONE? */
158 	int	malloc_hint;		/* call madvise on free pages?  */
159 	int	malloc_junk;		/* junk fill? */
160 	int	malloc_move;		/* move allocations to end of page? */
161 	int	malloc_realloc;		/* always realloc? */
162 	int	malloc_xmalloc;		/* xmalloc behaviour? */
163 	int	malloc_zero;		/* zero fill? */
164 	size_t	malloc_guard;		/* use guard pages after allocations? */
165 	u_int	malloc_cache;		/* free pages we cache */
166 #ifdef MALLOC_STATS
167 	int	malloc_stats;		/* dump statistics at end */
168 #endif
169 	u_int32_t malloc_canary;	/* Matched against ones in g_pool */
170 };
171 
172 /* This object is mapped PROT_READ after initialisation to prevent tampering */
173 static union {
174 	struct malloc_readonly mopts;
175 	u_char _pad[MALLOC_PAGESIZE];
176 } malloc_readonly __attribute__((aligned(MALLOC_PAGESIZE)));
177 #define mopts	malloc_readonly.mopts
178 #define g_pool	mopts.g_pool
179 
180 char		*malloc_options;	/* compile-time options */
181 
182 static char	*malloc_func;		/* current function */
183 static int	malloc_active;		/* status of malloc */
184 
185 static size_t	malloc_guarded;		/* bytes used for guards */
186 static size_t	malloc_used;		/* bytes allocated */
187 
188 static size_t rbytesused;		/* random bytes used */
189 static u_char rbytes[512];		/* random bytes */
190 static u_char getrbyte(void);
191 
192 extern char	*__progname;
193 
194 /* low bits of r->p determine size: 0 means >= page size and r->size holds
195  * the real size; otherwise they are the shift count plus one, or 1 for malloc(0)
196  */
197 #define REALSIZE(sz, r) 					\
198 	(sz) = (uintptr_t)(r)->p & MALLOC_PAGEMASK,		\
199 	(sz) = ((sz) == 0 ? (r)->size : ((sz) == 1 ? 0 : (1 << ((sz)-1))))
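/*
 * Decoding examples for REALSIZE(): low bits 0 leaves (r)->size untouched
 * (a page-sized or larger region), low bits 1 yields 0 (a malloc(0) slot),
 * and a value of e.g. 5 yields 1 << 4, i.e. a page of 16-byte chunks.
 */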
200 
201 static inline size_t
202 hash(void *p)
203 {
204 	size_t sum;
205 	union {
206 		uintptr_t p;
207 		unsigned short a[sizeof(void *) / sizeof(short)];
208 	} u;
209 	u.p = (uintptr_t)p >> MALLOC_PAGESHIFT;
210 	sum = u.a[0];
211 	sum = (sum << 7) - sum + u.a[1];
212 #ifdef __LP64__
213 	sum = (sum << 7) - sum + u.a[2];
214 	sum = (sum << 7) - sum + u.a[3];
215 #endif
216 	return sum;
217 }
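/*
 * Note: (sum << 7) - sum is sum * 127, so the loop above folds the 16-bit
 * pieces of the page number into the hash with a small prime multiplier;
 * the page-offset bits are shifted out first since lookups always operate
 * on page-aligned (masked) pointers.
 */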
218 
219 #ifdef MALLOC_STATS
220 static void
221 dump_chunk(int fd, struct chunk_info *p, int fromfreelist)
222 {
223 	char buf[64];
224 
225 	while (p != NULL) {
226 		snprintf(buf, sizeof(buf), "chunk %d %d/%d %p\n", p->size,
227 		    p->free, p->total, p->page);
228 		write(fd, buf, strlen(buf));
229 		if (!fromfreelist)
230 			break;
231 		p = LIST_NEXT(p, entries);
232 		if (p != NULL) {
233 			snprintf(buf, sizeof(buf), "    ");
234 			write(fd, buf, strlen(buf));
235 		}
236 	}
237 }
238 
239 static void
240 dump_free_chunk_info(int fd, struct dir_info *d)
241 {
242 	char buf[64];
243 	int i;
244 
245 	snprintf(buf, sizeof(buf), "Free chunk structs:\n");
246 	write(fd, buf, strlen(buf));
247 	for (i = 0; i < MALLOC_MAXSHIFT; i++) {
248 		struct chunk_info *p = LIST_FIRST(&d->chunk_dir[i]);
249 		if (p != NULL) {
250 			snprintf(buf, sizeof(buf), "%2d) ", i);
251 			write(fd, buf, strlen(buf));
252 			dump_chunk(fd, p, 1);
253 		}
254 	}
255 
256 }
257 
258 static void
259 dump_free_page_info(int fd, struct dir_info *d)
260 {
261 	char buf[64];
262 	int i;
263 
264 	snprintf(buf, sizeof(buf), "Free pages cached: %zu\n",
265 	    d->free_regions_size);
266 	write(fd, buf, strlen(buf));
267 	for (i = 0; i < mopts.malloc_cache; i++) {
268 		if (d->free_regions[i].p != NULL) {
269 			snprintf(buf, sizeof(buf), "%2d) ", i);
270 			write(fd, buf, strlen(buf));
271 			snprintf(buf, sizeof(buf), "free at %p: %zu\n",
272 			    d->free_regions[i].p, d->free_regions[i].size);
273 			write(fd, buf, strlen(buf));
274 		}
275 	}
276 }
277 
278 static void
279 malloc_dump1(int fd, struct dir_info *d)
280 {
281 	char buf[64];
282 	size_t i, realsize;
283 
284 	snprintf(buf, sizeof(buf), "Malloc dir of %s at %p\n", __progname, d);
285 	write(fd, buf, strlen(buf));
286 	if (d == NULL)
287 		return;
288 	snprintf(buf, sizeof(buf), "Regions slots %zu\n", d->regions_total);
289 	write(fd, buf, strlen(buf));
290 	snprintf(buf, sizeof(buf), "Finds %zu/%zu %f\n", d->finds,
291 	    d->find_collisions,
292 	    1.0 + (double)d->find_collisions / d->finds);
293 	write(fd, buf, strlen(buf));
294 	snprintf(buf, sizeof(buf), "Inserts %zu/%zu %f\n", d->inserts,
295 	    d->insert_collisions,
296 	    1.0 + (double)d->insert_collisions / d->inserts);
297 	write(fd, buf, strlen(buf));
298 	snprintf(buf, sizeof(buf), "Deletes %zu/%zu\n", d->deletes,
299 	     d->delete_moves);
300 	write(fd, buf, strlen(buf));
301 	snprintf(buf, sizeof(buf), "Cheap reallocs %zu/%zu\n",
302 	    d->cheap_reallocs, d->cheap_realloc_tries);
303 	write(fd, buf, strlen(buf));
304 	snprintf(buf, sizeof(buf), "Regions slots free %zu\n", d->regions_free);
305 	write(fd, buf, strlen(buf));
306 	for (i = 0; i < d->regions_total; i++) {
307 		if (d->r[i].p != NULL) {
308 			size_t h = hash(d->r[i].p) &
309 			    (d->regions_total - 1);
310 			snprintf(buf, sizeof(buf), "%4zx) #%zx %zd ",
311 			    i, h, h - i);
312 			write(fd, buf, strlen(buf));
313 			REALSIZE(realsize, &d->r[i]);
314 			if (realsize > MALLOC_MAXCHUNK) {
315 				snprintf(buf, sizeof(buf),
316 				    "%p: %zu\n", d->r[i].p, realsize);
317 				write(fd, buf, strlen(buf));
318 			} else
319 				dump_chunk(fd,
320 				    (struct chunk_info *)d->r[i].size, 0);
321 		}
322 	}
323 	dump_free_chunk_info(fd, d);
324 	dump_free_page_info(fd, d);
325 	snprintf(buf, sizeof(buf), "In use %zu\n", malloc_used);
326 	write(fd, buf, strlen(buf));
327 	snprintf(buf, sizeof(buf), "Guarded %zu\n", malloc_guarded);
328 	write(fd, buf, strlen(buf));
329 }
330 
331 
332 void
333 malloc_dump(int fd)
334 {
335 	malloc_dump1(fd, g_pool);
336 }
337 
338 static void
339 malloc_exit(void)
340 {
341 	static const char q[] = "malloc() warning: Couldn't dump stats\n";
342 	int save_errno = errno, fd;
343 
344 	fd = open("malloc.out", O_RDWR|O_APPEND);
345 	if (fd != -1) {
346 		malloc_dump(fd);
347 		close(fd);
348 	} else
349 		write(STDERR_FILENO, q, sizeof(q) - 1);
350 	errno = save_errno;
351 }
352 #endif /* MALLOC_STATS */
353 
354 
355 
356 static void
357 wrterror(char *p)
358 {
359 	char		*q = " error: ";
360 	struct iovec	iov[5];
361 
362 	iov[0].iov_base = __progname;
363 	iov[0].iov_len = strlen(__progname);
364 	iov[1].iov_base = malloc_func;
365 	iov[1].iov_len = strlen(malloc_func);
366 	iov[2].iov_base = q;
367 	iov[2].iov_len = strlen(q);
368 	iov[3].iov_base = p;
369 	iov[3].iov_len = strlen(p);
370 	iov[4].iov_base = "\n";
371 	iov[4].iov_len = 1;
372 	writev(STDERR_FILENO, iov, 5);
373 
374 #ifdef MALLOC_STATS
375 	if (mopts.malloc_stats)
376 		malloc_dump(STDERR_FILENO);
377 #endif /* MALLOC_STATS */
378 	//malloc_active--;
379 	if (mopts.malloc_abort)
380 		abort();
381 }
382 
383 /*
384  * Cache maintenance. We keep at most malloc_cache pages cached.
385  * If the cache is becoming full, pages already in the cache are unmapped
386  * for real before the newly freed region is added to the cache.
387  * Unlike the regular region data structure, the sizes in the
388  * cache are in MALLOC_PAGESIZE units.
389  */
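/*
 * Illustrative numbers only: with the default cache of 64 pages and 60
 * pages already cached, freeing a 10-page region first unmaps at least 6
 * pages' worth of older cache entries, starting from a random slot, and
 * then parks the newly freed pages in an empty slot (madvised and/or
 * PROT_NONE protected when the H and F options are set).
 */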
390 static void
391 unmap(struct dir_info *d, void *p, size_t sz)
392 {
393 	size_t psz = sz >> MALLOC_PAGESHIFT;
394 	size_t rsz, tounmap;
395 	struct region_info *r;
396 	u_int i, offset;
397 
398 	if (sz != PAGEROUND(sz)) {
399 		wrterror("munmap round");
400 		return;
401 	}
402 
403 	if (psz > mopts.malloc_cache) {
404 		if (munmap(p, sz))
405 			wrterror("munmap");
406 		malloc_used -= sz;
407 		return;
408 	}
409 	tounmap = 0;
410 	rsz = mopts.malloc_cache - d->free_regions_size;
411 	if (psz > rsz)
412 		tounmap = psz - rsz;
413 	offset = getrbyte();
414 	for (i = 0; tounmap > 0 && i < mopts.malloc_cache; i++) {
415 		r = &d->free_regions[(i + offset) & (mopts.malloc_cache - 1)];
416 		if (r->p != NULL) {
417 			rsz = r->size << MALLOC_PAGESHIFT;
418 			if (munmap(r->p, rsz))
419 				wrterror("munmap");
420 			r->p = NULL;
421 			if (tounmap > r->size)
422 				tounmap -= r->size;
423 			else
424 				tounmap = 0;
425 			d->free_regions_size -= r->size;
426 			r->size = 0;
427 			malloc_used -= rsz;
428 		}
429 	}
430 	if (tounmap > 0)
431 		wrterror("malloc cache underflow");
432 	for (i = 0; i < mopts.malloc_cache; i++) {
433 		r = &d->free_regions[i];
434 		if (r->p == NULL) {
435 			if (mopts.malloc_hint)
436 				madvise(p, sz, MADV_FREE);
437 			if (mopts.malloc_freeprot)
438 				mprotect(p, sz, PROT_NONE);
439 			r->p = p;
440 			r->size = psz;
441 			d->free_regions_size += psz;
442 			break;
443 		}
444 	}
445 	if (i == mopts.malloc_cache)
446 		wrterror("malloc free slot lost");
447 	if (d->free_regions_size > mopts.malloc_cache)
448 		wrterror("malloc cache overflow");
449 }
450 
451 static void
452 zapcacheregion(struct dir_info *d, void *p)
453 {
454 	u_int i;
455 	struct region_info *r;
456 	size_t rsz;
457 
458 	for (i = 0; i < mopts.malloc_cache; i++) {
459 		r = &d->free_regions[i];
460 		if (r->p == p) {
461 			rsz = r->size << MALLOC_PAGESHIFT;
462 			if (munmap(r->p, rsz))
463 				wrterror("munmap");
464 			r->p = NULL;
465 			d->free_regions_size -= r->size;
466 			r->size = 0;
467 			malloc_used -= rsz;
468 		}
469 	}
470 }
471 
472 static void *
473 map(struct dir_info *d, size_t sz, int zero_fill)
474 {
475 	size_t psz = sz >> MALLOC_PAGESHIFT;
476 	struct region_info *r, *big = NULL;
477 	u_int i, offset;
478 	void *p;
479 
480 	if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)(uintptr_t)d) ||
481 	    d->canary1 != ~d->canary2)
482 		wrterror("internal struct corrupt");
483 	if (sz != PAGEROUND(sz)) {
484 		wrterror("map round");
485 		return NULL;
486 	}
487 	if (psz > d->free_regions_size) {
488 		p = MMAP(sz);
489 		if (p != MAP_FAILED)
490 			malloc_used += sz;
491 		/* zero fill not needed */
492 		return p;
493 	}
494 	offset = getrbyte();
495 	for (i = 0; i < mopts.malloc_cache; i++) {
496 		r = &d->free_regions[(i + offset) & (mopts.malloc_cache - 1)];
497 		if (r->p != NULL) {
498 			if (r->size == psz) {
499 				p = r->p;
500 				if (mopts.malloc_freeprot)
501 					mprotect(p, sz, PROT_READ | PROT_WRITE);
502 				if (mopts.malloc_hint)
503 					madvise(p, sz, MADV_NORMAL);
504 				r->p = NULL;
505 				r->size = 0;
506 				d->free_regions_size -= psz;
507 				if (zero_fill)
508 					memset(p, 0, sz);
509 				else if (mopts.malloc_junk &&
510 				    mopts.malloc_freeprot)
511 					memset(p, SOME_FREEJUNK, sz);
512 				return p;
513 			} else if (r->size > psz)
514 				big = r;
515 		}
516 	}
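	/*
	 * No exact fit in the cache: carve the request from the tail of an
	 * oversized cached region remembered above, shrinking that cache
	 * entry in place instead of unmapping and re-mapping.
	 */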
517 	if (big != NULL) {
518 		r = big;
519 		p = (char *)r->p + ((r->size - psz) << MALLOC_PAGESHIFT);
520 		if (mopts.malloc_freeprot)
521 			mprotect(p, sz, PROT_READ | PROT_WRITE);
522 		if (mopts.malloc_hint)
523 			madvise(p, sz, MADV_NORMAL);
524 		r->size -= psz;
525 		d->free_regions_size -= psz;
526 		if (zero_fill)
527 			memset(p, 0, sz);
528 		else if (mopts.malloc_junk && mopts.malloc_freeprot)
529 			memset(p, SOME_FREEJUNK, sz);
530 		return p;
531 	}
532 	p = MMAP(sz);
533 	if (p != MAP_FAILED)
534 		malloc_used += sz;
535 	if (d->free_regions_size > mopts.malloc_cache)
536 		wrterror("malloc cache");
537 	/* zero fill not needed */
538 	return p;
539 }
540 
541 static void
542 rbytes_init(void)
543 {
544 	arc4random_buf(rbytes, sizeof(rbytes));
545 	rbytesused = 0;
546 }
547 
548 static u_char
549 getrbyte(void)
550 {
551 	if (rbytesused >= sizeof(rbytes))
552 		rbytes_init();
553 	return rbytes[rbytesused++];
554 }
555 
556 /*
557  * Initialize a dir_info, which should have been cleared by caller
558  */
559 static int
560 omalloc_init(struct dir_info **dp)
561 {
562 	char *p, b[64];
563 	int i, j;
564 	size_t d_avail, regioninfo_size;
565 	struct dir_info *d;
566 
567 	rbytes_init();
568 
569 	/*
570 	 * Default options
571 	 */
572 	mopts.malloc_abort = 1;
573 	mopts.malloc_move = 1;
574 	mopts.malloc_cache = 64;
575 
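	/*
	 * Example (illustrative only): a symlink target of "FGJ" in
	 * /etc/malloc.conf, or MALLOC_OPTIONS=FGJ in the environment, turns
	 * on freed-page protection, guard pages and junk filling via the
	 * switch below, and a '>' in the string doubles the page cache.
	 */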
576 	for (i = 0; i < 3; i++) {
577 		switch (i) {
578 		case 0:
579 			j = readlink("/etc/malloc.conf", b, sizeof b - 1);
580 			if (j <= 0)
581 				continue;
582 			b[j] = '\0';
583 			p = b;
584 			break;
585 		case 1:
586 			if (issetugid() == 0)
587 				p = getenv("MALLOC_OPTIONS");
588 			else
589 				continue;
590 			break;
591 		case 2:
592 			p = malloc_options;
593 			break;
594 		default:
595 			p = NULL;
596 		}
597 
598 		for (; p != NULL && *p != '\0'; p++) {
599 			switch (*p) {
600 			case '>':
601 				mopts.malloc_cache <<= 1;
602 				if (mopts.malloc_cache > MALLOC_MAXCACHE)
603 					mopts.malloc_cache = MALLOC_MAXCACHE;
604 				break;
605 			case '<':
606 				mopts.malloc_cache >>= 1;
607 				break;
608 			case 'a':
609 				mopts.malloc_abort = 0;
610 				break;
611 			case 'A':
612 				mopts.malloc_abort = 1;
613 				break;
614 #ifdef MALLOC_STATS
615 			case 'd':
616 				mopts.malloc_stats = 0;
617 				break;
618 			case 'D':
619 				mopts.malloc_stats = 1;
620 				break;
621 #endif /* MALLOC_STATS */
622 			case 'f':
623 				mopts.malloc_freeprot = 0;
624 				break;
625 			case 'F':
626 				mopts.malloc_freeprot = 1;
627 				break;
628 			case 'g':
629 				mopts.malloc_guard = 0;
630 				break;
631 			case 'G':
632 				mopts.malloc_guard = MALLOC_PAGESIZE;
633 				break;
634 			case 'h':
635 				mopts.malloc_hint = 0;
636 				break;
637 			case 'H':
638 				mopts.malloc_hint = 1;
639 				break;
640 			case 'j':
641 				mopts.malloc_junk = 0;
642 				break;
643 			case 'J':
644 				mopts.malloc_junk = 1;
645 				break;
646 			case 'n':
647 			case 'N':
648 				break;
649 			case 'p':
650 				mopts.malloc_move = 0;
651 				break;
652 			case 'P':
653 				mopts.malloc_move = 1;
654 				break;
655 			case 'r':
656 				mopts.malloc_realloc = 0;
657 				break;
658 			case 'R':
659 				mopts.malloc_realloc = 1;
660 				break;
661 			case 'x':
662 				mopts.malloc_xmalloc = 0;
663 				break;
664 			case 'X':
665 				mopts.malloc_xmalloc = 1;
666 				break;
667 			case 'z':
668 				mopts.malloc_zero = 0;
669 				break;
670 			case 'Z':
671 				mopts.malloc_zero = 1;
672 				break;
673 			default: {
674 				static const char q[] = "malloc() warning: "
675 				    "unknown char in MALLOC_OPTIONS\n";
676 				write(STDERR_FILENO, q, sizeof(q) - 1);
677 				break;
678 			}
679 			}
680 		}
681 	}
682 
683 	/*
684 	 * We want junk in the entire allocation, and zero only in the part
685 	 * the user asked for.
686 	 */
687 	if (mopts.malloc_zero)
688 		mopts.malloc_junk = 1;
689 
690 #ifdef MALLOC_STATS
691 	if (mopts.malloc_stats && (atexit(malloc_exit) == -1)) {
692 		static const char q[] = "malloc() warning: atexit(2) failed."
693 		    " Will not be able to dump stats on exit\n";
694 		write(STDERR_FILENO, q, sizeof(q) - 1);
695 	}
696 #endif /* MALLOC_STATS */
697 
698 	while ((mopts.malloc_canary = arc4random()) == 0)
699 		;
700 
701 	/*
702 	 * Allocate dir_info with a guard page on either side. Also
703 	 * randomise offset inside the page at which the dir_info
704 	 * lies (subject to alignment by 1 << MALLOC_MINSHIFT)
705 	 */
706 	if ((p = MMAP(DIR_INFO_RSZ + (MALLOC_PAGESIZE * 2))) == MAP_FAILED)
707 		return -1;
708 	mprotect(p, MALLOC_PAGESIZE, PROT_NONE);
709 	mprotect(p + MALLOC_PAGESIZE + DIR_INFO_RSZ,
710 	    MALLOC_PAGESIZE, PROT_NONE);
711 	d_avail = (DIR_INFO_RSZ - sizeof(*d)) >> MALLOC_MINSHIFT;
712 	d = (struct dir_info *)(p + MALLOC_PAGESIZE +
713 	    (arc4random_uniform(d_avail) << MALLOC_MINSHIFT));
714 
715 	d->regions_bits = 9;
716 	d->regions_free = d->regions_total = 1 << d->regions_bits;
717 	regioninfo_size = d->regions_total * sizeof(struct region_info);
718 	d->r = MMAP(regioninfo_size);
719 	if (d->r == MAP_FAILED) {
720 		wrterror("malloc init mmap failed");
721 		d->regions_total = 0;
722 		return 1;
723 	}
724 	LIST_INIT(&d->chunk_info_list);
725 	for (i = 0; i < MALLOC_MAXSHIFT; i++)
726 		LIST_INIT(&d->chunk_dir[i]);
727 	malloc_used += regioninfo_size;
728 	d->canary1 = mopts.malloc_canary ^ (u_int32_t)(uintptr_t)d;
729 	d->canary2 = ~d->canary1;
730 
731 	*dp = d;
732 
733 	/*
734 	 * Options have been set and will never be reset.
735 	 * Prevent further tampering with them.
736 	 */
737 	if (((uintptr_t)&malloc_readonly & MALLOC_PAGEMASK) == 0)
738 		mprotect(&malloc_readonly, sizeof(malloc_readonly), PROT_READ);
739 
740 	return 0;
741 }
742 
743 static int
744 omalloc_grow(struct dir_info *d)
745 {
746 	size_t newbits;
747 	size_t newtotal;
748 	size_t newsize;
749 	size_t mask;
750 	size_t i;
751 	struct region_info *p;
752 
753 	if (d->regions_total > SIZE_MAX / sizeof(struct region_info) / 2 )
754 		return 1;
755 
756 	newbits = d->regions_bits + 1;
757 	newtotal = d->regions_total * 2;
758 	newsize = newtotal * sizeof(struct region_info);
759 	mask = newtotal - 1;
760 
761 	p = MMAP(newsize);
762 	if (p == MAP_FAILED)
763 		return 1;
764 
765 	malloc_used += newsize;
766 	memset(p, 0, newsize);
767 	STATS_ZERO(d->inserts);
768 	STATS_ZERO(d->insert_collisions);
769 	for (i = 0; i < d->regions_total; i++) {
770 		void *q = d->r[i].p;
771 		if (q != NULL) {
772 			size_t index = hash(q) & mask;
773 			STATS_INC(d->inserts);
774 			while (p[index].p != NULL) {
775 				index = (index - 1) & mask;
776 				STATS_INC(d->insert_collisions);
777 			}
778 			p[index] = d->r[i];
779 		}
780 	}
781 	/* keep pages containing meta info from ending up in the cache */
782 	if (munmap(d->r, d->regions_total * sizeof(struct region_info)))
783 		wrterror("munmap");
784 	else
785 		malloc_used -= d->regions_total * sizeof(struct region_info);
786 	d->regions_free = d->regions_free + d->regions_total;
787 	d->regions_total = newtotal;
788 	d->regions_bits = newbits;
789 	d->r = p;
790 	return 0;
791 }
792 
793 static struct chunk_info *
794 alloc_chunk_info(struct dir_info *d)
795 {
796 	struct chunk_info *p;
797 	int i;
798 
799 	if (LIST_EMPTY(&d->chunk_info_list)) {
800 		p = MMAP(MALLOC_PAGESIZE);
801 		if (p == MAP_FAILED)
802 			return NULL;
803 		malloc_used += MALLOC_PAGESIZE;
804 		for (i = 0; i < MALLOC_PAGESIZE / sizeof(*p); i++)
805 			LIST_INSERT_HEAD(&d->chunk_info_list, &p[i], entries);
806 	}
807 	p = LIST_FIRST(&d->chunk_info_list);
808 	LIST_REMOVE(p, entries);
809 	memset(p, 0, sizeof *p);
810 	p->canary = d->canary1;
811 	return p;
812 }
813 
814 static int
815 insert(struct dir_info *d, void *p, size_t sz)
816 {
817 	size_t index;
818 	size_t mask;
819 	void *q;
820 
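	/* grow the hash table once it is more than 75% full */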
821 	if (d->regions_free * 4 < d->regions_total) {
822 		if (omalloc_grow(d))
823 			return 1;
824 	}
825 	mask = d->regions_total - 1;
826 	index = hash(p) & mask;
827 	q = d->r[index].p;
828 	STATS_INC(d->inserts);
829 	while (q != NULL) {
830 		index = (index - 1) & mask;
831 		q = d->r[index].p;
832 		STATS_INC(d->insert_collisions);
833 	}
834 	d->r[index].p = p;
835 	d->r[index].size = sz;
836 	d->regions_free--;
837 	return 0;
838 }
839 
840 static struct region_info *
841 find(struct dir_info *d, void *p)
842 {
843 	size_t index;
844 	size_t mask = d->regions_total - 1;
845 	void *q, *r;
846 
847 	if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)(uintptr_t)d) ||
848 	    d->canary1 != ~d->canary2)
849 		wrterror("internal struct corrupt");
850 	p = MASK_POINTER(p);
851 	index = hash(p) & mask;
852 	r = d->r[index].p;
853 	q = MASK_POINTER(r);
854 	STATS_INC(d->finds);
855 	while (q != p && r != NULL) {
856 		index = (index - 1) & mask;
857 		r = d->r[index].p;
858 		q = MASK_POINTER(r);
859 		STATS_INC(d->find_collisions);
860 	}
861 	return q == p ? &d->r[index] : NULL;
862 }
863 
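/*
 * The table uses open addressing with backward probing (index - 1 on a
 * collision), so simply clearing a slot could cut the probe path of entries
 * stored behind it.  delete() below therefore keeps scanning backwards from
 * the hole and moves an entry into it whenever leaving that entry in place
 * would strand it from its home bucket, repeating with the new hole until
 * an empty slot terminates the chain (Knuth's algorithm R).
 */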
864 static void
865 delete(struct dir_info *d, struct region_info *ri)
866 {
867 	/* algorithm R, Knuth Vol III section 6.4 */
868 	size_t mask = d->regions_total - 1;
869 	size_t i, j, r;
870 
871 	if (d->regions_total & (d->regions_total - 1))
872 		wrterror("regions_total not 2^x");
873 	d->regions_free++;
874 	STATS_INC(g_pool->deletes);
875 
876 	i = ri - d->r;
877 	for (;;) {
878 		d->r[i].p = NULL;
879 		d->r[i].size = 0;
880 		j = i;
881 		for (;;) {
882 			i = (i - 1) & mask;
883 			if (d->r[i].p == NULL)
884 				return;
885 			r = hash(d->r[i].p) & mask;
886 			if ((i <= r && r < j) || (r < j && j < i) ||
887 			    (j < i && i <= r))
888 				continue;
889 			d->r[j] = d->r[i];
890 			STATS_INC(g_pool->delete_moves);
891 			break;
892 		}
893 
894 	}
895 }
896 
897 /*
898  * Allocate a page of chunks
899  */
900 static struct chunk_info *
901 omalloc_make_chunks(struct dir_info *d, int bits)
902 {
903 	struct chunk_info *bp;
904 	void		*pp;
905 	long		i, k;
906 
907 	/* Allocate a new bucket */
908 	pp = map(d, MALLOC_PAGESIZE, 0);
909 	if (pp == MAP_FAILED)
910 		return NULL;
911 
912 	bp = alloc_chunk_info(d);
913 	if (bp == NULL) {
914 		unmap(d, pp, MALLOC_PAGESIZE);
915 		return NULL;
916 	}
917 
918 	/* memory protect the page allocated in the malloc(0) case */
919 	if (bits == 0) {
920 		bp->size = 0;
921 		bp->shift = 1;
922 		i = MALLOC_MINSIZE - 1;
923 		while (i >>= 1)
924 			bp->shift++;
925 		bp->total = bp->free = MALLOC_PAGESIZE >> bp->shift;
926 		bp->page = pp;
927 
928 		k = mprotect(pp, MALLOC_PAGESIZE, PROT_NONE);
929 		if (k < 0) {
930 			unmap(d, pp, MALLOC_PAGESIZE);
931 			LIST_INSERT_HEAD(&d->chunk_info_list, bp, entries);
932 			return NULL;
933 		}
934 	} else {
935 		bp->size = (1UL << bits);
936 		bp->shift = bits;
937 		bp->total = bp->free = MALLOC_PAGESIZE >> bits;
938 		bp->page = pp;
939 	}
940 
941 	/* set all valid bits in the bitmap */
942 	k = bp->total;
943 	i = 0;
944 
945 	/* Do a bunch at a time */
946 	for (; (k - i) >= MALLOC_BITS; i += MALLOC_BITS)
947 		bp->bits[i / MALLOC_BITS] = ~0UL;
948 
949 	for (; i < k; i++)
950 		bp->bits[i / MALLOC_BITS] |= 1UL << (i % MALLOC_BITS);
951 
952 	LIST_INSERT_HEAD(&d->chunk_dir[bits], bp, entries);
953 
954 	bits++;
955 	if ((uintptr_t)pp & bits)
956 		wrterror("pp & bits");
957 
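	/*
	 * The low bits of the key encode the chunk shift plus one, which
	 * REALSIZE() decodes again; the "size" stored is the chunk_info.
	 */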
958 	insert(d, (void *)((uintptr_t)pp | bits), (uintptr_t)bp);
959 	return bp;
960 }
961 
962 
963 /*
964  * Allocate a chunk
965  */
966 static void *
967 malloc_bytes(struct dir_info *d, size_t size)
968 {
969 	int		i, j;
970 	size_t		k;
971 	u_long		u, *lp;
972 	struct chunk_info *bp;
973 
974 	if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)(uintptr_t)d) ||
975 	    d->canary1 != ~d->canary2)
976 		wrterror("internal struct corrupt");
977 	/* Don't bother with anything less than this */
978 	/* unless we have a malloc(0) request */
979 	if (size != 0 && size < MALLOC_MINSIZE)
980 		size = MALLOC_MINSIZE;
981 
982 	/* Find the right bucket */
983 	if (size == 0)
984 		j = 0;
985 	else {
986 		j = MALLOC_MINSHIFT;
987 		i = (size - 1) >> (MALLOC_MINSHIFT - 1);
988 		while (i >>= 1)
989 			j++;
990 	}
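	/*
	 * j is now the bucket shift: the chunk size rounded up to a power of
	 * two, e.g. a 24-byte request gives j = 5 (32-byte chunks) and a
	 * 100-byte request gives j = 7 (128-byte chunks).
	 */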
991 
992 	/* If the list is empty, allocate a page more of chunks of that size */
993 	if (LIST_EMPTY(&d->chunk_dir[j])) {
994 		bp = omalloc_make_chunks(d, j);
995 		if (bp == NULL)
996 			return NULL;
997 	} else
998 		bp = LIST_FIRST(&d->chunk_dir[j]);
999 
1000 	if (bp->canary != d->canary1)
1001 		wrterror("chunk info corrupted");
1002 	/* Find first word of bitmap which isn't empty */
1003 	for (lp = bp->bits; !*lp; lp++)
1004 		/* EMPTY */;
1005 
1006 	/* Find that bit, and tweak it */
1007 	u = 1;
1008 	k = 0;
1009 	while (!(*lp & u)) {
1010 		u += u;
1011 		k++;
1012 	}
1013 
1014 	/* advance a random # of positions */
1015 	i = (getrbyte() & (MALLOC_DELAYED_CHUNKS - 1)) % bp->free;
1016 	while (i > 0) {
1017 		u += u;
1018 		k++;
1019 		if (k >= MALLOC_BITS) {
1020 			lp++;
1021 			u = 1;
1022 			k = 0;
1023 		}
1024 		if (lp - bp->bits > (bp->total - 1) / MALLOC_BITS) {
1025 			wrterror("chunk overflow");
1026 			errno = EFAULT;
1027 			return (NULL);
1028 		}
1029 		if (*lp & u)
1030 			i--;
1031 	}
1032 
1033 	*lp ^= u;
1034 
1035 	/* If no free chunks remain, remove the page from the free list */
1036 	if (!--bp->free)
1037 		LIST_REMOVE(bp, entries);
1038 
1039 	/* Adjust to the real offset of that chunk */
1040 	k += (lp - bp->bits) * MALLOC_BITS;
1041 	k <<= bp->shift;
1042 
1043 	if (mopts.malloc_junk && bp->size > 0)
1044 		memset((char *)bp->page + k, SOME_JUNK, bp->size);
1045 	return ((char *)bp->page + k);
1046 }
1047 
1048 
1049 /*
1050  * Free a chunk, and possibly the page it's on, if the page becomes empty.
1051  */
1052 static void
1053 free_bytes(struct dir_info *d, struct region_info *r, void *ptr)
1054 {
1055 	struct chunk_head *mp;
1056 	struct chunk_info *info;
1057 	long i;
1058 
1059 	info = (struct chunk_info *)r->size;
1060 	if (info->canary != d->canary1)
1061 		wrterror("chunk info corrupted");
1062 
1063 	/* Find the chunk number on the page */
1064 	i = ((uintptr_t)ptr & MALLOC_PAGEMASK) >> info->shift;
1065 
1066 	if ((uintptr_t)ptr & ((1UL << (info->shift)) - 1)) {
1067 		wrterror("modified chunk-pointer");
1068 		return;
1069 	}
1070 	if (info->bits[i / MALLOC_BITS] & (1UL << (i % MALLOC_BITS))) {
1071 		wrterror("chunk is already free");
1072 		return;
1073 	}
1074 
1075 	info->bits[i / MALLOC_BITS] |= 1UL << (i % MALLOC_BITS);
1076 	info->free++;
1077 
1078 	if (info->size != 0)
1079 		mp = d->chunk_dir + info->shift;
1080 	else
1081 		mp = d->chunk_dir;
1082 
1083 	if (info->free == 1) {
1084 		/* Page became non-full */
1085 		LIST_INSERT_HEAD(mp, info, entries);
1086 		return;
1087 	}
1088 	if (info->free != info->total)
1089 		return;
1090 
1091 	LIST_REMOVE(info, entries);
1092 
1093 	if (info->size == 0 && !mopts.malloc_freeprot)
1094 		mprotect(info->page, MALLOC_PAGESIZE, PROT_READ | PROT_WRITE);
1095 	unmap(d, info->page, MALLOC_PAGESIZE);
1096 
1097 	delete(d, r);
1098 	LIST_INSERT_HEAD(&d->chunk_info_list, info, entries);
1099 }
1100 
1101 
1102 
1103 static void *
1104 omalloc(size_t sz, int zero_fill)
1105 {
1106 	void *p;
1107 	size_t psz;
1108 
1109 	if (sz > MALLOC_MAXCHUNK) {
1110 		if (sz >= SIZE_MAX - mopts.malloc_guard - MALLOC_PAGESIZE) {
1111 			errno = ENOMEM;
1112 			return NULL;
1113 		}
1114 		sz += mopts.malloc_guard;
1115 		psz = PAGEROUND(sz);
1116 		p = map(g_pool, psz, zero_fill);
1117 		if (p == MAP_FAILED) {
1118 			errno = ENOMEM;
1119 			return NULL;
1120 		}
1121 		if (insert(g_pool, p, sz)) {
1122 			unmap(g_pool, p, psz);
1123 			errno = ENOMEM;
1124 			return NULL;
1125 		}
1126 		if (mopts.malloc_guard) {
1127 			if (mprotect((char *)p + psz - mopts.malloc_guard,
1128 			    mopts.malloc_guard, PROT_NONE))
1129 				wrterror("mprotect");
1130 			malloc_guarded += mopts.malloc_guard;
1131 		}
1132 
1133 		if (mopts.malloc_move &&
1134 		    sz - mopts.malloc_guard < MALLOC_PAGESIZE -
1135 		    MALLOC_LEEWAY) {
1136 			/* fill whole allocation */
1137 			if (mopts.malloc_junk)
1138 				memset(p, SOME_JUNK, psz - mopts.malloc_guard);
1139 			/* shift towards the end */
1140 			p = ((char *)p) + ((MALLOC_PAGESIZE - MALLOC_LEEWAY -
1141 			    (sz - mopts.malloc_guard)) & ~(MALLOC_MINSIZE-1));
1142 			/* re-zero if needed, since the junk fill above overwrote it */
1143 			if (zero_fill && mopts.malloc_junk)
1144 				memset(p, 0, sz - mopts.malloc_guard);
1145 		} else {
1146 			if (mopts.malloc_junk) {
1147 				if (zero_fill)
1148 					memset(p + sz - mopts.malloc_guard,
1149 					    SOME_JUNK, psz - sz);
1150 				else
1151 					memset(p, SOME_JUNK,
1152 					    psz - mopts.malloc_guard);
1153 			}
1154 		}
1155 
1156 	} else {
1157 		/* takes care of SOME_JUNK */
1158 		p = malloc_bytes(g_pool, sz);
1159 		if (zero_fill && p != NULL && sz > 0)
1160 			memset(p, 0, sz);
1161 	}
1162 
1163 	return p;
1164 }
1165 
1166 /*
1167  * Common function for handling recursion.  Only
1168  * print the error message once, to avoid making the problem
1169  * potentially worse.
1170  */
1171 static void
1172 malloc_recurse(void)
1173 {
1174 	static int noprint;
1175 
1176 	if (noprint == 0) {
1177 		noprint = 1;
1178 		wrterror("recursive call");
1179 	}
1180 	malloc_active--;
1181 	_MALLOC_UNLOCK();
1182 	errno = EDEADLK;
1183 }
1184 
1185 static int
1186 malloc_init(void)
1187 {
1188 	if (omalloc_init(&g_pool)) {
1189 		_MALLOC_UNLOCK();
1190 		if (mopts.malloc_xmalloc)
1191 			wrterror("out of memory");
1192 		errno = ENOMEM;
1193 		return -1;
1194 	}
1195 	return 0;
1196 }
1197 
1198 void *
1199 malloc(size_t size)
1200 {
1201 	void *r;
1202 	int saved_errno = errno;
1203 
1204 	_MALLOC_LOCK();
1205 	malloc_func = " in malloc():";
1206 	if (g_pool == NULL) {
1207 		if (malloc_init() != 0)
1208 			return NULL;
1209 	}
1210 	if (malloc_active++) {
1211 		malloc_recurse();
1212 		return NULL;
1213 	}
1214 	r = omalloc(size, mopts.malloc_zero);
1215 	malloc_active--;
1216 	_MALLOC_UNLOCK();
1217 	if (r == NULL && mopts.malloc_xmalloc) {
1218 		wrterror("out of memory");
1219 		errno = ENOMEM;
1220 	}
1221 	if (r != NULL)
1222 		errno = saved_errno;
1223 	return r;
1224 }
1225 
1226 static void
1227 ofree(void *p)
1228 {
1229 	struct region_info *r;
1230 	size_t sz;
1231 
1232 	r = find(g_pool, p);
1233 	if (r == NULL) {
1234 		wrterror("bogus pointer (double free?)");
1235 		return;
1236 	}
1237 	REALSIZE(sz, r);
1238 	if (sz > MALLOC_MAXCHUNK) {
1239 		if (sz - mopts.malloc_guard >= MALLOC_PAGESIZE -
1240 		    MALLOC_LEEWAY) {
1241 			if (r->p != p) {
1242 				wrterror("bogus pointer");
1243 				return;
1244 			}
1245 		} else {
1246 #if notyetbecause_of_realloc
1247 			/* shifted towards the end */
1248 			if (p != ((char *)r->p) + ((MALLOC_PAGESIZE -
1249 			    MALLOC_MINSIZE - sz - mopts.malloc_guard) &
1250 			    ~(MALLOC_MINSIZE-1))) {
1251 			}
1252 #endif
1253 			p = r->p;
1254 		}
1255 		if (mopts.malloc_guard) {
1256 			if (sz < mopts.malloc_guard)
1257 				wrterror("guard size");
1258 			if (!mopts.malloc_freeprot) {
1259 				if (mprotect((char *)p + PAGEROUND(sz) -
1260 				    mopts.malloc_guard, mopts.malloc_guard,
1261 				    PROT_READ | PROT_WRITE))
1262 					wrterror("mprotect");
1263 			}
1264 			malloc_guarded -= mopts.malloc_guard;
1265 		}
1266 		if (mopts.malloc_junk && !mopts.malloc_freeprot)
1267 			memset(p, SOME_FREEJUNK,
1268 			    PAGEROUND(sz) - mopts.malloc_guard);
1269 		unmap(g_pool, p, PAGEROUND(sz));
1270 		delete(g_pool, r);
1271 	} else {
1272 		void *tmp;
1273 		int i;
1274 
1275 		if (mopts.malloc_junk && sz > 0)
1276 			memset(p, SOME_FREEJUNK, sz);
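		/*
		 * Delayed free (skipped when the F option protects freed
		 * memory): swap the chunk into a random slot of the
		 * delayed_chunks ring and free whatever occupied that slot
		 * before, so a just-freed chunk is not immediately reusable.
		 */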
1277 		if (!mopts.malloc_freeprot) {
1278 			i = getrbyte() & (MALLOC_DELAYED_CHUNKS - 1);
1279 			tmp = p;
1280 			p = g_pool->delayed_chunks[i];
1281 			g_pool->delayed_chunks[i] = tmp;
1282 		}
1283 		if (p != NULL) {
1284 			r = find(g_pool, p);
1285 			if (r == NULL) {
1286 				wrterror("bogus pointer (double free?)");
1287 				return;
1288 			}
1289 			free_bytes(g_pool, r, p);
1290 		}
1291 	}
1292 }
1293 
1294 void
1295 free(void *ptr)
1296 {
1297 	int saved_errno = errno;
1298 
1299 	/* This is legal. */
1300 	if (ptr == NULL)
1301 		return;
1302 
1303 	_MALLOC_LOCK();
1304 	malloc_func = " in free():";
1305 	if (g_pool == NULL) {
1306 		_MALLOC_UNLOCK();
1307 		wrterror("free() called before allocation");
1308 		return;
1309 	}
1310 	if (malloc_active++) {
1311 		malloc_recurse();
1312 		return;
1313 	}
1314 	ofree(ptr);
1315 	malloc_active--;
1316 	_MALLOC_UNLOCK();
1317 	errno = saved_errno;
1318 }
1319 
1320 
1321 static void *
1322 orealloc(void *p, size_t newsz)
1323 {
1324 	struct region_info *r;
1325 	size_t oldsz, goldsz, gnewsz;
1326 	void *q;
1327 
1328 	if (p == NULL)
1329 		return omalloc(newsz, 0);
1330 
1331 	r = find(g_pool, p);
1332 	if (r == NULL) {
1333 		wrterror("bogus pointer (double free?)");
1334 		return NULL;
1335 	}
1336 	if (newsz >= SIZE_MAX - mopts.malloc_guard - MALLOC_PAGESIZE) {
1337 		errno = ENOMEM;
1338 		return NULL;
1339 	}
1340 
1341 	REALSIZE(oldsz, r);
1342 	goldsz = oldsz;
1343 	if (oldsz > MALLOC_MAXCHUNK) {
1344 		if (oldsz < mopts.malloc_guard)
1345 			wrterror("guard size");
1346 		oldsz -= mopts.malloc_guard;
1347 	}
1348 
1349 	gnewsz = newsz;
1350 	if (gnewsz > MALLOC_MAXCHUNK)
1351 		gnewsz += mopts.malloc_guard;
1352 
1353 	if (newsz > MALLOC_MAXCHUNK && oldsz > MALLOC_MAXCHUNK && p == r->p &&
1354 	    !mopts.malloc_realloc) {
1355 		size_t roldsz = PAGEROUND(goldsz);
1356 		size_t rnewsz = PAGEROUND(gnewsz);
1357 
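		/*
		 * Growing case: try a cheap in-place extension by evicting
		 * the pages just past the region from the cache and mapping
		 * them with MMAPA(); if the kernel places the mapping
		 * elsewhere, undo it and fall through to the allocate, copy
		 * and free path below.
		 */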
1358 		if (rnewsz > roldsz) {
1359 			if (!mopts.malloc_guard) {
1360 				STATS_INC(g_pool->cheap_realloc_tries);
1361 				zapcacheregion(g_pool, p + roldsz);
1362 				q = MMAPA(p + roldsz, rnewsz - roldsz);
1363 				if (q == p + roldsz) {
1364 					malloc_used += rnewsz - roldsz;
1365 					if (mopts.malloc_junk)
1366 						memset(q, SOME_JUNK,
1367 						    rnewsz - roldsz);
1368 					r->size = newsz;
1369 					STATS_INC(g_pool->cheap_reallocs);
1370 					return p;
1371 				} else if (q != MAP_FAILED)
1372 					munmap(q, rnewsz - roldsz);
1373 			}
1374 		} else if (rnewsz < roldsz) {
1375 			if (mopts.malloc_guard) {
1376 				if (mprotect((char *)p + roldsz -
1377 				    mopts.malloc_guard, mopts.malloc_guard,
1378 				    PROT_READ | PROT_WRITE))
1379 					wrterror("mprotect");
1380 				if (mprotect((char *)p + rnewsz -
1381 				    mopts.malloc_guard, mopts.malloc_guard,
1382 				    PROT_NONE))
1383 					wrterror("mprotect");
1384 			}
1385 			unmap(g_pool, (char *)p + rnewsz, roldsz - rnewsz);
1386 			r->size = gnewsz;
1387 			return p;
1388 		} else {
1389 			if (newsz > oldsz && mopts.malloc_junk)
1390 				memset((char *)p + newsz, SOME_JUNK,
1391 				    rnewsz - mopts.malloc_guard - newsz);
1392 			r->size = gnewsz;
1393 			return p;
1394 		}
1395 	}
1396 	if (newsz <= oldsz && newsz > oldsz / 2 && !mopts.malloc_realloc) {
1397 		if (mopts.malloc_junk && newsz > 0)
1398 			memset((char *)p + newsz, SOME_JUNK, oldsz - newsz);
1399 		return p;
1400 	} else if (newsz != oldsz || mopts.malloc_realloc) {
1401 		q = omalloc(newsz, 0);
1402 		if (q == NULL)
1403 			return NULL;
1404 		if (newsz != 0 && oldsz != 0)
1405 			memcpy(q, p, oldsz < newsz ? oldsz : newsz);
1406 		ofree(p);
1407 		return q;
1408 	} else
1409 		return p;
1410 }
1411 
1412 void *
1413 realloc(void *ptr, size_t size)
1414 {
1415 	void *r;
1416 	int saved_errno = errno;
1417 
1418 	_MALLOC_LOCK();
1419 	malloc_func = " in realloc():";
1420 	if (g_pool == NULL) {
1421 		if (malloc_init() != 0)
1422 			return NULL;
1423 	}
1424 	if (malloc_active++) {
1425 		malloc_recurse();
1426 		return NULL;
1427 	}
1428 	r = orealloc(ptr, size);
1429 
1430 	malloc_active--;
1431 	_MALLOC_UNLOCK();
1432 	if (r == NULL && mopts.malloc_xmalloc) {
1433 		wrterror("out of memory");
1434 		errno = ENOMEM;
1435 	}
1436 	if (r != NULL)
1437 		errno = saved_errno;
1438 	return r;
1439 }
1440 
1441 
1442 #define MUL_NO_OVERFLOW	(1UL << (sizeof(size_t) * 4))
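/*
 * MUL_NO_OVERFLOW is 2^(half the bits in a size_t): if both nmemb and size
 * are below it their product cannot overflow, so the division test in
 * calloc() below only runs for large operands.  E.g. with a 32-bit size_t
 * the bound is 65536, and nmemb = size = 70000 is rejected because
 * SIZE_MAX / 70000 (61356) is smaller than 70000.
 */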
1443 
1444 void *
1445 calloc(size_t nmemb, size_t size)
1446 {
1447 	void *r;
1448 	int saved_errno = errno;
1449 
1450 	_MALLOC_LOCK();
1451 	malloc_func = " in calloc():";
1452 	if (g_pool == NULL) {
1453 		if (malloc_init() != 0)
1454 			return NULL;
1455 	}
1456 	if ((nmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
1457 	    nmemb > 0 && SIZE_MAX / nmemb < size) {
1458 		_MALLOC_UNLOCK();
1459 		if (mopts.malloc_xmalloc)
1460 			wrterror("out of memory");
1461 		errno = ENOMEM;
1462 		return NULL;
1463 	}
1464 
1465 	if (malloc_active++) {
1466 		malloc_recurse();
1467 		return NULL;
1468 	}
1469 
1470 	size *= nmemb;
1471 	r = omalloc(size, 1);
1472 
1473 	malloc_active--;
1474 	_MALLOC_UNLOCK();
1475 	if (r == NULL && mopts.malloc_xmalloc) {
1476 		wrterror("out of memory");
1477 		errno = ENOMEM;
1478 	}
1479 	if (r != NULL)
1480 		errno = saved_errno;
1481 	return r;
1482 }
1483 
1484