/*	$OpenBSD: malloc.c,v 1.115 2009/01/03 12:58:28 djm Exp $	*/
/*
 * Copyright (c) 2008 Otto Moerbeek <otto@drijf.net>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * Parts of this code, mainly the sub-page-sized chunk management code,
 * are derived from the malloc implementation with the following license:
 */
/*
 * ----------------------------------------------------------------------------
 * "THE BEER-WARE LICENSE" (Revision 42):
 * <phk@FreeBSD.ORG> wrote this file.  As long as you retain this notice you
 * can do whatever you want with this stuff. If we meet some day, and you think
 * this stuff is worth it, you can buy me a beer in return.  Poul-Henning Kamp
 * ----------------------------------------------------------------------------
 */

/* #define MALLOC_STATS */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/mman.h>
#include <sys/uio.h>
#include <errno.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <unistd.h>

#ifdef MALLOC_STATS
#include <fcntl.h>
#endif

#include "thread_private.h"

#define MALLOC_MINSHIFT		4
#define MALLOC_MAXSHIFT		16

#if defined(__sparc__) && !defined(__sparcv9__)
#define MALLOC_PAGESHIFT	(13U)
#else
#define MALLOC_PAGESHIFT	(PGSHIFT)
#endif

#define MALLOC_PAGESIZE		(1UL << MALLOC_PAGESHIFT)
#define MALLOC_MINSIZE		(1UL << MALLOC_MINSHIFT)
#define MALLOC_PAGEMASK		(MALLOC_PAGESIZE - 1)
#define MASK_POINTER(p)		((void *)(((uintptr_t)(p)) & ~MALLOC_PAGEMASK))

#define MALLOC_MAXCHUNK		(1 << (MALLOC_PAGESHIFT-1))
#define MALLOC_MAXCACHE		256
#define MALLOC_DELAYED_CHUNKS	16	/* should be power of 2 */
/*
 * When the P option is active, we move allocations between half a page
 * and a whole page towards the end of the page, subject to alignment
 * constraints. MALLOC_LEEWAY is the extra headroom we allow; set it to
 * zero for the strictest placement.
 */
#define MALLOC_LEEWAY		0
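
/*
 * Worked example (illustrative, not from the original source): with
 * MALLOC_PAGESIZE == 4096, MALLOC_LEEWAY == 0 and the P option on, a
 * 3000-byte allocation is shifted to page offset
 * (4096 - 0 - 3000) & ~(MALLOC_MINSIZE - 1) == 1088, so it occupies
 * bytes 1088..4087 of its page; the 8 bytes of slack at the end come
 * from rounding the start down to 16-byte alignment.
 */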

#define PAGEROUND(x)  (((x) + (MALLOC_PAGEMASK)) & ~MALLOC_PAGEMASK)

/*
 * Junk fill bytes.  These are the values we fill memory with when the
 * 'J' option is enabled: SOME_JUNK right after allocation and
 * SOME_FREEJUNK right before free.
 */
#define SOME_JUNK		0xd0	/* as in "Duh" :-) */
#define SOME_FREEJUNK		0xdf

#define MMAP(sz)	mmap(NULL, (size_t)(sz), PROT_READ | PROT_WRITE, \
    MAP_ANON | MAP_PRIVATE, -1, (off_t) 0)

#define MMAPA(a,sz)	mmap((a), (size_t)(sz), PROT_READ | PROT_WRITE, \
    MAP_ANON | MAP_PRIVATE, -1, (off_t) 0)

struct region_info {
	void *p;		/* page; low bits used to mark chunks */
	uintptr_t size;		/* size for pages, or chunk_info pointer */
};

struct dir_info {
	u_int32_t canary1;
	struct region_info *r;		/* region slots */
	size_t regions_total;		/* number of region slots */
	size_t regions_bits;		/* log2 of total */
	size_t regions_free;		/* number of free slots */
					/* list of free chunk info structs */
	struct chunk_info *chunk_info_list;
					/* lists of chunks with free slots */
	struct chunk_info *chunk_dir[MALLOC_MAXSHIFT];
	size_t free_regions_size;	/* free pages cached */
					/* free pages cache */
	struct region_info free_regions[MALLOC_MAXCACHE];
					/* delayed free chunk slots */
	void *delayed_chunks[MALLOC_DELAYED_CHUNKS];
#ifdef MALLOC_STATS
	size_t inserts;
	size_t insert_collisions;
	size_t finds;
	size_t find_collisions;
	size_t deletes;
	size_t delete_moves;
	size_t cheap_realloc_tries;
	size_t cheap_reallocs;
#define STATS_INC(x) ((x)++)
#define STATS_ZERO(x) ((x) = 0)
#else
#define STATS_INC(x)	/* nothing */
#define STATS_ZERO(x)	/* nothing */
#endif /* MALLOC_STATS */
	u_int32_t canary2;
};
#define DIR_INFO_RSZ	((sizeof(struct dir_info) + MALLOC_PAGEMASK) & \
			~MALLOC_PAGEMASK)

/* how many bits per u_long in the bitmap */
#define MALLOC_BITS		(NBBY * sizeof(u_long))

/*
 * This structure describes a page worth of chunks.
 */
struct chunk_info {
	struct chunk_info *next;	/* next on the free list */
	void *page;			/* pointer to the page */
	u_int32_t canary;
	u_short size;			/* size of this page's chunks */
	u_short shift;			/* how far to shift for this size */
	u_short free;			/* how many free chunks */
	u_short total;			/* how many chunks */
					/* which chunks are free */
	u_long bits[(MALLOC_PAGESIZE / MALLOC_MINSIZE) / MALLOC_BITS];
};
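
/*
 * Sizing example (illustrative, assuming MALLOC_PAGESHIFT == 12 and a
 * 64-bit u_long): a page of 16-byte chunks has total == 4096 >> 4 ==
 * 256 chunks, tracked in (4096 / 16) / 64 == 4 bitmap words, while a
 * page of 2048-byte chunks has total == 2 and uses only the low two
 * bits of bits[0].
 */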

struct malloc_readonly {
	struct dir_info *g_pool;	/* Main bookkeeping information */
	int	malloc_abort;		/* abort() on error */
	int	malloc_freeprot;	/* mprotect free pages PROT_NONE? */
	int	malloc_hint;		/* call madvise on free pages?  */
	int	malloc_junk;		/* junk fill? */
	int	malloc_move;		/* move allocations to end of page? */
	int	malloc_realloc;		/* always realloc? */
	int	malloc_xmalloc;		/* xmalloc behaviour? */
	int	malloc_zero;		/* zero fill? */
	size_t	malloc_guard;		/* use guard pages after allocations? */
	u_int	malloc_cache;		/* free pages we cache */
#ifdef MALLOC_STATS
	int	malloc_stats;		/* dump statistics at end */
#endif
	u_int32_t malloc_canary;	/* Matched against ones in g_pool */
};

/* This object is mapped PROT_READ after initialisation to prevent tampering */
static union {
	struct malloc_readonly mopts;
	u_char _pad[MALLOC_PAGESIZE];
} malloc_readonly __attribute__((aligned(MALLOC_PAGESIZE)));
#define mopts	malloc_readonly.mopts
#define g_pool	mopts.g_pool

char		*malloc_options;	/* compile-time options */

static char	*malloc_func;		/* current function */
static int	malloc_active;		/* status of malloc */

static size_t	malloc_guarded;		/* bytes used for guards */
static size_t	malloc_used;		/* bytes allocated */

static size_t rbytesused;		/* random bytes used */
static u_char rbytes[512];		/* random bytes */
static u_char getrbyte(void);

extern char	*__progname;

/*
 * The low bits of r->p encode the size class: 0 means a region of at
 * least a page, with r->size holding the real size; 1 marks a malloc(0)
 * page; any other value n means a page of chunks of size 1 << (n - 1),
 * with r->size then holding the chunk_info pointer.
 */
#define REALSIZE(sz, r) 					\
	(sz) = (uintptr_t)(r)->p & MALLOC_PAGEMASK,		\
	(sz) = ((sz) == 0 ? (r)->size : ((sz) == 1 ? 0 : (1 << ((sz)-1))))
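
/*
 * Encoding example (illustrative): a three-page region is stored as p
 * with low bits 0 and r->size == 3 * MALLOC_PAGESIZE; a page of
 * 64-byte chunks (shift 6) is stored as p | 7, and REALSIZE recovers
 * 1 << (7 - 1) == 64; a malloc(0) page is stored as p | 1 and REALSIZE
 * yields 0.
 */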

static inline size_t
hash(void *p)
{
	size_t sum;
	union {
		uintptr_t p;
		unsigned short a[sizeof(void *) / sizeof(short)];
	} u;
	u.p = (uintptr_t)p >> MALLOC_PAGESHIFT;
	sum = u.a[0];
	sum = (sum << 7) - sum + u.a[1];
#ifdef __LP64__
	sum = (sum << 7) - sum + u.a[2];
	sum = (sum << 7) - sum + u.a[3];
#endif
	return sum;
}
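
/*
 * What the statements above compute (a sketch, not authoritative):
 * since (sum << 7) - sum == sum * 127, on LP64 the result is
 *
 *	hash = ((a[0] * 127 + a[1]) * 127 + a[2]) * 127 + a[3]
 *
 * i.e. a base-127 polynomial over the 16-bit halves of the pointer's
 * page number.
 */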

#ifdef MALLOC_STATS
static void
dump_chunk(int fd, struct chunk_info *p, int fromfreelist)
{
	char buf[64];

	while (p) {
		snprintf(buf, sizeof(buf), "chunk %d %d/%d %p\n", p->size,
		    p->free, p->total, p->page);
		write(fd, buf, strlen(buf));
		if (!fromfreelist)
			break;
		p = p->next;
		if (p != NULL) {
			snprintf(buf, sizeof(buf), "    ");
			write(fd, buf, strlen(buf));
		}
	}
}

static void
dump_free_chunk_info(int fd, struct dir_info *d)
{
	char buf[64];
	int i;

	snprintf(buf, sizeof(buf), "Free chunk structs:\n");
	write(fd, buf, strlen(buf));
	for (i = 0; i < MALLOC_MAXSHIFT; i++) {
		struct chunk_info *p = d->chunk_dir[i];
		if (p != NULL) {
			snprintf(buf, sizeof(buf), "%2d) ", i);
			write(fd, buf, strlen(buf));
			dump_chunk(fd, p, 1);
		}
	}
}

static void
dump_free_page_info(int fd, struct dir_info *d)
{
	char buf[64];
	int i;

	snprintf(buf, sizeof(buf), "Free pages cached: %zu\n",
	    d->free_regions_size);
	write(fd, buf, strlen(buf));
	for (i = 0; i < mopts.malloc_cache; i++) {
		if (d->free_regions[i].p != NULL) {
			snprintf(buf, sizeof(buf), "%2d) ", i);
			write(fd, buf, strlen(buf));
			snprintf(buf, sizeof(buf), "free at %p: %zu\n",
			    d->free_regions[i].p, d->free_regions[i].size);
			write(fd, buf, strlen(buf));
		}
	}
}

static void
malloc_dump1(int fd, struct dir_info *d)
{
	char buf[64];
	size_t i, realsize;

	snprintf(buf, sizeof(buf), "Malloc dir of %s at %p\n", __progname, d);
	write(fd, buf, strlen(buf));
	if (d == NULL)
		return;
	snprintf(buf, sizeof(buf), "Regions slots %zu\n", d->regions_total);
	write(fd, buf, strlen(buf));
	snprintf(buf, sizeof(buf), "Finds %zu/%zu %f\n", d->finds,
	    d->find_collisions,
	    1.0 + (double)d->find_collisions / d->finds);
	write(fd, buf, strlen(buf));
	snprintf(buf, sizeof(buf), "Inserts %zu/%zu %f\n", d->inserts,
	    d->insert_collisions,
	    1.0 + (double)d->insert_collisions / d->inserts);
	write(fd, buf, strlen(buf));
	snprintf(buf, sizeof(buf), "Deletes %zu/%zu\n", d->deletes,
	     d->delete_moves);
	write(fd, buf, strlen(buf));
	snprintf(buf, sizeof(buf), "Cheap reallocs %zu/%zu\n",
	    d->cheap_reallocs, d->cheap_realloc_tries);
	write(fd, buf, strlen(buf));
	snprintf(buf, sizeof(buf), "Regions slots free %zu\n", d->regions_free);
	write(fd, buf, strlen(buf));
	for (i = 0; i < d->regions_total; i++) {
		if (d->r[i].p != NULL) {
			size_t h = hash(d->r[i].p) &
			    (d->regions_total - 1);
			snprintf(buf, sizeof(buf), "%4zx) #%zx %zd ",
			    i, h, h - i);
			write(fd, buf, strlen(buf));
			REALSIZE(realsize, &d->r[i]);
			if (realsize > MALLOC_MAXCHUNK) {
				snprintf(buf, sizeof(buf),
				    "%p: %zu\n", d->r[i].p, realsize);
				write(fd, buf, strlen(buf));
			} else
				dump_chunk(fd,
				    (struct chunk_info *)d->r[i].size, 0);
		}
	}
	dump_free_chunk_info(fd, d);
	dump_free_page_info(fd, d);
	snprintf(buf, sizeof(buf), "In use %zu\n", malloc_used);
	write(fd, buf, strlen(buf));
	snprintf(buf, sizeof(buf), "Guarded %zu\n", malloc_guarded);
	write(fd, buf, strlen(buf));
}

void
malloc_dump(int fd)
{
	malloc_dump1(fd, g_pool);
}

static void
malloc_exit(void)
{
	static const char q[] = "malloc() warning: Couldn't dump stats\n";
	int save_errno = errno, fd;

	fd = open("malloc.out", O_RDWR|O_APPEND);
	if (fd != -1) {
		malloc_dump(fd);
		close(fd);
	} else
		write(STDERR_FILENO, q, sizeof(q) - 1);
	errno = save_errno;
}
#endif /* MALLOC_STATS */

static void
wrterror(char *p)
{
	char		*q = " error: ";
	struct iovec	iov[5];

	iov[0].iov_base = __progname;
	iov[0].iov_len = strlen(__progname);
	iov[1].iov_base = malloc_func;
	iov[1].iov_len = strlen(malloc_func);
	iov[2].iov_base = q;
	iov[2].iov_len = strlen(q);
	iov[3].iov_base = p;
	iov[3].iov_len = strlen(p);
	iov[4].iov_base = "\n";
	iov[4].iov_len = 1;
	writev(STDERR_FILENO, iov, 5);

#ifdef MALLOC_STATS
	if (mopts.malloc_stats)
		malloc_dump(STDERR_FILENO);
#endif /* MALLOC_STATS */
	if (mopts.malloc_abort)
		abort();
}

/*
 * Cache maintenance. We keep at most malloc_cache pages cached.
 * If the cache is becoming full, unmap pages in the cache for real,
 * and then add the region to the cache.
 * Unlike the regular region data structure, the sizes in the cache
 * are in MALLOC_PAGESIZE units.
 */
static void
unmap(struct dir_info *d, void *p, size_t sz)
{
	size_t psz = sz >> MALLOC_PAGESHIFT;
	size_t rsz, tounmap;
	struct region_info *r;
	u_int i, offset;

	if (sz != PAGEROUND(sz)) {
		wrterror("munmap round");
		return;
	}

	if (psz > mopts.malloc_cache) {
		if (munmap(p, sz))
			wrterror("munmap");
		malloc_used -= sz;
		return;
	}
	tounmap = 0;
	rsz = mopts.malloc_cache - d->free_regions_size;
	if (psz > rsz)
		tounmap = psz - rsz;
	offset = getrbyte();
	for (i = 0; tounmap > 0 && i < mopts.malloc_cache; i++) {
		r = &d->free_regions[(i + offset) & (mopts.malloc_cache - 1)];
		if (r->p != NULL) {
			rsz = r->size << MALLOC_PAGESHIFT;
			if (munmap(r->p, rsz))
				wrterror("munmap");
			r->p = NULL;
			if (tounmap > r->size)
				tounmap -= r->size;
			else
				tounmap = 0;
			d->free_regions_size -= r->size;
			r->size = 0;
			malloc_used -= rsz;
		}
	}
	if (tounmap > 0)
		wrterror("malloc cache underflow");
	for (i = 0; i < mopts.malloc_cache; i++) {
		r = &d->free_regions[i];
		if (r->p == NULL) {
			if (mopts.malloc_hint)
				madvise(p, sz, MADV_FREE);
			if (mopts.malloc_freeprot)
				mprotect(p, sz, PROT_NONE);
			r->p = p;
			r->size = psz;
			d->free_regions_size += psz;
			break;
		}
	}
	if (i == mopts.malloc_cache)
		wrterror("malloc free slot lost");
	if (d->free_regions_size > mopts.malloc_cache)
		wrterror("malloc cache overflow");
}
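
/*
 * Cache accounting example (illustrative numbers): with malloc_cache
 * == 64 and 60 pages already cached, unmap() of an 8-page region first
 * evicts at least 8 - (64 - 60) == 4 cached pages, starting from a
 * random slot, and then parks the region in a free slot with size 8
 * (in pages, not bytes).
 */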

static void
zapcacheregion(struct dir_info *d, void *p)
{
	u_int i;
	struct region_info *r;
	size_t rsz;

	for (i = 0; i < mopts.malloc_cache; i++) {
		r = &d->free_regions[i];
		if (r->p == p) {
			rsz = r->size << MALLOC_PAGESHIFT;
			if (munmap(r->p, rsz))
				wrterror("munmap");
			r->p = NULL;
			d->free_regions_size -= r->size;
			r->size = 0;
			malloc_used -= rsz;
		}
	}
}

static void *
map(struct dir_info *d, size_t sz, int zero_fill)
{
	size_t psz = sz >> MALLOC_PAGESHIFT;
	struct region_info *r, *big = NULL;
	u_int i, offset;
	void *p;

	if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)(uintptr_t)d) ||
	    d->canary1 != ~d->canary2)
		wrterror("internal struct corrupt");
	if (sz != PAGEROUND(sz)) {
		wrterror("map round");
		return NULL;
	}
	if (psz > d->free_regions_size) {
		p = MMAP(sz);
		if (p != MAP_FAILED)
			malloc_used += sz;
		/* zero fill not needed */
		return p;
	}
	offset = getrbyte();
	for (i = 0; i < mopts.malloc_cache; i++) {
		r = &d->free_regions[(i + offset) & (mopts.malloc_cache - 1)];
		if (r->p != NULL) {
			if (r->size == psz) {
				p = r->p;
				if (mopts.malloc_freeprot)
					mprotect(p, sz, PROT_READ | PROT_WRITE);
				if (mopts.malloc_hint)
					madvise(p, sz, MADV_NORMAL);
				r->p = NULL;
				r->size = 0;
				d->free_regions_size -= psz;
				if (zero_fill)
					memset(p, 0, sz);
				else if (mopts.malloc_junk &&
				    mopts.malloc_freeprot)
					memset(p, SOME_FREEJUNK, sz);
				return p;
			} else if (r->size > psz)
				big = r;
		}
	}
	if (big != NULL) {
		r = big;
		p = (char *)r->p + ((r->size - psz) << MALLOC_PAGESHIFT);
		if (mopts.malloc_freeprot)
			mprotect(p, sz, PROT_READ | PROT_WRITE);
		if (mopts.malloc_hint)
			madvise(p, sz, MADV_NORMAL);
		r->size -= psz;
		d->free_regions_size -= psz;
		if (zero_fill)
			memset(p, 0, sz);
		return p;
	}
	p = MMAP(sz);
	if (p != MAP_FAILED)
		malloc_used += sz;
	if (d->free_regions_size > mopts.malloc_cache)
		wrterror("malloc cache");
	/* zero fill not needed */
	return p;
}

static void
rbytes_init(void)
{
	arc4random_buf(rbytes, sizeof(rbytes));
	rbytesused = 0;
}

static u_char
getrbyte(void)
{
	if (rbytesused >= sizeof(rbytes))
		rbytes_init();
	return rbytes[rbytesused++];
}

/*
 * Initialize a dir_info, which should have been cleared by the caller.
 */
static int
omalloc_init(struct dir_info **dp)
{
	char *p, b[64];
	int i, j;
	size_t d_avail, regioninfo_size;
	struct dir_info *d;

	rbytes_init();

	/*
	 * Default options
	 */
	mopts.malloc_abort = 1;
	mopts.malloc_move = 1;
	mopts.malloc_cache = 64;

	for (i = 0; i < 3; i++) {
		switch (i) {
		case 0:
			j = readlink("/etc/malloc.conf", b, sizeof b - 1);
			if (j <= 0)
				continue;
			b[j] = '\0';
			p = b;
			break;
		case 1:
			if (issetugid() == 0)
				p = getenv("MALLOC_OPTIONS");
			else
				continue;
			break;
		case 2:
			p = malloc_options;
			break;
		default:
			p = NULL;
		}

		for (; p != NULL && *p != '\0'; p++) {
			switch (*p) {
			case '>':
				mopts.malloc_cache <<= 1;
				if (mopts.malloc_cache > MALLOC_MAXCACHE)
					mopts.malloc_cache = MALLOC_MAXCACHE;
				break;
			case '<':
				mopts.malloc_cache >>= 1;
				break;
			case 'a':
				mopts.malloc_abort = 0;
				break;
			case 'A':
				mopts.malloc_abort = 1;
				break;
#ifdef MALLOC_STATS
			case 'd':
				mopts.malloc_stats = 0;
				break;
			case 'D':
				mopts.malloc_stats = 1;
				break;
#endif /* MALLOC_STATS */
			case 'f':
				mopts.malloc_freeprot = 0;
				break;
			case 'F':
				mopts.malloc_freeprot = 1;
				break;
			case 'g':
				mopts.malloc_guard = 0;
				break;
			case 'G':
				mopts.malloc_guard = MALLOC_PAGESIZE;
				break;
			case 'h':
				mopts.malloc_hint = 0;
				break;
			case 'H':
				mopts.malloc_hint = 1;
				break;
			case 'j':
				mopts.malloc_junk = 0;
				break;
			case 'J':
				mopts.malloc_junk = 1;
				break;
			case 'n':
			case 'N':
				break;
			case 'p':
				mopts.malloc_move = 0;
				break;
			case 'P':
				mopts.malloc_move = 1;
				break;
			case 'r':
				mopts.malloc_realloc = 0;
				break;
			case 'R':
				mopts.malloc_realloc = 1;
				break;
			case 'x':
				mopts.malloc_xmalloc = 0;
				break;
			case 'X':
				mopts.malloc_xmalloc = 1;
				break;
			case 'z':
				mopts.malloc_zero = 0;
				break;
			case 'Z':
				mopts.malloc_zero = 1;
				break;
			default: {
				static const char q[] = "malloc() warning: "
				    "unknown char in MALLOC_OPTIONS\n";
				write(STDERR_FILENO, q, sizeof(q) - 1);
				break;
			}
			}
		}
	}

	/*
	 * We want junk in the entire allocation, and zero only in the part
	 * the user asked for.
	 */
	if (mopts.malloc_zero)
		mopts.malloc_junk = 1;

#ifdef MALLOC_STATS
	if (mopts.malloc_stats && (atexit(malloc_exit) == -1)) {
		static const char q[] = "malloc() warning: atexit(2) failed."
		    " Will not be able to dump stats on exit\n";
		write(STDERR_FILENO, q, sizeof(q) - 1);
	}
#endif /* MALLOC_STATS */

	while ((mopts.malloc_canary = arc4random()) == 0)
		;

	/*
	 * Allocate dir_info with a guard page on either side. Also
	 * randomise the offset inside the page at which the dir_info
	 * lies (subject to alignment by 1 << MALLOC_MINSHIFT).
	 */
	if ((p = MMAP(DIR_INFO_RSZ + (MALLOC_PAGESIZE * 2))) == MAP_FAILED)
		return -1;
	mprotect(p, MALLOC_PAGESIZE, PROT_NONE);
	mprotect(p + MALLOC_PAGESIZE + DIR_INFO_RSZ,
	    MALLOC_PAGESIZE, PROT_NONE);
	d_avail = (DIR_INFO_RSZ - sizeof(*d)) >> MALLOC_MINSHIFT;
	d = (struct dir_info *)(p + MALLOC_PAGESIZE +
	    (arc4random_uniform(d_avail) << MALLOC_MINSHIFT));

	d->regions_bits = 9;
	d->regions_free = d->regions_total = 1 << d->regions_bits;
	regioninfo_size = d->regions_total * sizeof(struct region_info);
	d->r = MMAP(regioninfo_size);
	if (d->r == MAP_FAILED) {
		wrterror("malloc init mmap failed");
		d->regions_total = 0;
		return 1;
	}
	malloc_used += regioninfo_size;
	memset(d->r, 0, regioninfo_size);
	d->canary1 = mopts.malloc_canary ^ (u_int32_t)(uintptr_t)d;
	d->canary2 = ~d->canary1;

	*dp = d;

	/*
	 * Options have been set and will never be reset.
	 * Prevent further tampering with them.
	 */
	if (((uintptr_t)&malloc_readonly & MALLOC_PAGEMASK) == 0)
		mprotect(&malloc_readonly, sizeof(malloc_readonly), PROT_READ);

	return 0;
}
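
/*
 * Placement example (illustrative numbers, not measured): if struct
 * dir_info were 5000 bytes with MALLOC_PAGESIZE == 4096, DIR_INFO_RSZ
 * would be 8192 and d_avail == (8192 - 5000) >> 4 == 199, so the
 * dir_info starts at one of 199 random 16-byte-aligned offsets within
 * the two data pages, flanked by PROT_NONE guard pages.
 */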

static int
omalloc_grow(struct dir_info *d)
{
	size_t newbits;
	size_t newtotal;
	size_t newsize;
	size_t mask;
	size_t i;
	struct region_info *p;

	if (d->regions_total > SIZE_MAX / sizeof(struct region_info) / 2)
		return 1;

	newbits = d->regions_bits + 1;
	newtotal = d->regions_total * 2;
	newsize = newtotal * sizeof(struct region_info);
	mask = newtotal - 1;

	p = MMAP(newsize);
	if (p == MAP_FAILED)
		return 1;

	malloc_used += newsize;
	memset(p, 0, newsize);
	STATS_ZERO(d->inserts);
	STATS_ZERO(d->insert_collisions);
	for (i = 0; i < d->regions_total; i++) {
		void *q = d->r[i].p;
		if (q != NULL) {
			size_t index = hash(q) & mask;
			STATS_INC(d->inserts);
			while (p[index].p != NULL) {
				index = (index - 1) & mask;
				STATS_INC(d->insert_collisions);
			}
			p[index] = d->r[i];
		}
	}
	/* avoid letting pages containing meta info end up in the cache */
	if (munmap(d->r, d->regions_total * sizeof(struct region_info)))
		wrterror("munmap");
	else
		malloc_used -= d->regions_total * sizeof(struct region_info);
	d->regions_free = d->regions_free + d->regions_total;
	d->regions_total = newtotal;
	d->regions_bits = newbits;
	d->r = p;
	return 0;
}

static struct chunk_info *
alloc_chunk_info(struct dir_info *d)
{
	struct chunk_info *p;
	int i;

	if (d->chunk_info_list == NULL) {
		p = MMAP(MALLOC_PAGESIZE);
		if (p == MAP_FAILED)
			return NULL;
		malloc_used += MALLOC_PAGESIZE;
		for (i = 0; i < MALLOC_PAGESIZE / sizeof(*p); i++) {
			p[i].next = d->chunk_info_list;
			d->chunk_info_list = &p[i];
		}
	}
	p = d->chunk_info_list;
	d->chunk_info_list = p->next;
	memset(p, 0, sizeof *p);
	p->canary = d->canary1;
	return p;
}

static void
put_chunk_info(struct dir_info *d, struct chunk_info *p)
{
	p->next = d->chunk_info_list;
	d->chunk_info_list = p;
}

static int
insert(struct dir_info *d, void *p, size_t sz)
{
	size_t index;
	size_t mask;
	void *q;

	if (d->regions_free * 4 < d->regions_total) {
		if (omalloc_grow(d))
			return 1;
	}
	mask = d->regions_total - 1;
	index = hash(p) & mask;
	q = d->r[index].p;
	STATS_INC(d->inserts);
	while (q != NULL) {
		index = (index - 1) & mask;
		q = d->r[index].p;
		STATS_INC(d->insert_collisions);
	}
	d->r[index].p = p;
	d->r[index].size = sz;
	d->regions_free--;
	return 0;
}

static struct region_info *
find(struct dir_info *d, void *p)
{
	size_t index;
	size_t mask = d->regions_total - 1;
	void *q, *r;

	if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)(uintptr_t)d) ||
	    d->canary1 != ~d->canary2)
		wrterror("internal struct corrupt");
	p = MASK_POINTER(p);
	index = hash(p) & mask;
	r = d->r[index].p;
	q = MASK_POINTER(r);
	STATS_INC(d->finds);
	while (q != p && r != NULL) {
		index = (index - 1) & mask;
		r = d->r[index].p;
		q = MASK_POINTER(r);
		STATS_INC(d->find_collisions);
	}
	return q == p ? &d->r[index] : NULL;
}

static void
delete(struct dir_info *d, struct region_info *ri)
{
	/* algorithm R, Knuth Vol III section 6.4 */
	size_t mask = d->regions_total - 1;
	size_t i, j, r;

	if (d->regions_total & (d->regions_total - 1))
		wrterror("regions_total not 2^x");
	d->regions_free++;
	STATS_INC(g_pool->deletes);

	i = ri - d->r;
	for (;;) {
		d->r[i].p = NULL;
		d->r[i].size = 0;
		j = i;
		for (;;) {
			i = (i - 1) & mask;
			if (d->r[i].p == NULL)
				return;
			r = hash(d->r[i].p) & mask;
			if ((i <= r && r < j) || (r < j && j < i) ||
			    (j < i && i <= r))
				continue;
			d->r[j] = d->r[i];
			STATS_INC(g_pool->delete_moves);
			break;
		}
	}
}
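
/*
 * Deletion example (illustrative): with 8 slots, let A live in its
 * home slot 5 and B, which also hashes to 5, in slot 4 after probing
 * downwards. Deleting A empties slot 5; the loop then inspects B in
 * slot 4, sees that B's home slot lies outside the probe gap, and
 * moves B back into slot 5 so that later probes for B still find it
 * before hitting an empty slot.
 */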

/*
 * Allocate a page of chunks
 */
static struct chunk_info *
omalloc_make_chunks(struct dir_info *d, int bits)
{
	struct chunk_info *bp;
	void		*pp;
	long		i, k;

	/* Allocate a new bucket */
	pp = map(d, MALLOC_PAGESIZE, 0);
	if (pp == MAP_FAILED)
		return NULL;

	bp = alloc_chunk_info(d);
	if (bp == NULL) {
		unmap(d, pp, MALLOC_PAGESIZE);
		return NULL;
	}

	/* memory protect the page allocated in the malloc(0) case */
	if (bits == 0) {
		bp->size = 0;
		bp->shift = 1;
		i = MALLOC_MINSIZE - 1;
		while (i >>= 1)
			bp->shift++;
		bp->total = bp->free = MALLOC_PAGESIZE >> bp->shift;
		bp->page = pp;

		k = mprotect(pp, MALLOC_PAGESIZE, PROT_NONE);
		if (k < 0) {
			unmap(d, pp, MALLOC_PAGESIZE);
			put_chunk_info(d, bp);
			return NULL;
		}
	} else {
		bp->size = (1UL << bits);
		bp->shift = bits;
		bp->total = bp->free = MALLOC_PAGESIZE >> bits;
		bp->page = pp;
	}

	/* set all valid bits in the bitmap */
	k = bp->total;
	i = 0;

	/* Do a bunch at a time */
	for (; (k - i) >= MALLOC_BITS; i += MALLOC_BITS)
		bp->bits[i / MALLOC_BITS] = ~0UL;

	for (; i < k; i++)
		bp->bits[i / MALLOC_BITS] |= 1UL << (i % MALLOC_BITS);

	bp->next = d->chunk_dir[bits];
	d->chunk_dir[bits] = bp;

	bits++;
	if ((uintptr_t)pp & bits)
		wrterror("pp & bits");

	insert(d, (void *)((uintptr_t)pp | bits), (uintptr_t)bp);
	return bp;
}

/*
 * Allocate a chunk
 */
static void *
malloc_bytes(struct dir_info *d, size_t size)
{
	int		i, j;
	size_t		k;
	u_long		u, *lp;
	struct chunk_info *bp;

	if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)(uintptr_t)d) ||
	    d->canary1 != ~d->canary2)
		wrterror("internal struct corrupt");
	/* Don't bother with anything less than this */
	/* unless we have a malloc(0) request */
	if (size != 0 && size < MALLOC_MINSIZE)
		size = MALLOC_MINSIZE;

	/* Find the right bucket */
	if (size == 0)
		j = 0;
	else {
		j = MALLOC_MINSHIFT;
		i = (size - 1) >> (MALLOC_MINSHIFT - 1);
		while (i >>= 1)
			j++;
	}

	/* If it's empty, make a page more of that size chunks */
	bp = d->chunk_dir[j];
	if (bp == NULL && (bp = omalloc_make_chunks(d, j)) == NULL)
		return NULL;

	if (bp->canary != d->canary1)
		wrterror("chunk info corrupted");
	/* Find first word of bitmap which isn't empty */
	for (lp = bp->bits; !*lp; lp++)
		/* EMPTY */;

	/* Find that bit, and tweak it */
	u = 1;
	k = 0;
	while (!(*lp & u)) {
		u += u;
		k++;
	}

	/* advance a random # of positions */
	i = (getrbyte() & (MALLOC_DELAYED_CHUNKS - 1)) % bp->free;
	while (i > 0) {
		u += u;
		k++;
		if (k >= MALLOC_BITS) {
			lp++;
			u = 1;
			k = 0;
		}
		if (lp - bp->bits > (bp->total - 1) / MALLOC_BITS) {
			wrterror("chunk overflow");
			errno = EFAULT;
			return (NULL);
		}
		if (*lp & u)
			i--;
	}

	*lp ^= u;

	/* If there are no more free, remove from free-list */
	if (!--bp->free) {
		d->chunk_dir[j] = bp->next;
		bp->next = NULL;
	}
	/* Adjust to the real offset of that chunk */
	k += (lp - bp->bits) * MALLOC_BITS;
	k <<= bp->shift;

	if (mopts.malloc_junk && bp->size > 0)
		memset((char *)bp->page + k, SOME_JUNK, bp->size);
	return ((char *)bp->page + k);
}
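
/*
 * Bucket selection example (illustrative): for size == 100,
 * i == 99 >> (MALLOC_MINSHIFT - 1) == 12 and j starts at 4; shifting
 * i right three times until it reaches zero leaves j == 7, so the
 * request is served from a page of 1 << 7 == 128-byte chunks, the
 * smallest power of two that fits 100 bytes.
 */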

/*
 * Free a chunk, and possibly the page it's on, if the page becomes empty.
 */
static void
free_bytes(struct dir_info *d, struct region_info *r, void *ptr)
{
	struct chunk_info *info, **mp;
	long i;

	info = (struct chunk_info *)r->size;
	if (info->canary != d->canary1)
		wrterror("chunk info corrupted");

	/* Find the chunk number on the page */
	i = ((uintptr_t)ptr & MALLOC_PAGEMASK) >> info->shift;

	if ((uintptr_t)ptr & ((1UL << (info->shift)) - 1)) {
		wrterror("modified chunk-pointer");
		return;
	}
	if (info->bits[i / MALLOC_BITS] & (1UL << (i % MALLOC_BITS))) {
		wrterror("chunk is already free");
		return;
	}

	info->bits[i / MALLOC_BITS] |= 1UL << (i % MALLOC_BITS);
	info->free++;

	if (info->size != 0)
		mp = d->chunk_dir + info->shift;
	else
		mp = d->chunk_dir;

	if (info->free == 1) {
		/* Page became non-full */

		/* Insert in address order */
		while (*mp != NULL && (*mp)->next != NULL &&
		    (*mp)->next->page < info->page)
			mp = &(*mp)->next;
		info->next = *mp;
		*mp = info;
		return;
	}
	if (info->free != info->total)
		return;

	/* Find & remove this page in the queue */
	while (*mp != info) {
		mp = &((*mp)->next);
		if (!*mp) {
			wrterror("not on queue");
			errno = EFAULT;
			return;
		}
	}
	*mp = info->next;

	if (info->size == 0 && !mopts.malloc_freeprot)
		mprotect(info->page, MALLOC_PAGESIZE, PROT_READ | PROT_WRITE);
	unmap(d, info->page, MALLOC_PAGESIZE);

	delete(d, r);
	put_chunk_info(d, info);
}

static void *
omalloc(size_t sz, int zero_fill)
{
	void *p;
	size_t psz;

	if (sz > MALLOC_MAXCHUNK) {
		if (sz >= SIZE_MAX - mopts.malloc_guard - MALLOC_PAGESIZE) {
			errno = ENOMEM;
			return NULL;
		}
		sz += mopts.malloc_guard;
		psz = PAGEROUND(sz);
		p = map(g_pool, psz, zero_fill);
		if (p == MAP_FAILED) {
			errno = ENOMEM;
			return NULL;
		}
		if (insert(g_pool, p, sz)) {
			unmap(g_pool, p, psz);
			errno = ENOMEM;
			return NULL;
		}
		if (mopts.malloc_guard) {
			if (mprotect((char *)p + psz - mopts.malloc_guard,
			    mopts.malloc_guard, PROT_NONE))
				wrterror("mprotect");
			malloc_guarded += mopts.malloc_guard;
		}

		if (mopts.malloc_move &&
		    sz - mopts.malloc_guard < MALLOC_PAGESIZE -
		    MALLOC_LEEWAY) {
			/* fill whole allocation */
			if (mopts.malloc_junk)
				memset(p, SOME_JUNK, psz - mopts.malloc_guard);
			/* shift towards the end */
			p = ((char *)p) + ((MALLOC_PAGESIZE - MALLOC_LEEWAY -
			    (sz - mopts.malloc_guard)) & ~(MALLOC_MINSIZE-1));
			/*
			 * re-zero the part the user asked for if the
			 * junk fill above overwrote it
			 */
			if (zero_fill && mopts.malloc_junk)
				memset(p, 0, sz - mopts.malloc_guard);
		} else {
			if (mopts.malloc_junk) {
				if (zero_fill)
					memset((char *)p + sz -
					    mopts.malloc_guard, SOME_JUNK,
					    psz - sz);
				else
					memset(p, SOME_JUNK,
					    psz - mopts.malloc_guard);
			}
		}

	} else {
		/* takes care of SOME_JUNK */
		p = malloc_bytes(g_pool, sz);
		if (zero_fill && p != NULL && sz > 0)
			memset(p, 0, sz);
	}

	return p;
}

/*
 * Common function for handling recursion.  Only
 * print the error message once, to avoid making the problem
 * potentially worse.
 */
static void
malloc_recurse(void)
{
	static int noprint;

	if (noprint == 0) {
		noprint = 1;
		wrterror("recursive call");
	}
	malloc_active--;
	_MALLOC_UNLOCK();
	errno = EDEADLK;
}

static int
malloc_init(void)
{
	if (omalloc_init(&g_pool)) {
		_MALLOC_UNLOCK();
		if (mopts.malloc_xmalloc)
			wrterror("out of memory");
		errno = ENOMEM;
		return -1;
	}
	return 0;
}
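
/*
 * Usage sketch (illustrative): the option characters parsed in
 * omalloc_init() may come from the target of the /etc/malloc.conf
 * symlink, from the MALLOC_OPTIONS environment variable, or from the
 * malloc_options global, e.g.
 *
 *	# ln -s AFGJ /etc/malloc.conf
 *	$ MALLOC_OPTIONS=Z ./prog
 *
 * The sources are parsed in that order, so a later source can flip
 * flags set by an earlier one.
 */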

void *
malloc(size_t size)
{
	void *r;
	int saved_errno = errno;

	_MALLOC_LOCK();
	malloc_func = " in malloc():";
	if (g_pool == NULL) {
		if (malloc_init() != 0)
			return NULL;
	}
	if (malloc_active++) {
		malloc_recurse();
		return NULL;
	}
	r = omalloc(size, mopts.malloc_zero);
	malloc_active--;
	_MALLOC_UNLOCK();
	if (r == NULL && mopts.malloc_xmalloc) {
		wrterror("out of memory");
		errno = ENOMEM;
	}
	if (r != NULL)
		errno = saved_errno;
	return r;
}

static void
ofree(void *p)
{
	struct region_info *r;
	size_t sz;

	r = find(g_pool, p);
	if (r == NULL) {
		wrterror("bogus pointer (double free?)");
		return;
	}
	REALSIZE(sz, r);
	if (sz > MALLOC_MAXCHUNK) {
		if (sz - mopts.malloc_guard >= MALLOC_PAGESIZE -
		    MALLOC_LEEWAY) {
			if (r->p != p) {
				wrterror("bogus pointer");
				return;
			}
		} else {
#if notyetbecause_of_realloc
			/* shifted towards the end */
			if (p != ((char *)r->p) + ((MALLOC_PAGESIZE -
			    MALLOC_MINSIZE - sz - mopts.malloc_guard) &
			    ~(MALLOC_MINSIZE-1))) {
			}
#endif
			p = r->p;
		}
		if (mopts.malloc_guard) {
			if (sz < mopts.malloc_guard)
				wrterror("guard size");
			if (!mopts.malloc_freeprot) {
				if (mprotect((char *)p + PAGEROUND(sz) -
				    mopts.malloc_guard, mopts.malloc_guard,
				    PROT_READ | PROT_WRITE))
					wrterror("mprotect");
			}
			malloc_guarded -= mopts.malloc_guard;
		}
		if (mopts.malloc_junk && !mopts.malloc_freeprot)
			memset(p, SOME_FREEJUNK,
			    PAGEROUND(sz) - mopts.malloc_guard);
		unmap(g_pool, p, PAGEROUND(sz));
		delete(g_pool, r);
	} else {
		void *tmp;
		int i;

		if (mopts.malloc_junk && sz > 0)
			memset(p, SOME_FREEJUNK, sz);
		if (!mopts.malloc_freeprot) {
			i = getrbyte() & (MALLOC_DELAYED_CHUNKS - 1);
			tmp = p;
			p = g_pool->delayed_chunks[i];
			g_pool->delayed_chunks[i] = tmp;
		}
		if (p != NULL) {
			r = find(g_pool, p);
			if (r == NULL) {
				wrterror("bogus pointer (double free?)");
				return;
			}
			free_bytes(g_pool, r, p);
		}
	}
}
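
/*
 * Delayed-free example (illustrative): when freeprot is off, freeing
 * chunk X above swaps it into a random one of the 16 delayed_chunks
 * slots and instead frees whatever chunk previously occupied that
 * slot, so small chunks return to their page in a randomised order
 * and a recently freed chunk is not immediately reused while it is
 * parked, filled with SOME_FREEJUNK.
 */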

void
free(void *ptr)
{
	int saved_errno = errno;

	/* This is legal. */
	if (ptr == NULL)
		return;

	_MALLOC_LOCK();
	malloc_func = " in free():";
	if (g_pool == NULL) {
		_MALLOC_UNLOCK();
		wrterror("free() called before allocation");
		return;
	}
	if (malloc_active++) {
		malloc_recurse();
		return;
	}
	ofree(ptr);
	malloc_active--;
	_MALLOC_UNLOCK();
	errno = saved_errno;
}

static void *
orealloc(void *p, size_t newsz)
{
	struct region_info *r;
	size_t oldsz, goldsz, gnewsz;
	void *q;

	if (p == NULL)
		return omalloc(newsz, 0);

	r = find(g_pool, p);
	if (r == NULL) {
		wrterror("bogus pointer (double free?)");
		return NULL;
	}
	if (newsz >= SIZE_MAX - mopts.malloc_guard - MALLOC_PAGESIZE) {
		errno = ENOMEM;
		return NULL;
	}

	REALSIZE(oldsz, r);
	goldsz = oldsz;
	if (oldsz > MALLOC_MAXCHUNK) {
		if (oldsz < mopts.malloc_guard)
			wrterror("guard size");
		oldsz -= mopts.malloc_guard;
	}

	gnewsz = newsz;
	if (gnewsz > MALLOC_MAXCHUNK)
		gnewsz += mopts.malloc_guard;

	if (newsz > MALLOC_MAXCHUNK && oldsz > MALLOC_MAXCHUNK && p == r->p &&
	    !mopts.malloc_realloc) {
		size_t roldsz = PAGEROUND(goldsz);
		size_t rnewsz = PAGEROUND(gnewsz);

		if (rnewsz > roldsz) {
			if (!mopts.malloc_guard) {
				STATS_INC(g_pool->cheap_realloc_tries);
				zapcacheregion(g_pool, (char *)p + roldsz);
				q = MMAPA((char *)p + roldsz, rnewsz - roldsz);
				if (q == (char *)p + roldsz) {
					malloc_used += rnewsz - roldsz;
					if (mopts.malloc_junk)
						memset(q, SOME_JUNK,
						    rnewsz - roldsz);
					r->size = newsz;
					STATS_INC(g_pool->cheap_reallocs);
					return p;
				} else if (q != MAP_FAILED)
					munmap(q, rnewsz - roldsz);
			}
		} else if (rnewsz < roldsz) {
			if (mopts.malloc_guard) {
				if (mprotect((char *)p + roldsz -
				    mopts.malloc_guard, mopts.malloc_guard,
				    PROT_READ | PROT_WRITE))
					wrterror("mprotect");
				if (mprotect((char *)p + rnewsz -
				    mopts.malloc_guard, mopts.malloc_guard,
				    PROT_NONE))
					wrterror("mprotect");
			}
			unmap(g_pool, (char *)p + rnewsz, roldsz - rnewsz);
			r->size = gnewsz;
			return p;
		} else {
			if (newsz > oldsz && mopts.malloc_junk)
				memset((char *)p + newsz, SOME_JUNK,
				    rnewsz - mopts.malloc_guard - newsz);
			r->size = gnewsz;
			return p;
		}
	}
	if (newsz <= oldsz && newsz > oldsz / 2 && !mopts.malloc_realloc) {
		if (mopts.malloc_junk && newsz > 0)
			memset((char *)p + newsz, SOME_JUNK, oldsz - newsz);
		return p;
	} else if (newsz != oldsz || mopts.malloc_realloc) {
		q = omalloc(newsz, 0);
		if (q == NULL)
			return NULL;
		if (newsz != 0 && oldsz != 0)
			memcpy(q, p, oldsz < newsz ? oldsz : newsz);
		ofree(p);
		return q;
	} else
		return p;
}

void *
realloc(void *ptr, size_t size)
{
	void *r;
	int saved_errno = errno;

	_MALLOC_LOCK();
	malloc_func = " in realloc():";
	if (g_pool == NULL) {
		if (malloc_init() != 0)
			return NULL;
	}
	if (malloc_active++) {
		malloc_recurse();
		return NULL;
	}
	r = orealloc(ptr, size);

	malloc_active--;
	_MALLOC_UNLOCK();
	if (r == NULL && mopts.malloc_xmalloc) {
		wrterror("out of memory");
		errno = ENOMEM;
	}
	if (r != NULL)
		errno = saved_errno;
	return r;
}

#define MUL_NO_OVERFLOW	(1UL << (sizeof(size_t) * 4))

void *
calloc(size_t nmemb, size_t size)
{
	void *r;
	int saved_errno = errno;

	_MALLOC_LOCK();
	malloc_func = " in calloc():";
	if (g_pool == NULL) {
		if (malloc_init() != 0)
			return NULL;
	}
	if ((nmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
	    nmemb > 0 && SIZE_MAX / nmemb < size) {
		_MALLOC_UNLOCK();
		if (mopts.malloc_xmalloc)
			wrterror("out of memory");
		errno = ENOMEM;
		return NULL;
	}

	if (malloc_active++) {
		malloc_recurse();
		return NULL;
	}

	size *= nmemb;
	r = omalloc(size, 1);

	malloc_active--;
	_MALLOC_UNLOCK();
	if (r == NULL && mopts.malloc_xmalloc) {
		wrterror("out of memory");
		errno = ENOMEM;
	}
	if (r != NULL)
		errno = saved_errno;
	return r;
}
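
/*
 * Overflow-check example (illustrative, 64-bit size_t): MUL_NO_OVERFLOW
 * is 1 << 32, so when both nmemb and size are below it the product
 * cannot overflow and the division is skipped. calloc((size_t)1 << 32,
 * (size_t)1 << 32) fails the check, since SIZE_MAX / nmemb ==
 * 2^32 - 1 < size, while calloc((size_t)1 << 33, 8) passes it because
 * the 2^36-byte product, though unallocatable, does not overflow.
 */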