/*	$OpenBSD: malloc.c,v 1.124 2010/01/13 12:40:11 otto Exp $	*/
/*
 * Copyright (c) 2008 Otto Moerbeek <otto@drijf.net>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * Parts of this code, mainly the sub-page-sized chunk management code,
 * are derived from the malloc implementation with the following license:
 */
/*
 * ----------------------------------------------------------------------------
 * "THE BEER-WARE LICENSE" (Revision 42):
 * <phk@FreeBSD.ORG> wrote this file.  As long as you retain this notice you
 * can do whatever you want with this stuff. If we meet some day, and you think
 * this stuff is worth it, you can buy me a beer in return.  Poul-Henning Kamp
 * ----------------------------------------------------------------------------
 */

/* #define MALLOC_STATS */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/queue.h>
#include <sys/mman.h>
#include <sys/uio.h>
#include <errno.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <unistd.h>

#ifdef MALLOC_STATS
#include <fcntl.h>
#endif

#include "thread_private.h"

#define MALLOC_MINSHIFT		4
#define MALLOC_MAXSHIFT		16

#if defined(__sparc__) && !defined(__sparcv9__)
#define MALLOC_PAGESHIFT	(13U)
#elif defined(__mips64__)
#define MALLOC_PAGESHIFT	(14U)
#else
#define MALLOC_PAGESHIFT	(PGSHIFT)
#endif

#define MALLOC_PAGESIZE		(1UL << MALLOC_PAGESHIFT)
#define MALLOC_MINSIZE		(1UL << MALLOC_MINSHIFT)
#define MALLOC_PAGEMASK		(MALLOC_PAGESIZE - 1)
#define MASK_POINTER(p)		((void *)(((uintptr_t)(p)) & ~MALLOC_PAGEMASK))

#define MALLOC_MAXCHUNK		(1 << (MALLOC_PAGESHIFT-1))
#define MALLOC_MAXCACHE		256
#define MALLOC_DELAYED_CHUNKS	15	/* max of getrnibble() */
/*
 * When the P option is active, we move allocations between half a page
 * and a whole page towards the end, subject to alignment constraints.
 * This is the extra headroom we allow. Set to zero to be the most
 * strict.
 */
#define MALLOC_LEEWAY		0

#define PAGEROUND(x)  (((x) + (MALLOC_PAGEMASK)) & ~MALLOC_PAGEMASK)
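/*
 * Example: with MALLOC_PAGESHIFT == 12 (4K pages), MALLOC_PAGEMASK is
 * 0xfff, so PAGEROUND(1) == 0x1000 and PAGEROUND(4097) == 0x2000, while
 * PAGEROUND(4096) stays 0x1000; adding the mask and clearing the low
 * bits rounds up to the next page boundary.
 */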

/*
 * What to use for Junk.  This is the byte value we use to fill with
 * when the 'J' option is enabled. Use SOME_JUNK right after alloc,
 * and SOME_FREEJUNK right before free.
 */
#define SOME_JUNK		0xd0	/* as in "Duh" :-) */
#define SOME_FREEJUNK		0xdf

#define MMAP(sz)	mmap(NULL, (size_t)(sz), PROT_READ | PROT_WRITE, \
    MAP_ANON | MAP_PRIVATE, -1, (off_t) 0)

#define MMAPA(a,sz)	mmap((a), (size_t)(sz), PROT_READ | PROT_WRITE, \
    MAP_ANON | MAP_PRIVATE, -1, (off_t) 0)

struct region_info {
	void *p;		/* page; low bits used to mark chunks */
	uintptr_t size;		/* size for pages, or chunk_info pointer */
};

LIST_HEAD(chunk_head, chunk_info);

struct dir_info {
	u_int32_t canary1;
	struct region_info *r;		/* region slots */
	size_t regions_total;		/* number of region slots */
	size_t regions_bits;		/* log2 of total */
	size_t regions_free;		/* number of free slots */
					/* list of free chunk info structs */
	struct chunk_head chunk_info_list;
					/* lists of chunks with free slots */
	struct chunk_head chunk_dir[MALLOC_MAXSHIFT];
	size_t free_regions_size;	/* free pages cached */
					/* free pages cache */
	struct region_info free_regions[MALLOC_MAXCACHE];
					/* delayed free chunk slots */
	void *delayed_chunks[MALLOC_DELAYED_CHUNKS + 1];
#ifdef MALLOC_STATS
	size_t inserts;
	size_t insert_collisions;
	size_t finds;
	size_t find_collisions;
	size_t deletes;
	size_t delete_moves;
	size_t cheap_realloc_tries;
	size_t cheap_reallocs;
#define STATS_INC(x) ((x)++)
#define STATS_ZERO(x) ((x) = 0)
#else
#define STATS_INC(x)	/* nothing */
#define STATS_ZERO(x)	/* nothing */
#endif /* MALLOC_STATS */
	u_int32_t canary2;
};
#define DIR_INFO_RSZ	((sizeof(struct dir_info) + MALLOC_PAGEMASK) & \
			~MALLOC_PAGEMASK)

/*
 * This structure describes a page worth of chunks.
 *
 * How many bits per u_long in the bitmap
 */
#define MALLOC_BITS		(NBBY * sizeof(u_long))
struct chunk_info {
	LIST_ENTRY(chunk_info) entries;
	void *page;			/* pointer to the page */
	u_int32_t canary;
	u_short size;			/* size of this page's chunks */
	u_short shift;			/* how far to shift for this size */
	u_short free;			/* how many free chunks */
	u_short total;			/* how many chunks in total */
					/* which chunks are free */
	u_long bits[(MALLOC_PAGESIZE / MALLOC_MINSIZE) / MALLOC_BITS];
};
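
/*
 * Sizing example: with 4K pages and 16-byte minimum chunks a page holds
 * at most 4096 / 16 = 256 chunks, so with a 64-bit u_long the bitmap
 * above needs 256 / 64 = 4 words.
 */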

struct malloc_readonly {
	struct dir_info *g_pool;	/* Main bookkeeping information */
	int	malloc_abort;		/* abort() on error */
	int	malloc_freeprot;	/* mprotect free pages PROT_NONE? */
	int	malloc_hint;		/* call madvise on free pages? */
	int	malloc_junk;		/* junk fill? */
	int	malloc_move;		/* move allocations to end of page? */
	int	malloc_realloc;		/* always realloc? */
	int	malloc_xmalloc;		/* xmalloc behaviour? */
	int	malloc_zero;		/* zero fill? */
	size_t	malloc_guard;		/* use guard pages after allocations? */
	u_int	malloc_cache;		/* free pages we cache */
#ifdef MALLOC_STATS
	int	malloc_stats;		/* dump statistics at end */
#endif
	u_int32_t malloc_canary;	/* Matched against ones in g_pool */
};

/* This object is mapped PROT_READ after initialisation to prevent tampering */
static union {
	struct malloc_readonly mopts;
	u_char _pad[MALLOC_PAGESIZE];
} malloc_readonly __attribute__((aligned(MALLOC_PAGESIZE)));
#define mopts	malloc_readonly.mopts
#define g_pool	mopts.g_pool
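
/*
 * The union is padded and aligned to a full page so that, once options
 * are parsed, omalloc_init() can mprotect() exactly this page PROT_READ;
 * a later write to mopts then faults instead of silently changing
 * allocator behaviour.
 */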

char		*malloc_options;	/* compile-time options */

static char	*malloc_func;		/* current function */
static int	malloc_active;		/* status of malloc */

static size_t	malloc_guarded;		/* bytes used for guards */
static size_t	malloc_used;		/* bytes allocated */

static size_t rnibblesused;		/* random nibbles used */
static u_char rbytes[512];		/* random bytes */
static u_char getrnibble(void);

extern char	*__progname;

/*
 * The low bits of r->p encode the size class: 0 means a region of at
 * least a page, with r->size holding the real size; 1 means a malloc(0)
 * chunk; any other value is a chunk shift count plus one, and r->size
 * then holds the chunk_info pointer.
 */
#define REALSIZE(sz, r) 					\
	(sz) = (uintptr_t)(r)->p & MALLOC_PAGEMASK,		\
	(sz) = ((sz) == 0 ? (r)->size : ((sz) == 1 ? 0 : (1 << ((sz)-1))))
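
/*
 * Decoding examples (assuming 4K pages): an entry inserted as
 * (pp | 5, bp) describes 1 << 4 == 16-byte chunks on page pp;
 * (pp | 1, bp) describes malloc(0) chunks and REALSIZE yields 0; an
 * entry (p, 8192) with no low bits set is a two-page region and
 * REALSIZE yields 8192.
 */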

static inline size_t
hash(void *p)
{
	size_t sum;
	union {
		uintptr_t p;
		unsigned short a[sizeof(void *) / sizeof(short)];
	} u;
	u.p = (uintptr_t)p >> MALLOC_PAGESHIFT;
	sum = u.a[0];
	sum = (sum << 7) - sum + u.a[1];
#ifdef __LP64__
	sum = (sum << 7) - sum + u.a[2];
	sum = (sum << 7) - sum + u.a[3];
#endif
	return sum;
}
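
/*
 * (sum << 7) - sum is sum * 127, so this is a polynomial hash over the
 * 16-bit halves of the page number, radix 127: on LP64,
 * sum == ((a[0] * 127 + a[1]) * 127 + a[2]) * 127 + a[3].
 */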

#ifdef MALLOC_STATS
static void
dump_chunk(int fd, struct chunk_info *p, int fromfreelist)
{
	char buf[64];

	while (p != NULL) {
		snprintf(buf, sizeof(buf), "chunk %d %d/%d %p\n", p->size,
		    p->free, p->total, p->page);
		write(fd, buf, strlen(buf));
		if (!fromfreelist)
			break;
		p = LIST_NEXT(p, entries);
		if (p != NULL) {
			snprintf(buf, sizeof(buf), "    ");
			write(fd, buf, strlen(buf));
		}
	}
}

static void
dump_free_chunk_info(int fd, struct dir_info *d)
{
	char buf[64];
	int i;

	snprintf(buf, sizeof(buf), "Free chunk structs:\n");
	write(fd, buf, strlen(buf));
	for (i = 0; i < MALLOC_MAXSHIFT; i++) {
		struct chunk_info *p = LIST_FIRST(&d->chunk_dir[i]);
		if (p != NULL) {
			snprintf(buf, sizeof(buf), "%2d) ", i);
			write(fd, buf, strlen(buf));
			dump_chunk(fd, p, 1);
		}
	}
}

static void
dump_free_page_info(int fd, struct dir_info *d)
{
	char buf[64];
	int i;

	snprintf(buf, sizeof(buf), "Free pages cached: %zu\n",
	    d->free_regions_size);
	write(fd, buf, strlen(buf));
	for (i = 0; i < mopts.malloc_cache; i++) {
		if (d->free_regions[i].p != NULL) {
			snprintf(buf, sizeof(buf), "%2d) ", i);
			write(fd, buf, strlen(buf));
			snprintf(buf, sizeof(buf), "free at %p: %zu\n",
			    d->free_regions[i].p, d->free_regions[i].size);
			write(fd, buf, strlen(buf));
		}
	}
}

static void
malloc_dump1(int fd, struct dir_info *d)
{
	char buf[64];
	size_t i, realsize;

	snprintf(buf, sizeof(buf), "Malloc dir of %s at %p\n", __progname, d);
	write(fd, buf, strlen(buf));
	if (d == NULL)
		return;
	snprintf(buf, sizeof(buf), "Regions slots %zu\n", d->regions_total);
	write(fd, buf, strlen(buf));
	snprintf(buf, sizeof(buf), "Finds %zu/%zu %f\n", d->finds,
	    d->find_collisions,
	    1.0 + (double)d->find_collisions / d->finds);
	write(fd, buf, strlen(buf));
	snprintf(buf, sizeof(buf), "Inserts %zu/%zu %f\n", d->inserts,
	    d->insert_collisions,
	    1.0 + (double)d->insert_collisions / d->inserts);
	write(fd, buf, strlen(buf));
	snprintf(buf, sizeof(buf), "Deletes %zu/%zu\n", d->deletes,
	    d->delete_moves);
	write(fd, buf, strlen(buf));
	snprintf(buf, sizeof(buf), "Cheap reallocs %zu/%zu\n",
	    d->cheap_reallocs, d->cheap_realloc_tries);
	write(fd, buf, strlen(buf));
	snprintf(buf, sizeof(buf), "Regions slots free %zu\n", d->regions_free);
	write(fd, buf, strlen(buf));
	for (i = 0; i < d->regions_total; i++) {
		if (d->r[i].p != NULL) {
			size_t h = hash(d->r[i].p) &
			    (d->regions_total - 1);
			snprintf(buf, sizeof(buf), "%4zx) #%zx %zd ",
			    i, h, h - i);
			write(fd, buf, strlen(buf));
			REALSIZE(realsize, &d->r[i]);
			if (realsize > MALLOC_MAXCHUNK) {
				snprintf(buf, sizeof(buf),
				    "%p: %zu\n", d->r[i].p, realsize);
				write(fd, buf, strlen(buf));
			} else
				dump_chunk(fd,
				    (struct chunk_info *)d->r[i].size, 0);
		}
	}
	dump_free_chunk_info(fd, d);
	dump_free_page_info(fd, d);
	snprintf(buf, sizeof(buf), "In use %zu\n", malloc_used);
	write(fd, buf, strlen(buf));
	snprintf(buf, sizeof(buf), "Guarded %zu\n", malloc_guarded);
	write(fd, buf, strlen(buf));
}

void
malloc_dump(int fd)
{
	malloc_dump1(fd, g_pool);
}

static void
malloc_exit(void)
{
	static const char q[] = "malloc() warning: Couldn't dump stats\n";
	int save_errno = errno, fd;

	fd = open("malloc.out", O_RDWR|O_APPEND);
	if (fd != -1) {
		malloc_dump(fd);
		close(fd);
	} else
		write(STDERR_FILENO, q, sizeof(q) - 1);
	errno = save_errno;
}
#endif /* MALLOC_STATS */

static void
wrterror(char *p)
{
	char		*q = " error: ";
	struct iovec	iov[5];

	iov[0].iov_base = __progname;
	iov[0].iov_len = strlen(__progname);
	iov[1].iov_base = malloc_func;
	iov[1].iov_len = strlen(malloc_func);
	iov[2].iov_base = q;
	iov[2].iov_len = strlen(q);
	iov[3].iov_base = p;
	iov[3].iov_len = strlen(p);
	iov[4].iov_base = "\n";
	iov[4].iov_len = 1;
	writev(STDERR_FILENO, iov, 5);

#ifdef MALLOC_STATS
	if (mopts.malloc_stats)
		malloc_dump(STDERR_FILENO);
#endif /* MALLOC_STATS */
	//malloc_active--;
	if (mopts.malloc_abort)
		abort();
}

static void
rbytes_init(void)
{
	arc4random_buf(rbytes, sizeof(rbytes));
	rnibblesused = 0;
}

static inline u_char
getrnibble(void)
{
	u_char x;

	if (rnibblesused >= 2 * sizeof(rbytes))
		rbytes_init();
	x = rbytes[rnibblesused++ / 2];
	return (rnibblesused & 1 ? x & 0xf : x >> 4);
}
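
/*
 * Each of the 512 random bytes yields two 4-bit nibbles (values 0-15),
 * so the pool provides 1024 nibbles before rbytes_init() refills it
 * from arc4random_buf().
 */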
400 
401 /*
402  * Cache maintenance. We keep at most malloc_cache pages cached.
403  * If the cache is becoming full, unmap pages in the cache for real,
404  * and then add the region to the cache
405  * Opposed to the regular region data structure, the sizes in the
406  * cache are in MALLOC_PAGESIZE units.
407  */
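/*
 * For example, freeing a 3-page region when only 2 slots' worth of room
 * remains first munmap()s at least one cached region, starting the scan
 * at a random slot, then parks the new region in a free slot,
 * madvise(MADV_FREE)d if the H option is set and mprotect()ed PROT_NONE
 * if F is set.
 */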
static void
unmap(struct dir_info *d, void *p, size_t sz)
{
	size_t psz = sz >> MALLOC_PAGESHIFT;
	size_t rsz, tounmap;
	struct region_info *r;
	u_int i, offset;

	if (sz != PAGEROUND(sz)) {
		wrterror("munmap round");
		return;
	}

	if (psz > mopts.malloc_cache) {
		if (munmap(p, sz))
			wrterror("munmap");
		malloc_used -= sz;
		return;
	}
	tounmap = 0;
	rsz = mopts.malloc_cache - d->free_regions_size;
	if (psz > rsz)
		tounmap = psz - rsz;
	offset = getrnibble();
	for (i = 0; tounmap > 0 && i < mopts.malloc_cache; i++) {
		r = &d->free_regions[(i + offset) & (mopts.malloc_cache - 1)];
		if (r->p != NULL) {
			rsz = r->size << MALLOC_PAGESHIFT;
			if (munmap(r->p, rsz))
				wrterror("munmap");
			r->p = NULL;
			if (tounmap > r->size)
				tounmap -= r->size;
			else
				tounmap = 0;
			d->free_regions_size -= r->size;
			r->size = 0;
			malloc_used -= rsz;
		}
	}
	if (tounmap > 0)
		wrterror("malloc cache underflow");
	for (i = 0; i < mopts.malloc_cache; i++) {
		r = &d->free_regions[i];
		if (r->p == NULL) {
			if (mopts.malloc_hint)
				madvise(p, sz, MADV_FREE);
			if (mopts.malloc_freeprot)
				mprotect(p, sz, PROT_NONE);
			r->p = p;
			r->size = psz;
			d->free_regions_size += psz;
			break;
		}
	}
	if (i == mopts.malloc_cache)
		wrterror("malloc free slot lost");
	if (d->free_regions_size > mopts.malloc_cache)
		wrterror("malloc cache overflow");
}

static void
zapcacheregion(struct dir_info *d, void *p)
{
	u_int i;
	struct region_info *r;
	size_t rsz;

	for (i = 0; i < mopts.malloc_cache; i++) {
		r = &d->free_regions[i];
		if (r->p == p) {
			rsz = r->size << MALLOC_PAGESHIFT;
			if (munmap(r->p, rsz))
				wrterror("munmap");
			r->p = NULL;
			d->free_regions_size -= r->size;
			r->size = 0;
			malloc_used -= rsz;
		}
	}
}

static void *
map(struct dir_info *d, size_t sz, int zero_fill)
{
	size_t psz = sz >> MALLOC_PAGESHIFT;
	struct region_info *r, *big = NULL;
	u_int i, offset;
	void *p;

	if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)(uintptr_t)d) ||
	    d->canary1 != ~d->canary2)
		wrterror("internal struct corrupt");
	if (sz != PAGEROUND(sz)) {
		wrterror("map round");
		return NULL;
	}
	if (psz > d->free_regions_size) {
		p = MMAP(sz);
		if (p != MAP_FAILED)
			malloc_used += sz;
		/* zero fill not needed */
		return p;
	}
	offset = getrnibble();
	for (i = 0; i < mopts.malloc_cache; i++) {
		r = &d->free_regions[(i + offset) & (mopts.malloc_cache - 1)];
		if (r->p != NULL) {
			if (r->size == psz) {
				p = r->p;
				if (mopts.malloc_freeprot)
					mprotect(p, sz, PROT_READ | PROT_WRITE);
				if (mopts.malloc_hint)
					madvise(p, sz, MADV_NORMAL);
				r->p = NULL;
				r->size = 0;
				d->free_regions_size -= psz;
				if (zero_fill)
					memset(p, 0, sz);
				else if (mopts.malloc_junk &&
				    mopts.malloc_freeprot)
					memset(p, SOME_FREEJUNK, sz);
				return p;
			} else if (r->size > psz)
				big = r;
		}
	}
	if (big != NULL) {
		r = big;
		p = (char *)r->p + ((r->size - psz) << MALLOC_PAGESHIFT);
		if (mopts.malloc_freeprot)
			mprotect(p, sz, PROT_READ | PROT_WRITE);
		if (mopts.malloc_hint)
			madvise(p, sz, MADV_NORMAL);
		r->size -= psz;
		d->free_regions_size -= psz;
		if (zero_fill)
			memset(p, 0, sz);
		else if (mopts.malloc_junk && mopts.malloc_freeprot)
			memset(p, SOME_FREEJUNK, sz);
		return p;
	}
	p = MMAP(sz);
	if (p != MAP_FAILED)
		malloc_used += sz;
	if (d->free_regions_size > mopts.malloc_cache)
		wrterror("malloc cache");
	/* zero fill not needed */
	return p;
}

/*
 * Initialize a dir_info, which should have been cleared by caller
 */
static int
omalloc_init(struct dir_info **dp)
{
	char *p, b[64];
	int i, j;
	size_t d_avail, regioninfo_size;
	struct dir_info *d;

	rbytes_init();

	/*
	 * Default options
	 */
	mopts.malloc_abort = 1;
	mopts.malloc_move = 1;
	mopts.malloc_cache = 64;

	for (i = 0; i < 3; i++) {
		switch (i) {
		case 0:
			j = readlink("/etc/malloc.conf", b, sizeof b - 1);
			if (j <= 0)
				continue;
			b[j] = '\0';
			p = b;
			break;
		case 1:
			if (issetugid() == 0)
				p = getenv("MALLOC_OPTIONS");
			else
				continue;
			break;
		case 2:
			p = malloc_options;
			break;
		default:
			p = NULL;
		}

		for (; p != NULL && *p != '\0'; p++) {
			switch (*p) {
			case '>':
				mopts.malloc_cache <<= 1;
				if (mopts.malloc_cache > MALLOC_MAXCACHE)
					mopts.malloc_cache = MALLOC_MAXCACHE;
				break;
			case '<':
				mopts.malloc_cache >>= 1;
				break;
			case 'a':
				mopts.malloc_abort = 0;
				break;
			case 'A':
				mopts.malloc_abort = 1;
				break;
#ifdef MALLOC_STATS
			case 'd':
				mopts.malloc_stats = 0;
				break;
			case 'D':
				mopts.malloc_stats = 1;
				break;
#endif /* MALLOC_STATS */
			case 'f':
				mopts.malloc_freeprot = 0;
				break;
			case 'F':
				mopts.malloc_freeprot = 1;
				break;
			case 'g':
				mopts.malloc_guard = 0;
				break;
			case 'G':
				mopts.malloc_guard = MALLOC_PAGESIZE;
				break;
			case 'h':
				mopts.malloc_hint = 0;
				break;
			case 'H':
				mopts.malloc_hint = 1;
				break;
			case 'j':
				mopts.malloc_junk = 0;
				break;
			case 'J':
				mopts.malloc_junk = 1;
				break;
			case 'n':
			case 'N':
				break;
			case 'p':
				mopts.malloc_move = 0;
				break;
			case 'P':
				mopts.malloc_move = 1;
				break;
			case 'r':
				mopts.malloc_realloc = 0;
				break;
			case 'R':
				mopts.malloc_realloc = 1;
				break;
			case 'S':
				mopts.malloc_freeprot = mopts.malloc_junk = 1;
				mopts.malloc_guard = MALLOC_PAGESIZE;
				break;
			case 'x':
				mopts.malloc_xmalloc = 0;
				break;
			case 'X':
				mopts.malloc_xmalloc = 1;
				break;
			case 'z':
				mopts.malloc_zero = 0;
				break;
			case 'Z':
				mopts.malloc_zero = 1;
				break;
			default: {
				static const char q[] = "malloc() warning: "
				    "unknown char in MALLOC_OPTIONS\n";
				write(STDERR_FILENO, q, sizeof(q) - 1);
				break;
			}
			}
		}
	}
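
	/*
	 * Options come from three places, parsed in increasing priority:
	 * the target of the /etc/malloc.conf symlink, the MALLOC_OPTIONS
	 * environment variable (ignored for set-id programs), and the
	 * global malloc_options.  For example, "ln -s J /etc/malloc.conf"
	 * turns on junk filling system-wide.
	 */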

	/*
	 * We want junk in the entire allocation, and zero only in the part
	 * the user asked for.
	 */
	if (mopts.malloc_zero)
		mopts.malloc_junk = 1;

#ifdef MALLOC_STATS
	if (mopts.malloc_stats && (atexit(malloc_exit) == -1)) {
		static const char q[] = "malloc() warning: atexit(2) failed."
		    " Will not be able to dump stats on exit\n";
		write(STDERR_FILENO, q, sizeof(q) - 1);
	}
#endif /* MALLOC_STATS */

	while ((mopts.malloc_canary = arc4random()) == 0)
		;

	/*
	 * Allocate dir_info with a guard page on either side. Also
	 * randomise the offset inside the page at which the dir_info
	 * lies (subject to alignment by 1 << MALLOC_MINSHIFT)
	 */
	if ((p = MMAP(DIR_INFO_RSZ + (MALLOC_PAGESIZE * 2))) == MAP_FAILED)
		return -1;
	mprotect(p, MALLOC_PAGESIZE, PROT_NONE);
	mprotect(p + MALLOC_PAGESIZE + DIR_INFO_RSZ,
	    MALLOC_PAGESIZE, PROT_NONE);
	d_avail = (DIR_INFO_RSZ - sizeof(*d)) >> MALLOC_MINSHIFT;
	d = (struct dir_info *)(p + MALLOC_PAGESIZE +
	    (arc4random_uniform(d_avail) << MALLOC_MINSHIFT));

	d->regions_bits = 9;
	d->regions_free = d->regions_total = 1 << d->regions_bits;
	regioninfo_size = d->regions_total * sizeof(struct region_info);
	d->r = MMAP(regioninfo_size);
	if (d->r == MAP_FAILED) {
		wrterror("malloc init mmap failed");
		d->regions_total = 0;
		return 1;
	}
	LIST_INIT(&d->chunk_info_list);
	for (i = 0; i < MALLOC_MAXSHIFT; i++)
		LIST_INIT(&d->chunk_dir[i]);
	malloc_used += regioninfo_size;
	d->canary1 = mopts.malloc_canary ^ (u_int32_t)(uintptr_t)d;
	d->canary2 = ~d->canary1;

	*dp = d;

	/*
	 * Options have been set and will never be reset.
	 * Prevent further tampering with them.
	 */
	if (((uintptr_t)&malloc_readonly & MALLOC_PAGEMASK) == 0)
		mprotect(&malloc_readonly, sizeof(malloc_readonly), PROT_READ);

	return 0;
}

static int
omalloc_grow(struct dir_info *d)
{
	size_t newbits;
	size_t newtotal;
	size_t newsize;
	size_t mask;
	size_t i;
	struct region_info *p;

	if (d->regions_total > SIZE_MAX / sizeof(struct region_info) / 2)
		return 1;

	newbits = d->regions_bits + 1;
	newtotal = d->regions_total * 2;
	newsize = newtotal * sizeof(struct region_info);
	mask = newtotal - 1;

	p = MMAP(newsize);
	if (p == MAP_FAILED)
		return 1;

	malloc_used += newsize;
	memset(p, 0, newsize);
	STATS_ZERO(d->inserts);
	STATS_ZERO(d->insert_collisions);
	for (i = 0; i < d->regions_total; i++) {
		void *q = d->r[i].p;
		if (q != NULL) {
			size_t index = hash(q) & mask;
			STATS_INC(d->inserts);
			while (p[index].p != NULL) {
				index = (index - 1) & mask;
				STATS_INC(d->insert_collisions);
			}
			p[index] = d->r[i];
		}
	}
	/* keep pages containing meta info from ending up in the cache */
	if (munmap(d->r, d->regions_total * sizeof(struct region_info)))
		wrterror("munmap");
	else
		malloc_used -= d->regions_total * sizeof(struct region_info);
	d->regions_free = d->regions_free + d->regions_total;
	d->regions_total = newtotal;
	d->regions_bits = newbits;
	d->r = p;
	return 0;
}

static struct chunk_info *
alloc_chunk_info(struct dir_info *d)
{
	struct chunk_info *p;
	int i;

	if (LIST_EMPTY(&d->chunk_info_list)) {
		p = MMAP(MALLOC_PAGESIZE);
		if (p == MAP_FAILED)
			return NULL;
		malloc_used += MALLOC_PAGESIZE;
		for (i = 0; i < MALLOC_PAGESIZE / sizeof(*p); i++)
			LIST_INSERT_HEAD(&d->chunk_info_list, &p[i], entries);
	}
	p = LIST_FIRST(&d->chunk_info_list);
	LIST_REMOVE(p, entries);
	memset(p, 0, sizeof *p);
	p->canary = d->canary1;
	return p;
}

static int
insert(struct dir_info *d, void *p, size_t sz)
{
	size_t index;
	size_t mask;
	void *q;

	if (d->regions_free * 4 < d->regions_total) {
		if (omalloc_grow(d))
			return 1;
	}
	mask = d->regions_total - 1;
	index = hash(p) & mask;
	q = d->r[index].p;
	STATS_INC(d->inserts);
	while (q != NULL) {
		index = (index - 1) & mask;
		q = d->r[index].p;
		STATS_INC(d->insert_collisions);
	}
	d->r[index].p = p;
	d->r[index].size = sz;
	d->regions_free--;
	return 0;
}

static struct region_info *
find(struct dir_info *d, void *p)
{
	size_t index;
	size_t mask = d->regions_total - 1;
	void *q, *r;

	if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)(uintptr_t)d) ||
	    d->canary1 != ~d->canary2)
		wrterror("internal struct corrupt");
	p = MASK_POINTER(p);
	index = hash(p) & mask;
	r = d->r[index].p;
	q = MASK_POINTER(r);
	STATS_INC(d->finds);
	while (q != p && r != NULL) {
		index = (index - 1) & mask;
		r = d->r[index].p;
		q = MASK_POINTER(r);
		STATS_INC(d->find_collisions);
	}
	return q == p ? &d->r[index] : NULL;
}
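
/*
 * Note that insert() and find() both probe downwards, (index - 1) & mask,
 * so a lookup retraces exactly the slot sequence an insert walked; the
 * pointer is masked to its page first, which is why chunk keys can carry
 * their size class in the low bits.
 */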

static void
delete(struct dir_info *d, struct region_info *ri)
{
	/* algorithm R, Knuth Vol III section 6.4 */
	size_t mask = d->regions_total - 1;
	size_t i, j, r;

	if (d->regions_total & (d->regions_total - 1))
		wrterror("regions_total not 2^x");
	d->regions_free++;
	STATS_INC(g_pool->deletes);

	i = ri - d->r;
	for (;;) {
		d->r[i].p = NULL;
		d->r[i].size = 0;
		j = i;
		for (;;) {
			i = (i - 1) & mask;
			if (d->r[i].p == NULL)
				return;
			r = hash(d->r[i].p) & mask;
			if ((i <= r && r < j) || (r < j && j < i) ||
			    (j < i && i <= r))
				continue;
			d->r[j] = d->r[i];
			STATS_INC(g_pool->delete_moves);
			break;
		}
	}
}
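
/*
 * Deletion example: with downward probing, an entry whose home slot is 5
 * may have been pushed to slot 4 by a collision.  If slot 5 is deleted,
 * the scan above finds the entry at 4, sees that its home slot (5) lies
 * in the now-empty gap, moves it up to 5 and continues from the new
 * hole, so a later find() never stops early at an artificial empty slot.
 */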

/*
 * Allocate a page of chunks
 */
static struct chunk_info *
omalloc_make_chunks(struct dir_info *d, int bits)
{
	struct chunk_info *bp;
	void		*pp;
	long		i, k;

	/* Allocate a new bucket */
	pp = map(d, MALLOC_PAGESIZE, 0);
	if (pp == MAP_FAILED)
		return NULL;

	bp = alloc_chunk_info(d);
	if (bp == NULL) {
		unmap(d, pp, MALLOC_PAGESIZE);
		return NULL;
	}

	/* memory protect the page allocated in the malloc(0) case */
	if (bits == 0) {
		bp->size = 0;
		bp->shift = 1;
		i = MALLOC_MINSIZE - 1;
		while (i >>= 1)
			bp->shift++;
		bp->total = bp->free = MALLOC_PAGESIZE >> bp->shift;
		bp->page = pp;

		k = mprotect(pp, MALLOC_PAGESIZE, PROT_NONE);
		if (k < 0) {
			unmap(d, pp, MALLOC_PAGESIZE);
			LIST_INSERT_HEAD(&d->chunk_info_list, bp, entries);
			return NULL;
		}
	} else {
		bp->size = (1UL << bits);
		bp->shift = bits;
		bp->total = bp->free = MALLOC_PAGESIZE >> bits;
		bp->page = pp;
	}

	/* set all valid bits in the bitmap */
	k = bp->total;
	i = 0;

	/* Do a bunch at a time */
	for (; (k - i) >= MALLOC_BITS; i += MALLOC_BITS)
		bp->bits[i / MALLOC_BITS] = ~0UL;

	for (; i < k; i++)
		bp->bits[i / MALLOC_BITS] |= 1UL << (i % MALLOC_BITS);

	LIST_INSERT_HEAD(&d->chunk_dir[bits], bp, entries);

	bits++;
	if ((uintptr_t)pp & bits)
		wrterror("pp & bits");

	insert(d, (void *)((uintptr_t)pp | bits), (uintptr_t)bp);
	return bp;
}

/*
 * Allocate a chunk
 */
static void *
malloc_bytes(struct dir_info *d, size_t size)
{
	int		i, j;
	size_t		k;
	u_long		u, *lp;
	struct chunk_info *bp;

	if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)(uintptr_t)d) ||
	    d->canary1 != ~d->canary2)
		wrterror("internal struct corrupt");
	/* Don't bother with anything less than this */
	/* unless we have a malloc(0) request */
	if (size != 0 && size < MALLOC_MINSIZE)
		size = MALLOC_MINSIZE;

	/* Find the right bucket */
	if (size == 0)
		j = 0;
	else {
		j = MALLOC_MINSHIFT;
		i = (size - 1) >> (MALLOC_MINSHIFT - 1);
		while (i >>= 1)
			j++;
	}
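
	/*
	 * Bucket math example: for size 100 the loop above computes j == 7,
	 * i.e. the bucket of 128-byte chunks (1 << 7); sizes 65-128 all
	 * land there, and MALLOC_MINSIZE keeps j >= MALLOC_MINSHIFT.
	 */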

	/* If it's empty, make a page more of chunks of that size */
	if (LIST_EMPTY(&d->chunk_dir[j])) {
		bp = omalloc_make_chunks(d, j);
		if (bp == NULL)
			return NULL;
	} else
		bp = LIST_FIRST(&d->chunk_dir[j]);

	if (bp->canary != d->canary1)
		wrterror("chunk info corrupted");
	/* Find first word of bitmap which isn't empty */
	for (lp = bp->bits; !*lp; lp++)
		/* EMPTY */;

	/* Find that bit, and tweak it */
	u = 1;
	k = 0;
	while (!(*lp & u)) {
		u += u;
		k++;
	}

	/* advance a random # of positions */
	i = getrnibble() % bp->free;
	while (i > 0) {
		u += u;
		k++;
		if (k >= MALLOC_BITS) {
			lp++;
			u = 1;
			k = 0;
		}
		if (lp - bp->bits > (bp->total - 1) / MALLOC_BITS) {
			wrterror("chunk overflow");
			errno = EFAULT;
			return (NULL);
		}
		if (*lp & u)
			i--;
	}

	*lp ^= u;

	/* If there are no more free, remove from free-list */
	if (!--bp->free)
		LIST_REMOVE(bp, entries);

	/* Adjust to the real offset of that chunk */
	k += (lp - bp->bits) * MALLOC_BITS;
	k <<= bp->shift;

	if (mopts.malloc_junk && bp->size > 0)
		memset((char *)bp->page + k, SOME_JUNK, bp->size);
	return ((char *)bp->page + k);
}

/*
 * Free a chunk, and possibly the page it's on, if the page becomes empty.
 */
static void
free_bytes(struct dir_info *d, struct region_info *r, void *ptr)
{
	struct chunk_head *mp;
	struct chunk_info *info;
	long i;

	info = (struct chunk_info *)r->size;
	if (info->canary != d->canary1)
		wrterror("chunk info corrupted");

	/* Find the chunk number on the page */
	i = ((uintptr_t)ptr & MALLOC_PAGEMASK) >> info->shift;

	if ((uintptr_t)ptr & ((1UL << (info->shift)) - 1)) {
		wrterror("modified chunk-pointer");
		return;
	}
	if (info->bits[i / MALLOC_BITS] & (1UL << (i % MALLOC_BITS))) {
		wrterror("chunk is already free");
		return;
	}

	info->bits[i / MALLOC_BITS] |= 1UL << (i % MALLOC_BITS);
	info->free++;

	if (info->size != 0)
		mp = d->chunk_dir + info->shift;
	else
		mp = d->chunk_dir;

	if (info->free == 1) {
		/* Page became non-full */
		LIST_INSERT_HEAD(mp, info, entries);
		return;
	}
	if (info->free != info->total)
		return;

	LIST_REMOVE(info, entries);

	if (info->size == 0 && !mopts.malloc_freeprot)
		mprotect(info->page, MALLOC_PAGESIZE, PROT_READ | PROT_WRITE);
	unmap(d, info->page, MALLOC_PAGESIZE);

	delete(d, r);
	LIST_INSERT_HEAD(&d->chunk_info_list, info, entries);
}

static void *
omalloc(size_t sz, int zero_fill)
{
	void *p;
	size_t psz;

	if (sz > MALLOC_MAXCHUNK) {
		if (sz >= SIZE_MAX - mopts.malloc_guard - MALLOC_PAGESIZE) {
			errno = ENOMEM;
			return NULL;
		}
		sz += mopts.malloc_guard;
		psz = PAGEROUND(sz);
		p = map(g_pool, psz, zero_fill);
		if (p == MAP_FAILED) {
			errno = ENOMEM;
			return NULL;
		}
		if (insert(g_pool, p, sz)) {
			unmap(g_pool, p, psz);
			errno = ENOMEM;
			return NULL;
		}
		if (mopts.malloc_guard) {
			if (mprotect((char *)p + psz - mopts.malloc_guard,
			    mopts.malloc_guard, PROT_NONE))
				wrterror("mprotect");
			malloc_guarded += mopts.malloc_guard;
		}

		if (mopts.malloc_move &&
		    sz - mopts.malloc_guard < MALLOC_PAGESIZE -
		    MALLOC_LEEWAY) {
			/* fill whole allocation */
			if (mopts.malloc_junk)
				memset(p, SOME_JUNK, psz - mopts.malloc_guard);
			/* shift towards the end */
			p = ((char *)p) + ((MALLOC_PAGESIZE - MALLOC_LEEWAY -
			    (sz - mopts.malloc_guard)) & ~(MALLOC_MINSIZE-1));
			/* fill with zeros if needed; junk was written above */
			if (zero_fill && mopts.malloc_junk)
				memset(p, 0, sz - mopts.malloc_guard);
		} else {
			if (mopts.malloc_junk) {
				if (zero_fill)
					memset(p + sz - mopts.malloc_guard,
					    SOME_JUNK, psz - sz);
				else
					memset(p, SOME_JUNK,
					    psz - mopts.malloc_guard);
			}
		}
	} else {
		/* takes care of SOME_JUNK */
		p = malloc_bytes(g_pool, sz);
		if (zero_fill && p != NULL && sz > 0)
			memset(p, 0, sz);
	}

	return p;
}

/*
 * Common function for handling recursion.  Only
 * print the error message once, to avoid making the problem
 * potentially worse.
 */
static void
malloc_recurse(void)
{
	static int noprint;

	if (noprint == 0) {
		noprint = 1;
		wrterror("recursive call");
	}
	malloc_active--;
	_MALLOC_UNLOCK();
	errno = EDEADLK;
}

static int
malloc_init(void)
{
	if (omalloc_init(&g_pool)) {
		_MALLOC_UNLOCK();
		if (mopts.malloc_xmalloc)
			wrterror("out of memory");
		errno = ENOMEM;
		return -1;
	}
	return 0;
}

void *
malloc(size_t size)
{
	void *r;
	int saved_errno = errno;

	_MALLOC_LOCK();
	malloc_func = " in malloc():";
	if (g_pool == NULL) {
		if (malloc_init() != 0)
			return NULL;
	}
	if (malloc_active++) {
		malloc_recurse();
		return NULL;
	}
	r = omalloc(size, mopts.malloc_zero);
	malloc_active--;
	_MALLOC_UNLOCK();
	if (r == NULL && mopts.malloc_xmalloc) {
		wrterror("out of memory");
		errno = ENOMEM;
	}
	if (r != NULL)
		errno = saved_errno;
	return r;
}

static void
ofree(void *p)
{
	struct region_info *r;
	size_t sz;

	r = find(g_pool, p);
	if (r == NULL) {
		wrterror("bogus pointer (double free?)");
		return;
	}
	REALSIZE(sz, r);
	if (sz > MALLOC_MAXCHUNK) {
		if (sz - mopts.malloc_guard >= MALLOC_PAGESIZE -
		    MALLOC_LEEWAY) {
			if (r->p != p) {
				wrterror("bogus pointer");
				return;
			}
		} else {
#if notyetbecause_of_realloc
			/* shifted towards the end */
			if (p != ((char *)r->p) + ((MALLOC_PAGESIZE -
			    MALLOC_MINSIZE - sz - mopts.malloc_guard) &
			    ~(MALLOC_MINSIZE-1))) {
			}
#endif
			p = r->p;
		}
		if (mopts.malloc_guard) {
			if (sz < mopts.malloc_guard)
				wrterror("guard size");
			if (!mopts.malloc_freeprot) {
				if (mprotect((char *)p + PAGEROUND(sz) -
				    mopts.malloc_guard, mopts.malloc_guard,
				    PROT_READ | PROT_WRITE))
					wrterror("mprotect");
			}
			malloc_guarded -= mopts.malloc_guard;
		}
		if (mopts.malloc_junk && !mopts.malloc_freeprot)
			memset(p, SOME_FREEJUNK,
			    PAGEROUND(sz) - mopts.malloc_guard);
		unmap(g_pool, p, PAGEROUND(sz));
		delete(g_pool, r);
	} else {
		void *tmp;
		int i;

		if (mopts.malloc_junk && sz > 0)
			memset(p, SOME_FREEJUNK, sz);
		if (!mopts.malloc_freeprot) {
			i = getrnibble();
			tmp = p;
			p = g_pool->delayed_chunks[i];
			g_pool->delayed_chunks[i] = tmp;
		}
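		/*
		 * The swap above parks the chunk in a random slot of the
		 * 16-entry delayed_chunks array and takes out whatever
		 * chunk that slot evicts, so the chunk actually freed
		 * below was typically released some calls ago; delaying
		 * reuse this way makes use-after-free bugs harder to
		 * exploit reliably.
		 */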
		if (p != NULL) {
			r = find(g_pool, p);
			if (r == NULL) {
				wrterror("bogus pointer (double free?)");
				return;
			}
			free_bytes(g_pool, r, p);
		}
	}
}

void
free(void *ptr)
{
	int saved_errno = errno;

	/* This is legal. */
	if (ptr == NULL)
		return;

	_MALLOC_LOCK();
	malloc_func = " in free():";
	if (g_pool == NULL) {
		_MALLOC_UNLOCK();
		wrterror("free() called before allocation");
		return;
	}
	if (malloc_active++) {
		malloc_recurse();
		return;
	}
	ofree(ptr);
	malloc_active--;
	_MALLOC_UNLOCK();
	errno = saved_errno;
}

static void *
orealloc(void *p, size_t newsz)
{
	struct region_info *r;
	size_t oldsz, goldsz, gnewsz;
	void *q;

	if (p == NULL)
		return omalloc(newsz, 0);

	r = find(g_pool, p);
	if (r == NULL) {
		wrterror("bogus pointer (double free?)");
		return NULL;
	}
	if (newsz >= SIZE_MAX - mopts.malloc_guard - MALLOC_PAGESIZE) {
		errno = ENOMEM;
		return NULL;
	}

	REALSIZE(oldsz, r);
	goldsz = oldsz;
	if (oldsz > MALLOC_MAXCHUNK) {
		if (oldsz < mopts.malloc_guard)
			wrterror("guard size");
		oldsz -= mopts.malloc_guard;
	}

	gnewsz = newsz;
	if (gnewsz > MALLOC_MAXCHUNK)
		gnewsz += mopts.malloc_guard;

	if (newsz > MALLOC_MAXCHUNK && oldsz > MALLOC_MAXCHUNK && p == r->p &&
	    !mopts.malloc_realloc) {
		size_t roldsz = PAGEROUND(goldsz);
		size_t rnewsz = PAGEROUND(gnewsz);

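		/*
		 * Cheap realloc attempt: when growing an unguarded
		 * multi-page region, try to mmap() the pages directly
		 * after it (evicting them from the free-page cache
		 * first); if the kernel happens to place the new mapping
		 * there, the region grows in place and nothing is copied.
		 */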
		if (rnewsz > roldsz) {
			if (!mopts.malloc_guard) {
				STATS_INC(g_pool->cheap_realloc_tries);
				zapcacheregion(g_pool, p + roldsz);
				q = MMAPA(p + roldsz, rnewsz - roldsz);
				if (q == p + roldsz) {
					malloc_used += rnewsz - roldsz;
					if (mopts.malloc_junk)
						memset(q, SOME_JUNK,
						    rnewsz - roldsz);
					r->size = newsz;
					STATS_INC(g_pool->cheap_reallocs);
					return p;
				} else if (q != MAP_FAILED)
					munmap(q, rnewsz - roldsz);
			}
		} else if (rnewsz < roldsz) {
			if (mopts.malloc_guard) {
				if (mprotect((char *)p + roldsz -
				    mopts.malloc_guard, mopts.malloc_guard,
				    PROT_READ | PROT_WRITE))
					wrterror("mprotect");
				if (mprotect((char *)p + rnewsz -
				    mopts.malloc_guard, mopts.malloc_guard,
				    PROT_NONE))
					wrterror("mprotect");
			}
			unmap(g_pool, (char *)p + rnewsz, roldsz - rnewsz);
			r->size = gnewsz;
			return p;
		} else {
			if (newsz > oldsz && mopts.malloc_junk)
				memset((char *)p + newsz, SOME_JUNK,
				    rnewsz - mopts.malloc_guard - newsz);
			r->size = gnewsz;
			return p;
		}
	}
	if (newsz <= oldsz && newsz > oldsz / 2 && !mopts.malloc_realloc) {
		if (mopts.malloc_junk && newsz > 0)
			memset((char *)p + newsz, SOME_JUNK, oldsz - newsz);
		return p;
	} else if (newsz != oldsz || mopts.malloc_realloc) {
		q = omalloc(newsz, 0);
		if (q == NULL)
			return NULL;
		if (newsz != 0 && oldsz != 0)
			memcpy(q, p, oldsz < newsz ? oldsz : newsz);
		ofree(p);
		return q;
	} else
		return p;
}

void *
realloc(void *ptr, size_t size)
{
	void *r;
	int saved_errno = errno;

	_MALLOC_LOCK();
	malloc_func = " in realloc():";
	if (g_pool == NULL) {
		if (malloc_init() != 0)
			return NULL;
	}
	if (malloc_active++) {
		malloc_recurse();
		return NULL;
	}
	r = orealloc(ptr, size);

	malloc_active--;
	_MALLOC_UNLOCK();
	if (r == NULL && mopts.malloc_xmalloc) {
		wrterror("out of memory");
		errno = ENOMEM;
	}
	if (r != NULL)
		errno = saved_errno;
	return r;
}

#define MUL_NO_OVERFLOW	(1UL << (sizeof(size_t) * 4))
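
/*
 * If both nmemb and size are below MUL_NO_OVERFLOW (2^32 on 64-bit,
 * 2^16 on 32-bit), their product fits in a size_t, so the division in
 * the check below runs only for suspiciously large inputs.  E.g. on
 * 32-bit, calloc(65536, 65537) reaches the division and fails:
 * SIZE_MAX / 65536 == 65535 < 65537.
 */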

void *
calloc(size_t nmemb, size_t size)
{
	void *r;
	int saved_errno = errno;

	_MALLOC_LOCK();
	malloc_func = " in calloc():";
	if (g_pool == NULL) {
		if (malloc_init() != 0)
			return NULL;
	}
	if ((nmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
	    nmemb > 0 && SIZE_MAX / nmemb < size) {
		_MALLOC_UNLOCK();
		if (mopts.malloc_xmalloc)
			wrterror("out of memory");
		errno = ENOMEM;
		return NULL;
	}

	if (malloc_active++) {
		malloc_recurse();
		return NULL;
	}

	size *= nmemb;
	r = omalloc(size, 1);

	malloc_active--;
	_MALLOC_UNLOCK();
	if (r == NULL && mopts.malloc_xmalloc) {
		wrterror("out of memory");
		errno = ENOMEM;
	}
	if (r != NULL)
		errno = saved_errno;
	return r;
}
1491