/*	$OpenBSD: malloc.c,v 1.35 2022/01/18 21:59:29 deraadt Exp $	*/
/*
 * Copyright (c) 2008, 2010, 2011 Otto Moerbeek <otto@drijf.net>
 * Copyright (c) 2012 Matthew Dempsky <matthew@openbsd.org>
 * Copyright (c) 2008 Damien Miller <djm@openbsd.org>
 * Copyright (c) 2000 Poul-Henning Kamp <phk@FreeBSD.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * If we meet some day, and you think this stuff is worth it, you
 * can buy me a beer in return. Poul-Henning Kamp
 */

#include <sys/types.h>
#include <sys/queue.h>
#include <sys/time.h>
#include <sys/mman.h>
#include <stdint.h>

#include "syscall.h"
#include "util.h"
#include "resolve.h"		/* for lock_cb */

#define MALLOC_PAGESHIFT	_MAX_PAGE_SHIFT
#define MALLOC_MINSHIFT		4
#define MALLOC_MAXSHIFT		(MALLOC_PAGESHIFT - 1)
#define MALLOC_PAGESIZE		(1UL << MALLOC_PAGESHIFT)
#define MALLOC_MINSIZE		(1UL << MALLOC_MINSHIFT)
#define MALLOC_PAGEMASK		(MALLOC_PAGESIZE - 1)
#define MASK_POINTER(p)		((void *)(((uintptr_t)(p)) & ~MALLOC_PAGEMASK))

#define MALLOC_MAXCHUNK		(1 << MALLOC_MAXSHIFT)
#define MALLOC_MAXCACHE		256
#define MALLOC_DELAYED_CHUNK_MASK	15
#define MALLOC_INITIAL_REGIONS	(MALLOC_PAGESIZE / sizeof(struct region_info))
#define MALLOC_DEFAULT_CACHE	64
#define MALLOC_CHUNK_LISTS	4
#define CHUNK_CHECK_LENGTH	32

/*
 * We move allocations between half a page and a whole page towards the end,
 * subject to alignment constraints. This is the extra headroom we allow.
 * Set to zero to be the most strict.
 */
#define MALLOC_LEEWAY		0

#define PAGEROUND(x)  (((x) + (MALLOC_PAGEMASK)) & ~MALLOC_PAGEMASK)
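
/*
 * Example (illustrative only, not part of the original source; assumes
 * MALLOC_PAGESHIFT == 12, so MALLOC_PAGESIZE == 0x1000 and
 * MALLOC_PAGEMASK == 0xfff):
 *
 *	PAGEROUND(0x1234)     == 0x2000   round a request up to whole pages
 *	MASK_POINTER(0x12345) == 0x12000  page containing the pointer; the
 *					  low bits are left free to carry
 *					  chunk-size tags (see REALSIZE below)
 */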

/*
 * What to use for Junk.  This is the byte value we use to fill with
 * when the 'J' option is enabled. Use SOME_JUNK right after alloc,
 * and SOME_FREEJUNK right before free.
 */
#define SOME_JUNK		0xdb	/* deadbeef */
#define SOME_FREEJUNK		0xdf	/* dead, free */

#define MMAP(sz)	_dl_mmap(NULL, (size_t)(sz), PROT_READ | PROT_WRITE, \
    MAP_ANON | MAP_PRIVATE, -1, (off_t) 0)

#define MMAPNONE(sz)	_dl_mmap(NULL, (size_t)(sz), PROT_NONE, \
    MAP_ANON | MAP_PRIVATE, -1, (off_t) 0)

#define MMAP_ERROR(p)	(_dl_mmap_error(p) ? MAP_FAILED : (p))

struct region_info {
	void *p;		/* page; low bits used to mark chunks */
	uintptr_t size;		/* size for pages, or chunk_info pointer */
};

LIST_HEAD(chunk_head, chunk_info);

struct dir_info {
	u_int32_t canary1;
	int active;			/* status of malloc */
	struct region_info *r;		/* region slots */
	size_t regions_total;		/* number of region slots */
	size_t regions_free;		/* number of free slots */
					/* lists of free chunk info structs */
	struct chunk_head chunk_info_list[MALLOC_MAXSHIFT + 1];
					/* lists of chunks with free slots */
	struct chunk_head chunk_dir[MALLOC_MAXSHIFT + 1][MALLOC_CHUNK_LISTS];
	size_t free_regions_size;	/* free pages cached */
					/* free pages cache */
	u_int rotor;
	struct region_info free_regions[MALLOC_MAXCACHE];
					/* delayed free chunk slots */
	void *delayed_chunks[MALLOC_DELAYED_CHUNK_MASK + 1];
	size_t rbytesused;		/* random bytes used */
	char *func;			/* current function */
	u_char rbytes[256];		/* random bytes */
	u_int32_t canary2;
};
#define DIR_INFO_RSZ	((sizeof(struct dir_info) + MALLOC_PAGEMASK) & \
			~MALLOC_PAGEMASK)

/*
 * This structure describes a page worth of chunks.
 *
 * MALLOC_BITS is the number of bits per u_short in the free-chunk bitmap.
 */
#define MALLOC_BITS		(NBBY * sizeof(u_short))
struct chunk_info {
	LIST_ENTRY(chunk_info) entries;
	void *page;			/* pointer to the page */
	u_short canary;
	u_short size;			/* size of this page's chunks */
	u_short shift;			/* how far to shift for this size */
	u_short free;			/* how many free chunks */
	u_short total;			/* how many chunks in total */
	u_short offset;			/* requested size table offset */
					/* which chunks are free */
	u_short bits[1];
};

#define MALLOC_FREEUNMAP	0
#define MALLOC_JUNK		1
#define CHUNK_CANARIES		1
#define MALLOC_GUARD		((size_t)MALLOC_PAGESIZE)
#define MALLOC_CACHE		MALLOC_DEFAULT_CACHE

struct malloc_readonly {
	struct dir_info *g_pool;	/* Main bookkeeping information */
	u_int32_t malloc_canary;	/* Matched against ones in g_pool */
};

/*
 * malloc configuration
 */
static struct malloc_readonly mopts __relro;

#define g_pool	mopts.g_pool

static u_char getrbyte(struct dir_info *d);

/*
 * The low bits of r->p determine the size class: 0 means a region of at
 * least a page, with r->size holding the real size; 1 marks a malloc(0)
 * chunk; any other value k encodes a chunk of size 1 << (k - 1).
 */
#define REALSIZE(sz, r)						\
	(sz) = (uintptr_t)(r)->p & MALLOC_PAGEMASK,		\
	(sz) = ((sz) == 0 ? (r)->size : ((sz) == 1 ? 0 : (1 << ((sz)-1))))
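
/*
 * Example (illustrative only, not part of the original source; assumes
 * MALLOC_PAGESHIFT == 12): a region entry whose p == 0x12006 decodes as
 * page 0x12000 with tag 6, so REALSIZE yields 1 << 5 == 32-byte chunks;
 * a tag of 0 would instead report r->size bytes verbatim.
 */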

static inline size_t
hash(void *p)
{
	size_t sum;
	uintptr_t u;

	u = (uintptr_t)p >> MALLOC_PAGESHIFT;
	sum = u;
	sum = (sum << 7) - sum + (u >> 16);
#ifdef __LP64__
	sum = (sum << 7) - sum + (u >> 32);
	sum = (sum << 7) - sum + (u >> 48);
#endif
	return sum;
}
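
/*
 * Note (illustrative only, not part of the original source): each step
 * above computes sum * 127 plus the next 16 bits of the page number,
 * since (sum << 7) - sum == sum * 127; the page number is thus hashed
 * as a polynomial in 127 over its 16-bit digits, mixing all of its bits
 * into the table index with shifts and adds only.
 */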

static __dead void
wrterror(char *msg)
{
	if (g_pool != NULL && g_pool->func != NULL)
		_dl_die("%s error: %s", g_pool->func, msg);
	else
		_dl_die("%s", msg);
}

static void
rbytes_init(struct dir_info *d)
{
	_dl_arc4randombuf(d->rbytes, sizeof(d->rbytes));
	/* add 1 to account for using d->rbytes[0] */
	d->rbytesused = 1 + d->rbytes[0] % (sizeof(d->rbytes) / 2);
}

static inline u_char
getrbyte(struct dir_info *d)
{
	u_char x;

	if (d->rbytesused >= sizeof(d->rbytes))
		rbytes_init(d);
	x = d->rbytes[d->rbytesused++];
	return x;
}

/*
 * Initialize the malloc subsystem before relro processing.
 */
void
_dl_malloc_init(void)
{
	char *p;
	int i, j;
	size_t d_avail, regioninfo_size, tmp;
	struct dir_info *d;

	do {
		_dl_arc4randombuf(&mopts.malloc_canary,
		    sizeof(mopts.malloc_canary));
	} while (mopts.malloc_canary == 0);

	/*
	 * Allocate dir_info with a guard page on either side. Also
	 * randomise offset inside the page at which the dir_info
	 * lies (subject to alignment by 1 << MALLOC_MINSHIFT)
	 */
	p = MMAPNONE(DIR_INFO_RSZ + (MALLOC_PAGESIZE * 2));
	p = MMAP_ERROR(p);
	if (p == MAP_FAILED)
		wrterror("malloc init mmap failed");
	_dl_mprotect(p + MALLOC_PAGESIZE, DIR_INFO_RSZ, PROT_READ | PROT_WRITE);
	d_avail = (DIR_INFO_RSZ - sizeof(*d)) >> MALLOC_MINSHIFT;

	_dl_arc4randombuf(&tmp, sizeof(tmp));
	d = (struct dir_info *)(p + MALLOC_PAGESIZE +
	    ((tmp % d_avail) << MALLOC_MINSHIFT)); /* not uniform */

	rbytes_init(d);
	d->regions_free = d->regions_total = MALLOC_INITIAL_REGIONS;
	regioninfo_size = d->regions_total * sizeof(struct region_info);
	d->r = MMAP(regioninfo_size);
	d->r = MMAP_ERROR(d->r);
	if (d->r == MAP_FAILED)
		wrterror("malloc init mmap failed");
	for (i = 0; i <= MALLOC_MAXSHIFT; i++) {
		LIST_INIT(&d->chunk_info_list[i]);
		for (j = 0; j < MALLOC_CHUNK_LISTS; j++)
			LIST_INIT(&d->chunk_dir[i][j]);
	}
	d->canary1 = mopts.malloc_canary ^ (u_int32_t)(uintptr_t)d;
	d->canary2 = ~d->canary1;

	g_pool = d;
}

static int
omalloc_grow(struct dir_info *d)
{
	size_t newtotal;
	size_t newsize;
	size_t mask;
	size_t i;
	struct region_info *p;

	if (d->regions_total > SIZE_MAX / sizeof(struct region_info) / 2)
		return 1;

	newtotal = d->regions_total * 2;
	newsize = newtotal * sizeof(struct region_info);
	mask = newtotal - 1;

	p = MMAP(newsize);
	p = MMAP_ERROR(p);
	if (p == MAP_FAILED)
		return 1;

	for (i = 0; i < d->regions_total; i++) {
		void *q = d->r[i].p;
		if (q != NULL) {
			size_t index = hash(q) & mask;
			while (p[index].p != NULL) {
				index = (index - 1) & mask;
			}
			p[index] = d->r[i];
		}
	}
	/* avoid pages containing meta info ending up in the cache */
	if (_dl_munmap(d->r, d->regions_total * sizeof(struct region_info)))
		wrterror("munmap");
	d->regions_free = d->regions_free + d->regions_total;
	d->regions_total = newtotal;
	d->r = p;
	return 0;
}

/*
 * The hashtable uses the assumption that p is never NULL. This holds since
 * non-MAP_FIXED mappings with hint 0 start at BRKSIZ.
 */
static int
insert(struct dir_info *d, void *p, size_t sz)
{
	size_t index;
	size_t mask;
	void *q;

	if (d->regions_free * 4 < d->regions_total) {
		if (omalloc_grow(d))
			return 1;
	}
	mask = d->regions_total - 1;
	index = hash(p) & mask;
	q = d->r[index].p;
	while (q != NULL) {
		index = (index - 1) & mask;
		q = d->r[index].p;
	}
	d->r[index].p = p;
	d->r[index].size = sz;
	d->regions_free--;
	return 0;
}
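
/*
 * Note (illustrative only, not part of the original source): the table
 * is open-addressed with linear probing that walks downwards. With 8
 * slots (mask == 7) and hash(p) & mask == 2, slots are tried in the
 * order 2, 1, 0, 7, 6, ... until an empty one is found. insert() grows
 * the table whenever fewer than a quarter of the slots are free, so a
 * successful insertion always finds an empty slot.
 */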

static struct region_info *
find(struct dir_info *d, void *p)
{
	size_t index;
	size_t mask = d->regions_total - 1;
	void *q, *r;

	if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)(uintptr_t)d) ||
	    d->canary1 != ~d->canary2)
		wrterror("internal struct corrupt");
	p = MASK_POINTER(p);
	index = hash(p) & mask;
	r = d->r[index].p;
	q = MASK_POINTER(r);
	while (q != p && r != NULL) {
		index = (index - 1) & mask;
		r = d->r[index].p;
		q = MASK_POINTER(r);
	}
	return (q == p && r != NULL) ? &d->r[index] : NULL;
}

static void
delete(struct dir_info *d, struct region_info *ri)
{
	/* algorithm R, Knuth Vol III section 6.4 */
	size_t mask = d->regions_total - 1;
	size_t i, j, r;

	if (d->regions_total & (d->regions_total - 1))
		wrterror("regions_total not 2^x");
	d->regions_free++;

	i = ri - d->r;
	for (;;) {
		d->r[i].p = NULL;
		d->r[i].size = 0;
		j = i;
		for (;;) {
			i = (i - 1) & mask;
			if (d->r[i].p == NULL)
				return;
			r = hash(d->r[i].p) & mask;
			if ((i <= r && r < j) || (r < j && j < i) ||
			    (j < i && i <= r))
				continue;
			d->r[j] = d->r[i];
			break;
		}

	}
}
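
/*
 * Note (illustrative only, not part of the original source): deletion
 * must re-home entries whose probe path passed through the emptied slot
 * j, or find() would stop early at the hole. For the entry at slot i
 * with home slot r, the three-way test asks whether r lies cyclically
 * in the segment from i up to (but not including) j; if so, the entry's
 * probe path does not cross the hole and it may stay. Otherwise it is
 * moved into j and the scan continues with the hole now at i.
 */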

/*
 * Cache maintenance. We keep at most MALLOC_CACHE pages cached.
 * If the cache is becoming full, unmap pages in the cache for real,
 * and then add the region to the cache.
 * Unlike the regular region data structure, the sizes in the
 * cache are in MALLOC_PAGESIZE units.
 */
static void
unmap(struct dir_info *d, void *p, size_t sz, int junk)
{
	size_t psz = sz >> MALLOC_PAGESHIFT;
	size_t rsz;
	struct region_info *r;
	u_int i, offset, mask;

	if (sz != PAGEROUND(sz))
		wrterror("munmap round");

	rsz = MALLOC_CACHE - d->free_regions_size;

	if (psz > MALLOC_CACHE) {
		if (_dl_munmap(p, sz))
			wrterror("munmap");
		return;
	}
	offset = getrbyte(d);
	mask = MALLOC_CACHE - 1;
	if (psz > rsz) {
		size_t tounmap = psz - rsz;
		for (i = 0; ; i++) {
			r = &d->free_regions[(i + offset) & mask];
			if (r->p != NULL) {
				rsz = r->size << MALLOC_PAGESHIFT;
				if (_dl_munmap(r->p, rsz))
					wrterror("munmap");
				r->p = NULL;
				if (tounmap > r->size)
					tounmap -= r->size;
				else
					tounmap = 0;
				d->free_regions_size -= r->size;
				if (tounmap == 0) {
					offset = i;
					break;
				}
			}
		}
	}
	for (i = 0; ; i++) {
		r = &d->free_regions[(i + offset) & mask];
		if (r->p == NULL) {
			if (junk && !MALLOC_FREEUNMAP) {
				size_t amt = junk == 1 ?  MALLOC_MAXCHUNK : sz;
				_dl_memset(p, SOME_FREEJUNK, amt);
			}
			if (MALLOC_FREEUNMAP)
				_dl_mprotect(p, sz, PROT_NONE);
			r->p = p;
			r->size = psz;
			d->free_regions_size += psz;
			break;
		}
	}
	if (d->free_regions_size > MALLOC_CACHE)
		wrterror("malloc cache overflow");
}
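
/*
 * Note (illustrative only, not part of the original source): with
 * junk == 1, as configured here via MALLOC_JUNK, the fill above is
 * capped at MALLOC_MAXCHUNK bytes, so freeing a large multi-page region
 * junks only its first part; junk == 2 would overwrite the whole region
 * with SOME_FREEJUNK before it is cached, making stale reads through
 * dangling pointers easier to spot.
 */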

static void *
map(struct dir_info *d, size_t sz, int zero_fill)
{
	size_t psz = sz >> MALLOC_PAGESHIFT;
	struct region_info *r, *big = NULL;
	u_int i;
	void *p;

	if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)(uintptr_t)d) ||
	    d->canary1 != ~d->canary2)
		wrterror("internal struct corrupt");
	if (sz != PAGEROUND(sz)) {
		wrterror("map round");
		return MAP_FAILED;
	}
	if (psz > d->free_regions_size) {
		p = MMAP(sz);
		p = MMAP_ERROR(p);
		/* zero fill not needed */
		return p;
	}
	for (i = 0; i < MALLOC_CACHE; i++) {
		r = &d->free_regions[(i + d->rotor) & (MALLOC_CACHE - 1)];
		if (r->p != NULL) {
			if (r->size == psz) {
				p = r->p;
				if (MALLOC_FREEUNMAP)
					_dl_mprotect(p, sz, PROT_READ | PROT_WRITE);
				r->p = NULL;
				d->free_regions_size -= psz;
				if (zero_fill)
					_dl_memset(p, 0, sz);
				else if (MALLOC_JUNK == 2 &&
				    MALLOC_FREEUNMAP)
					_dl_memset(p, SOME_FREEJUNK, sz);
				d->rotor += i + 1;
				return p;
			} else if (r->size > psz)
				big = r;
		}
	}
	if (big != NULL) {
		r = big;
		p = (char *)r->p + ((r->size - psz) << MALLOC_PAGESHIFT);
		if (MALLOC_FREEUNMAP)
			_dl_mprotect(p, sz, PROT_READ | PROT_WRITE);
		r->size -= psz;
		d->free_regions_size -= psz;
		if (zero_fill)
			_dl_memset(p, 0, sz);
		else if (MALLOC_JUNK == 2 && MALLOC_FREEUNMAP)
			_dl_memset(p, SOME_FREEJUNK, sz);
		return p;
	}
	p = MMAP(sz);
	p = MMAP_ERROR(p);
	if (d->free_regions_size > MALLOC_CACHE)
		wrterror("malloc cache");
	/* zero fill not needed */
	return p;
}

static void
init_chunk_info(struct dir_info *d, struct chunk_info *p, int bits)
{
	int i;

	if (bits == 0) {
		p->shift = MALLOC_MINSHIFT;
		p->total = p->free = MALLOC_PAGESIZE >> p->shift;
		p->size = 0;
		p->offset = 0xdead;
	} else {
		p->shift = bits;
		p->total = p->free = MALLOC_PAGESIZE >> p->shift;
		p->size = 1U << bits;
		p->offset = howmany(p->total, MALLOC_BITS);
	}
	p->canary = (u_short)d->canary1;

	/* set all valid bits in the bitmap */
	i = p->total - 1;
	_dl_memset(p->bits, 0xff, sizeof(p->bits[0]) * (i / MALLOC_BITS));
	p->bits[i / MALLOC_BITS] = (2U << (i % MALLOC_BITS)) - 1;
}
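
/*
 * Example (illustrative only, not part of the original source): with
 * 4 KB pages and bits == 8 (256-byte chunks), total == 16, so i == 15,
 * the memset covers no full shorts, and (2U << 15) - 1 == 0xffff marks
 * all 16 chunks free in bits[0]. Larger totals fill whole shorts with
 * 0xff first and set only the remaining valid bits in the last one.
 */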

static struct chunk_info *
alloc_chunk_info(struct dir_info *d, int bits)
{
	struct chunk_info *p;

	if (LIST_EMPTY(&d->chunk_info_list[bits])) {
		size_t size, count, i;
		char *q;

		if (bits == 0)
			count = MALLOC_PAGESIZE / MALLOC_MINSIZE;
		else
			count = MALLOC_PAGESIZE >> bits;

		size = howmany(count, MALLOC_BITS);
		size = sizeof(struct chunk_info) + (size - 1) * sizeof(u_short);
		if (CHUNK_CANARIES)
			size += count * sizeof(u_short);
		size = _ALIGN(size);

		q = MMAP(MALLOC_PAGESIZE);
		q = MMAP_ERROR(q);
		if (q == MAP_FAILED)
			return NULL;
		count = MALLOC_PAGESIZE / size;

		for (i = 0; i < count; i++, q += size)
			LIST_INSERT_HEAD(&d->chunk_info_list[bits],
			    (struct chunk_info *)q, entries);
	}
	p = LIST_FIRST(&d->chunk_info_list[bits]);
	LIST_REMOVE(p, entries);
	if (p->shift == 0)
		init_chunk_info(d, p, bits);
	return p;
}

/*
 * Allocate a page of chunks
 */
static struct chunk_info *
omalloc_make_chunks(struct dir_info *d, int bits, int listnum)
{
	struct chunk_info *bp;
	void *pp;

	/* Allocate a new bucket */
	pp = map(d, MALLOC_PAGESIZE, 0);
	if (pp == MAP_FAILED)
		return NULL;

	bp = alloc_chunk_info(d, bits);
	if (bp == NULL)
		goto err;
	/* memory protect the page allocated in the malloc(0) case */
	if (bits == 0 && _dl_mprotect(pp, MALLOC_PAGESIZE, PROT_NONE) < 0)
		goto err;

	bp->page = pp;

	if (insert(d, (void *)((uintptr_t)pp | (bits + 1)), (uintptr_t)bp))
		goto err;
	LIST_INSERT_HEAD(&d->chunk_dir[bits][listnum], bp, entries);
	return bp;

err:
	unmap(d, pp, MALLOC_PAGESIZE, MALLOC_JUNK);
	return NULL;
}

static int
find_chunksize(size_t size)
{
	int r;

	/* malloc(0) is special */
	if (size == 0)
		return 0;

	if (size < MALLOC_MINSIZE)
		size = MALLOC_MINSIZE;
	size--;

	r = MALLOC_MINSHIFT;
	while (size >> r)
		r++;
	return r;
}
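
/*
 * Example (illustrative only, not part of the original source): the
 * loop rounds a request up to the next power of two and returns its
 * log2, e.g. find_chunksize(24) == 5 (a 32-byte chunk),
 * find_chunksize(32) == 5, find_chunksize(33) == 6; requests below
 * MALLOC_MINSIZE all share the smallest class, MALLOC_MINSHIFT.
 */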

static void
fill_canary(char *ptr, size_t sz, size_t allocated)
{
	size_t check_sz = allocated - sz;

	if (check_sz > CHUNK_CHECK_LENGTH)
		check_sz = CHUNK_CHECK_LENGTH;
	_dl_memset(ptr + sz, SOME_JUNK, check_sz);
}

/*
 * Allocate a chunk
 */
static void *
malloc_bytes(struct dir_info *d, size_t size)
{
	u_int i, r;
	int j, listnum;
	size_t k;
	u_short	*lp;
	struct chunk_info *bp;
	void *p;

	if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)(uintptr_t)d) ||
	    d->canary1 != ~d->canary2)
		wrterror("internal struct corrupt");

	j = find_chunksize(size);

	r = ((u_int)getrbyte(d) << 8) | getrbyte(d);
	listnum = r % MALLOC_CHUNK_LISTS;
	/* If the list is empty, make a page more of chunks of that size */
	if ((bp = LIST_FIRST(&d->chunk_dir[j][listnum])) == NULL) {
		bp = omalloc_make_chunks(d, j, listnum);
		if (bp == NULL)
			return NULL;
	}

	if (bp->canary != (u_short)d->canary1)
		wrterror("chunk info corrupted");

	i = (r / MALLOC_CHUNK_LISTS) & (bp->total - 1);

	/* start somewhere in a short */
	lp = &bp->bits[i / MALLOC_BITS];
	if (*lp) {
		j = i % MALLOC_BITS;
		k = __builtin_ffs(*lp >> j);
		if (k != 0) {
			k += j - 1;
			goto found;
		}
	}
	/* no bit halfway, go to next full short */
	i /= MALLOC_BITS;
	for (;;) {
		if (++i >= bp->total / MALLOC_BITS)
			i = 0;
		lp = &bp->bits[i];
		if (*lp) {
			k = __builtin_ffs(*lp) - 1;
			break;
		}
	}
found:
	*lp ^= 1 << k;

	/* If there are no more free, remove from free-list */
	if (--bp->free == 0)
		LIST_REMOVE(bp, entries);

	/* Adjust to the real offset of that chunk */
	k += (lp - bp->bits) * MALLOC_BITS;

	if (CHUNK_CANARIES && size > 0)
		bp->bits[bp->offset + k] = size;

	k <<= bp->shift;

	p = (char *)bp->page + k;
	if (bp->size > 0) {
		if (MALLOC_JUNK == 2)
			_dl_memset(p, SOME_JUNK, bp->size);
		else if (CHUNK_CANARIES)
			fill_canary(p, size, bp->size);
	}
	return p;
}

static void
validate_canary(u_char *ptr, size_t sz, size_t allocated)
{
	size_t check_sz = allocated - sz;
	u_char *p, *q;

	if (check_sz > CHUNK_CHECK_LENGTH)
		check_sz = CHUNK_CHECK_LENGTH;
	p = ptr + sz;
	q = p + check_sz;

	while (p < q)
		if (*p++ != SOME_JUNK)
			wrterror("chunk canary corrupted");
}

static uint32_t
find_chunknum(struct dir_info *d, struct region_info *r, void *ptr, int check)
{
	struct chunk_info *info;
	uint32_t chunknum;

	info = (struct chunk_info *)r->size;
	if (info->canary != (u_short)d->canary1)
		wrterror("chunk info corrupted");

	/* Find the chunk number on the page */
	chunknum = ((uintptr_t)ptr & MALLOC_PAGEMASK) >> info->shift;
	if (check && info->size > 0) {
		validate_canary(ptr, info->bits[info->offset + chunknum],
		    info->size);
	}

	if ((uintptr_t)ptr & ((1U << (info->shift)) - 1)) {
		wrterror("modified chunk-pointer");
		return -1;
	}
	if (info->bits[chunknum / MALLOC_BITS] &
	    (1U << (chunknum % MALLOC_BITS)))
		wrterror("chunk is already free");
	return chunknum;
}

/*
 * Free a chunk, and possibly the page it's on, if the page becomes empty.
 */
static void
free_bytes(struct dir_info *d, struct region_info *r, void *ptr)
{
	struct chunk_head *mp;
	struct chunk_info *info;
	uint32_t chunknum;
	int listnum;

	info = (struct chunk_info *)r->size;
	chunknum = find_chunknum(d, r, ptr, 0);

	info->bits[chunknum / MALLOC_BITS] |= 1U << (chunknum % MALLOC_BITS);
	info->free++;

	if (info->free == 1) {
		/* Page became non-full */
		listnum = getrbyte(d) % MALLOC_CHUNK_LISTS;
		if (info->size != 0)
			mp = &d->chunk_dir[info->shift][listnum];
		else
			mp = &d->chunk_dir[0][listnum];

		LIST_INSERT_HEAD(mp, info, entries);
		return;
	}

	if (info->free != info->total)
		return;

	LIST_REMOVE(info, entries);

	if (info->size == 0 && !MALLOC_FREEUNMAP)
		_dl_mprotect(info->page, MALLOC_PAGESIZE, PROT_READ | PROT_WRITE);
	unmap(d, info->page, MALLOC_PAGESIZE, 0);

	delete(d, r);
	if (info->size != 0)
		mp = &d->chunk_info_list[info->shift];
	else
		mp = &d->chunk_info_list[0];
	LIST_INSERT_HEAD(mp, info, entries);
}

static void *
omalloc(size_t sz, int zero_fill)
{
	void *p;
	size_t psz;

	if (sz > MALLOC_MAXCHUNK) {
		if (sz >= SIZE_MAX - MALLOC_GUARD - MALLOC_PAGESIZE) {
			return NULL;
		}
		sz += MALLOC_GUARD;
		psz = PAGEROUND(sz);
		p = map(g_pool, psz, zero_fill);
		if (p == MAP_FAILED) {
			return NULL;
		}
		if (insert(g_pool, p, sz)) {
			unmap(g_pool, p, psz, 0);
			return NULL;
		}
		if (MALLOC_GUARD) {
			if (_dl_mprotect((char *)p + psz - MALLOC_GUARD,
			    MALLOC_GUARD, PROT_NONE))
				wrterror("mprotect");
		}

		if (sz - MALLOC_GUARD < MALLOC_PAGESIZE - MALLOC_LEEWAY) {
			/* fill whole allocation */
			if (MALLOC_JUNK == 2)
				_dl_memset(p, SOME_JUNK, psz - MALLOC_GUARD);
			/* shift towards the end */
			p = ((char *)p) + ((MALLOC_PAGESIZE - MALLOC_LEEWAY -
			    (sz - MALLOC_GUARD)) & ~(MALLOC_MINSIZE-1));
			/* fill zeros if needed and overwritten above */
			if (zero_fill && MALLOC_JUNK == 2)
				_dl_memset(p, 0, sz - MALLOC_GUARD);
		} else {
			if (MALLOC_JUNK == 2) {
				if (zero_fill)
					_dl_memset((char *)p + sz - MALLOC_GUARD,
					    SOME_JUNK, psz - sz);
				else
					_dl_memset(p, SOME_JUNK,
					    psz - MALLOC_GUARD);
			} else if (CHUNK_CANARIES)
				fill_canary(p, sz - MALLOC_GUARD,
				    psz - MALLOC_GUARD);
		}

	} else {
		/* takes care of SOME_JUNK */
		p = malloc_bytes(g_pool, sz);
		if (zero_fill && p != NULL && sz > 0)
			_dl_memset(p, 0, sz);
	}

	return p;
}

/*
 * Common function for handling recursion.  Only
 * print the error message once, to avoid making the problem
 * potentially worse.
 */
static void
malloc_recurse(void)
{
	static int noprint;

	if (noprint == 0) {
		noprint = 1;
		wrterror("recursive call");
	}
	g_pool->active--;
}

void *
_dl_malloc(size_t size)
{
	void *r = NULL;
	lock_cb *cb;

	cb = _dl_thread_kern_stop();
	g_pool->func = "malloc():";
	if (g_pool->active++) {
		malloc_recurse();
		goto ret;
	}
	r = omalloc(size, 0);
	g_pool->active--;
ret:
	_dl_thread_kern_go(cb);
	return r;
}

static void
validate_junk(struct dir_info *pool, void *p)
{
	struct region_info *r;
	size_t byte, sz;

	if (p == NULL)
		return;
	r = find(pool, p);
	if (r == NULL)
		wrterror("bogus pointer in validate_junk");
	REALSIZE(sz, r);
	if (sz > CHUNK_CHECK_LENGTH)
		sz = CHUNK_CHECK_LENGTH;
	for (byte = 0; byte < sz; byte++) {
		if (((unsigned char *)p)[byte] != SOME_FREEJUNK)
			wrterror("use after free");
	}
}

static void
ofree(void *p)
{
	struct region_info *r;
	size_t sz;

	r = find(g_pool, p);
	if (r == NULL)
		wrterror("bogus pointer (double free?)");
	REALSIZE(sz, r);
	if (sz > MALLOC_MAXCHUNK) {
		if (sz - MALLOC_GUARD >= MALLOC_PAGESIZE -
		    MALLOC_LEEWAY) {
			if (r->p != p)
				wrterror("bogus pointer");
			if (CHUNK_CANARIES)
				validate_canary(p,
				    sz - MALLOC_GUARD,
				    PAGEROUND(sz - MALLOC_GUARD));
		} else {
#if notyetbecause_of_realloc
			/* shifted towards the end */
			if (p != ((char *)r->p) + ((MALLOC_PAGESIZE -
			    MALLOC_MINSIZE - sz - MALLOC_GUARD) &
			    ~(MALLOC_MINSIZE-1))) {
			}
#endif
			p = r->p;
		}
		if (MALLOC_GUARD) {
			if (sz < MALLOC_GUARD)
				wrterror("guard size");
			if (!MALLOC_FREEUNMAP) {
				if (_dl_mprotect((char *)p + PAGEROUND(sz) -
				    MALLOC_GUARD, MALLOC_GUARD,
				    PROT_READ | PROT_WRITE))
					wrterror("mprotect");
			}
		}
		unmap(g_pool, p, PAGEROUND(sz), MALLOC_JUNK);
		delete(g_pool, r);
	} else {
		void *tmp;
		int i;
		struct chunk_info *info = (struct chunk_info *)r->size;

		if (info->size != sz)
			wrterror("internal struct corrupt");
		find_chunknum(g_pool, r, p, CHUNK_CANARIES);
		for (i = 0; i <= MALLOC_DELAYED_CHUNK_MASK; i++) {
			if (p == g_pool->delayed_chunks[i])
				wrterror("double free");
		}
		if (MALLOC_JUNK && sz > 0)
			_dl_memset(p, SOME_FREEJUNK, sz);
		i = getrbyte(g_pool) & MALLOC_DELAYED_CHUNK_MASK;
		tmp = p;
		p = g_pool->delayed_chunks[i];
		g_pool->delayed_chunks[i] = tmp;
		if (MALLOC_JUNK)
			validate_junk(g_pool, p);
		if (p != NULL) {
			r = find(g_pool, p);
			if (r == NULL)
				wrterror("bogus pointer (double free?)");
			free_bytes(g_pool, r, p);
		}
	}
}

void
_dl_free(void *ptr)
{
	lock_cb *cb;

	/* This is legal. */
	if (ptr == NULL)
		return;

	cb = _dl_thread_kern_stop();
	if (g_pool == NULL)
		wrterror("free() called before allocation");
	g_pool->func = "free():";
	if (g_pool->active++) {
		malloc_recurse();
		goto ret;
	}
	ofree(ptr);
	g_pool->active--;
ret:
	_dl_thread_kern_go(cb);
}


/*
 * This is sqrt(SIZE_MAX+1), as s1*s2 <= SIZE_MAX
 * if both s1 < MUL_NO_OVERFLOW and s2 < MUL_NO_OVERFLOW
 */
#define MUL_NO_OVERFLOW	(1UL << (sizeof(size_t) * 4))
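
/*
 * Example (illustrative only, not part of the original source): on a
 * 64-bit system MUL_NO_OVERFLOW is 2^32, so nmemb * size cannot wrap
 * when both factors are below it; the explicit SIZE_MAX / nmemb < size
 * division in _dl_calloc() only runs in the rare case where one factor
 * is large, keeping the common path cheap.
 */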

void *
_dl_calloc(size_t nmemb, size_t size)
{
	void *r = NULL;
	lock_cb *cb;

	cb = _dl_thread_kern_stop();
	g_pool->func = "calloc():";
	if ((nmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
	    nmemb > 0 && SIZE_MAX / nmemb < size) {
		goto ret;
	}

	if (g_pool->active++) {
		malloc_recurse();
		goto ret;
	}

	size *= nmemb;
	r = omalloc(size, 1);
	g_pool->active--;
ret:
	_dl_thread_kern_go(cb);
	return r;
}


static void *
orealloc(void *p, size_t newsz)
{
	struct region_info *r;
	void *q;
	size_t oldsz;

	q = omalloc(newsz, 0);
	if (p == NULL || q == NULL)
		return q;
	r = find(g_pool, p);
	if (r == NULL)
		wrterror("bogus pointer (double free?)");
	REALSIZE(oldsz, r);
	if (oldsz > MALLOC_MAXCHUNK) {
		if (oldsz < MALLOC_GUARD)
			wrterror("guard size");
		oldsz -= MALLOC_GUARD;
	}
	_dl_bcopy(p, q, oldsz < newsz ? oldsz : newsz);
	ofree(p);
	return q;
}


void *
_dl_realloc(void *ptr, size_t size)
{
	void *r = NULL;
	lock_cb *cb;

	cb = _dl_thread_kern_stop();
	g_pool->func = "realloc():";
	if (g_pool->active++) {
		malloc_recurse();
		goto ret;
	}
	r = orealloc(ptr, size);
	g_pool->active--;
ret:
	_dl_thread_kern_go(cb);
	return r;
}

static void *
mapalign(struct dir_info *d, size_t alignment, size_t sz, int zero_fill)
{
	char *p, *q;

	if (alignment < MALLOC_PAGESIZE || ((alignment - 1) & alignment) != 0)
		wrterror("mapalign bad alignment");
	if (sz != PAGEROUND(sz))
		wrterror("mapalign round");

	/* Allocate sz + alignment bytes of memory, which must include a
	 * subrange of sz bytes that is properly aligned.  Unmap the
	 * other bytes, and then return that subrange.
	 */

	/* We need sz + alignment to fit into a size_t. */
	if (alignment > SIZE_MAX - sz)
		return MAP_FAILED;

	p = map(d, sz + alignment, zero_fill);
	if (p == MAP_FAILED)
		return MAP_FAILED;
	q = (char *)(((uintptr_t)p + alignment - 1) & ~(alignment - 1));
	if (q != p) {
		if (_dl_munmap(p, q - p))
			wrterror("munmap");
	}
	if (_dl_munmap(q + sz, alignment - (q - p)))
		wrterror("munmap");

	return q;
}
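
/*
 * Example (illustrative only, not part of the original source): asking
 * for sz == 0x2000 at alignment 0x4000 may get p == 0x13000 back from
 * map(); q rounds up to 0x14000, the 0x1000 bytes below q and the
 * 0x3000 bytes above q + sz are unmapped, and exactly the aligned
 * 0x2000-byte subrange remains mapped.
 */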

static void *
omemalign(size_t alignment, size_t sz, int zero_fill)
{
	size_t psz;
	void *p;

	/* If between half a page and a page, avoid the end-of-page shift omalloc() would do. */
	if (sz > MALLOC_MAXCHUNK && sz < MALLOC_PAGESIZE)
		sz = MALLOC_PAGESIZE;
	if (alignment <= MALLOC_PAGESIZE) {
		/*
		 * max(size, alignment) is enough to assure the requested
		 * alignment, since the allocator always allocates
		 * power-of-two blocks.
		 */
		if (sz < alignment)
			sz = alignment;
		return omalloc(sz, zero_fill);
	}

	if (sz >= SIZE_MAX - MALLOC_GUARD - MALLOC_PAGESIZE) {
		return NULL;
	}

	sz += MALLOC_GUARD;
	psz = PAGEROUND(sz);

	p = mapalign(g_pool, alignment, psz, zero_fill);
	if (p == MAP_FAILED) {
		return NULL;
	}

	if (insert(g_pool, p, sz)) {
		unmap(g_pool, p, psz, 0);
		return NULL;
	}

	if (MALLOC_GUARD) {
		if (_dl_mprotect((char *)p + psz - MALLOC_GUARD,
		    MALLOC_GUARD, PROT_NONE))
			wrterror("mprotect");
	}

	if (MALLOC_JUNK == 2) {
		if (zero_fill)
			_dl_memset((char *)p + sz - MALLOC_GUARD,
			    SOME_JUNK, psz - sz);
		else
			_dl_memset(p, SOME_JUNK, psz - MALLOC_GUARD);
	} else if (CHUNK_CANARIES)
		fill_canary(p, sz - MALLOC_GUARD,
		    psz - MALLOC_GUARD);

	return p;
}

void *
_dl_aligned_alloc(size_t alignment, size_t size)
{
	void *r = NULL;
	lock_cb *cb;

	/* Make sure that alignment is a large enough power of 2. */
	if (((alignment - 1) & alignment) != 0 || alignment < sizeof(void *))
		return NULL;

	cb = _dl_thread_kern_stop();
	g_pool->func = "aligned_alloc():";
	if (g_pool->active++) {
		malloc_recurse();
		goto ret;
	}
	r = omemalign(alignment, size, 0);
	g_pool->active--;
ret:
	_dl_thread_kern_go(cb);
	return r;
}