/* /plan9/sys/src/9/port/alloc.c (revision 219b2ee8daee37f4aad58d63f21287faa8e4ffdc) */
#include	"u.h"
#include	"../port/lib.h"
#include	"mem.h"
#include	"dat.h"
#include	"fns.h"
#include	"error.h"

/*
 * Plan 9 has two kernel allocators.  The x... routines provide a
 * first-fit hole allocator, which should be used for permanent or
 * large structures; they can also allocate aligned memory that does
 * not cross arbitrary 2^n boundaries (see xspanalloc).  The second
 * allocator (malloc, smalloc, free) is a 2^n bucket allocator that
 * steals its memory from the x routines; it should be used for
 * small, frequently allocated structures.
 */
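
/*
 * A sketch of typical use (illustrative only; the sizes here are
 * assumptions, not taken from any real caller):
 *
 *	void *tab, *b;
 *
 *	tab = xalloc(64*1024);		permanent or large: first-fit holes
 *	b = malloc(sizeof(Bucket));	small and frequent: 2^n buckets
 *	free(b);			back onto its bucket free list
 *
 * Memory from xalloc is normally never returned; xfree exists, but
 * most x... allocations live for the life of the kernel.
 */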

#define	nil		((void*)0)
#define datoff		((ulong)((Xhdr*)0)->data)	/* offset of Xhdr.data */
#define bdatoff		((ulong)((Bucket*)0)->data)	/* offset of Bucket.data */

enum
{
	Maxpow		= 18,		/* bucket sizes run 2^3 .. 2^(Maxpow-1) */
	CUTOFF		= 12,		/* below this power, buckets are carved in batches */
	Nhole		= 128,		/* hole descriptors available */
	Magichole	= 0xDeadBabe,	/* magic for x... allocations */
	Magic2n		= 0xFeedBeef,	/* magic for allocated buckets */
	Spanlist	= 64,		/* max retries in xspanalloc */

	NTRACE		= 20,		/* allocation trace table slots */
};

typedef struct Hole Hole;
typedef struct Xalloc Xalloc;
typedef struct Xhdr Xhdr;
typedef struct Bucket Bucket;
typedef struct Arena Arena;

struct Hole
{
	ulong	addr;		/* base address */
	ulong	size;		/* size in bytes */
	ulong	top;		/* addr+size */
	Hole	*link;		/* next hole, sorted by address */
};

struct Xhdr
{
	ulong	size;		/* size including this header */
	ulong	magix;		/* Magichole */
	char	data[1];
};

struct Xalloc
{
	Lock;
	Hole	hole[Nhole];
	Hole	*flist;		/* unused hole descriptors */
	Hole	*table;		/* active holes, sorted by address */
};

struct Bucket
{
	int	size;		/* log2 of bucket size */
	int	magic;		/* Magic2n while allocated, 0 while free */
	Bucket	*next;
	ulong	pc;		/* pc of last allocator, for tracing */
	char	data[1];
};

struct Arena
{
	Lock;
	Bucket	*btab[Maxpow];	/* free buckets, indexed by power of two */
	int	nbuck[Maxpow];	/* buckets ever created, per power */
	struct{
		ulong	pc;
		ulong	alloc;
		ulong	free;
	}	trace[NTRACE];
	QLock	rq;		/* serialises processes waiting in smalloc */
	Rendez	r;		/* free() wakes sleepers here */
};

static Arena	arena;
static Xalloc	xlists;

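/*
 * Split the physical memory banks between user pages and kernel
 * allocation: conf.upages pages are reserved for users, taken from
 * the top of each bank, and whatever is left is handed to xhole.
 */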
void
xinit(void)
{
	Hole *h, *eh;
	int up, np0, np1;

	/* chain the hole descriptors onto the free list */
	eh = &xlists.hole[Nhole-1];
	for(h = xlists.hole; h < eh; h++)
		h->link = h+1;

	xlists.flist = xlists.hole;

	up = conf.upages;
	np1 = up;
	if(np1 > conf.npage1)
		np1 = conf.npage1;

	palloc.p1 = conf.base1 + (conf.npage1 - np1)*BY2PG;
	conf.npage1 -= np1;
	xhole(conf.base1, conf.npage1*BY2PG);
	conf.npage1 = conf.base1+(conf.npage1*BY2PG);	/* npage1 now holds the end address of bank 1 */
	up -= np1;

	np0 = up;
	if(np0 > conf.npage0)
		np0 = conf.npage0;

	palloc.p0 = conf.base0 + (conf.npage0 - np0)*BY2PG;
	conf.npage0 -= np0;
	xhole(conf.base0, conf.npage0*BY2PG);
	conf.npage0 = conf.base0+(conf.npage0*BY2PG);	/* likewise for bank 0 */

	palloc.np0 = np0;
	palloc.np1 = np1;
	/* Save the bounds of kernel alloc memory for kernel mmu mapping */
	conf.base0 = (ulong)KADDR(conf.base0);
	conf.base1 = (ulong)KADDR(conf.base1);
	conf.npage0 = (ulong)KADDR(conf.npage0);
	conf.npage1 = (ulong)KADDR(conf.npage1);
}

/*
 * NB. xspanalloc returns a pointer aligned within a larger xalloc
 * block, not the block itself, so spanalloc memory will cause a
 * panic if free'd.
 */
void*
xspanalloc(ulong size, int align, ulong span)
{
	int i, j;
	ulong a, p, sinc;
	ulong ptr[Spanlist];

	sinc = size/8;
	span = ~(span-1);
	for(i = 0; i < Spanlist; i++) {
		p = (ulong)xalloc(size+align);
		if(p == 0)
			break;

		a = p+align;
		a &= ~(align-1);
		if((a&span) == ((a+size)&span)) {
			/* success: free the padding allocations and return */
			for(j = 0; j < i; j++)
				if(ptr[j])
					xfree((void*)ptr[j]);

			return (void*)a;
		}
		/*
		 * the aligned block crosses a span boundary; free it and
		 * allocate a small pad to shift the next attempt
		 */
		xfree((void*)p);
		ptr[i] = (ulong)xalloc(sinc);
	}
	USED(sinc);
	xsummary();
	panic("xspanalloc: %lud %d %lux\n", size, align, span);
	return 0;
}

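/*
 * Example (a sketch; the scenario and sizes are hypothetical): a
 * driver needing a 16K buffer, page aligned, that must not cross a
 * 64K DMA boundary could call
 *
 *	buf = xspanalloc(16*1024, BY2PG, 64*1024);
 *
 * and must never pass buf to xfree.
 */
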
/*
 * First-fit allocation from the hole table.  The memory is zeroed
 * and prefixed with an Xhdr recording its size and magic.
 */
void*
xalloc(ulong size)
{
	Xhdr *p;
	Hole *h, **l;

	/* add room for the header and round up to a word multiple */
	size += BY2WD + sizeof(Xhdr);
	size &= ~(BY2WD-1);

	lock(&xlists);
	l = &xlists.table;
	for(h = *l; h; h = h->link) {
		if(h->size >= size) {
			p = (Xhdr*)h->addr;
			h->addr += size;
			h->size -= size;
			if(h->size == 0) {
				/* hole consumed: recycle its descriptor */
				*l = h->link;
				h->link = xlists.flist;
				xlists.flist = h;
			}
			unlock(&xlists);
			p = KADDR(p);
			memset(p, 0, size);
			p->magix = Magichole;
			p->size = size;
			return p->data;
		}
		l = &h->link;
	}
	unlock(&xlists);
	return nil;
}

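/*
 * Return an xalloc block to the hole table.  The pointer must be
 * one returned by xalloc; anything else fails the magic check.
 */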
void
xfree(void *p)
{
	Xhdr *x;

	x = (Xhdr*)((ulong)p - datoff);
	if(x->magix != Magichole)
		panic("xfree");

	xhole(PADDR(x), x->size);
}

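/*
 * Add the range [addr, addr+size) to the hole table, coalescing
 * with adjacent holes where possible.  Addresses are physical.
 */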
void
xhole(ulong addr, ulong size)
{
	ulong top;
	Hole *h, *c, **l;

	if(size == 0)
		return;

	top = addr + size;
	lock(&xlists);
	l = &xlists.table;
	for(h = *l; h; h = h->link) {
		if(h->top == addr) {
			/* grow the hole below; try to absorb the one above too */
			h->size += size;
			h->top = h->addr+h->size;
			c = h->link;
			if(c && h->top == c->addr) {
				h->top += c->size;
				h->size += c->size;
				h->link = c->link;
				c->link = xlists.flist;
				xlists.flist = c;
			}
			unlock(&xlists);
			return;
		}
		if(h->addr > addr)
			break;
		l = &h->link;
	}
	if(h && top == h->addr) {
		/* coalesce with the hole above */
		h->addr -= size;
		h->size += size;
		unlock(&xlists);
		return;
	}

	if(xlists.flist == nil) {
		unlock(&xlists);
		print("xhole: no free holes, leaked %lud bytes\n", size);
		return;
	}

	/* take a fresh descriptor and link it in, keeping the table sorted */
	h = xlists.flist;
	xlists.flist = h->link;
	h->addr = addr;
	h->top = top;
	h->size = size;
	h->link = *l;
	*l = h;
	unlock(&xlists);
}

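/*
 * Record an allocation pc in the trace table.  Only 8K buckets
 * (size 13, i.e. 2^13 bytes) are traced.
 */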
void
alloctrace(void *p, ulong pc)
{
	Bucket *bp;
	int i;

	bp = (Bucket*)((ulong)p - bdatoff);
	if(bp->size != 13)		/* trace only the 8K buckets */
		return;
	bp->pc = pc;
	lock(&arena);
	for(i = 0; i < NTRACE; i++){
		if(arena.trace[i].pc == 0)	/* claim a free slot for this pc */
			arena.trace[i].pc = pc;
		if(arena.trace[i].pc == pc){
			arena.trace[i].alloc++;
			break;
		}
	}
	unlock(&arena);
}

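/*
 * Allocate from the 2^n bucket lists, which are replenished from
 * xalloc.  Small sizes (below CUTOFF) are carved out in batches.
 * Returns zeroed memory, or nil if the request is too large or
 * memory is exhausted.
 */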
void*
malloc(ulong size)
{
	ulong next;
	int pow, n;
	Bucket *bp, *nbp;

	for(pow = 3; pow < Maxpow; pow++)
		if(size <= (1<<pow))
			goto good;

	return nil;
good:
	/* Allocate off this list */
	lock(&arena);
	bp = arena.btab[pow];
	if(bp) {
		arena.btab[pow] = bp->next;
		if(bp->magic != 0 || bp->size != pow){
			unlock(&arena);
			panic("malloc bp %lux magic %lux size %d next %lux pow %d", bp,
				bp->magic, bp->size, bp->next, pow);
		}
		bp->magic = Magic2n;
		unlock(&arena);

		memset(bp->data, 0, size);
		bp->pc = getcallerpc(((uchar*)&size) - sizeof(size));
		return bp->data;
	}
	unlock(&arena);

	/* no free bucket: get more memory from the hole allocator */
	size = sizeof(Bucket)+(1<<pow);
	size += 3;
	size &= ~3;	/* round up to a multiple of 4 */

	if(pow < CUTOFF) {
		/* small sizes: carve out a batch, keep the first, free-list the rest */
		n = (CUTOFF-pow)+2;
		bp = xalloc(size*n);
		if(bp == nil)
			return nil;

		nbp = bp;
		lock(&arena);
		arena.nbuck[pow] += n;
		while(--n) {
			next = (ulong)nbp+size;
			nbp = (Bucket*)next;
			nbp->size = pow;
			nbp->next = arena.btab[pow];
			arena.btab[pow] = nbp;
		}
		unlock(&arena);
	}
	else {
		bp = xalloc(size);
		if(bp == nil)
			return nil;

		arena.nbuck[pow]++;
	}

	bp->size = pow;
	bp->magic = Magic2n;
	bp->pc = getcallerpc(((uchar*)&size) - sizeof(size));
	return bp->data;
}

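/*
 * malloc that cannot fail: retry, sleeping until free() releases
 * memory.  If the allocation still fails after many attempts the
 * process is marked Broken and descheduled forever.
 */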
void*
smalloc(ulong size)
{
	char *s;
	void *p;
	int attempt;
	Bucket *bp;

	for(attempt = 0; attempt < 1000; attempt++) {
		p = malloc(size);
		if(p != nil) {
			/* retag the allocation with our caller's pc */
			bp = (Bucket*)((ulong)p - bdatoff);
			bp->pc = getcallerpc(((uchar*)&size) - sizeof(size));
			return p;
		}
		/* out of memory: wait for free() to wake us, then retry */
		s = u->p->psstate;
		u->p->psstate = "Malloc";
		qlock(&arena.rq);
		while(waserror())
			;
		sleep(&arena.r, return0, nil);
		poperror();
		qunlock(&arena.rq);
		u->p->psstate = s;
	}
	print("%s:%d: out of memory in smalloc %lud\n", u->p->text, u->p->pid, size);
	u->p->state = Broken;
	u->p->psstate = "NoMem";
	for(;;)
		sched();
	return 0;
}

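/*
 * Report the usable size of a malloc'd block (the full 2^n bucket,
 * which may exceed the size originally requested).
 */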
int
msize(void *ptr)
{
	Bucket *bp;

	bp = (Bucket*)((ulong)ptr - bdatoff);
	if(bp->magic != Magic2n)
		panic("msize");
	return 1<<bp->size;
}

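/*
 * Return a bucket to its free list and wake any process sleeping
 * in smalloc.  Frees of traced (8K) buckets are counted against
 * the pc recorded at allocation time.
 */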
void
free(void *ptr)
{
	ulong pc, n;
	Bucket *bp, **l;

	bp = (Bucket*)((ulong)ptr - bdatoff);
	l = &arena.btab[bp->size];

	lock(&arena);
	if(bp->magic != Magic2n)
		panic("free");
	bp->magic = 0;
	bp->next = *l;
	*l = bp;
	if(bp->size == 13) {	/* credit the trace entry for 8K buckets */
		pc = bp->pc;
		for(n = 0; n < NTRACE; n++){
			if(arena.trace[n].pc == pc){
				arena.trace[n].free++;
				break;
			}
		}
	}
	unlock(&arena);
	if(arena.r.p)
		wakeup(&arena.r);
}

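/*
 * Print allocator statistics: free hole descriptors, the hole
 * table, per-power bucket counts, and the allocation trace.
 */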
void
xsummary(void)
{
	Hole *h;
	Bucket *k;
	int i, nfree, nused;

	i = 0;
	for(h = xlists.flist; h; h = h->link)
		i++;

	print("%d holes free\n", i);
	i = 0;
	for(h = xlists.table; h; h = h->link) {
		print("%.8lux %.8lux %lud\n", h->addr, h->top, h->size);
		i += h->size;
	}
	print("%d bytes free\n", i);
	nused = 0;
	for(i = 3; i < Maxpow; i++) {
		if(arena.btab[i] == 0 && arena.nbuck[i] == 0)
			continue;
		nused += arena.nbuck[i]*(1<<i);
		nfree = 0;
		for(k = arena.btab[i]; k; k = k->next)
			nfree++;
		print("%8d %4d %4d\n", 1<<i, arena.nbuck[i], nfree);
	}
	for(i = 0; i < NTRACE && arena.trace[i].pc; i++)
		print("%lux %lud %lud\n", arena.trace[i].pc, arena.trace[i].alloc, arena.trace[i].free);
	print("%d bytes in pool\n", nused);
}