xref: /csrg-svn/sys/kern/kern_malloc.c (revision 33438)
/*
 * Copyright (c) 1987 Regents of the University of California.
 * All rights reserved.  The Berkeley software License Agreement
 * specifies the terms and conditions for redistribution.
 *
 *	@(#)kern_malloc.c	7.5 (Berkeley) 02/06/88
 */

#include "param.h"
#include "vm.h"
#include "cmap.h"
#include "time.h"
#include "proc.h"
#include "map.h"
#include "kernel.h"
#include "malloc.h"

#include "../machine/pte.h"

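/*
 * Free lists indexed by power-of-two block size, per-type usage
 * statistics, one usage record per page cluster of the kmem arena,
 * and a flag noting that someone is asleep waiting for kmem map space.
 */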
struct kmembuckets bucket[MINBUCKET + 16];
struct kmemstats kmemstats[M_LAST];
struct kmemusage *kmemusage;
long wantkmemmap;

/*
 * Allocate a block of memory of the given size, charged to the given
 * type.  If M_NOWAIT is set in flags, fail (return 0) rather than
 * sleep when memory is not immediately available.
 */
qaddr_t malloc(size, type, flags)
	unsigned long size;
	long type, flags;
{
	register struct kmembuckets *kbp;
	register struct kmemusage *kup;
	long indx, npg, alloc, allocsize, s;
	caddr_t va, cp;
#ifdef KMEMSTATS
	register struct kmemstats *ksp = &kmemstats[type];
#endif

	indx = BUCKETINDX(size);
	kbp = &bucket[indx];
	s = splimp();
again:
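	/*
	 * With KMEMSTATS, hold this type to its configured limit:
	 * fail immediately for M_NOWAIT callers, otherwise sleep
	 * until a free of this type wakes us up.
	 */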
#ifdef KMEMSTATS
	while (ksp->ks_inuse >= ksp->ks_limit) {
		if (flags & M_NOWAIT) {
			splx(s);
			return (0);
		}
		if (ksp->ks_limblocks < 65535)
			ksp->ks_limblocks++;
		sleep((caddr_t)ksp, PSWP+2);
	}
#endif
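	/*
	 * The free list for this bucket is empty: carve fresh page
	 * clusters out of the kmem map and map them into the kernel.
	 */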
	if (kbp->kb_next == NULL) {
		if (size > MAXALLOCSAVE)
			allocsize = roundup(size, CLBYTES);
		else
			allocsize = 1 << indx;
		npg = clrnd(btoc(allocsize));
		if ((flags & M_NOWAIT) && freemem < npg) {
			splx(s);
			return (0);
		}
		alloc = rmalloc(kmemmap, npg);
		if (alloc == 0) {
			if (flags & M_NOWAIT) {
				splx(s);
				return (0);
			}
#ifdef KMEMSTATS
			if (ksp->ks_mapblocks < 65535)
				ksp->ks_mapblocks++;
#endif
			wantkmemmap++;
			sleep((caddr_t)&wantkmemmap, PSWP+2);
			goto again;
		}
		alloc -= CLSIZE;		/* convert to base 0 */
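		/*
		 * Allocate real memory for the new pages on behalf of
		 * proc[0] and validate the mappings at the kernel
		 * virtual address corresponding to this map offset.
		 */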
		(void) vmemall(&kmempt[alloc], npg, &proc[0], CSYS);
		va = (caddr_t) kmemxtob(alloc);
		vmaccess(&kmempt[alloc], va, npg);
#ifdef KMEMSTATS
		kbp->kb_total += kbp->kb_elmpercl;
#endif
		kup = btokup(va);
		kup->ku_indx = indx;
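		/*
		 * Oversized allocations are not threaded onto a free
		 * list; just record how many pages to give back at
		 * free time.
		 */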
		if (allocsize > MAXALLOCSAVE) {
			if (npg > 65535)
				panic("malloc: allocation too large");
			kup->ku_pagecnt = npg;
#ifdef KMEMSTATS
			ksp->ks_memuse += allocsize;
#endif
			goto out;
		}
#ifdef KMEMSTATS
		kup->ku_freecnt = kbp->kb_elmpercl;
		kbp->kb_totalfree += kbp->kb_elmpercl;
#endif
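		/*
		 * Thread the elements of the new cluster onto the
		 * bucket's free list, highest-addressed element first;
		 * the lowest element terminates the list.
		 */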
		kbp->kb_next = va + (npg * NBPG) - allocsize;
		for (cp = kbp->kb_next; cp > va; cp -= allocsize)
			*(caddr_t *)cp = cp - allocsize;
		*(caddr_t *)cp = NULL;
	}
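	/*
	 * Take the first element off the bucket's free list.
	 */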
	va = kbp->kb_next;
	kbp->kb_next = *(caddr_t *)va;
#ifdef KMEMSTATS
	kup = btokup(va);
	if (kup->ku_indx != indx)
		panic("malloc: wrong bucket");
	if (kup->ku_freecnt == 0)
		panic("malloc: lost data");
	kup->ku_freecnt--;
	kbp->kb_totalfree--;
	ksp->ks_memuse += 1 << indx;
out:
	kbp->kb_calls++;
	ksp->ks_inuse++;
	ksp->ks_calls++;
	if (ksp->ks_inuse > ksp->ks_maxused)
		ksp->ks_maxused = ksp->ks_inuse;
#else
out:
#endif
	splx(s);
	return ((qaddr_t)va);
}

/*
 * Free a block of memory allocated by malloc.  The type should match
 * the one passed to malloc for the block.
 */
void free(addr, type)
	caddr_t addr;
	long type;
{
	register struct kmembuckets *kbp;
	register struct kmemusage *kup;
	long alloc, s;
#ifdef KMEMSTATS
	register struct kmemstats *ksp = &kmemstats[type];
#endif

	kup = btokup(addr);
	s = splimp();
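	/*
	 * Oversized allocations go back to the kmem map directly;
	 * wake anyone waiting for map space.
	 */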
	if (1 << kup->ku_indx > MAXALLOCSAVE) {
		alloc = btokmemx(addr);
		(void) memfree(&kmempt[alloc], kup->ku_pagecnt, 0);
		rmfree(kmemmap, (long)kup->ku_pagecnt, alloc + CLSIZE);
		if (wantkmemmap) {
			wakeup((caddr_t)&wantkmemmap);
			wantkmemmap = 0;
		}
#ifdef KMEMSTATS
		ksp->ks_memuse -= kup->ku_pagecnt << PGSHIFT;
		kup->ku_indx = 0;
		kup->ku_pagecnt = 0;
		if (ksp->ks_inuse == ksp->ks_limit)
			wakeup((caddr_t)ksp);
		ksp->ks_inuse--;
#endif
		splx(s);
		return;
	}
	kbp = &bucket[kup->ku_indx];
#ifdef KMEMSTATS
	kup->ku_freecnt++;
	if (kup->ku_freecnt >= kbp->kb_elmpercl)
		if (kup->ku_freecnt > kbp->kb_elmpercl)
			panic("free: multiple frees");
		else if (kbp->kb_totalfree > kbp->kb_highwat)
			kbp->kb_couldfree++;
	kbp->kb_totalfree++;
	if (ksp->ks_inuse == ksp->ks_limit)
		wakeup((caddr_t)ksp);
	ksp->ks_inuse--;
	ksp->ks_memuse -= 1 << kup->ku_indx;
#endif
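	/*
	 * Put the block back at the head of its bucket's free list.
	 */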
	*(caddr_t *)addr = kbp->kb_next;
	kbp->kb_next = addr;
	splx(s);
}

/*
 * Initialize the kernel memory allocator: the kmem resource map and,
 * with KMEMSTATS, the per-bucket and per-type limits.
 */
kmeminit()
{
	register long indx;

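	/*
	 * Sanity check the compile-time constants, then build the
	 * resource map that parcels out page clusters of the kmem arena.
	 */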
	if (!powerof2(MAXALLOCSAVE))
		panic("kmeminit: MAXALLOCSAVE not power of 2");
	if (MAXALLOCSAVE > MINALLOCSIZE * 32768)
		panic("kmeminit: MAXALLOCSAVE too big");
	if (MAXALLOCSAVE < CLBYTES)
		panic("kmeminit: MAXALLOCSAVE too small");
	rminit(kmemmap, ekmempt - kmempt, (long)CLSIZE,
		"malloc map", ekmempt - kmempt);
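	/*
	 * With KMEMSTATS, record how many elements of each size fit in
	 * a page cluster, set a high-water mark for each bucket, and
	 * limit every type to 90% of the kmem arena.
	 */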
#ifdef KMEMSTATS
	for (indx = 0; indx < MINBUCKET + 16; indx++) {
		if (1 << indx >= CLBYTES)
			bucket[indx].kb_elmpercl = 1;
		else
			bucket[indx].kb_elmpercl = CLBYTES / (1 << indx);
		bucket[indx].kb_highwat = 5 * bucket[indx].kb_elmpercl;
	}
	for (indx = 0; indx < M_LAST; indx++)
		kmemstats[indx].ks_limit =
			(ekmempt - kmempt) * CLBYTES * 9 / 10;
#endif
}
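
/*
 * Illustrative usage only (a sketch, not part of the original file):
 * a caller that cannot sleep might allocate and release a buffer
 * roughly as follows, assuming M_DEVBUF is one of the types defined
 * in malloc.h and bufsize is the caller's requested length:
 *
 *	register caddr_t cp;
 *
 *	cp = (caddr_t)malloc((u_long)bufsize, M_DEVBUF, M_NOWAIT);
 *	if (cp == 0)
 *		return (ENOMEM);
 *	...
 *	free(cp, M_DEVBUF);
 */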