/*
 * Copyright (c) 1987 Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms are permitted
 * provided that the above copyright notice and this paragraph are
 * duplicated in all such forms and that any documentation,
 * advertising materials, and other materials related to such
 * distribution and use acknowledge that the software was developed
 * by the University of California, Berkeley.  The name of the
 * University may not be used to endorse or promote products derived
 * from this software without specific prior written permission.
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 *
 *	@(#)kern_malloc.c	7.15 (Berkeley) 04/06/90
 */

#include "param.h"
#include "vm.h"
#include "cmap.h"
#include "time.h"
#include "proc.h"
#include "map.h"
#include "kernel.h"
#include "malloc.h"

#include "machine/pte.h"

struct kmembuckets bucket[MINBUCKET + 16];
struct kmemstats kmemstats[M_LAST];
struct kmemusage *kmemusage;
long wantkmemmap;

struct {
	int nomap;
	int atlimit;
	int freemem;
} KFail;

/*
 * Allocate a block of memory.
 */
qaddr_t
malloc(size, type, flags)
	unsigned long size;
	int type, flags;
{
	register struct kmembuckets *kbp;
	register struct kmemusage *kup;
	long indx, npg, alloc, allocsize;
	int s;
	caddr_t va, cp;
#ifdef KMEMSTATS
	register struct kmemstats *ksp = &kmemstats[type];

	if (((unsigned long)type) > M_LAST)
		panic("malloc - bogus type");
#endif

	indx = BUCKETINDX(size);
	kbp = &bucket[indx];
	s = splimp();
again:
#ifdef KMEMSTATS
	while (ksp->ks_memuse >= ksp->ks_limit) {
		if (flags & M_NOWAIT) {
			KFail.atlimit++;
			splx(s);
			return (0);
		}
		if (ksp->ks_limblocks < 65535)
			ksp->ks_limblocks++;
		sleep((caddr_t)ksp, PSWP+2);
	}
#endif
	/*
	 * If the bucket's free list is empty, allocate a new cluster
	 * of pages from the kernel memory map to refill it.
	 */
	if (kbp->kb_next == NULL) {
		if (size > MAXALLOCSAVE)
			allocsize = roundup(size, CLBYTES);
		else
			allocsize = 1 << indx;
		npg = clrnd(btoc(allocsize));
		if ((flags & M_NOWAIT) && freemem < npg) {
			KFail.freemem++;
			splx(s);
			return (0);
		}
		alloc = rmalloc(kmemmap, npg);
		if (alloc == 0) {
			if (flags & M_NOWAIT) {
				KFail.nomap++;
				splx(s);
				return (0);
			}
#ifdef KMEMSTATS
			if (ksp->ks_mapblocks < 65535)
				ksp->ks_mapblocks++;
#endif
			wantkmemmap++;
			sleep((caddr_t)&wantkmemmap, PSWP+2);
			goto again;
		}
		alloc -= CLSIZE;		/* convert to base 0 */
		(void) vmemall(&kmempt[alloc], (int)npg, &proc[0], CSYS);
		va = (caddr_t) kmemxtob(alloc);
		vmaccess(&kmempt[alloc], va, (int)npg);
#ifdef KMEMSTATS
		kbp->kb_total += kbp->kb_elmpercl;
#endif
		kup = btokup(va);
		kup->ku_indx = indx;
		if (allocsize > MAXALLOCSAVE) {
			if (npg > 65535)
				panic("malloc: allocation too large");
			kup->ku_pagecnt = npg;
#ifdef KMEMSTATS
			ksp->ks_memuse += allocsize;
#endif
			goto out;
		}
#ifdef KMEMSTATS
		kup->ku_freecnt = kbp->kb_elmpercl;
		kbp->kb_totalfree += kbp->kb_elmpercl;
#endif
		/*
		 * Carve the new pages into allocsize-byte elements and
		 * link them onto the bucket's free list, highest address
		 * first.
		 */
		kbp->kb_next = va + (npg * NBPG) - allocsize;
		for (cp = kbp->kb_next; cp > va; cp -= allocsize)
			*(caddr_t *)cp = cp - allocsize;
		*(caddr_t *)cp = NULL;
	}
	/* Take the first element off the bucket's free list. */
	va = kbp->kb_next;
	kbp->kb_next = *(caddr_t *)va;
#ifdef KMEMSTATS
	kup = btokup(va);
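	/*
	 * Sanity checks: the element must belong to the bucket we are
	 * allocating from, and its usage record must still show at least
	 * one free element; otherwise the free list is corrupt.
	 */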
	if (kup->ku_indx != indx)
		panic("malloc: wrong bucket");
	if (kup->ku_freecnt == 0)
		panic("malloc: lost data");
	kup->ku_freecnt--;
	kbp->kb_totalfree--;
	ksp->ks_memuse += 1 << indx;
out:
	kbp->kb_calls++;
	ksp->ks_inuse++;
	ksp->ks_calls++;
	if (ksp->ks_memuse > ksp->ks_maxused)
		ksp->ks_maxused = ksp->ks_memuse;
#else
out:
#endif
	splx(s);
	return ((qaddr_t)va);
}

/*
 * Free a block of memory allocated by malloc.
 */
void
free(addr, type)
	caddr_t addr;
	int type;
{
	register struct kmembuckets *kbp;
	register struct kmemusage *kup;
	long alloc, size;
	int s;
#ifdef KMEMSTATS
	register struct kmemstats *ksp = &kmemstats[type];
#endif

	kup = btokup(addr);
	kbp = &bucket[kup->ku_indx];
	s = splimp();
	size = 1 << kup->ku_indx;
	/*
	 * Large allocations are returned directly to the kernel memory
	 * map; anyone sleeping for map space is awakened.
	 */
	if (size > MAXALLOCSAVE) {
		alloc = btokmemx(addr);
		(void) memfree(&kmempt[alloc], (int)kup->ku_pagecnt, 1);
		rmfree(kmemmap, (long)kup->ku_pagecnt, alloc + CLSIZE);
		if (wantkmemmap) {
			wakeup((caddr_t)&wantkmemmap);
			wantkmemmap = 0;
		}
#ifdef KMEMSTATS
		size = kup->ku_pagecnt << PGSHIFT;
		ksp->ks_memuse -= size;
		kup->ku_indx = 0;
		kup->ku_pagecnt = 0;
		if (ksp->ks_memuse + size >= ksp->ks_limit &&
		    ksp->ks_memuse < ksp->ks_limit)
			wakeup((caddr_t)ksp);
		ksp->ks_inuse--;
		kbp->kb_total -= 1;
#endif
		splx(s);
		return;
	}
#ifdef KMEMSTATS
	kup->ku_freecnt++;
	if (kup->ku_freecnt >= kbp->kb_elmpercl)
		if (kup->ku_freecnt > kbp->kb_elmpercl)
			panic("free: multiple frees");
		else if (kbp->kb_totalfree > kbp->kb_highwat)
			kbp->kb_couldfree++;
	kbp->kb_totalfree++;
	ksp->ks_memuse -= size;
	if (ksp->ks_memuse + size >= ksp->ks_limit &&
	    ksp->ks_memuse < ksp->ks_limit)
		wakeup((caddr_t)ksp);
	ksp->ks_inuse--;
#endif
	/* Chain the element back onto the head of its bucket's free list. */
	*(caddr_t *)addr = kbp->kb_next;
	kbp->kb_next = addr;
	splx(s);
}

/*
 * Initialize the kernel memory allocator.
 */
kmeminit()
{
	register long indx;
	int npg;

	/*
	 * The #if blocks below act as compile-time assertions: if
	 * MAXALLOCSAVE is not a power of two or is out of range, the
	 * ERROR! line produces a syntax error.
	 */
#if	((MAXALLOCSAVE & (MAXALLOCSAVE - 1)) != 0)
		ERROR!_kmeminit:_MAXALLOCSAVE_not_power_of_2
#endif
#if	(MAXALLOCSAVE > MINALLOCSIZE * 32768)
		ERROR!_kmeminit:_MAXALLOCSAVE_too_big
#endif
#if	(MAXALLOCSAVE < CLBYTES)
		ERROR!_kmeminit:_MAXALLOCSAVE_too_small
#endif
	npg = ekmempt - kmempt;
	rminit(kmemmap, (long)npg, (long)CLSIZE, "malloc map", npg);
#ifdef KMEMSTATS
	for (indx = 0; indx < MINBUCKET + 16; indx++) {
		if (1 << indx >= CLBYTES)
			bucket[indx].kb_elmpercl = 1;
		else
			bucket[indx].kb_elmpercl = CLBYTES / (1 << indx);
		bucket[indx].kb_highwat = 5 * bucket[indx].kb_elmpercl;
	}
	for (indx = 0; indx < M_LAST; indx++)
		kmemstats[indx].ks_limit = npg * CLBYTES * 8 / 10;
#endif
}
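
/*
 * Usage sketch (editor's illustration, not part of the original file):
 * how a kernel subsystem might call the allocator above.  M_TEMP is
 * assumed to be one of the allocation types defined in malloc.h; any
 * valid type constant would do.  With M_NOWAIT the request fails and
 * returns 0 rather than sleeping; a flags value of 0 lets malloc sleep
 * until the request can be satisfied.
 */
#ifdef notdef
example()
{
	register caddr_t buf;

	/* Non-blocking request for 512 bytes; the result must be checked. */
	buf = (caddr_t)malloc((unsigned long)512, M_TEMP, M_NOWAIT);
	if (buf == NULL)
		return (1);
	/* ... use the buffer ... */
	free(buf, M_TEMP);
	return (0);
}
#endif /* notdef */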