1 /* 2 * Copyright (c) 1987 Regents of the University of California. 3 * All rights reserved. 4 * 5 * %sccs.include.redist.c% 6 * 7 * @(#)kern_malloc.c 7.18 (Berkeley) 06/28/90 8 */ 9 10 #include "param.h" 11 #include "vm.h" 12 #include "cmap.h" 13 #include "time.h" 14 #include "proc.h" 15 #include "map.h" 16 #include "kernel.h" 17 #include "malloc.h" 18 19 #include "machine/pte.h" 20 21 struct kmembuckets bucket[MINBUCKET + 16]; 22 struct kmemstats kmemstats[M_LAST]; 23 struct kmemusage *kmemusage; 24 long wantkmemmap; 25 26 /* 27 * Allocate a block of memory 28 */ 29 qaddr_t 30 malloc(size, type, flags) 31 unsigned long size; 32 int type, flags; 33 { 34 register struct kmembuckets *kbp; 35 register struct kmemusage *kup; 36 long indx, npg, alloc, allocsize; 37 int s; 38 caddr_t va, cp; 39 #ifdef KMEMSTATS 40 register struct kmemstats *ksp = &kmemstats[type]; 41 42 if (((unsigned long)type) > M_LAST) 43 panic("malloc - bogus type"); 44 #endif 45 46 indx = BUCKETINDX(size); 47 kbp = &bucket[indx]; 48 s = splimp(); 49 again: 50 #ifdef KMEMSTATS 51 while (ksp->ks_memuse >= ksp->ks_limit) { 52 if (flags & M_NOWAIT) { 53 splx(s); 54 return (0); 55 } 56 if (ksp->ks_limblocks < 65535) 57 ksp->ks_limblocks++; 58 sleep((caddr_t)ksp, PSWP+2); 59 } 60 #endif 61 if (kbp->kb_next == NULL) { 62 if (size > MAXALLOCSAVE) 63 allocsize = roundup(size, CLBYTES); 64 else 65 allocsize = 1 << indx; 66 npg = clrnd(btoc(allocsize)); 67 if ((flags & M_NOWAIT) && freemem < npg) { 68 splx(s); 69 return (0); 70 } 71 alloc = rmalloc(kmemmap, npg); 72 if (alloc == 0) { 73 if (flags & M_NOWAIT) { 74 splx(s); 75 return (0); 76 } 77 #ifdef KMEMSTATS 78 if (ksp->ks_mapblocks < 65535) 79 ksp->ks_mapblocks++; 80 #endif 81 wantkmemmap++; 82 sleep((caddr_t)&wantkmemmap, PSWP+2); 83 goto again; 84 } 85 alloc -= CLSIZE; /* convert to base 0 */ 86 (void) vmemall(&kmempt[alloc], (int)npg, &proc[0], CSYS); 87 va = (caddr_t) kmemxtob(alloc); 88 vmaccess(&kmempt[alloc], va, (int)npg); 89 #ifdef KMEMSTATS 90 
kbp->kb_total += kbp->kb_elmpercl; 91 #endif 92 kup = btokup(va); 93 kup->ku_indx = indx; 94 if (allocsize > MAXALLOCSAVE) { 95 if (npg > 65535) 96 panic("malloc: allocation too large"); 97 kup->ku_pagecnt = npg; 98 #ifdef KMEMSTATS 99 ksp->ks_memuse += allocsize; 100 #endif 101 goto out; 102 } 103 #ifdef KMEMSTATS 104 kup->ku_freecnt = kbp->kb_elmpercl; 105 kbp->kb_totalfree += kbp->kb_elmpercl; 106 #endif 107 kbp->kb_next = va + (npg * NBPG) - allocsize; 108 for (cp = kbp->kb_next; cp > va; cp -= allocsize) 109 *(caddr_t *)cp = cp - allocsize; 110 *(caddr_t *)cp = NULL; 111 } 112 va = kbp->kb_next; 113 kbp->kb_next = *(caddr_t *)va; 114 #ifdef KMEMSTATS 115 kup = btokup(va); 116 if (kup->ku_indx != indx) 117 panic("malloc: wrong bucket"); 118 if (kup->ku_freecnt == 0) 119 panic("malloc: lost data"); 120 kup->ku_freecnt--; 121 kbp->kb_totalfree--; 122 ksp->ks_memuse += 1 << indx; 123 out: 124 kbp->kb_calls++; 125 ksp->ks_inuse++; 126 ksp->ks_calls++; 127 if (ksp->ks_memuse > ksp->ks_maxused) 128 ksp->ks_maxused = ksp->ks_memuse; 129 #else 130 out: 131 #endif 132 splx(s); 133 return ((qaddr_t)va); 134 } 135 136 /* 137 * Free a block of memory allocated by malloc. 
 */
void
free(addr, type)
	caddr_t addr;
	int type;
{
	register struct kmembuckets *kbp;
	register struct kmemusage *kup;
	long alloc, size;
	int s;
#ifdef KMEMSTATS
	register struct kmemstats *ksp = &kmemstats[type];
#endif

	/* Recover the page's usage record and its originating bucket. */
	kup = btokup(addr);
	kbp = &bucket[kup->ku_indx];
	/* Block out interrupt-level allocators while touching freelists. */
	s = splimp();
	size = 1 << kup->ku_indx;
	if (size > MAXALLOCSAVE) {
		/*
		 * Oversize block: return its pages to the kernel map
		 * directly rather than to a bucket freelist, and wake
		 * anyone in malloc() waiting for kmemmap space.
		 * NOTE(review): memfree/rmalloc semantics are defined
		 * elsewhere — presumably memfree releases the mapped
		 * pages and rmfree returns the map range; confirm.
		 */
		alloc = btokmemx(addr);
		(void) memfree(&kmempt[alloc], (int)kup->ku_pagecnt, 1);
		rmfree(kmemmap, (long)kup->ku_pagecnt, alloc + CLSIZE);
		if (wantkmemmap) {
			wakeup((caddr_t)&wantkmemmap);
			wantkmemmap = 0;
		}
#ifdef KMEMSTATS
		size = kup->ku_pagecnt << PGSHIFT;
		ksp->ks_memuse -= size;
		kup->ku_indx = 0;
		kup->ku_pagecnt = 0;
		/*
		 * If this free just took the type back under its limit,
		 * wake threads sleeping on ksp in malloc().
		 */
		if (ksp->ks_memuse + size >= ksp->ks_limit &&
		    ksp->ks_memuse < ksp->ks_limit)
			wakeup((caddr_t)ksp);
		ksp->ks_inuse--;
		kbp->kb_total -= 1;
#endif
		splx(s);
		return;
	}
#ifdef KMEMSTATS
	/*
	 * More elements free on this page than it holds means a
	 * duplicate free; the "else" binds to the inner "if".
	 */
	kup->ku_freecnt++;
	if (kup->ku_freecnt >= kbp->kb_elmpercl)
		if (kup->ku_freecnt > kbp->kb_elmpercl)
			panic("free: multiple frees");
		else if (kbp->kb_totalfree > kbp->kb_highwat)
			kbp->kb_couldfree++;
	kbp->kb_totalfree++;
	ksp->ks_memuse -= size;
	/* As above: wake limit-sleepers if we just dropped below it. */
	if (ksp->ks_memuse + size >= ksp->ks_limit &&
	    ksp->ks_memuse < ksp->ks_limit)
		wakeup((caddr_t)ksp);
	ksp->ks_inuse--;
#endif
	/* Push the block onto the front of its bucket's freelist. */
	*(caddr_t *)addr = kbp->kb_next;
	kbp->kb_next = addr;
	splx(s);
}

/*
 * Initialize the kernel memory allocator
 */
kmeminit()
{
	register long indx;
	int npg;

	/*
	 * Compile-time sanity checks: an illegal MAXALLOCSAVE expands
	 * these deliberately-unparsable tokens and breaks the build.
	 */
#if ((MAXALLOCSAVE & (MAXALLOCSAVE - 1)) != 0)
		ERROR!_kmeminit:_MAXALLOCSAVE_not_power_of_2
#endif
#if (MAXALLOCSAVE > MINALLOCSIZE * 32768)
		ERROR!_kmeminit:_MAXALLOCSAVE_too_big
#endif
#if (MAXALLOCSAVE < CLBYTES)
		ERROR!_kmeminit:_MAXALLOCSAVE_too_small
#endif
	/* Size the resource map to the kernel malloc page-table range. */
	npg = ekmempt - kmempt;
	rminit(kmemmap, (long)npg, (long)CLSIZE,
		"malloc map", npg);
#ifdef KMEMSTATS
	/*
	 * Elements per cluster: 1 for buckets at or above a cluster,
	 * CLBYTES/elementsize for smaller buckets.
	 */
	for (indx = 0; indx < MINBUCKET + 16; indx++) {
		if (1 << indx >= CLBYTES)
			bucket[indx].kb_elmpercl = 1;
		else
			bucket[indx].kb_elmpercl = CLBYTES / (1 << indx);
		bucket[indx].kb_highwat = 5 * bucket[indx].kb_elmpercl;
	}
	/* Cap each type at 60% of the total kernel malloc arena. */
	for (indx = 0; indx < M_LAST; indx++)
		kmemstats[indx].ks_limit = npg * NBPG * 6 / 10;
#endif
}