/*
 * Copyright (c) 1987 Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms are permitted
 * provided that this notice is preserved and that due credit is given
 * to the University of California at Berkeley. The name of the University
 * may not be used to endorse or promote products derived from this
 * software without specific prior written permission. This software
 * is provided ``as is'' without express or implied warranty.
 *
 *	@(#)kern_malloc.c	7.6 (Berkeley)	02/06/88
 */

#include "param.h"
#include "vm.h"
#include "cmap.h"
#include "time.h"
#include "proc.h"
#include "map.h"
#include "kernel.h"
#include "malloc.h"

#include "../machine/pte.h"

struct kmembuckets bucket[MINBUCKET + 16];
struct kmemstats kmemstats[M_LAST];
struct kmemusage *kmemusage;
long wantkmemmap;

/*
 * Allocate a block of memory
 */
qaddr_t malloc(size, type, flags)
	unsigned long size;
	long type, flags;
{
	register struct kmembuckets *kbp;
	register struct kmemusage *kup;
	long indx, npg, alloc, allocsize, s;
	caddr_t va, cp;
#ifdef KMEMSTATS
	register struct kmemstats *ksp = &kmemstats[type];
#endif

	indx = BUCKETINDX(size);
	kbp = &bucket[indx];
	s = splimp();
again:
#ifdef KMEMSTATS
	while (ksp->ks_inuse >= ksp->ks_limit) {
		if (flags & M_NOWAIT) {
			splx(s);
			return (0);
		}
		if (ksp->ks_limblocks < 65535)
			ksp->ks_limblocks++;
		sleep((caddr_t)ksp, PSWP+2);
	}
#endif
	if (kbp->kb_next == NULL) {
		if (size > MAXALLOCSAVE)
			allocsize = roundup(size, CLBYTES);
		else
			allocsize = 1 << indx;
		npg = clrnd(btoc(allocsize));
		if ((flags & M_NOWAIT) && freemem < npg) {
			splx(s);
			return (0);
		}
		alloc = rmalloc(kmemmap, npg);
		if (alloc == 0) {
			if (flags & M_NOWAIT) {
				splx(s);
				return (0);
			}
#ifdef KMEMSTATS
			if (ksp->ks_mapblocks < 65535)
				ksp->ks_mapblocks++;
#endif
			wantkmemmap++;
			sleep((caddr_t)&wantkmemmap, PSWP+2);
			goto again;
		}
		alloc -= CLSIZE;		/* convert to base 0 */
		(void) vmemall(&kmempt[alloc], npg, &proc[0], CSYS);
		va = (caddr_t) kmemxtob(alloc);
		vmaccess(&kmempt[alloc], va, npg);
#ifdef KMEMSTATS
		kbp->kb_total += kbp->kb_elmpercl;
#endif
		kup = btokup(va);
		kup->ku_indx = indx;
		if (allocsize > MAXALLOCSAVE) {
			if (npg > 65535)
				panic("malloc: allocation too large");
			kup->ku_pagecnt = npg;
#ifdef KMEMSTATS
			ksp->ks_memuse += allocsize;
#endif
			goto out;
		}
#ifdef KMEMSTATS
		kup->ku_freecnt = kbp->kb_elmpercl;
		kbp->kb_totalfree += kbp->kb_elmpercl;
#endif
		kbp->kb_next = va + (npg * NBPG) - allocsize;
		for (cp = kbp->kb_next; cp > va; cp -= allocsize)
			*(caddr_t *)cp = cp - allocsize;
		*(caddr_t *)cp = NULL;
	}
	va = kbp->kb_next;
	kbp->kb_next = *(caddr_t *)va;
#ifdef KMEMSTATS
	kup = btokup(va);
	if (kup->ku_indx != indx)
		panic("malloc: wrong bucket");
	if (kup->ku_freecnt == 0)
		panic("malloc: lost data");
	kup->ku_freecnt--;
	kbp->kb_totalfree--;
	ksp->ks_memuse += 1 << indx;
out:
	kbp->kb_calls++;
	ksp->ks_inuse++;
	ksp->ks_calls++;
	if (ksp->ks_inuse > ksp->ks_maxused)
		ksp->ks_maxused = ksp->ks_inuse;
#else
out:
#endif
	splx(s);
	return ((qaddr_t)va);
}

/*
 * Free a block of memory allocated by malloc.
 */
void free(addr, type)
	caddr_t addr;
	long type;
{
	register struct kmembuckets *kbp;
	register struct kmemusage *kup;
	long alloc, s;
#ifdef KMEMSTATS
	register struct kmemstats *ksp = &kmemstats[type];
#endif

	kup = btokup(addr);
	s = splimp();
	if (1 << kup->ku_indx > MAXALLOCSAVE) {
		alloc = btokmemx(addr);
		(void) memfree(&kmempt[alloc], kup->ku_pagecnt, 0);
		rmfree(kmemmap, (long)kup->ku_pagecnt, alloc + CLSIZE);
		if (wantkmemmap) {
			wakeup((caddr_t)&wantkmemmap);
			wantkmemmap = 0;
		}
#ifdef KMEMSTATS
		ksp->ks_memuse -= kup->ku_pagecnt << PGSHIFT;
		kup->ku_indx = 0;
		kup->ku_pagecnt = 0;
		if (ksp->ks_inuse == ksp->ks_limit)
			wakeup((caddr_t)ksp);
		ksp->ks_inuse--;
#endif
		splx(s);
		return;
	}
	kbp = &bucket[kup->ku_indx];
#ifdef KMEMSTATS
	kup->ku_freecnt++;
	if (kup->ku_freecnt >= kbp->kb_elmpercl)
		if (kup->ku_freecnt > kbp->kb_elmpercl)
			panic("free: multiple frees");
		else if (kbp->kb_totalfree > kbp->kb_highwat)
			kbp->kb_couldfree++;
	kbp->kb_totalfree++;
	if (ksp->ks_inuse == ksp->ks_limit)
		wakeup((caddr_t)ksp);
	ksp->ks_inuse--;
	ksp->ks_memuse -= 1 << kup->ku_indx;
#endif
	*(caddr_t *)addr = kbp->kb_next;
	kbp->kb_next = addr;
	splx(s);
}

/*
 * Initialize the kernel memory allocator
 */
kmeminit()
{
	register long indx;

	if (!powerof2(MAXALLOCSAVE))
		panic("kmeminit: MAXALLOCSAVE not power of 2");
	if (MAXALLOCSAVE > MINALLOCSIZE * 32768)
		panic("kmeminit: MAXALLOCSAVE too big");
	if (MAXALLOCSAVE < CLBYTES)
		panic("kmeminit: MAXALLOCSAVE too small");
	rminit(kmemmap, ekmempt - kmempt, (long)CLSIZE,
	    "malloc map", ekmempt - kmempt);
#ifdef KMEMSTATS
	for (indx = 0; indx < MINBUCKET + 16; indx++) {
		if (1 << indx >= CLBYTES)
			bucket[indx].kb_elmpercl = 1;
		else
			bucket[indx].kb_elmpercl = CLBYTES / (1 << indx);
		bucket[indx].kb_highwat = 5 * bucket[indx].kb_elmpercl;
	}
	for (indx = 0; indx < M_LAST; indx++)
		kmemstats[indx].ks_limit =
		    (ekmempt - kmempt) * CLBYTES * 9 / 10;
#endif
}
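
/*
 * Illustrative sketch (not part of the original source): how a kernel
 * subsystem might call the malloc() and free() routines defined above.
 * The type constant M_DEVBUF is assumed to be one of the M_* entries
 * declared in malloc.h for this release; substitute whichever type
 * applies.  With M_NOWAIT the allocator returns 0 rather than sleeping
 * when memory or the resource limit is exhausted, so the caller must
 * check for failure.
 *
 *	register caddr_t buf;
 *
 *	buf = (caddr_t)malloc((unsigned long)1024, M_DEVBUF, M_NOWAIT);
 *	if (buf == NULL)
 *		return (ENOMEM);
 *	...
 *	free(buf, M_DEVBUF);
 */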