/*
 * Copyright (c) 1987, 1991 The Regents of the University of California.
 * All rights reserved.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)kern_malloc.c	7.30 (Berkeley) 02/15/92
 */

#include "param.h"
#include "proc.h"
#include "map.h"
#include "kernel.h"
#include "malloc.h"
#include "vm/vm.h"
#include "vm/vm_kern.h"

struct kmembuckets bucket[MINBUCKET + 16];
struct kmemstats kmemstats[M_LAST];
struct kmemusage *kmemusage;
char *kmembase, *kmemlimit;
char *memname[] = INITKMEMNAMES;

#ifdef DIAGNOSTIC
/*
 * This structure serves two purposes.
 * The first is to provide a set of masks to catch unaligned frees.
 * The second is to provide known text to copy into free objects so
 * that modifications after frees can be detected.
 */
#define WEIRD_ADDR	0xdeadbeef
long addrmask[] = { WEIRD_ADDR,
	0x00000001, 0x00000003, 0x00000007, 0x0000000f,
	0x0000001f, 0x0000003f, 0x0000007f, 0x000000ff,
	0x000001ff, 0x000003ff, 0x000007ff, 0x00000fff,
	0x00001fff, 0x00003fff, 0x00007fff, 0x0000ffff,
};

/*
 * Normally the first word of the structure is used to hold the list
 * pointer for free objects. However, when running with diagnostics,
 * we use the third and fourth fields, so as to catch modifications
 * in the most commonly trashed first two words.
 */
struct freelist {
	long	spare0;
	long	spare1;
	short	type;
	short	spare2;
	caddr_t	next;
};
#else /* !DIAGNOSTIC */
struct freelist {
	caddr_t	next;
};
#endif /* DIAGNOSTIC */

/*
 * Allocate a block of memory.
 */
void *
malloc(size, type, flags)
	unsigned long size;
	int type, flags;
{
	register struct kmembuckets *kbp;
	register struct kmemusage *kup;
	register struct freelist *freep;
	long indx, npg, alloc, allocsize;
	int s;
	caddr_t va, cp, savedlist;
#ifdef DIAGNOSTIC
	int i, copysize;
	short savedtype;
#endif
#ifdef KMEMSTATS
	register struct kmemstats *ksp = &kmemstats[type];

	if (((unsigned long)type) >= M_LAST)
		panic("malloc - bogus type");
#endif

	indx = BUCKETINDX(size);
	kbp = &bucket[indx];
	s = splimp();
#ifdef KMEMSTATS
	while (ksp->ks_memuse >= ksp->ks_limit) {
		if (flags & M_NOWAIT) {
			splx(s);
			return ((void *) NULL);
		}
		if (ksp->ks_limblocks < 65535)
			ksp->ks_limblocks++;
		tsleep((caddr_t)ksp, PSWP+2, memname[type], 0);
	}
#endif
#ifdef DIAGNOSTIC
	copysize = 1 << indx < sizeof addrmask ? 1 << indx : sizeof addrmask;
#endif
	if (kbp->kb_next == NULL) {
		if (size > MAXALLOCSAVE)
			allocsize = roundup(size, CLBYTES);
		else
			allocsize = 1 << indx;
		npg = clrnd(btoc(allocsize));
		va = (caddr_t) kmem_malloc(kmem_map, (vm_size_t)ctob(npg),
					   !(flags & M_NOWAIT));
		if (va == NULL) {
			splx(s);
			return ((void *) NULL);
		}
#ifdef KMEMSTATS
		kbp->kb_total += kbp->kb_elmpercl;
#endif
		kup = btokup(va);
		kup->ku_indx = indx;
		if (allocsize > MAXALLOCSAVE) {
			if (npg > 65535)
				panic("malloc: allocation too large");
			kup->ku_pagecnt = npg;
#ifdef KMEMSTATS
			ksp->ks_memuse += allocsize;
#endif
			goto out;
		}
#ifdef KMEMSTATS
		kup->ku_freecnt = kbp->kb_elmpercl;
		kbp->kb_totalfree += kbp->kb_elmpercl;
#endif
		/*
		 * Just in case we blocked while allocating memory,
		 * and someone else also allocated memory for this
		 * bucket, don't assume the list is still empty.
		 */
		savedlist = kbp->kb_next;
		kbp->kb_next = va + (npg * NBPG) - allocsize;
		for (cp = kbp->kb_next; ; cp -= allocsize) {
			freep = (struct freelist *)cp;
#ifdef DIAGNOSTIC
			/*
			 * Copy in known text to detect modification
			 * after freeing.
			 */
			bcopy(addrmask, cp, copysize);
			freep->type = M_FREE;
#endif /* DIAGNOSTIC */
			if (cp <= va)
				break;
			freep->next = cp - allocsize;
		}
		freep->next = savedlist;
	}
	va = kbp->kb_next;
	kbp->kb_next = ((struct freelist *)va)->next;
#ifdef DIAGNOSTIC
	freep = (struct freelist *)va;
	savedtype = freep->type;
	freep->type = ((struct freelist *)addrmask)->type;
	freep->next = ((struct freelist *)addrmask)->next;
	if (bcmp(addrmask, va, copysize)) {
		copysize >>= 2;
		for (i = 0; i < copysize && addrmask[i] == ((int *)va)[i]; i++)
			continue;
		printf("%s %d of object 0x%x size %d %s %s (0x%x != 0x%x)\n",
		    "Data modified on freelist: word", i, va, size,
		    "previous type", memname[savedtype], ((int *)va)[i],
		    addrmask[i]);
		/* panic("malloc: data modified on freelist"); */
	}
	freep->spare0 = 0;
#endif /* DIAGNOSTIC */
#ifdef KMEMSTATS
	kup = btokup(va);
	if (kup->ku_indx != indx)
		panic("malloc: wrong bucket");
	if (kup->ku_freecnt == 0)
		panic("malloc: lost data");
	kup->ku_freecnt--;
	kbp->kb_totalfree--;
	ksp->ks_memuse += 1 << indx;
out:
	kbp->kb_calls++;
	ksp->ks_inuse++;
	ksp->ks_calls++;
	if (ksp->ks_memuse > ksp->ks_maxused)
		ksp->ks_maxused = ksp->ks_memuse;
#else
out:
#endif
	splx(s);
	return ((void *) va);
}
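/*
 * Illustrative sketch, not part of the original file: a typical caller
 * of malloc() above.  A caller that cannot sleep (e.g. at interrupt
 * level) passes M_NOWAIT and must handle a NULL return; a process-level
 * caller may pass M_WAITOK and will sleep until the type's limit allows
 * the request.  M_TEMP and the M_WAITOK/M_NOWAIT flags are assumed to
 * come from malloc.h; the structure and function names are hypothetical.
 */
#ifdef notdef
struct example_softc {
	int	sc_unit;
	caddr_t	sc_buf;
};

static struct example_softc *
example_attach(unit)
	int unit;
{
	register struct example_softc *sc;

	sc = (struct example_softc *)
	    malloc(sizeof(*sc), M_TEMP, M_NOWAIT);
	if (sc == NULL)			/* only possible with M_NOWAIT */
		return (NULL);
	sc->sc_unit = unit;
	sc->sc_buf = NULL;
	return (sc);
}
#endif /* notdef */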
/*
 * Free a block of memory allocated by malloc.
 */
void
free(addr, type)
	void *addr;
	int type;
{
	register struct kmembuckets *kbp;
	register struct kmemusage *kup;
	register struct freelist *freep;
	long size;
	int s;
#ifdef DIAGNOSTIC
	caddr_t cp;
	long alloc, copysize;
#endif
#ifdef KMEMSTATS
	register struct kmemstats *ksp = &kmemstats[type];
#endif

	kup = btokup(addr);
	size = 1 << kup->ku_indx;
	kbp = &bucket[kup->ku_indx];
	s = splimp();
#ifdef DIAGNOSTIC
	/*
	 * Check for returns of data that do not point to the
	 * beginning of the allocation.
	 */
	if (size > NBPG * CLSIZE)
		alloc = addrmask[BUCKETINDX(NBPG * CLSIZE)];
	else
		alloc = addrmask[kup->ku_indx];
	if (((u_long)addr & alloc) != 0)
		panic("free: unaligned addr 0x%x, size %d, type %s, mask %d\n",
		    addr, size, memname[type], alloc);
#endif /* DIAGNOSTIC */
	if (size > MAXALLOCSAVE) {
		kmem_free(kmem_map, (vm_offset_t)addr, ctob(kup->ku_pagecnt));
#ifdef KMEMSTATS
		size = kup->ku_pagecnt << PGSHIFT;
		ksp->ks_memuse -= size;
		kup->ku_indx = 0;
		kup->ku_pagecnt = 0;
		if (ksp->ks_memuse + size >= ksp->ks_limit &&
		    ksp->ks_memuse < ksp->ks_limit)
			wakeup((caddr_t)ksp);
		ksp->ks_inuse--;
		kbp->kb_total -= 1;
#endif
		splx(s);
		return;
	}
	freep = (struct freelist *)addr;
#ifdef DIAGNOSTIC
	/*
	 * Check for multiple frees. Use a quick check to see if
	 * it looks free before laboriously searching the freelist.
	 */
	copysize = size < sizeof addrmask ? size : sizeof addrmask;
	if (freep->spare0 == WEIRD_ADDR) {
		freep->type = ((struct freelist *)addrmask)->type;
		freep->next = ((struct freelist *)addrmask)->next;
		if (!bcmp(addrmask, addr, copysize)) {
			for (cp = kbp->kb_next; cp; cp = *(caddr_t *)cp) {
				if (addr == cp) {
					printf("multiply freed item 0x%x\n",
					    addr);
					panic("free: duplicated free");
				}
			}
		}
	}
	/*
	 * Copy in known text to detect modification after freeing
	 * and to make it look free. Also, save the type being freed
	 * so we can list the likely culprit if modification is detected
	 * when the object is reallocated.
	 */
	bcopy(addrmask, addr, copysize);
	freep->type = type;
#endif /* DIAGNOSTIC */
#ifdef KMEMSTATS
	kup->ku_freecnt++;
	if (kup->ku_freecnt >= kbp->kb_elmpercl) {
		if (kup->ku_freecnt > kbp->kb_elmpercl)
			panic("free: multiple frees");
		else if (kbp->kb_totalfree > kbp->kb_highwat)
			kbp->kb_couldfree++;
	}
	kbp->kb_totalfree++;
	ksp->ks_memuse -= size;
	if (ksp->ks_memuse + size >= ksp->ks_limit &&
	    ksp->ks_memuse < ksp->ks_limit)
		wakeup((caddr_t)ksp);
	ksp->ks_inuse--;
#endif
	freep->next = kbp->kb_next;
	kbp->kb_next = addr;
	splx(s);
}
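/*
 * Illustrative sketch, not part of the original file: pairing malloc()
 * with free() above.  The pointer handed to free() must be exactly the
 * address malloc() returned (under DIAGNOSTIC an interior pointer trips
 * the alignment check and panics), and the type argument should match
 * the one used at allocation time so the KMEMSTATS accounting balances.
 * M_TEMP is assumed from malloc.h; the function name is hypothetical.
 */
#ifdef notdef
static void
example_scratch(len)
	unsigned long len;
{
	register caddr_t buf;

	buf = (caddr_t)malloc(len, M_TEMP, M_WAITOK);
	/* ... use buf as temporary scratch space ... */
	free((void *)buf, M_TEMP);	/* same address and type as above */
}
#endif /* notdef */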
/*
 * Initialize the kernel memory allocator.
 */
kmeminit()
{
	register long indx;
	int npg;

#if	((MAXALLOCSAVE & (MAXALLOCSAVE - 1)) != 0)
		ERROR!_kmeminit:_MAXALLOCSAVE_not_power_of_2
#endif
#if	(MAXALLOCSAVE > MINALLOCSIZE * 32768)
		ERROR!_kmeminit:_MAXALLOCSAVE_too_big
#endif
#if	(MAXALLOCSAVE < CLBYTES)
		ERROR!_kmeminit:_MAXALLOCSAVE_too_small
#endif
	npg = VM_KMEM_SIZE / NBPG;
	kmemusage = (struct kmemusage *) kmem_alloc(kernel_map,
		(vm_size_t)(npg * sizeof(struct kmemusage)));
	kmem_map = kmem_suballoc(kernel_map, (vm_offset_t *)&kmembase,
		(vm_offset_t *)&kmemlimit, (vm_size_t)(npg * NBPG), FALSE);
#ifdef KMEMSTATS
	for (indx = 0; indx < MINBUCKET + 16; indx++) {
		if (1 << indx >= CLBYTES)
			bucket[indx].kb_elmpercl = 1;
		else
			bucket[indx].kb_elmpercl = CLBYTES / (1 << indx);
		bucket[indx].kb_highwat = 5 * bucket[indx].kb_elmpercl;
	}
	for (indx = 0; indx < M_LAST; indx++)
		kmemstats[indx].ks_limit = npg * NBPG * 6 / 10;
#endif
}
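/*
 * Illustrative sketch, not part of the original file: the sizing policy
 * malloc() applies, restated as a stand-alone helper.  Requests of
 * MAXALLOCSAVE bytes or less are rounded up to their power-of-two
 * bucket size (1 << BUCKETINDX(size)); larger requests bypass the
 * buckets and are rounded up to a multiple of CLBYTES.  The helper
 * name is hypothetical; BUCKETINDX, MAXALLOCSAVE, roundup, and CLBYTES
 * are assumed to come from the headers included above.
 */
#ifdef notdef
static unsigned long
example_allocsize(size)
	unsigned long size;
{

	if (size > MAXALLOCSAVE)
		return (roundup(size, CLBYTES));
	return (1 << BUCKETINDX(size));
}
#endif /* notdef */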