/*
 * Copyright (c) 1987, 1991 The Regents of the University of California.
 * All rights reserved.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)kern_malloc.c	7.31 (Berkeley) 03/13/92
 */

#include "param.h"
#include "proc.h"
#include "map.h"
#include "kernel.h"
#include "malloc.h"
#include "vm/vm.h"
#include "vm/vm_kern.h"

struct kmembuckets bucket[MINBUCKET + 16];
struct kmemstats kmemstats[M_LAST];
struct kmemusage *kmemusage;
char *kmembase, *kmemlimit;
char *memname[] = INITKMEMNAMES;

#ifdef DIAGNOSTIC
/*
 * This structure provides a set of masks to catch unaligned frees.
 */
long addrmask[] = { 0,
	0x00000001, 0x00000003, 0x00000007, 0x0000000f,
	0x0000001f, 0x0000003f, 0x0000007f, 0x000000ff,
	0x000001ff, 0x000003ff, 0x000007ff, 0x00000fff,
	0x00001fff, 0x00003fff, 0x00007fff, 0x0000ffff,
};

/*
 * The WEIRD_ADDR is used as known text to copy into free objects so
 * that modifications after frees can be detected.
 */
#define WEIRD_ADDR	0xdeadbeef
#define MAX_COPY	32

/*
 * Normally the first word of the structure is used to hold the list
 * pointer for free objects. However, when running with diagnostics,
 * we use the third and fourth fields, so as to catch modifications
 * in the most commonly trashed first two words.
 */
struct freelist {
	long	spare0;
	long	spare1;
	short	type;
	short	spare2;
	caddr_t	next;
};
#else /* !DIAGNOSTIC */
struct freelist {
	caddr_t	next;
};
#endif /* DIAGNOSTIC */

/*
 * Allocate a block of memory
 */
void *
malloc(size, type, flags)
	unsigned long size;
	int type, flags;
{
	register struct kmembuckets *kbp;
	register struct kmemusage *kup;
	register struct freelist *freep;
	long indx, npg, alloc, allocsize;
	int s;
	caddr_t va, cp, savedlist;
#ifdef DIAGNOSTIC
	long *end, *lp;
	int copysize;
	short savedtype;
#endif
#ifdef KMEMSTATS
	register struct kmemstats *ksp = &kmemstats[type];

	if (((unsigned long)type) > M_LAST)
		panic("malloc - bogus type");
#endif

	indx = BUCKETINDX(size);
	kbp = &bucket[indx];
	s = splimp();
#ifdef KMEMSTATS
	while (ksp->ks_memuse >= ksp->ks_limit) {
		if (flags & M_NOWAIT) {
			splx(s);
			return ((void *) NULL);
		}
		if (ksp->ks_limblocks < 65535)
			ksp->ks_limblocks++;
		tsleep((caddr_t)ksp, PSWP+2, memname[type], 0);
	}
#endif
#ifdef DIAGNOSTIC
	copysize = 1 << indx < MAX_COPY ? 1 << indx : MAX_COPY;
#endif
	if (kbp->kb_next == NULL) {
		if (size > MAXALLOCSAVE)
			allocsize = roundup(size, CLBYTES);
		else
			allocsize = 1 << indx;
		npg = clrnd(btoc(allocsize));
		va = (caddr_t) kmem_malloc(kmem_map, (vm_size_t)ctob(npg),
			!(flags & M_NOWAIT));
		if (va == NULL) {
			splx(s);
			return ((void *) NULL);
		}
#ifdef KMEMSTATS
		kbp->kb_total += kbp->kb_elmpercl;
#endif
		kup = btokup(va);
		kup->ku_indx = indx;
		if (allocsize > MAXALLOCSAVE) {
			if (npg > 65535)
				panic("malloc: allocation too large");
			kup->ku_pagecnt = npg;
#ifdef KMEMSTATS
			ksp->ks_memuse += allocsize;
#endif
			goto out;
		}
#ifdef KMEMSTATS
		kup->ku_freecnt = kbp->kb_elmpercl;
		kbp->kb_totalfree += kbp->kb_elmpercl;
#endif
		/*
		 * Just in case we blocked while allocating memory,
		 * and someone else also allocated memory for this
		 * bucket, don't assume the list is still empty.
		 */
		savedlist = kbp->kb_next;
		kbp->kb_next = va + (npg * NBPG) - allocsize;
		for (cp = kbp->kb_next; ; cp -= allocsize) {
			freep = (struct freelist *)cp;
#ifdef DIAGNOSTIC
			/*
			 * Copy in known text to detect modification
			 * after freeing.
			 */
			end = (long *)&cp[copysize];
			for (lp = (long *)cp; lp < end; lp++)
				*lp = WEIRD_ADDR;
			freep->type = M_FREE;
#endif /* DIAGNOSTIC */
			if (cp <= va)
				break;
			freep->next = cp - allocsize;
		}
		freep->next = savedlist;
	}
	va = kbp->kb_next;
	kbp->kb_next = ((struct freelist *)va)->next;
#ifdef DIAGNOSTIC
	freep = (struct freelist *)va;
	savedtype = freep->type;
	freep->type = WEIRD_ADDR >> 16;
	freep->next = (caddr_t)WEIRD_ADDR;
	end = (long *)&va[copysize];
	for (lp = (long *)va; lp < end; lp++) {
		if (*lp == WEIRD_ADDR)
			continue;
		printf("%s %d of object 0x%x size %d %s %s (0x%x != 0x%x)\n",
			"Data modified on freelist: word", lp - (long *)va,
			va, size, "previous type", memname[savedtype], *lp,
			WEIRD_ADDR);
		break;
	}
	freep->spare0 = 0;
#endif /* DIAGNOSTIC */
#ifdef KMEMSTATS
	kup = btokup(va);
	if (kup->ku_indx != indx)
		panic("malloc: wrong bucket");
	if (kup->ku_freecnt == 0)
		panic("malloc: lost data");
	kup->ku_freecnt--;
	kbp->kb_totalfree--;
	ksp->ks_memuse += 1 << indx;
out:
	kbp->kb_calls++;
	ksp->ks_inuse++;
	ksp->ks_calls++;
	if (ksp->ks_memuse > ksp->ks_maxused)
		ksp->ks_maxused = ksp->ks_memuse;
#else
out:
#endif
	splx(s);
	return ((void *) va);
}
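
/*
 * Usage sketch (not part of this file): a minimal, hypothetical caller
 * of the allocator above.  The structure name "foo_softc" is made up;
 * M_DEVBUF, M_NOWAIT and M_WAITOK are the standard definitions from
 * malloc.h.
 *
 *	struct foo_softc *sc;
 *
 *	sc = (struct foo_softc *)malloc(sizeof(*sc), M_DEVBUF, M_NOWAIT);
 *	if (sc == NULL)
 *		return (ENOMEM);
 *	...
 *	free((void *)sc, M_DEVBUF);
 *
 * With M_NOWAIT the call returns NULL rather than sleep; with M_WAITOK
 * it may block in tsleep() above until the per-type limit is no longer
 * exceeded.
 */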

/*
 * Free a block of memory allocated by malloc.
 */
void
free(addr, type)
	void *addr;
	int type;
{
	register struct kmembuckets *kbp;
	register struct kmemusage *kup;
	register struct freelist *freep;
	long size;
	int s;
#ifdef DIAGNOSTIC
	caddr_t cp;
	long *end, *lp, alloc, copysize;
#endif
#ifdef KMEMSTATS
	register struct kmemstats *ksp = &kmemstats[type];
#endif

	kup = btokup(addr);
	size = 1 << kup->ku_indx;
	kbp = &bucket[kup->ku_indx];
	s = splimp();
#ifdef DIAGNOSTIC
	/*
	 * Check for returns of data that do not point to the
	 * beginning of the allocation.
	 */
	if (size > NBPG * CLSIZE)
		alloc = addrmask[BUCKETINDX(NBPG * CLSIZE)];
	else
		alloc = addrmask[kup->ku_indx];
	if (((u_long)addr & alloc) != 0)
		panic("free: unaligned addr 0x%x, size %d, type %s, mask %d\n",
			addr, size, memname[type], alloc);
#endif /* DIAGNOSTIC */
	if (size > MAXALLOCSAVE) {
		kmem_free(kmem_map, (vm_offset_t)addr, ctob(kup->ku_pagecnt));
#ifdef KMEMSTATS
		size = kup->ku_pagecnt << PGSHIFT;
		ksp->ks_memuse -= size;
		kup->ku_indx = 0;
		kup->ku_pagecnt = 0;
		if (ksp->ks_memuse + size >= ksp->ks_limit &&
		    ksp->ks_memuse < ksp->ks_limit)
			wakeup((caddr_t)ksp);
		ksp->ks_inuse--;
		kbp->kb_total -= 1;
#endif
		splx(s);
		return;
	}
	freep = (struct freelist *)addr;
#ifdef DIAGNOSTIC
	/*
	 * Check for multiple frees. Use a quick check to see if
	 * it looks free before laboriously searching the freelist.
	 */
	if (freep->spare0 == WEIRD_ADDR) {
		for (cp = kbp->kb_next; cp; cp = *(caddr_t *)cp) {
			if (addr != cp)
				continue;
			printf("multiply freed item 0x%x\n", addr);
			panic("free: duplicated free");
		}
	}
	/*
	 * Copy in known text to detect modification after freeing
	 * and to make it look free.  Also, save the type being freed
	 * so we can list likely culprit if modification is detected
	 * when the object is reallocated.
	 */
	copysize = size < MAX_COPY ? size : MAX_COPY;
	end = (long *)&((caddr_t)addr)[copysize];
	for (lp = (long *)addr; lp < end; lp++)
		*lp = WEIRD_ADDR;
	freep->type = type;
#endif /* DIAGNOSTIC */
#ifdef KMEMSTATS
	kup->ku_freecnt++;
	if (kup->ku_freecnt >= kbp->kb_elmpercl)
		if (kup->ku_freecnt > kbp->kb_elmpercl)
			panic("free: multiple frees");
		else if (kbp->kb_totalfree > kbp->kb_highwat)
			kbp->kb_couldfree++;
	kbp->kb_totalfree++;
	ksp->ks_memuse -= size;
	if (ksp->ks_memuse + size >= ksp->ks_limit &&
	    ksp->ks_memuse < ksp->ks_limit)
		wakeup((caddr_t)ksp);
	ksp->ks_inuse--;
#endif
	freep->next = kbp->kb_next;
	kbp->kb_next = addr;
	splx(s);
}
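
/*
 * Informal sketch of the DIAGNOSTIC scheme above (descriptive only, no
 * new code): after free(p, type), the first min(size, MAX_COPY) bytes
 * of p hold WEIRD_ADDR and freep->type records the type.  A second
 * free(p, type) sees freep->spare0 == WEIRD_ADDR, searches the bucket
 * freelist, and panics with "free: duplicated free" if p is already on
 * it.  If the object is scribbled on while free, the next malloc() from
 * that bucket finds a word that differs from WEIRD_ADDR and prints the
 * saved type as the likely culprit.
 */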

/*
 * Initialize the kernel memory allocator
 */
kmeminit()
{
	register long indx;
	int npg;

#if	((MAXALLOCSAVE & (MAXALLOCSAVE - 1)) != 0)
		ERROR!_kmeminit:_MAXALLOCSAVE_not_power_of_2
#endif
#if	(MAXALLOCSAVE > MINALLOCSIZE * 32768)
		ERROR!_kmeminit:_MAXALLOCSAVE_too_big
#endif
#if	(MAXALLOCSAVE < CLBYTES)
		ERROR!_kmeminit:_MAXALLOCSAVE_too_small
#endif
	npg = VM_KMEM_SIZE / NBPG;
	kmemusage = (struct kmemusage *) kmem_alloc(kernel_map,
		(vm_size_t)(npg * sizeof(struct kmemusage)));
	kmem_map = kmem_suballoc(kernel_map, (vm_offset_t *)&kmembase,
		(vm_offset_t *)&kmemlimit, (vm_size_t)(npg * NBPG), FALSE);
#ifdef KMEMSTATS
	for (indx = 0; indx < MINBUCKET + 16; indx++) {
		if (1 << indx >= CLBYTES)
			bucket[indx].kb_elmpercl = 1;
		else
			bucket[indx].kb_elmpercl = CLBYTES / (1 << indx);
		bucket[indx].kb_highwat = 5 * bucket[indx].kb_elmpercl;
	}
	for (indx = 0; indx < M_LAST; indx++)
		kmemstats[indx].ks_limit = npg * NBPG * 6 / 10;
#endif
}
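
/*
 * Worked example of the sizing above (illustrative numbers only, not
 * taken from this file): assuming VM_KMEM_SIZE is 1 MB and NBPG is
 * 4096, kmeminit() computes npg = 256, allocates 256 struct kmemusage
 * entries, carves a 1 MB kmem_map submap out of kernel_map, and sets
 * each type's ks_limit to 256 * 4096 * 6 / 10 = 629145 bytes, i.e.
 * 60% of the submap.
 */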