/*
 * Copyright (c) 1987 Regents of the University of California.
 * All rights reserved.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)kern_malloc.c	7.21 (Berkeley) 12/05/90
 */

#include "param.h"
#include "cmap.h"
#include "time.h"
#include "proc.h"
#include "map.h"
#include "kernel.h"
#include "malloc.h"
#include "../vm/vm_param.h"
#include "../vm/vm_map.h"
#include "../vm/vm_kern.h"

struct kmembuckets bucket[MINBUCKET + 16];
struct kmemstats kmemstats[M_LAST];
struct kmemusage *kmemusage;
char *kmembase, *kmemlimit;
char *memname[] = INITKMEMNAMES;

/*
 * Allocate a block of memory.
 */
qaddr_t
malloc(size, type, flags)
	unsigned long size;
	int type, flags;
{
	register struct kmembuckets *kbp;
	register struct kmemusage *kup;
	long indx, npg, allocsize;
	int s;
	caddr_t va, cp;
#ifdef KMEMSTATS
	register struct kmemstats *ksp = &kmemstats[type];

	/* valid types run 0..M_LAST-1; kmemstats has only M_LAST entries */
	if (((unsigned long)type) >= M_LAST)
		panic("malloc - bogus type");
#endif

	indx = BUCKETINDX(size);
	kbp = &bucket[indx];
	s = splimp();
#ifdef KMEMSTATS
	/*
	 * Block while this type is over its quota, unless the caller
	 * cannot sleep; free() wakes us when usage drops below the limit.
	 */
	while (ksp->ks_memuse >= ksp->ks_limit) {
		if (flags & M_NOWAIT) {
			splx(s);
			return (0);
		}
		if (ksp->ks_limblocks < 65535)
			ksp->ks_limblocks++;
		tsleep((caddr_t)ksp, PSWP+2, memname[type], 0);
	}
#endif
	if (kbp->kb_next == NULL) {
		/* the bucket is empty: get more pages from the VM system */
		if (size > MAXALLOCSAVE)
			allocsize = roundup(size, CLBYTES);
		else
			allocsize = 1 << indx;
		npg = clrnd(btoc(allocsize));
		va = (caddr_t) kmem_malloc(kmem_map, (vm_size_t)ctob(npg),
			!(flags & M_NOWAIT));
		if (va == NULL) {
			splx(s);
			return (0);
		}
#ifdef KMEMSTATS
		kbp->kb_total += kbp->kb_elmpercl;
#endif
		kup = btokup(va);
		kup->ku_indx = indx;
		if (allocsize > MAXALLOCSAVE) {
			if (npg > 65535)	/* ku_pagecnt is a u_short */
				panic("malloc: allocation too large");
			kup->ku_pagecnt = npg;
#ifdef KMEMSTATS
			ksp->ks_memuse += allocsize;
#endif
			goto out;
		}
#ifdef KMEMSTATS
		kup->ku_freecnt = kbp->kb_elmpercl;
		kbp->kb_totalfree += kbp->kb_elmpercl;
#endif
		/*
		 * Thread the elements of the new cluster onto the free
		 * list, highest address first; the first word of each
		 * free element holds the link to the next one.
		 */
		kbp->kb_next = va + (npg * NBPG) - allocsize;
		for (cp = kbp->kb_next; cp > va; cp -= allocsize)
			*(caddr_t *)cp = cp - allocsize;
		*(caddr_t *)cp = NULL;
	}
	va = kbp->kb_next;
	kbp->kb_next = *(caddr_t *)va;
#ifdef KMEMSTATS
	kup = btokup(va);
	if (kup->ku_indx != indx)
		panic("malloc: wrong bucket");
	if (kup->ku_freecnt == 0)
		panic("malloc: lost data");
	kup->ku_freecnt--;
	kbp->kb_totalfree--;
	ksp->ks_memuse += 1 << indx;
out:
	kbp->kb_calls++;
	ksp->ks_inuse++;
	ksp->ks_calls++;
	if (ksp->ks_memuse > ksp->ks_maxused)
		ksp->ks_maxused = ksp->ks_memuse;
#else
out:
#endif
	splx(s);
	return ((qaddr_t)va);
}

#ifdef DIAGNOSTIC
long addrmask[] = { 0x00000000,
	0x00000001, 0x00000003, 0x00000007, 0x0000000f,
	0x0000001f, 0x0000003f, 0x0000007f, 0x000000ff,
	0x000001ff, 0x000003ff, 0x000007ff, 0x00000fff,
	0x00001fff, 0x00003fff, 0x00007fff, 0x0000ffff,
};
#endif /* DIAGNOSTIC */
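
/*
 * Note (illustrative, assuming MINBUCKET == 4 as in the malloc.h of
 * this era): each bucket holds free blocks of a single power-of-two
 * size, so BUCKETINDX() maps request sizes to buckets roughly as:
 *
 *	1..16 bytes	-> bucket[4]  (16-byte blocks)
 *	17..32 bytes	-> bucket[5]  (32-byte blocks)
 *	33..64 bytes	-> bucket[6]  (64-byte blocks)
 *
 * Requests larger than MAXALLOCSAVE bypass the buckets entirely and
 * are rounded up to whole clusters taken straight from kmem_map.
 */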

/*
 * Free a block of memory allocated by malloc.
 */
void
free(addr, type)
	caddr_t addr;
	int type;
{
	register struct kmembuckets *kbp;
	register struct kmemusage *kup;
	long alloc, size;
	int s;
#ifdef KMEMSTATS
	register struct kmemstats *ksp = &kmemstats[type];
#endif

	kup = btokup(addr);
	size = 1 << kup->ku_indx;
#ifdef DIAGNOSTIC
	/*
	 * Check that the address is aligned for an element of its
	 * bucket; the mask is clipped at one cluster because larger
	 * blocks are only cluster-aligned.
	 */
	if (size > NBPG * CLSIZE)
		alloc = addrmask[BUCKETINDX(NBPG * CLSIZE)];
	else
		alloc = addrmask[kup->ku_indx];
	if (((u_long)addr & alloc) != 0) {
		printf("free: unaligned addr 0x%x, size %d, type %d, mask %d\n",
		    addr, size, type, alloc);
		panic("free: unaligned addr");
	}
#endif /* DIAGNOSTIC */
	kbp = &bucket[kup->ku_indx];
	s = splimp();
	if (size > MAXALLOCSAVE) {
		/* large blocks go straight back to the VM system */
		kmem_free(kmem_map, (vm_offset_t)addr, ctob(kup->ku_pagecnt));
#ifdef KMEMSTATS
		size = kup->ku_pagecnt << PGSHIFT;
		ksp->ks_memuse -= size;
		kup->ku_indx = 0;
		kup->ku_pagecnt = 0;
		/* if this free drops the type below its limit, wake sleepers */
		if (ksp->ks_memuse + size >= ksp->ks_limit &&
		    ksp->ks_memuse < ksp->ks_limit)
			wakeup((caddr_t)ksp);
		ksp->ks_inuse--;
		kbp->kb_total -= 1;
#endif
		splx(s);
		return;
	}
#ifdef KMEMSTATS
	kup->ku_freecnt++;
	/*
	 * Exactly elmpercl free elements means the whole cluster is
	 * free (and could be released if the bucket is above its
	 * high-water mark); more than elmpercl means a double free.
	 */
	if (kup->ku_freecnt >= kbp->kb_elmpercl)
		if (kup->ku_freecnt > kbp->kb_elmpercl)
			panic("free: multiple frees");
		else if (kbp->kb_totalfree > kbp->kb_highwat)
			kbp->kb_couldfree++;
	kbp->kb_totalfree++;
	ksp->ks_memuse -= size;
	if (ksp->ks_memuse + size >= ksp->ks_limit &&
	    ksp->ks_memuse < ksp->ks_limit)
		wakeup((caddr_t)ksp);
	ksp->ks_inuse--;
#endif
	*(caddr_t *)addr = kbp->kb_next;
	kbp->kb_next = addr;
	splx(s);
}

/*
 * Initialize the kernel memory allocator.
 */
kmeminit()
{
	register long indx;
	int npg;

	/*
	 * The deliberately undefined symbols below fail the build if
	 * MAXALLOCSAVE is misconfigured (a pre-ANSI stand-in for #error).
	 */
#if ((MAXALLOCSAVE & (MAXALLOCSAVE - 1)) != 0)
		ERROR!_kmeminit:_MAXALLOCSAVE_not_power_of_2
#endif
#if (MAXALLOCSAVE > MINALLOCSIZE * 32768)
		ERROR!_kmeminit:_MAXALLOCSAVE_too_big
#endif
#if (MAXALLOCSAVE < CLBYTES)
		ERROR!_kmeminit:_MAXALLOCSAVE_too_small
#endif
	npg = VM_KMEM_SIZE / NBPG;
	kmemusage = (struct kmemusage *) kmem_alloc(kernel_map,
		(vm_size_t)(npg * sizeof(struct kmemusage)));
	kmem_map = kmem_suballoc(kernel_map, (vm_offset_t)&kmembase,
		(vm_offset_t)&kmemlimit, (vm_size_t)(npg * NBPG), FALSE);
#ifdef KMEMSTATS
	for (indx = 0; indx < MINBUCKET + 16; indx++) {
		if (1 << indx >= CLBYTES)
			bucket[indx].kb_elmpercl = 1;
		else
			bucket[indx].kb_elmpercl = CLBYTES / (1 << indx);
		bucket[indx].kb_highwat = 5 * bucket[indx].kb_elmpercl;
	}
	/* each type may use at most 60% of the kernel malloc arena */
	for (indx = 0; indx < M_LAST; indx++)
		kmemstats[indx].ks_limit = npg * NBPG * 6 / 10;
#endif
}
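
#ifdef notdef
/*
 * Usage sketch (illustrative only, not part of the original file):
 * a hypothetical caller allocating and releasing a scratch buffer.
 * M_TEMP, M_NOWAIT, and M_WAITOK come from malloc.h; the routine
 * name and buffer size are invented for this example.
 */
examplealloc()
{
	register caddr_t cp;

	/* cannot sleep: returns 0 if memory or the M_TEMP quota is gone */
	cp = (caddr_t)malloc((u_long)512, M_TEMP, M_NOWAIT);
	if (cp == 0)
		return (0);
	/* ... use the buffer ... */
	free(cp, M_TEMP);
	return (1);
}
#endif /* notdef */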