/* $NetBSD: malloc.c,v 1.1.1.1 2016/01/13 21:42:18 christos Exp $ */

/* Memory allocator `malloc'.
   Copyright 1990, 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Library General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Library General Public License for more details.

   You should have received a copy of the GNU Library General Public
   License along with this library; see the file COPYING.LIB.  If
   not, write to the Free Software Foundation, Inc., 675 Mass Ave,
   Cambridge, MA 02139, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif

/* How to really get more memory.  */
__ptr_t (*__morecore) __P ((ptrdiff_t __size)) = __default_morecore;

/* Debugging hook for `malloc'.  */
__ptr_t (*__malloc_hook) __P ((__malloc_size_t __size));

/* Pointer to the base of the first block.  */
char *_heapbase;

/* Block information table.  Allocated with align/__free (not malloc/free).  */
malloc_info *_heapinfo;

/* Number of info entries.  */
static __malloc_size_t heapsize;

/* Search index in the info table.  */
__malloc_size_t _heapindex;

/* Limit of valid info table indices.  */
__malloc_size_t _heaplimit;

/* Free lists for each fragment size.  */
struct list _fraghead[BLOCKLOG];

/* Instrumentation.  */
__malloc_size_t _chunks_used;
__malloc_size_t _bytes_used;
__malloc_size_t _chunks_free;
__malloc_size_t _bytes_free;

/* Are you experienced?  Nonzero once the allocator has set itself up.  */
int __malloc_initialized;

/* Called from `initialize' when the allocator sets itself up.  */
void (*__malloc_initialize_hook) __P ((void));

/* Called from `align' after more core has been requested.  */
void (*__after_morecore_hook) __P ((void));

/* Aligned allocation.  */
static __ptr_t align __P ((__malloc_size_t));
static __ptr_t
align (size)
     __malloc_size_t size;
{
  __ptr_t result;
  unsigned long int adj;

  result = (*__morecore) (size);
  adj = (unsigned long int) ((unsigned long int) ((char *) result -
                                                  (char *) NULL)) % BLOCKSIZE;
  if (adj != 0)
    {
      adj = BLOCKSIZE - adj;
      (void) (*__morecore) (adj);
      result = (char *) result + adj;
    }

  if (__after_morecore_hook)
    (*__after_morecore_hook) ();

  return result;
}

/* Set everything up and remember that we have.  */
static int initialize __P ((void));
static int
initialize ()
{
  if (__malloc_initialize_hook)
    (*__malloc_initialize_hook) ();

  heapsize = HEAP / BLOCKSIZE;
  _heapinfo = (malloc_info *) align (heapsize * sizeof (malloc_info));
  if (_heapinfo == NULL)
    return 0;
  memset (_heapinfo, 0, heapsize * sizeof (malloc_info));
  _heapinfo[0].free.size = 0;
  _heapinfo[0].free.next = _heapinfo[0].free.prev = 0;
  _heapindex = 0;
  _heapbase = (char *) _heapinfo;

  /* Account for the _heapinfo block itself in the statistics.  */
  _bytes_used = heapsize * sizeof (malloc_info);
  _chunks_used = 1;

  __malloc_initialized = 1;
  return 1;
}
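/* Example: the two hooks above are the only way a client influences
   setup; `__malloc_initialize_hook' runs from `initialize' when the
   allocator sets itself up, and `__after_morecore_hook' runs from
   `align' each time more core is requested.  A minimal sketch of
   installing them follows; the `example_' names are hypothetical.  */
#if 0
static void
example_note_init ()
{
  /* Runs when the heap info table is first set up.  */
}

static void
example_note_core ()
{
  /* Runs each time `align' asks `__morecore' for more space.  */
}

static void
example_install_hooks ()
{
  __malloc_initialize_hook = example_note_init;
  __after_morecore_hook = example_note_core;
}
#endif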
/* Get neatly aligned memory, initializing or
   growing the heap info table as necessary.  */
static __ptr_t morecore __P ((__malloc_size_t));
static __ptr_t
morecore (size)
     __malloc_size_t size;
{
  __ptr_t result;
  malloc_info *newinfo, *oldinfo;
  __malloc_size_t newsize;

  result = align (size);
  if (result == NULL)
    return NULL;

  /* Check if we need to grow the info table.  */
  if ((__malloc_size_t) BLOCK ((char *) result + size) > heapsize)
    {
      newsize = heapsize;
      while ((__malloc_size_t) BLOCK ((char *) result + size) > newsize)
        newsize *= 2;
      newinfo = (malloc_info *) align (newsize * sizeof (malloc_info));
      if (newinfo == NULL)
        {
          (*__morecore) (-size);
          return NULL;
        }
      memcpy (newinfo, _heapinfo, heapsize * sizeof (malloc_info));
      memset (&newinfo[heapsize], 0,
              (newsize - heapsize) * sizeof (malloc_info));
      oldinfo = _heapinfo;
      newinfo[BLOCK (oldinfo)].busy.type = 0;
      newinfo[BLOCK (oldinfo)].busy.info.size
        = BLOCKIFY (heapsize * sizeof (malloc_info));
      _heapinfo = newinfo;
      /* Account for the _heapinfo block itself in the statistics.  */
      _bytes_used += newsize * sizeof (malloc_info);
      ++_chunks_used;
      _free_internal (oldinfo);
      heapsize = newsize;
    }

  _heaplimit = BLOCK ((char *) result + size);
  return result;
}
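/* Example: `align' and `morecore' obtain raw memory only through the
   `__morecore' pointer, so a program can substitute its own core
   allocator before the first allocation.  The sketch below is only
   illustrative and serves core from a fixed static arena; the
   `example_' names and the arena size are assumptions.  Like
   `__default_morecore' it returns the current break for a size of
   zero, returns NULL on failure, and accepts a negative size to give
   space back.  */
#if 0
#define EXAMPLE_ARENA_SIZE (1024 * 1024)
static char example_arena[EXAMPLE_ARENA_SIZE];
static __malloc_size_t example_break;

static __ptr_t
example_morecore (increment)
     ptrdiff_t increment;
{
  char *previous = example_arena + example_break;

  if (increment > 0
      ? (__malloc_size_t) increment > EXAMPLE_ARENA_SIZE - example_break
      : (__malloc_size_t) -increment > example_break)
    return NULL;                /* Arena exhausted, or underflow.  */
  example_break += increment;
  /* Like sbrk, return the old break, i.e. the start of the new space.  */
  return previous;
}

/* Installed with `__morecore = example_morecore;' before any
   allocation takes place.  */
#endif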
/* Allocate memory from the heap.  */
__ptr_t
malloc (size)
     __malloc_size_t size;
{
  __ptr_t result;
  __malloc_size_t block, blocks, lastblocks, start;
  register __malloc_size_t i;
  struct list *next;

  /* ANSI C allows `malloc (0)' to either return NULL, or to return a
     valid address you can realloc and free (though not dereference).

     It turns out that some extant code (sunrpc, at least Ultrix's version)
     expects `malloc (0)' to return non-NULL and breaks otherwise.
     Be compatible.  */

#if 0
  if (size == 0)
    return NULL;
#endif

  if (__malloc_hook != NULL)
    return (*__malloc_hook) (size);

  if (!__malloc_initialized)
    if (!initialize ())
      return NULL;

  if (size < sizeof (struct list))
    size = sizeof (struct list);

#ifdef SUNOS_LOCALTIME_BUG
  if (size < 16)
    size = 16;
#endif

  /* Determine the allocation policy based on the request size.  */
  if (size <= BLOCKSIZE / 2)
    {
      /* Small allocation to receive a fragment of a block.
         Determine the logarithm to base two of the fragment size.  */
      register __malloc_size_t log = 1;
      --size;
      while ((size /= 2) != 0)
        ++log;

      /* Look in the fragment lists for a
         free fragment of the desired size.  */
      next = _fraghead[log].next;
      if (next != NULL)
        {
          /* There are free fragments of this size.
             Pop a fragment out of the fragment list and return it.
             Update the block's nfree and first counters.  */
          result = (__ptr_t) next;
          next->prev->next = next->next;
          if (next->next != NULL)
            next->next->prev = next->prev;
          block = BLOCK (result);
          if (--_heapinfo[block].busy.info.frag.nfree != 0)
            _heapinfo[block].busy.info.frag.first = (unsigned long int)
              ((unsigned long int) ((char *) next->next - (char *) NULL)
               % BLOCKSIZE) >> log;

          /* Update the statistics.  */
          ++_chunks_used;
          _bytes_used += 1 << log;
          --_chunks_free;
          _bytes_free -= 1 << log;
        }
      else
        {
          /* No free fragments of the desired size, so get a new block
             and break it into fragments, returning the first.  */
          result = malloc (BLOCKSIZE);
          if (result == NULL)
            return NULL;

          /* Link all fragments but the first into the free list.  */
          for (i = 1; i < (__malloc_size_t) (BLOCKSIZE >> log); ++i)
            {
              next = (struct list *) ((char *) result + (i << log));
              next->next = _fraghead[log].next;
              next->prev = &_fraghead[log];
              next->prev->next = next;
              if (next->next != NULL)
                next->next->prev = next;
            }

          /* Initialize the nfree and first counters for this block.  */
          block = BLOCK (result);
          _heapinfo[block].busy.type = log;
          _heapinfo[block].busy.info.frag.nfree = i - 1;
          _heapinfo[block].busy.info.frag.first = i - 1;

          _chunks_free += (BLOCKSIZE >> log) - 1;
          _bytes_free += BLOCKSIZE - (1 << log);
          _bytes_used -= BLOCKSIZE - (1 << log);
        }
    }
  else
    {
      /* Large allocation to receive one or more blocks.
         Search the free list in a circle starting at the last place visited.
         If we loop completely around without finding a large enough
         space we will have to get more memory from the system.  */
      blocks = BLOCKIFY (size);
      start = block = _heapindex;
      while (_heapinfo[block].free.size < blocks)
        {
          block = _heapinfo[block].free.next;
          if (block == start)
            {
              /* Need to get more from the system.  Check to see if
                 the new core will be contiguous with the final free
                 block; if so we don't need to get as much.  */
              block = _heapinfo[0].free.prev;
              lastblocks = _heapinfo[block].free.size;
              if (_heaplimit != 0 && block + lastblocks == _heaplimit &&
                  (*__morecore) (0) == ADDRESS (block + lastblocks) &&
                  (morecore ((blocks - lastblocks) * BLOCKSIZE)) != NULL)
                {
                  /* Which block we are extending (the `final free
                     block' referred to above) might have changed, if
                     it got combined with a freed info table.  */
                  block = _heapinfo[0].free.prev;
                  _heapinfo[block].free.size += (blocks - lastblocks);
                  _bytes_free += (blocks - lastblocks) * BLOCKSIZE;
                  continue;
                }
              result = morecore (blocks * BLOCKSIZE);
              if (result == NULL)
                return NULL;
              block = BLOCK (result);
              _heapinfo[block].busy.type = 0;
              _heapinfo[block].busy.info.size = blocks;
              ++_chunks_used;
              _bytes_used += blocks * BLOCKSIZE;
              return result;
            }
        }

      /* At this point we have found a suitable free list entry.
         Figure out how to remove what we need from the list.  */
      result = ADDRESS (block);
      if (_heapinfo[block].free.size > blocks)
        {
          /* The block we found has a bit left over,
             so relink the tail end back into the free list.  */
          _heapinfo[block + blocks].free.size
            = _heapinfo[block].free.size - blocks;
          _heapinfo[block + blocks].free.next
            = _heapinfo[block].free.next;
          _heapinfo[block + blocks].free.prev
            = _heapinfo[block].free.prev;
          _heapinfo[_heapinfo[block].free.prev].free.next
            = _heapinfo[_heapinfo[block].free.next].free.prev
            = _heapindex = block + blocks;
        }
      else
        {
          /* The block exactly matches our requirements,
             so just remove it from the list.  */
          _heapinfo[_heapinfo[block].free.next].free.prev
            = _heapinfo[block].free.prev;
          _heapinfo[_heapinfo[block].free.prev].free.next
            = _heapindex = _heapinfo[block].free.next;
          --_chunks_free;
        }

      _heapinfo[block].busy.type = 0;
      _heapinfo[block].busy.info.size = blocks;
      ++_chunks_used;
      _bytes_used += blocks * BLOCKSIZE;
      _bytes_free -= blocks * BLOCKSIZE;

      /* Mark all the blocks of the object just allocated except for the
         first with a negative number so you can find the first block by
         adding that adjustment.  */
      while (--blocks > 0)
        _heapinfo[block + blocks].busy.info.size = -blocks;
    }

  return result;
}
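/* Worked example of the two policies above, assuming BLOCKLOG is 12 so
   that BLOCKSIZE is 4096 (the real values come from malloc.h).  A
   request for 100 bytes is "small" (at most BLOCKSIZE / 2): the loop
   computes log = 7, so the caller gets a 128-byte fragment and the
   block's remaining 31 fragments are threaded onto _fraghead[7].  A
   request for 5000 bytes is "large": BLOCKIFY rounds it up to 2
   blocks, and the second block's info entry is set to -1 so the start
   of the object can be found from any of its blocks.  */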
#ifndef _LIBC

/* On some ANSI C systems, some libc functions call _malloc, _free
   and _realloc.  Make them use the GNU functions.  */

__ptr_t
_malloc (size)
     __malloc_size_t size;
{
  return malloc (size);
}

void
_free (ptr)
     __ptr_t ptr;
{
  free (ptr);
}

__ptr_t
_realloc (ptr, size)
     __ptr_t ptr;
     __malloc_size_t size;
{
  return realloc (ptr, size);
}

#endif
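/* Example use of the `__malloc_hook' debugging hook that `malloc'
   consults before doing anything else.  Because the hook replaces the
   allocation entirely, a tracing hook has to remove itself before
   calling back into `malloc' and reinstall itself afterwards.  A
   minimal single-threaded sketch; the `example_' name is
   hypothetical.  */
#if 0
#include <stdio.h>

static __ptr_t
example_trace_malloc (size)
     __malloc_size_t size;
{
  __ptr_t result;

  __malloc_hook = NULL;                 /* Avoid recursing into the hook.  */
  result = malloc (size);
  fprintf (stderr, "malloc (%lu) => %p\n", (unsigned long) size, result);
  __malloc_hook = example_trace_malloc; /* Put the hook back.  */
  return result;
}
#endif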