xref: /netbsd-src/external/gpl2/libmalloc/dist/gmalloc.c (revision 478e07dd80b73ca7b8dd26ada880f65ffb83aad5)
1 /*	$NetBSD: gmalloc.c,v 1.1.1.1 2016/01/13 21:42:18 christos Exp $	*/
2 
3 /* DO NOT EDIT THIS FILE -- it is automagically generated.  -*- C -*- */
4 
5 #define _MALLOC_INTERNAL
6 
7 /* The malloc headers and source files from the C library follow here.  */
8 
9 /* Declarations for `malloc' and friends.
10    Copyright 1990, 1991, 1992, 1993, 1995 Free Software Foundation, Inc.
11 		  Written May 1989 by Mike Haertel.
12 
13 This library is free software; you can redistribute it and/or
14 modify it under the terms of the GNU Library General Public License as
15 published by the Free Software Foundation; either version 2 of the
16 License, or (at your option) any later version.
17 
18 This library is distributed in the hope that it will be useful,
19 but WITHOUT ANY WARRANTY; without even the implied warranty of
20 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
21 Library General Public License for more details.
22 
23 You should have received a copy of the GNU Library General Public
24 License along with this library; see the file COPYING.LIB.  If
25 not, write to the Free Software Foundation, Inc., 675 Mass Ave,
26 Cambridge, MA 02139, USA.
27 
28    The author may be reached (Email) at the address mike@ai.mit.edu,
29    or (US mail) as Mike Haertel c/o Free Software Foundation.  */
30 
31 #ifndef _MALLOC_H
32 
33 #define _MALLOC_H	1
34 
35 #ifdef _MALLOC_INTERNAL
36 
37 #ifdef	HAVE_CONFIG_H
38 #include <config.h>
39 #endif
40 
41 #if	defined(_LIBC) || defined(STDC_HEADERS) || defined(USG)
42 #include <string.h>
43 #else
44 #ifndef memset
45 #define	memset(s, zero, n)	bzero ((s), (n))
46 #endif
47 #ifndef memcpy
48 #define	memcpy(d, s, n)		bcopy ((s), (d), (n))
49 #endif
50 #endif
51 
52 #if	defined (__GNU_LIBRARY__) || (defined (__STDC__) && __STDC__)
53 #include <limits.h>
54 #else
55 #ifndef CHAR_BIT
56 #define	CHAR_BIT	8
57 #endif
58 #endif
59 
60 #ifdef	HAVE_UNISTD_H
61 #include <unistd.h>
62 #endif
63 
64 #endif	/* _MALLOC_INTERNAL.  */
65 
66 
67 #ifdef	__cplusplus
68 extern "C"
69 {
70 #endif
71 
72 #if defined (__cplusplus) || (defined (__STDC__) && __STDC__)
73 #undef	__P
74 #define	__P(args)	args
75 #undef	__ptr_t
76 #define	__ptr_t		void *
77 #else /* Not C++ or ANSI C.  */
78 #undef	__P
79 #define	__P(args)	()
80 #undef	const
81 #define	const
82 #undef	__ptr_t
83 #define	__ptr_t		char *
84 #endif /* C++ or ANSI C.  */
85 
86 #if defined (__STDC__) && __STDC__
87 #include <stddef.h>
88 #define	__malloc_size_t		size_t
89 #define	__malloc_ptrdiff_t	ptrdiff_t
90 #else
91 #define	__malloc_size_t		unsigned int
92 #define	__malloc_ptrdiff_t	int
93 #endif
94 
95 #ifndef	NULL
96 #define	NULL	0
97 #endif
98 
99 
100 /* Allocate SIZE bytes of memory.  */
101 extern __ptr_t malloc __P ((__malloc_size_t __size));
102 /* Re-allocate the previously allocated block
103    at PTR, making the new block SIZE bytes long.  */
104 extern __ptr_t realloc __P ((__ptr_t __ptr, __malloc_size_t __size));
105 /* Allocate NMEMB elements of SIZE bytes each, all initialized to 0.  */
106 extern __ptr_t calloc __P ((__malloc_size_t __nmemb, __malloc_size_t __size));
107 /* Free a block allocated by `malloc', `realloc' or `calloc'.  */
108 extern void free __P ((__ptr_t __ptr));
109 
110 /* Allocate SIZE bytes aligned to a multiple of ALIGNMENT bytes.  */
111 extern __ptr_t memalign __P ((__malloc_size_t __alignment,
112 			      __malloc_size_t __size));
113 
114 /* Allocate SIZE bytes on a page boundary.  */
115 extern __ptr_t valloc __P ((__malloc_size_t __size));
116 
117 
118 #ifdef _MALLOC_INTERNAL
119 
120 /* The allocator divides the heap into blocks of fixed size; large
121    requests receive one or more whole blocks, and small requests
122    receive a fragment of a block.  Fragment sizes are powers of two,
123    and all fragments of a block are the same size.  When all the
124    fragments in a block have been freed, the block itself is freed.  */
125 #define INT_BIT		(CHAR_BIT * sizeof(int))
126 #define BLOCKLOG	(INT_BIT > 16 ? 12 : 9)
127 #define BLOCKSIZE	(1 << BLOCKLOG)
128 #define BLOCKIFY(SIZE)	(((SIZE) + BLOCKSIZE - 1) / BLOCKSIZE)
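/* Worked example (editor's note, assuming a machine with 32-bit `int'):
   INT_BIT is 32, so BLOCKLOG is 12 and BLOCKSIZE is 4096 bytes, and
   BLOCKIFY rounds a request up to whole blocks:

       BLOCKIFY (1)    == 1
       BLOCKIFY (4096) == 1
       BLOCKIFY (5000) == 2

   Requests of BLOCKSIZE / 2 bytes or less are instead served from
   power-of-two fragments carved out of a single block (see `malloc').  */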
129 
130 /* Determine the amount of memory spanned by the initial heap table
131    (not an absolute limit).  */
132 #define HEAP		(INT_BIT > 16 ? 4194304 : 65536)
133 
134 /* Number of contiguous free blocks allowed to build up at the end of
135    memory before they will be returned to the system.  */
136 #define FINAL_FREE_BLOCKS	8
137 
138 /* Data structure giving per-block information.  */
139 typedef union
140   {
141     /* Heap information for a busy block.  */
142     struct
143       {
144 	/* Zero for a large (multiblock) object, or positive giving the
145 	   logarithm to the base two of the fragment size.  */
146 	int type;
147 	union
148 	  {
149 	    struct
150 	      {
151 		__malloc_size_t nfree; /* Free frags in a fragmented block.  */
152 		__malloc_size_t first; /* First free fragment of the block.  */
153 	      } frag;
154 	    /* For a large object, in its first block, this has the number
155 	       of blocks in the object.  In the other blocks, this has a
156 	       negative number which says how far back the first block is.  */
157 	    __malloc_ptrdiff_t size;
158 	  } info;
159       } busy;
160     /* Heap information for a free block
161        (that may be the first of a free cluster).  */
162     struct
163       {
164 	__malloc_size_t size;	/* Size (in blocks) of a free cluster.  */
165 	__malloc_size_t next;	/* Index of next free cluster.  */
166 	__malloc_size_t prev;	/* Index of previous free cluster.  */
167       } free;
168   } malloc_info;
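/* Illustration (editor's note) of how the union above is filled in.  A
   three-block object whose first block is number 10 is recorded as:

       _heapinfo[10].busy.type = 0;
       _heapinfo[10].busy.info.size = 3;
       _heapinfo[11].busy.info.size = -1;
       _heapinfo[12].busy.info.size = -2;

   A block carved into 64-byte fragments instead has busy.type = 6
   (since 1 << 6 == 64), with busy.info.frag.nfree counting its free
   fragments and busy.info.frag.first indexing the first one.  */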
169 
170 /* Pointer to first block of the heap.  */
171 extern char *_heapbase;
172 
173 /* Table indexed by block number giving per-block information.  */
174 extern malloc_info *_heapinfo;
175 
176 /* Address to block number and vice versa.  */
177 #define BLOCK(A)	(((char *) (A) - _heapbase) / BLOCKSIZE + 1)
178 #define ADDRESS(B)	((__ptr_t) (((B) - 1) * BLOCKSIZE + _heapbase))
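/* Worked example (editor's note): block numbers count from 1 at
   _heapbase.  With BLOCKSIZE 4096, an object at _heapbase + 10000 lies in
   block 10000 / 4096 + 1 == 3, and ADDRESS (3) == _heapbase + 8192, the
   start of that block; the two macros are inverses on block
   boundaries.  */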
179 
180 /* Current search index for the heap table.  */
181 extern __malloc_size_t _heapindex;
182 
183 /* Limit of valid info table indices.  */
184 extern __malloc_size_t _heaplimit;
185 
186 /* Doubly linked lists of free fragments.  */
187 struct list
188   {
189     struct list *next;
190     struct list *prev;
191   };
192 
193 /* Free list headers for each fragment size.  */
194 extern struct list _fraghead[];
195 
196 /* List of blocks allocated with `memalign' (or `valloc').  */
197 struct alignlist
198   {
199     struct alignlist *next;
200     __ptr_t aligned;		/* The address that memalign returned.  */
201     __ptr_t exact;		/* The address that malloc returned.  */
202   };
203 extern struct alignlist *_aligned_blocks;
204 
205 /* Instrumentation.  */
206 extern __malloc_size_t _chunks_used;
207 extern __malloc_size_t _bytes_used;
208 extern __malloc_size_t _chunks_free;
209 extern __malloc_size_t _bytes_free;
210 
211 /* Internal version of `free' used in `morecore' (malloc.c). */
212 extern void _free_internal __P ((__ptr_t __ptr));
213 
214 #endif /* _MALLOC_INTERNAL.  */
215 
216 /* Given an address in the middle of a malloc'd object,
217    return the address of the beginning of the object.  */
218 extern __ptr_t malloc_find_object_address __P ((__ptr_t __ptr));
219 
220 /* Underlying allocation function; successive calls should
221    return contiguous pieces of memory.  */
222 extern __ptr_t (*__morecore) __P ((__malloc_ptrdiff_t __size));
223 
224 /* Default value of `__morecore'.  */
225 extern __ptr_t __default_morecore __P ((__malloc_ptrdiff_t __size));
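/* Example (editor's sketch): `__morecore' may be pointed at any function
   with sbrk-like behaviour: successive positive requests return
   contiguous memory, and the previous end of the region is returned on
   success (so a zero request reports the current end).  A toy
   replacement backed by a static arena; the names `arena', `arena_used'
   and `arena_morecore' are hypothetical, not part of this library:

       #include <stddef.h>

       static char arena[1 << 20];
       static size_t arena_used;

       static void *
       arena_morecore (ptrdiff_t increment)
       {
         char *old_break = arena + arena_used;
         if ((increment > 0 && arena_used + increment > sizeof arena)
             || (increment < 0 && (size_t) -increment > arena_used))
           return NULL;
         arena_used += increment;
         return old_break;
       }

   installed early in start-up with `__morecore = arena_morecore;'.  */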
226 
227 /* If not NULL, this function is called after each time
228    `__morecore' is called to increase the data size.  */
229 extern void (*__after_morecore_hook) __P ((void));
230 
231 /* Nonzero if `malloc' has been called and done its initialization.  */
232 extern int __malloc_initialized;
233 
234 /* Hooks for debugging versions.  */
235 extern void (*__malloc_initialize_hook) __P ((void));
236 extern void (*__free_hook) __P ((__ptr_t __ptr));
237 extern __ptr_t (*__malloc_hook) __P ((__malloc_size_t __size));
238 extern __ptr_t (*__realloc_hook) __P ((__ptr_t __ptr, __malloc_size_t __size));
239 extern __ptr_t (*__memalign_hook) __P ((__malloc_size_t __size,
240 					__malloc_size_t __alignment));
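/* Example (editor's sketch, assuming an ANSI C compiler): a logging
   `__malloc_hook'.  Because this allocator returns the hook's result
   directly, the hook must perform the real allocation itself; it is
   cleared while `malloc' and `fprintf' run so that neither call
   re-enters it.  (`my_malloc_hook' is a hypothetical name, not part of
   this library.)

       #include <stdio.h>
       #include <stdlib.h>

       static void *
       my_malloc_hook (size_t size)
       {
         void *result;
         __malloc_hook = NULL;
         result = malloc (size);
         fprintf (stderr, "malloc (%lu) -> %p\n",
                  (unsigned long) size, result);
         __malloc_hook = my_malloc_hook;
         return result;
       }

   and then, early in start-up:  __malloc_hook = my_malloc_hook;
   The free, realloc and memalign hooks above work the same way.  */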
241 
242 /* Return values for `mprobe': these are the kinds of inconsistencies that
243    `mcheck' enables detection of.  */
244 enum mcheck_status
245   {
246     MCHECK_DISABLED = -1,	/* Consistency checking is not turned on.  */
247     MCHECK_OK,			/* Block is fine.  */
248     MCHECK_FREE,		/* Block freed twice.  */
249     MCHECK_HEAD,		/* Memory before the block was clobbered.  */
250     MCHECK_TAIL			/* Memory after the block was clobbered.  */
251   };
252 
253 /* Activate a standard collection of debugging hooks.  This must be called
254    before `malloc' is ever called.  ABORTFUNC is called with an error code
255    (see enum above) when an inconsistency is detected.  If ABORTFUNC is
256    null, the standard function prints on stderr and then calls `abort'.  */
257 extern int mcheck __P ((void (*__abortfunc) __P ((enum mcheck_status))));
258 
259 /* Check for aberrations in a particular malloc'd block.  You must have
260    called `mcheck' already.  These are the same checks that `mcheck' does
261    when you free or reallocate a block.  */
262 extern enum mcheck_status mprobe __P ((__ptr_t __ptr));
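/* Example (editor's sketch): `mcheck' must run before the first
   allocation; `mprobe' can then be used at any point on any live block.
   A nonzero return from `mcheck' means the hooks could not be installed
   (typically because `malloc' had already been called).

       #include <stdio.h>

       if (mcheck (NULL) != 0)
         fprintf (stderr, "mcheck: called too late\n");

       p = malloc (32);
       ...
       if (mprobe (p) != MCHECK_OK)
         fprintf (stderr, "heap corruption detected near %p\n", p);

   (`p' is an illustrative `void *' variable.)  */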
263 
264 /* Activate a standard collection of tracing hooks.  */
265 extern void mtrace __P ((void));
266 extern void muntrace __P ((void));
267 
268 /* Statistics available to the user.  */
269 struct mstats
270   {
271     __malloc_size_t bytes_total; /* Total size of the heap. */
272     __malloc_size_t chunks_used; /* Chunks allocated by the user. */
273     __malloc_size_t bytes_used;	/* Byte total of user-allocated chunks. */
274     __malloc_size_t chunks_free; /* Chunks in the free list. */
275     __malloc_size_t bytes_free;	/* Byte total of chunks in the free list. */
276   };
277 
278 /* Pick up the current statistics. */
279 extern struct mstats mstats __P ((void));
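/* Example (editor's sketch): printing a snapshot of the statistics.

       #include <stdio.h>

       struct mstats s = mstats ();
       fprintf (stderr, "heap: %lu bytes total, %lu used in %lu chunks, %lu free\n",
                (unsigned long) s.bytes_total, (unsigned long) s.bytes_used,
                (unsigned long) s.chunks_used, (unsigned long) s.bytes_free);
*/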
280 
281 /* Call WARNFUN with a warning message when memory usage is high.  */
282 extern void memory_warnings __P ((__ptr_t __start,
283 				  void (*__warnfun) __P ((const char *))));
284 
285 
286 /* Relocating allocator.  */
287 
288 /* Allocate SIZE bytes, and store the address in *HANDLEPTR.  */
289 extern __ptr_t r_alloc __P ((__ptr_t *__handleptr, __malloc_size_t __size));
290 
291 /* Free the storage allocated in HANDLEPTR.  */
292 extern void r_alloc_free __P ((__ptr_t *__handleptr));
293 
294 /* Adjust the block at HANDLEPTR to be SIZE bytes long.  */
295 extern __ptr_t r_re_alloc __P ((__ptr_t *__handleptr, __malloc_size_t __size));
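/* Example (editor's sketch): the relocating allocator is handle-based;
   the data may move when other relocatable blocks change size, so it is
   always reached through *HANDLEPTR rather than through a saved pointer.
   (These three functions are only declared here, not defined in this
   file.)

       __ptr_t handle = NULL;

       r_alloc (&handle, 1024);
       ((char *) handle)[0] = 'x';
       r_re_alloc (&handle, 4096);
       r_alloc_free (&handle);

   After the r_re_alloc call, `handle' may point to a different address
   than before, which is why it is re-read on every use.  */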
296 
297 
298 #ifdef	__cplusplus
299 }
300 #endif
301 
302 #endif /* malloc.h  */
303 /* Allocate memory on a page boundary.
304    Copyright (C) 1991, 1992, 1993, 1994 Free Software Foundation, Inc.
305 
306 This library is free software; you can redistribute it and/or
307 modify it under the terms of the GNU Library General Public License as
308 published by the Free Software Foundation; either version 2 of the
309 License, or (at your option) any later version.
310 
311 This library is distributed in the hope that it will be useful,
312 but WITHOUT ANY WARRANTY; without even the implied warranty of
313 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
314 Library General Public License for more details.
315 
316 You should have received a copy of the GNU Library General Public
317 License along with this library; see the file COPYING.LIB.  If
318 not, write to the Free Software Foundation, Inc., 675 Mass Ave,
319 Cambridge, MA 02139, USA.
320 
321    The author may be reached (Email) at the address mike@ai.mit.edu,
322    or (US mail) as Mike Haertel c/o Free Software Foundation.  */
323 
324 #if defined (__GNU_LIBRARY__) || defined (_LIBC)
325 #include <stddef.h>
326 #include <sys/cdefs.h>
327 extern size_t __getpagesize __P ((void));
328 #else
329 #include "getpagesize.h"
330 #define	 __getpagesize()	getpagesize()
331 #endif
332 
333 #ifndef	_MALLOC_INTERNAL
334 #define	_MALLOC_INTERNAL
335 #include <malloc.h>
336 #endif
337 
338 static __malloc_size_t pagesize;
339 
340 __ptr_t
341 valloc (size)
342      __malloc_size_t size;
343 {
344   if (pagesize == 0)
345     pagesize = __getpagesize ();
346 
347   return memalign (pagesize, size);
348 }
349 /* Memory allocator `malloc'.
350    Copyright 1990, 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
351 		  Written May 1989 by Mike Haertel.
352 
353 This library is free software; you can redistribute it and/or
354 modify it under the terms of the GNU Library General Public License as
355 published by the Free Software Foundation; either version 2 of the
356 License, or (at your option) any later version.
357 
358 This library is distributed in the hope that it will be useful,
359 but WITHOUT ANY WARRANTY; without even the implied warranty of
360 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
361 Library General Public License for more details.
362 
363 You should have received a copy of the GNU Library General Public
364 License along with this library; see the file COPYING.LIB.  If
365 not, write to the Free Software Foundation, Inc., 675 Mass Ave,
366 Cambridge, MA 02139, USA.
367 
368    The author may be reached (Email) at the address mike@ai.mit.edu,
369    or (US mail) as Mike Haertel c/o Free Software Foundation.  */
370 
371 #ifndef	_MALLOC_INTERNAL
372 #define _MALLOC_INTERNAL
373 #include <malloc.h>
374 #endif
375 
376 /* How to really get more memory.  */
377 __ptr_t (*__morecore) __P ((ptrdiff_t __size)) = __default_morecore;
378 
379 /* Debugging hook for `malloc'.  */
380 __ptr_t (*__malloc_hook) __P ((__malloc_size_t __size));
381 
382 /* Pointer to the base of the first block.  */
383 char *_heapbase;
384 
385 /* Block information table.  Allocated with align/_free_internal (not malloc/free).  */
386 malloc_info *_heapinfo;
387 
388 /* Number of info entries.  */
389 static __malloc_size_t heapsize;
390 
391 /* Search index in the info table.  */
392 __malloc_size_t _heapindex;
393 
394 /* Limit of valid info table indices.  */
395 __malloc_size_t _heaplimit;
396 
397 /* Free lists for each fragment size.  */
398 struct list _fraghead[BLOCKLOG];
399 
400 /* Instrumentation.  */
401 __malloc_size_t _chunks_used;
402 __malloc_size_t _bytes_used;
403 __malloc_size_t _chunks_free;
404 __malloc_size_t _bytes_free;
405 
406 /* Are you experienced?  (Nonzero once `malloc' has done its initialization.)  */
407 int __malloc_initialized;
408 
409 void (*__malloc_initialize_hook) __P ((void));
410 void (*__after_morecore_hook) __P ((void));
411 
412 /* Aligned allocation.  */
413 static __ptr_t align __P ((__malloc_size_t));
414 static __ptr_t
415 align (size)
416      __malloc_size_t size;
417 {
418   __ptr_t result;
419   unsigned long int adj;
420 
421   result = (*__morecore) (size);
422   adj = (unsigned long int) ((unsigned long int) ((char *) result -
423 						  (char *) NULL)) % BLOCKSIZE;
424   if (adj != 0)
425     {
426       adj = BLOCKSIZE - adj;
427       (void) (*__morecore) (adj);
428       result = (char *) result + adj;
429     }
430 
431   if (__after_morecore_hook)
432     (*__after_morecore_hook) ();
433 
434   return result;
435 }
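/* Worked example (editor's note): with BLOCKSIZE 4096, if `__morecore'
   returns an address whose offset within a block is 0x100, `adj' becomes
   4096 - 0x100 == 0xf00; the break is advanced by that amount and the
   pointer returned is the next 4096-byte boundary inside the enlarged
   region.  */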
436 
437 /* Set everything up and remember that we have.  */
438 static int initialize __P ((void));
439 static int
440 initialize ()
441 {
442   if (__malloc_initialize_hook)
443     (*__malloc_initialize_hook) ();
444 
445   heapsize = HEAP / BLOCKSIZE;
446   _heapinfo = (malloc_info *) align (heapsize * sizeof (malloc_info));
447   if (_heapinfo == NULL)
448     return 0;
449   memset (_heapinfo, 0, heapsize * sizeof (malloc_info));
450   _heapinfo[0].free.size = 0;
451   _heapinfo[0].free.next = _heapinfo[0].free.prev = 0;
452   _heapindex = 0;
453   _heapbase = (char *) _heapinfo;
454 
455   /* Account for the _heapinfo block itself in the statistics.  */
456   _bytes_used = heapsize * sizeof (malloc_info);
457   _chunks_used = 1;
458 
459   __malloc_initialized = 1;
460   return 1;
461 }
462 
463 /* Get neatly aligned memory, initializing or
464    growing the heap info table as necessary. */
465 static __ptr_t morecore __P ((__malloc_size_t));
466 static __ptr_t
467 morecore (size)
468      __malloc_size_t size;
469 {
470   __ptr_t result;
471   malloc_info *newinfo, *oldinfo;
472   __malloc_size_t newsize;
473 
474   result = align (size);
475   if (result == NULL)
476     return NULL;
477 
478   /* Check if we need to grow the info table.  */
479   if ((__malloc_size_t) BLOCK ((char *) result + size) > heapsize)
480     {
481       newsize = heapsize;
482       while ((__malloc_size_t) BLOCK ((char *) result + size) > newsize)
483 	newsize *= 2;
484       newinfo = (malloc_info *) align (newsize * sizeof (malloc_info));
485       if (newinfo == NULL)
486 	{
487 	  (*__morecore) (-size);
488 	  return NULL;
489 	}
490       memcpy (newinfo, _heapinfo, heapsize * sizeof (malloc_info));
491       memset (&newinfo[heapsize], 0,
492 	      (newsize - heapsize) * sizeof (malloc_info));
493       oldinfo = _heapinfo;
494       newinfo[BLOCK (oldinfo)].busy.type = 0;
495       newinfo[BLOCK (oldinfo)].busy.info.size
496 	= BLOCKIFY (heapsize * sizeof (malloc_info));
497       _heapinfo = newinfo;
498       /* Account for the _heapinfo block itself in the statistics.  */
499       _bytes_used += newsize * sizeof (malloc_info);
500       ++_chunks_used;
501       _free_internal (oldinfo);
502       heapsize = newsize;
503     }
504 
505   _heaplimit = BLOCK ((char *) result + size);
506   return result;
507 }
508 
509 /* Allocate memory from the heap.  */
510 __ptr_t
511 malloc (size)
512      __malloc_size_t size;
513 {
514   __ptr_t result;
515   __malloc_size_t block, blocks, lastblocks, start;
516   register __malloc_size_t i;
517   struct list *next;
518 
519   /* ANSI C allows `malloc (0)' to either return NULL, or to return a
520      valid address you can realloc and free (though not dereference).
521 
522      It turns out that some extant code (sunrpc, at least Ultrix's version)
523      expects `malloc (0)' to return non-NULL and breaks otherwise.
524      Be compatible.  */
525 
526 #if	0
527   if (size == 0)
528     return NULL;
529 #endif
530 
531   if (__malloc_hook != NULL)
532     return (*__malloc_hook) (size);
533 
534   if (!__malloc_initialized)
535     if (!initialize ())
536       return NULL;
537 
538   if (size < sizeof (struct list))
539     size = sizeof (struct list);
540 
541 #ifdef SUNOS_LOCALTIME_BUG
542   if (size < 16)
543     size = 16;
544 #endif
545 
546   /* Determine the allocation policy based on the request size.  */
547   if (size <= BLOCKSIZE / 2)
548     {
549       /* Small allocation to receive a fragment of a block.
550 	 Determine the logarithm to base two of the fragment size. */
551       register __malloc_size_t log = 1;
552       --size;
553       while ((size /= 2) != 0)
554 	++log;
555 
556       /* Look in the fragment lists for a
557 	 free fragment of the desired size. */
558       next = _fraghead[log].next;
559       if (next != NULL)
560 	{
561 	  /* There are free fragments of this size.
562 	     Pop a fragment out of the fragment list and return it.
563 	     Update the block's nfree and first counters. */
564 	  result = (__ptr_t) next;
565 	  next->prev->next = next->next;
566 	  if (next->next != NULL)
567 	    next->next->prev = next->prev;
568 	  block = BLOCK (result);
569 	  if (--_heapinfo[block].busy.info.frag.nfree != 0)
570 	    _heapinfo[block].busy.info.frag.first = (unsigned long int)
571 	      ((unsigned long int) ((char *) next->next - (char *) NULL)
572 	       % BLOCKSIZE) >> log;
573 
574 	  /* Update the statistics.  */
575 	  ++_chunks_used;
576 	  _bytes_used += 1 << log;
577 	  --_chunks_free;
578 	  _bytes_free -= 1 << log;
579 	}
580       else
581 	{
582 	  /* No free fragments of the desired size, so get a new block
583 	     and break it into fragments, returning the first.  */
584 	  result = malloc (BLOCKSIZE);
585 	  if (result == NULL)
586 	    return NULL;
587 
588 	  /* Link all fragments but the first into the free list.  */
589 	  for (i = 1; i < (__malloc_size_t) (BLOCKSIZE >> log); ++i)
590 	    {
591 	      next = (struct list *) ((char *) result + (i << log));
592 	      next->next = _fraghead[log].next;
593 	      next->prev = &_fraghead[log];
594 	      next->prev->next = next;
595 	      if (next->next != NULL)
596 		next->next->prev = next;
597 	    }
598 
599 	  /* Initialize the nfree and first counters for this block.  */
600 	  block = BLOCK (result);
601 	  _heapinfo[block].busy.type = log;
602 	  _heapinfo[block].busy.info.frag.nfree = i - 1;
603 	  _heapinfo[block].busy.info.frag.first = i - 1;
604 
605 	  _chunks_free += (BLOCKSIZE >> log) - 1;
606 	  _bytes_free += BLOCKSIZE - (1 << log);
607 	  _bytes_used -= BLOCKSIZE - (1 << log);
608 	}
609     }
610   else
611     {
612       /* Large allocation to receive one or more blocks.
613 	 Search the free list in a circle starting at the last place visited.
614 	 If we loop completely around without finding a large enough
615 	 space we will have to get more memory from the system.  */
616       blocks = BLOCKIFY (size);
617       start = block = _heapindex;
618       while (_heapinfo[block].free.size < blocks)
619 	{
620 	  block = _heapinfo[block].free.next;
621 	  if (block == start)
622 	    {
623 	      /* Need to get more from the system.  Check to see if
624 		 the new core will be contiguous with the final free
625 		 block; if so we don't need to get as much.  */
626 	      block = _heapinfo[0].free.prev;
627 	      lastblocks = _heapinfo[block].free.size;
628 	      if (_heaplimit != 0 && block + lastblocks == _heaplimit &&
629 		  (*__morecore) (0) == ADDRESS (block + lastblocks) &&
630 		  (morecore ((blocks - lastblocks) * BLOCKSIZE)) != NULL)
631 		{
632  		  /* Which block we are extending (the `final free
633  		     block' referred to above) might have changed, if
634  		     it got combined with a freed info table.  */
635  		  block = _heapinfo[0].free.prev;
636   		  _heapinfo[block].free.size += (blocks - lastblocks);
637 		  _bytes_free += (blocks - lastblocks) * BLOCKSIZE;
638 		  continue;
639 		}
640 	      result = morecore (blocks * BLOCKSIZE);
641 	      if (result == NULL)
642 		return NULL;
643 	      block = BLOCK (result);
644 	      _heapinfo[block].busy.type = 0;
645 	      _heapinfo[block].busy.info.size = blocks;
646 	      ++_chunks_used;
647 	      _bytes_used += blocks * BLOCKSIZE;
648 	      return result;
649 	    }
650 	}
651 
652       /* At this point we have found a suitable free list entry.
653 	 Figure out how to remove what we need from the list. */
654       result = ADDRESS (block);
655       if (_heapinfo[block].free.size > blocks)
656 	{
657 	  /* The block we found has a bit left over,
658 	     so relink the tail end back into the free list. */
659 	  _heapinfo[block + blocks].free.size
660 	    = _heapinfo[block].free.size - blocks;
661 	  _heapinfo[block + blocks].free.next
662 	    = _heapinfo[block].free.next;
663 	  _heapinfo[block + blocks].free.prev
664 	    = _heapinfo[block].free.prev;
665 	  _heapinfo[_heapinfo[block].free.prev].free.next
666 	    = _heapinfo[_heapinfo[block].free.next].free.prev
667 	    = _heapindex = block + blocks;
668 	}
669       else
670 	{
671 	  /* The block exactly matches our requirements,
672 	     so just remove it from the list. */
673 	  _heapinfo[_heapinfo[block].free.next].free.prev
674 	    = _heapinfo[block].free.prev;
675 	  _heapinfo[_heapinfo[block].free.prev].free.next
676 	    = _heapindex = _heapinfo[block].free.next;
677 	  --_chunks_free;
678 	}
679 
680       _heapinfo[block].busy.type = 0;
681       _heapinfo[block].busy.info.size = blocks;
682       ++_chunks_used;
683       _bytes_used += blocks * BLOCKSIZE;
684       _bytes_free -= blocks * BLOCKSIZE;
685 
686       /* Mark all the blocks of the object just allocated except for the
687 	 first with a negative number so you can find the first block by
688 	 adding that adjustment.  */
689       while (--blocks > 0)
690 	_heapinfo[block + blocks].busy.info.size = -blocks;
691     }
692 
693   return result;
694 }
695 
696 #ifndef _LIBC
697 
698 /* On some ANSI C systems, some libc functions call _malloc, _free
699    and _realloc.  Make them use the GNU functions.  */
700 
701 __ptr_t
702 _malloc (size)
703      __malloc_size_t size;
704 {
705   return malloc (size);
706 }
707 
708 void
709 _free (ptr)
710      __ptr_t ptr;
711 {
712   free (ptr);
713 }
714 
715 __ptr_t
716 _realloc (ptr, size)
717      __ptr_t ptr;
718      __malloc_size_t size;
719 {
720   return realloc (ptr, size);
721 }
722 
723 #endif
724 /* Free a block of memory allocated by `malloc'.
725    Copyright 1990, 1991, 1992, 1994 Free Software Foundation, Inc.
726 		  Written May 1989 by Mike Haertel.
727 
728 This library is free software; you can redistribute it and/or
729 modify it under the terms of the GNU Library General Public License as
730 published by the Free Software Foundation; either version 2 of the
731 License, or (at your option) any later version.
732 
733 This library is distributed in the hope that it will be useful,
734 but WITHOUT ANY WARRANTY; without even the implied warranty of
735 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
736 Library General Public License for more details.
737 
738 You should have received a copy of the GNU Library General Public
739 License along with this library; see the file COPYING.LIB.  If
740 not, write to the Free Software Foundation, Inc., 675 Mass Ave,
741 Cambridge, MA 02139, USA.
742 
743    The author may be reached (Email) at the address mike@ai.mit.edu,
744    or (US mail) as Mike Haertel c/o Free Software Foundation.  */
745 
746 #ifndef	_MALLOC_INTERNAL
747 #define _MALLOC_INTERNAL
748 #include <malloc.h>
749 #endif
750 
751 /* Debugging hook for free.  */
752 void (*__free_hook) __P ((__ptr_t __ptr));
753 
754 /* List of blocks allocated by memalign.  */
755 struct alignlist *_aligned_blocks = NULL;
756 
757 /* Return memory to the heap.
758    Like `free' but don't call a __free_hook if there is one.  */
759 void
760 _free_internal (ptr)
761      __ptr_t ptr;
762 {
763   int type;
764   __malloc_size_t block, blocks;
765   register __malloc_size_t i;
766   struct list *prev, *next;
767 
768   block = BLOCK (ptr);
769 
770   type = _heapinfo[block].busy.type;
771   switch (type)
772     {
773     case 0:
774       /* Get as many statistics as early as we can.  */
775       --_chunks_used;
776       _bytes_used -= _heapinfo[block].busy.info.size * BLOCKSIZE;
777       _bytes_free += _heapinfo[block].busy.info.size * BLOCKSIZE;
778 
779       /* Find the free cluster previous to this one in the free list.
780 	 Start searching at the last block referenced; this may benefit
781 	 programs with locality of allocation.  */
782       i = _heapindex;
783       if (i > block)
784 	while (i > block)
785 	  i = _heapinfo[i].free.prev;
786       else
787 	{
788 	  do
789 	    i = _heapinfo[i].free.next;
790 	  while (i > 0 && i < block);
791 	  i = _heapinfo[i].free.prev;
792 	}
793 
794       /* Determine how to link this block into the free list.  */
795       if (block == i + _heapinfo[i].free.size)
796 	{
797 	  /* Coalesce this block with its predecessor.  */
798 	  _heapinfo[i].free.size += _heapinfo[block].busy.info.size;
799 	  block = i;
800 	}
801       else
802 	{
803 	  /* Really link this block back into the free list.  */
804 	  _heapinfo[block].free.size = _heapinfo[block].busy.info.size;
805 	  _heapinfo[block].free.next = _heapinfo[i].free.next;
806 	  _heapinfo[block].free.prev = i;
807 	  _heapinfo[i].free.next = block;
808 	  _heapinfo[_heapinfo[block].free.next].free.prev = block;
809 	  ++_chunks_free;
810 	}
811 
812       /* Now that the block is linked in, see if we can coalesce it
813 	 with its successor (by deleting its successor from the list
814 	 and adding in its size).  */
815       if (block + _heapinfo[block].free.size == _heapinfo[block].free.next)
816 	{
817 	  _heapinfo[block].free.size
818 	    += _heapinfo[_heapinfo[block].free.next].free.size;
819 	  _heapinfo[block].free.next
820 	    = _heapinfo[_heapinfo[block].free.next].free.next;
821 	  _heapinfo[_heapinfo[block].free.next].free.prev = block;
822 	  --_chunks_free;
823 	}
824 
825       /* Now see if we can return stuff to the system.  */
826       blocks = _heapinfo[block].free.size;
827       if (blocks >= FINAL_FREE_BLOCKS && block + blocks == _heaplimit
828 	  && (*__morecore) (0) == ADDRESS (block + blocks))
829 	{
830 	  register __malloc_size_t bytes = blocks * BLOCKSIZE;
831 	  _heaplimit -= blocks;
832 	  (*__morecore) (-bytes);
833 	  _heapinfo[_heapinfo[block].free.prev].free.next
834 	    = _heapinfo[block].free.next;
835 	  _heapinfo[_heapinfo[block].free.next].free.prev
836 	    = _heapinfo[block].free.prev;
837 	  block = _heapinfo[block].free.prev;
838 	  --_chunks_free;
839 	  _bytes_free -= bytes;
840 	}
841 
842       /* Set the next search to begin at this block.  */
843       _heapindex = block;
844       break;
845 
846     default:
847       /* Do some of the statistics.  */
848       --_chunks_used;
849       _bytes_used -= 1 << type;
850       ++_chunks_free;
851       _bytes_free += 1 << type;
852 
853       /* Get the address of the first free fragment in this block.  */
854       prev = (struct list *) ((char *) ADDRESS (block) +
855 			   (_heapinfo[block].busy.info.frag.first << type));
856 
857       if (_heapinfo[block].busy.info.frag.nfree == (BLOCKSIZE >> type) - 1)
858 	{
859 	  /* If all fragments of this block are free, remove them
860 	     from the fragment list and free the whole block.  */
861 	  next = prev;
862 	  for (i = 1; i < (__malloc_size_t) (BLOCKSIZE >> type); ++i)
863 	    next = next->next;
864 	  prev->prev->next = next;
865 	  if (next != NULL)
866 	    next->prev = prev->prev;
867 	  _heapinfo[block].busy.type = 0;
868 	  _heapinfo[block].busy.info.size = 1;
869 
870 	  /* Keep the statistics accurate.  */
871 	  ++_chunks_used;
872 	  _bytes_used += BLOCKSIZE;
873 	  _chunks_free -= BLOCKSIZE >> type;
874 	  _bytes_free -= BLOCKSIZE;
875 
876 	  free (ADDRESS (block));
877 	}
878       else if (_heapinfo[block].busy.info.frag.nfree != 0)
879 	{
880 	  /* If some fragments of this block are free, link this
881 	     fragment into the fragment list after the first free
882 	     fragment of this block. */
883 	  next = (struct list *) ptr;
884 	  next->next = prev->next;
885 	  next->prev = prev;
886 	  prev->next = next;
887 	  if (next->next != NULL)
888 	    next->next->prev = next;
889 	  ++_heapinfo[block].busy.info.frag.nfree;
890 	}
891       else
892 	{
893 	  /* No fragments of this block are free, so link this
894 	     fragment into the fragment list and announce that
895 	     it is the first free fragment of this block. */
896 	  prev = (struct list *) ptr;
897 	  _heapinfo[block].busy.info.frag.nfree = 1;
898 	  _heapinfo[block].busy.info.frag.first = (unsigned long int)
899 	    ((unsigned long int) ((char *) ptr - (char *) NULL)
900 	     % BLOCKSIZE >> type);
901 	  prev->next = _fraghead[type].next;
902 	  prev->prev = &_fraghead[type];
903 	  prev->prev->next = prev;
904 	  if (prev->next != NULL)
905 	    prev->next->prev = prev;
906 	}
907       break;
908     }
909 }
910 
911 /* Return memory to the heap.  */
912 void
913 free (ptr)
914      __ptr_t ptr;
915 {
916   register struct alignlist *l;
917 
918   if (ptr == NULL)
919     return;
920 
921   for (l = _aligned_blocks; l != NULL; l = l->next)
922     if (l->aligned == ptr)
923       {
924 	l->aligned = NULL;	/* Mark the slot in the list as free.  */
925 	ptr = l->exact;
926 	break;
927       }
928 
929   if (__free_hook != NULL)
930     (*__free_hook) (ptr);
931   else
932     _free_internal (ptr);
933 }
934 /* Copyright (C) 1991, 1993, 1994 Free Software Foundation, Inc.
935 This file is part of the GNU C Library.
936 
937 The GNU C Library is free software; you can redistribute it and/or
938 modify it under the terms of the GNU Library General Public License as
939 published by the Free Software Foundation; either version 2 of the
940 License, or (at your option) any later version.
941 
942 The GNU C Library is distributed in the hope that it will be useful,
943 but WITHOUT ANY WARRANTY; without even the implied warranty of
944 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
945 Library General Public License for more details.
946 
947 You should have received a copy of the GNU Library General Public
948 License along with the GNU C Library; see the file COPYING.LIB.  If
949 not, write to the Free Software Foundation, Inc., 675 Mass Ave,
950 Cambridge, MA 02139, USA.  */
951 
952 #ifndef	_MALLOC_INTERNAL
953 #define _MALLOC_INTERNAL
954 #include <malloc.h>
955 #endif
956 
957 #ifdef _LIBC
958 
959 #include <ansidecl.h>
960 #include <gnu-stabs.h>
961 
962 #undef	cfree
963 
964 function_alias(cfree, free, void, (ptr),
965 	       DEFUN(cfree, (ptr), PTR ptr))
966 
967 #else
968 
969 void
970 cfree (ptr)
971      __ptr_t ptr;
972 {
973   free (ptr);
974 }
975 
976 #endif
977 /* Change the size of a block allocated by `malloc'.
978    Copyright 1990, 1991, 1992, 1993, 1994 Free Software Foundation, Inc.
979 		     Written May 1989 by Mike Haertel.
980 
981 This library is free software; you can redistribute it and/or
982 modify it under the terms of the GNU Library General Public License as
983 published by the Free Software Foundation; either version 2 of the
984 License, or (at your option) any later version.
985 
986 This library is distributed in the hope that it will be useful,
987 but WITHOUT ANY WARRANTY; without even the implied warranty of
988 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
989 Library General Public License for more details.
990 
991 You should have received a copy of the GNU Library General Public
992 License along with this library; see the file COPYING.LIB.  If
993 not, write to the Free Software Foundation, Inc., 675 Mass Ave,
994 Cambridge, MA 02139, USA.
995 
996    The author may be reached (Email) at the address mike@ai.mit.edu,
997    or (US mail) as Mike Haertel c/o Free Software Foundation.  */
998 
999 #ifndef	_MALLOC_INTERNAL
1000 #define _MALLOC_INTERNAL
1001 #include <malloc.h>
1002 #endif
1003 
1004 #if  (defined (MEMMOVE_MISSING) || \
1005       !defined(_LIBC) && !defined(STDC_HEADERS) && !defined(USG))
1006 
1007 /* Snarfed directly from Emacs src/dispnew.c:
1008    XXX Should use system bcopy if it handles overlap.  */
1009 #ifndef emacs
1010 
1011 /* Like bcopy except never gets confused by overlap.  */
1012 
1013 static void
1014 safe_bcopy (from, to, size)
1015      char *from, *to;
1016      int size;
1017 {
1018   if (size <= 0 || from == to)
1019     return;
1020 
1021   /* If the source and destination don't overlap, then bcopy can
1022      handle it.  If they do overlap, but the destination is lower in
1023      memory than the source, we'll assume bcopy can handle that.  */
1024   if (to < from || from + size <= to)
1025     bcopy (from, to, size);
1026 
1027   /* Otherwise, we'll copy from the end.  */
1028   else
1029     {
1030       register char *endf = from + size;
1031       register char *endt = to + size;
1032 
1033       /* If TO - FROM is large, then we should break the copy into
1034 	 nonoverlapping chunks of TO - FROM bytes each.  However, if
1035 	 TO - FROM is small, then the bcopy function call overhead
1036 	 makes this not worth it.  The crossover point could be about
1037 	 anywhere.  Since I don't think the obvious copy loop is too
1038 	 bad, I'm trying to err in its favor.  */
1039       if (to - from < 64)
1040 	{
1041 	  do
1042 	    *--endt = *--endf;
1043 	  while (endf != from);
1044 	}
1045       else
1046 	{
1047 	  for (;;)
1048 	    {
1049 	      endt -= (to - from);
1050 	      endf -= (to - from);
1051 
1052 	      if (endt < to)
1053 		break;
1054 
1055 	      bcopy (endf, endt, to - from);
1056 	    }
1057 
1058 	  /* If SIZE wasn't a multiple of TO - FROM, there will be a
1059 	     little left over.  The amount left over is
1060 	     (endt + (to - from)) - to, which is endt - from.  */
1061 	  bcopy (from, to, endt - from);
1062 	}
1063     }
1064 }
1065 #endif	/* Not emacs.  */
1066 
1067 #define memmove(to, from, size) safe_bcopy ((from), (to), (size))
1068 
1069 #endif
1070 
1071 
1072 #define min(A, B) ((A) < (B) ? (A) : (B))
1073 
1074 /* Debugging hook for realloc.  */
1075 __ptr_t (*__realloc_hook) __P ((__ptr_t __ptr, __malloc_size_t __size));
1076 
1077 /* Resize the given region to the new size, returning a pointer
1078    to the (possibly moved) region.  This is optimized for speed;
1079    some benchmarks seem to indicate that greater compactness is
1080    achieved by unconditionally allocating and copying to a
1081    new region.  This module has incestuous knowledge of the
1082    internals of both free and malloc. */
1083 __ptr_t
1084 realloc (ptr, size)
1085      __ptr_t ptr;
1086      __malloc_size_t size;
1087 {
1088   __ptr_t result;
1089   int type;
1090   __malloc_size_t block, blocks, oldlimit;
1091 
1092   if (size == 0)
1093     {
1094       free (ptr);
1095       return malloc (0);
1096     }
1097   else if (ptr == NULL)
1098     return malloc (size);
1099 
1100   if (__realloc_hook != NULL)
1101     return (*__realloc_hook) (ptr, size);
1102 
1103   block = BLOCK (ptr);
1104 
1105   type = _heapinfo[block].busy.type;
1106   switch (type)
1107     {
1108     case 0:
1109       /* Maybe reallocate a large block to a small fragment.  */
1110       if (size <= BLOCKSIZE / 2)
1111 	{
1112 	  result = malloc (size);
1113 	  if (result != NULL)
1114 	    {
1115 	      memcpy (result, ptr, size);
1116 	      _free_internal (ptr);
1117 	      return result;
1118 	    }
1119 	}
1120 
1121       /* The new size is a large allocation as well;
1122 	 see if we can hold it in place. */
1123       blocks = BLOCKIFY (size);
1124       if (blocks < _heapinfo[block].busy.info.size)
1125 	{
1126 	  /* The new size is smaller; return
1127 	     excess memory to the free list. */
1128 	  _heapinfo[block + blocks].busy.type = 0;
1129 	  _heapinfo[block + blocks].busy.info.size
1130 	    = _heapinfo[block].busy.info.size - blocks;
1131 	  _heapinfo[block].busy.info.size = blocks;
1132 	  /* We have just created a new chunk by splitting a chunk in two.
1133 	     Now we will free this chunk; increment the statistics counter
1134 	     so it doesn't become wrong when _free_internal decrements it.  */
1135 	  ++_chunks_used;
1136 	  _free_internal (ADDRESS (block + blocks));
1137 	  result = ptr;
1138 	}
1139       else if (blocks == _heapinfo[block].busy.info.size)
1140 	/* No size change necessary.  */
1141 	result = ptr;
1142       else
1143 	{
1144 	  /* Won't fit, so allocate a new region that will.
1145 	     Free the old region first in case there is sufficient
1146 	     adjacent free space to grow without moving. */
1147 	  blocks = _heapinfo[block].busy.info.size;
1148 	  /* Prevent free from actually returning memory to the system.  */
1149 	  oldlimit = _heaplimit;
1150 	  _heaplimit = 0;
1151 	  _free_internal (ptr);
1152 	  _heaplimit = oldlimit;
1153 	  result = malloc (size);
1154 	  if (result == NULL)
1155 	    {
1156 	      /* Now we're really in trouble.  We have to unfree
1157 		 the thing we just freed.  Unfortunately it might
1158 		 have been coalesced with its neighbors.  */
1159 	      if (_heapindex == block)
1160 	        (void) malloc (blocks * BLOCKSIZE);
1161 	      else
1162 		{
1163 		  __ptr_t previous = malloc ((block - _heapindex) * BLOCKSIZE);
1164 		  (void) malloc (blocks * BLOCKSIZE);
1165 		  _free_internal (previous);
1166 		}
1167 	      return NULL;
1168 	    }
1169 	  if (ptr != result)
1170 	    memmove (result, ptr, blocks * BLOCKSIZE);
1171 	}
1172       break;
1173 
1174     default:
1175       /* Old size is a fragment; type is logarithm
1176 	 to base two of the fragment size.  */
1177       if (size > (__malloc_size_t) (1 << (type - 1)) &&
1178 	  size <= (__malloc_size_t) (1 << type))
1179 	/* The new size is the same kind of fragment.  */
1180 	result = ptr;
1181       else
1182 	{
1183 	  /* The new size is different; allocate a new space,
1184 	     and copy the lesser of the new size and the old. */
1185 	  result = malloc (size);
1186 	  if (result == NULL)
1187 	    return NULL;
1188 	  memcpy (result, ptr, min (size, (__malloc_size_t) 1 << type));
1189 	  free (ptr);
1190 	}
1191       break;
1192     }
1193 
1194   return result;
1195 }
1196 /* Copyright (C) 1991, 1992, 1994 Free Software Foundation, Inc.
1197 
1198 This library is free software; you can redistribute it and/or
1199 modify it under the terms of the GNU Library General Public License as
1200 published by the Free Software Foundation; either version 2 of the
1201 License, or (at your option) any later version.
1202 
1203 This library is distributed in the hope that it will be useful,
1204 but WITHOUT ANY WARRANTY; without even the implied warranty of
1205 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
1206 Library General Public License for more details.
1207 
1208 You should have received a copy of the GNU Library General Public
1209 License along with this library; see the file COPYING.LIB.  If
1210 not, write to the Free Software Foundation, Inc., 675 Mass Ave,
1211 Cambridge, MA 02139, USA.
1212 
1213    The author may be reached (Email) at the address mike@ai.mit.edu,
1214    or (US mail) as Mike Haertel c/o Free Software Foundation.  */
1215 
1216 #ifndef	_MALLOC_INTERNAL
1217 #define	_MALLOC_INTERNAL
1218 #include <malloc.h>
1219 #endif
1220 
1221 /* Allocate an array of NMEMB elements each SIZE bytes long.
1222    The entire array is initialized to zeros.  */
1223 __ptr_t
1224 calloc (nmemb, size)
1225      register __malloc_size_t nmemb;
1226      register __malloc_size_t size;
1227 {
1228   register __ptr_t result = malloc (nmemb * size);
1229 
1230   if (result != NULL)
1231     (void) memset (result, 0, nmemb * size);
1232 
1233   return result;
1234 }
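/* Editor's note: as in the original sources, the nmemb * size
   multiplication above is not checked for overflow.  A wrapper that must
   handle untrusted counts could refuse such requests first, e.g.

       if (size != 0 && nmemb > (__malloc_size_t) -1 / size)
         return NULL;

   before forwarding to `calloc'.  */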
1235 /* Copyright (C) 1991, 1992, 1993, 1994 Free Software Foundation, Inc.
1236 This file is part of the GNU C Library.
1237 
1238 The GNU C Library is free software; you can redistribute it and/or modify
1239 it under the terms of the GNU General Public License as published by
1240 the Free Software Foundation; either version 2, or (at your option)
1241 any later version.
1242 
1243 The GNU C Library is distributed in the hope that it will be useful,
1244 but WITHOUT ANY WARRANTY; without even the implied warranty of
1245 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
1246 GNU General Public License for more details.
1247 
1248 You should have received a copy of the GNU General Public License
1249 along with the GNU C Library; see the file COPYING.  If not, write to
1250 the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.  */
1251 
1252 #ifndef	_MALLOC_INTERNAL
1253 #define	_MALLOC_INTERNAL
1254 #include <malloc.h>
1255 #endif
1256 
1257 #ifndef	__GNU_LIBRARY__
1258 #define	__sbrk	sbrk
1259 #endif
1260 
1261 #ifdef __GNU_LIBRARY__
1262 /* It is best not to declare this and cast its result on foreign operating
1263    systems with potentially hostile include files.  */
1264 extern __ptr_t __sbrk __P ((int increment));
1265 #endif
1266 
1267 #ifndef NULL
1268 #define NULL 0
1269 #endif
1270 
1271 /* Allocate INCREMENT more bytes of data space,
1272    and return the start of data space, or NULL on errors.
1273    If INCREMENT is negative, shrink data space.  */
1274 __ptr_t
1275 __default_morecore (increment)
1276 #ifdef __STDC__
1277      ptrdiff_t increment;
1278 #else
1279      int increment;
1280 #endif
1281 {
1282   __ptr_t result = (__ptr_t) __sbrk ((int) increment);
1283   if (result == (__ptr_t) -1)
1284     return NULL;
1285   return result;
1286 }
1287 /* Copyright (C) 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
1288 
1289 This library is free software; you can redistribute it and/or
1290 modify it under the terms of the GNU Library General Public License as
1291 published by the Free Software Foundation; either version 2 of the
1292 License, or (at your option) any later version.
1293 
1294 This library is distributed in the hope that it will be useful,
1295 but WITHOUT ANY WARRANTY; without even the implied warranty of
1296 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
1297 Library General Public License for more details.
1298 
1299 You should have received a copy of the GNU Library General Public
1300 License along with this library; see the file COPYING.LIB.  If
1301 not, write to the Free Software Foundation, Inc., 675 Mass Ave,
1302 Cambridge, MA 02139, USA.  */
1303 
1304 #ifndef	_MALLOC_INTERNAL
1305 #define _MALLOC_INTERNAL
1306 #include <malloc.h>
1307 #endif
1308 
1309 __ptr_t (*__memalign_hook) __P ((size_t __size, size_t __alignment));
1310 
1311 __ptr_t
1312 memalign (alignment, size)
1313      __malloc_size_t alignment;
1314      __malloc_size_t size;
1315 {
1316   __ptr_t result;
1317   unsigned long int adj;
1318 
1319   if (__memalign_hook)
1320     return (*__memalign_hook) (alignment, size);
1321 
1322   size = ((size + alignment - 1) / alignment) * alignment;
1323 
1324   result = malloc (size);
1325   if (result == NULL)
1326     return NULL;
1327   adj = (unsigned long int) ((unsigned long int) ((char *) result -
1328 						  (char *) NULL)) % alignment;
1329   if (adj != 0)
1330     {
1331       struct alignlist *l;
1332       for (l = _aligned_blocks; l != NULL; l = l->next)
1333 	if (l->aligned == NULL)
1334 	  /* This slot is free.  Use it.  */
1335 	  break;
1336       if (l == NULL)
1337 	{
1338 	  l = (struct alignlist *) malloc (sizeof (struct alignlist));
1339 	  if (l == NULL)
1340 	    {
1341 	      free (result);
1342 	      return NULL;
1343 	    }
1344 	  l->next = _aligned_blocks;
1345 	  _aligned_blocks = l;
1346 	}
1347       l->exact = result;
1348       result = l->aligned = (char *) result + alignment - adj;
1349     }
1350 
1351   return result;
1352 }
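/* Example (editor's sketch): a pointer obtained from `memalign' (or from
   `valloc', which calls it) is released with the ordinary `free'; the
   `_aligned_blocks' list above maps the aligned address back to the
   address `malloc' actually returned, and `free' consults that list (see
   `free' above).

       void *p = memalign (64, 200);
       ...
       free (p);
*/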
1353