1 /* $NetBSD: malloc.c,v 1.60 2020/05/15 14:37:21 joerg Exp $ */
2
3 /*
4 * ----------------------------------------------------------------------------
5 * "THE BEER-WARE LICENSE" (Revision 42):
6 * <phk@FreeBSD.ORG> wrote this file. As long as you retain this notice you
7 * can do whatever you want with this stuff. If we meet some day, and you think
8 * this stuff is worth it, you can buy me a beer in return. Poul-Henning Kamp
9 * ----------------------------------------------------------------------------
10 *
11 * From FreeBSD: malloc.c,v 1.91 2006/01/12 07:28:20 jasone
12 *
13 */
14
15 /*
16 * Defining MALLOC_EXTRA_SANITY will enable extra checks which are related
17 * to internal conditions and consistency in malloc.c. This has a
18 * noticeable runtime performance hit, and generally will not do you
19 * any good unless you fiddle with the internals of malloc or want
20 * to catch random pointer corruption as early as possible.
21 */
22 #ifndef MALLOC_EXTRA_SANITY
23 #undef MALLOC_EXTRA_SANITY
24 #endif
25
26 /*
27 * What to use for Junk. This is the byte value we use to fill with
28 * when the 'J' option is enabled.
29 */
30 #define SOME_JUNK 0xd0 /* as in "Duh" :-) */
31
32 /*
33 * The basic parameters you can tweak.
34 *
35 * malloc_minsize minimum size of an allocation in bytes.
36 * If this is too small it's too much work
37 * to manage them. This is also the smallest
38 * unit of alignment used for the storage
39 * returned by malloc/realloc.
40 *
41 */
42
43 #include "namespace.h"
44 #if defined(__FreeBSD__)
45 # if defined(__i386__)
46 # define malloc_minsize 16U
47 # endif
48 # if defined(__ia64__)
49 # define malloc_pageshift 13U
50 # define malloc_minsize 16U
51 # endif
52 # if defined(__alpha__)
53 # define malloc_pageshift 13U
54 # define malloc_minsize 16U
55 # endif
56 # if defined(__sparc64__)
57 # define malloc_pageshift 13U
58 # define malloc_minsize 16U
59 # endif
60 # if defined(__amd64__)
61 # define malloc_pageshift 12U
62 # define malloc_minsize 16U
63 # endif
64 # if defined(__arm__)
65 # define malloc_pageshift 12U
66 # define malloc_minsize 16U
67 # endif
68 # define HAS_UTRACE
69 # define UTRACE_LABEL
70
71 #include <sys/cdefs.h>
72 void utrace(struct ut *, int);
73
74 /*
75 * Make malloc/free/realloc thread-safe in libc for use with
76 * kernel threads.
77 */
78 # include "libc_private.h"
79 # include "spinlock.h"
80 static spinlock_t thread_lock = _SPINLOCK_INITIALIZER;
81 # define _MALLOC_LOCK() if (__isthreaded) _SPINLOCK(&thread_lock);
82 # define _MALLOC_UNLOCK() if (__isthreaded) _SPINUNLOCK(&thread_lock);
83 #endif /* __FreeBSD__ */
84
85 #include <sys/types.h>
86 #if defined(__NetBSD__)
87 # define malloc_minsize 16U
88 # ifdef _LIBC
89 # define HAS_UTRACE
90 # define UTRACE_LABEL "malloc",
91 int utrace(const char *, void *, size_t);
92 # endif
93 # include <sys/cdefs.h>
94 # include "extern.h"
95 # if defined(LIBC_SCCS) && !defined(lint)
96 __RCSID("$NetBSD: malloc.c,v 1.60 2020/05/15 14:37:21 joerg Exp $");
97 # endif /* LIBC_SCCS and not lint */
98 # include <reentrant.h>
99 # ifdef _REENTRANT
100 extern int __isthreaded;
101 static mutex_t thread_lock = MUTEX_INITIALIZER;
102 # define _MALLOC_LOCK() if (__isthreaded) mutex_lock(&thread_lock);
103 # define _MALLOC_UNLOCK() if (__isthreaded) mutex_unlock(&thread_lock);
104 # else
105 # define _MALLOC_LOCK()
106 # define _MALLOC_UNLOCK()
107 # endif
108 #endif /* __NetBSD__ */
109
110 #if defined(__sparc__) && defined(sun)
111 # define malloc_minsize 16U
112 # define MAP_ANON (0)
113 static int fdzero;
114 # define MMAP_FD fdzero
115 # define INIT_MMAP() \
116 { if ((fdzero = open(_PATH_DEVZERO, O_RDWR | O_CLOEXEC, 0000)) == -1) \
117 wrterror("open of /dev/zero"); }
118 #endif /* __sparc__ */
119
120 /* Insert your combination here... */
121 #if defined(__FOOCPU__) && defined(__BAROS__)
122 # define malloc_minsize 16U
123 #endif /* __FOOCPU__ && __BAROS__ */
124
125 #ifndef ZEROSIZEPTR
126 #define ZEROSIZEPTR ((void *)(uintptr_t)(1UL << (malloc_pageshift - 1)))
127 #endif
128
129 /*
130 * No user serviceable parts behind this point.
131 */
132 #include <sys/types.h>
133 #include <sys/mman.h>
134 #include <errno.h>
135 #include <fcntl.h>
136 #include <paths.h>
137 #include <stddef.h>
138 #include <stdio.h>
139 #include <stdlib.h>
140 #include <string.h>
141 #include <unistd.h>
142
/*
 * This structure describes a page worth of chunks.
 * One pginfo exists per fragment page; small structures live inside
 * the page they describe, large ones are allocated separately.
 */

struct pginfo {
	struct pginfo	*next;	/* next on the free list */
	void		*page;	/* Pointer to the page */
	u_short		size;	/* size of this page's chunks */
	u_short		shift;	/* How far to shift for this size chunks */
	u_short		free;	/* How many free chunks */
	u_short		total;	/* How many chunks in total */
	u_int		bits[1];	/* Which chunks are free (1 = free) */
};
156
/*
 * This structure describes a number of free pages.
 * Entries form a doubly-linked list headed at free_list; free_pages()
 * keeps the list sorted by address so adjacent runs can coalesce.
 */

struct pgfree {
	struct pgfree	*next;	/* next run of free pages */
	struct pgfree	*prev;	/* prev run of free pages */
	void		*page;	/* pointer to free pages */
	void		*end;	/* pointer to end of free pages */
	size_t		size;	/* number of bytes free */
};
168
169 /*
170 * How many bits per u_int in the bitmap.
171 * Change only if not 8 bits/byte
172 */
173 #define MALLOC_BITS ((int)(8*sizeof(u_int)))
174
175 /*
176 * Magic values to put in the page_directory
177 */
178 #define MALLOC_NOT_MINE ((struct pginfo*) 0)
179 #define MALLOC_FREE ((struct pginfo*) 1)
180 #define MALLOC_FIRST ((struct pginfo*) 2)
181 #define MALLOC_FOLLOW ((struct pginfo*) 3)
182 #define MALLOC_MAGIC ((struct pginfo*) 4)
183
184 /*
185 * Page size related parameters, computed at run-time.
186 */
187 static size_t malloc_pagesize;
188 static size_t malloc_pageshift;
189 static size_t malloc_pagemask;
190
191 #ifndef malloc_minsize
192 #define malloc_minsize 16U
193 #endif
194
195 #ifndef malloc_maxsize
196 #define malloc_maxsize ((malloc_pagesize)>>1)
197 #endif
198
199 #define pageround(foo) (((foo) + (malloc_pagemask))&(~(malloc_pagemask)))
200 #define ptr2idx(foo) \
201 (((size_t)(uintptr_t)(foo) >> malloc_pageshift)-malloc_origo)
202
203 #ifndef _MALLOC_LOCK
204 #define _MALLOC_LOCK()
205 #endif
206
207 #ifndef _MALLOC_UNLOCK
208 #define _MALLOC_UNLOCK()
209 #endif
210
211 #ifndef MMAP_FD
212 #define MMAP_FD (-1)
213 #endif
214
215 #ifndef INIT_MMAP
216 #define INIT_MMAP()
217 #endif
218
219 #ifndef MADV_FREE
220 #define MADV_FREE MADV_DONTNEED
221 #endif
222
223 /* Number of free pages we cache */
224 static size_t malloc_cache = 16;
225
226 /* The offset from pagenumber to index into the page directory */
227 static size_t malloc_origo;
228
229 /* The last index in the page directory we care about */
230 static size_t last_idx;
231
232 /* Pointer to page directory. Allocated "as if with" malloc */
233 static struct pginfo **page_dir;
234
235 /* How many slots in the page directory */
236 static size_t malloc_ninfo;
237
238 /* Free pages line up here */
239 static struct pgfree free_list;
240
241 /* Abort(), user doesn't handle problems. */
242 static int malloc_abort;
243
244 /* Are we trying to die ? */
245 static int suicide;
246
247 /* always realloc ? */
248 static int malloc_realloc;
249
250 /* pass the kernel a hint on free pages ? */
251 #if defined(MADV_FREE)
252 static int malloc_hint = 0;
253 #endif
254
255 /* xmalloc behaviour ? */
256 static int malloc_xmalloc;
257
258 /* sysv behaviour for malloc(0) ? */
259 static int malloc_sysv;
260
261 /* zero fill ? */
262 static int malloc_zero;
263
264 /* junk fill ? */
265 static int malloc_junk;
266
267 #ifdef HAS_UTRACE
268
269 /* utrace ? */
270 static int malloc_utrace;
271
272 struct ut { void *p; size_t s; void *r; };
273
274 #define UTRACE(a, b, c) \
275 if (malloc_utrace) { \
276 struct ut u; \
277 u.p=a; u.s = b; u.r=c; \
278 utrace(UTRACE_LABEL (void *) &u, sizeof u); \
279 }
280 #else /* !HAS_UTRACE */
281 #define UTRACE(a,b,c)
282 #endif /* HAS_UTRACE */
283
284 /* my last break. */
285 static void *malloc_brk;
286
287 /* one location cache for free-list holders */
288 static struct pgfree *px;
289
290 /* compile-time options */
291 const char *_malloc_options;
292
293 /* Name of the current public function */
294 static const char *malloc_func;
295
/*
 * Macro for mmap: anonymous, private, read/write pages.
 * No trailing semicolon -- callers supply their own, so the macro
 * expands cleanly in any statement or expression context (the old
 * trailing ';' produced a stray empty statement at every use).
 */
#define MMAP(size) \
	mmap(NULL, (size), PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, \
	    MMAP_FD, (off_t)0)
300
301 /*
302 * Necessary function declarations
303 */
304 static int extend_pgdir(size_t idx);
305 static void *imalloc(size_t size);
306 static void ifree(void *ptr);
307 static void *irealloc(void *ptr, size_t size);
308
/*
 * Default message sink: emit the four message fragments to stderr,
 * in order, using raw write(2) so no malloc is needed while dying.
 */
static void
wrtmessage(const char *p1, const char *p2, const char *p3, const char *p4)
{
	const char *frag[4];
	size_t n;

	frag[0] = p1;
	frag[1] = p2;
	frag[2] = p3;
	frag[3] = p4;
	for (n = 0; n < 4; n++)
		write(STDERR_FILENO, frag[n], strlen(frag[n]));
}
318
/* Overridable message sink used by wrterror()/wrtwarning(). */
void (*_malloc_message)(const char *p1, const char *p2, const char *p3,
    const char *p4) = wrtmessage;
/*
 * Report a fatal error and abort.  Sets "suicide" first so that any
 * allocator activity triggered while dying bails out immediately.
 */
static void
wrterror(const char *p)
{

	suicide = 1;
	_malloc_message(getprogname(), malloc_func, " error: ", p);
	abort();
}
329
330 static void
wrtwarning(const char * p)331 wrtwarning(const char *p)
332 {
333
334 /*
335 * Sensitive processes, somewhat arbitrarily defined here as setuid,
336 * setgid, root and wheel cannot afford to have malloc mistakes.
337 */
338 if (malloc_abort || issetugid() || getuid() == 0 || getgid() == 0)
339 wrterror(p);
340 }
341
/*
 * Allocate a number of pages from the OS via sbrk(2).
 * Returns a page-aligned pointer to the fresh pages, or NULL on
 * failure (request overflow, out of address space, or the page
 * directory could not be extended).
 */
static void *
map_pages(size_t pages)
{
	caddr_t result, rresult, tail;
	intptr_t bytes = pages << malloc_pageshift;

	/* Reject requests whose byte count overflowed the shift above. */
	if (bytes < 0 || (size_t)bytes < pages) {
		errno = ENOMEM;
		return NULL;
	}

	if ((result = sbrk(bytes)) == (void *)-1)
		return NULL;

	/*
	 * Round to a page, in case sbrk(2) did not do this for us
	 */
	rresult = (caddr_t)pageround((size_t)(uintptr_t)result);
	if (result < rresult) {
		/* make sure we have enough space to fit bytes */
		if (sbrk((intptr_t)(rresult - result)) == (void *) -1) {
			/* we failed, put everything back */
			if (brk(result)) {
				wrterror("brk(2) failed [internal error]\n");
			}
		}
	}
	tail = rresult + (size_t)bytes;

	/* Track the highest page-directory index we now own. */
	last_idx = ptr2idx(tail) - 1;
	malloc_brk = tail;

	/* Grow the page directory if the new pages fall past its end. */
	if ((last_idx+1) >= malloc_ninfo && !extend_pgdir(last_idx)) {
		malloc_brk = result;
		last_idx = ptr2idx(malloc_brk) - 1;
		/* Put back break point since we failed. */
		if (brk(malloc_brk))
			wrterror("brk(2) failed [internal error]\n");
		return 0;
	}

	return rresult;
}
388
/*
 * Extend the page directory so it can describe pages up to (at least)
 * index "idx".  A new, larger mapping is created and the old contents
 * copied over; returns 1 on success, 0 (with errno set) on failure.
 */
static int
extend_pgdir(size_t idx)
{
	struct pginfo **new, **old;
	size_t newlen, oldlen;

	/* check for overflow of the newlen computation below */
	if ((((~(1UL << ((sizeof(size_t) * NBBY) - 1)) / sizeof(*page_dir)) + 1)
	    + (malloc_pagesize / sizeof *page_dir)) < idx) {
		errno = ENOMEM;
		return 0;
	}

	/* Make it this many pages */
	newlen = pageround(idx * sizeof *page_dir) + malloc_pagesize;

	/* remember the old mapping size */
	oldlen = malloc_ninfo * sizeof *page_dir;

	/*
	 * NOTE: we allocate new pages and copy the directory rather than tempt
	 * fate by trying to "grow" the region..  There is nothing to prevent
	 * us from accidentally re-mapping space that's been allocated by our
	 * caller via dlopen() or other mmap().
	 *
	 * The copy problem is not too bad, as there is 4K of page index per
	 * 4MB of malloc arena.
	 *
	 * We can totally avoid the copy if we open a file descriptor to
	 * associate the anon mappings with.  Then, when we remap the pages at
	 * the new address, the old pages will be "magically" remapped..  But
	 * this means keeping open a "secret" file descriptor.....
	 */

	/* Get new pages */
	new = MMAP(newlen);
	if (new == MAP_FAILED)
		return 0;

	/* Copy the old stuff */
	memcpy(new, page_dir, oldlen);

	/* register the new size */
	malloc_ninfo = newlen / sizeof *page_dir;

	/* swap the pointers */
	old = page_dir;
	page_dir = new;

	/* Now free the old stuff */
	munmap(old, oldlen);
	return 1;
}
445
/*
 * Initialize the world: compute page-size constants, parse the option
 * flags from /etc/malloc.conf, the environment and _malloc_options,
 * and map the initial page directory.  Runs once, lazily, on the
 * first call into the allocator.
 */
static void
malloc_init(void)
{
	const char *p;
	char b[64];
	size_t i;
	ssize_t j;
	int serrno = errno;	/* preserve the caller-visible errno */

	/*
	 * Compute page-size related variables.
	 */
	malloc_pagesize = getpagesize();
	malloc_pagemask = malloc_pagesize - 1;
	for (malloc_pageshift = 0;
	    (1UL << malloc_pageshift) != malloc_pagesize;
	    malloc_pageshift++)
		/* nothing */ ;

	INIT_MMAP();

#ifdef MALLOC_EXTRA_SANITY
	malloc_junk = 1;
#endif /* MALLOC_EXTRA_SANITY */

	/*
	 * Option sources, in increasing priority:
	 *  0: /etc/malloc.conf (a symlink whose *target* is the flags),
	 *  1: the MALLOC_OPTIONS environment variable (libc builds only,
	 *     and never for set-id processes),
	 *  2: the compiled-in _malloc_options string.
	 */
	for (i = 0; i < 3; i++) {
		if (i == 0) {
			j = readlink("/etc/malloc.conf", b, sizeof b - 1);
			if (j == -1)
				continue;
			b[j] = '\0';
			p = b;
#ifdef _LIBC
		} else if (i == 1 && issetugid() == 0) {
			p = getenv("MALLOC_OPTIONS");
#endif
		} else if (i == 1) {
			continue;
		} else {
			p = _malloc_options;
		}
		/* Lower-case letters clear an option, upper-case set it. */
		for (; p != NULL && *p != '\0'; p++) {
			switch (*p) {
			case '>': malloc_cache <<= 1; break;
			case '<': malloc_cache >>= 1; break;
			case 'a': malloc_abort = 0; break;
			case 'A': malloc_abort = 1; break;
			case 'h': malloc_hint = 0; break;
			case 'H': malloc_hint = 1; break;
			case 'r': malloc_realloc = 0; break;
			case 'R': malloc_realloc = 1; break;
			case 'j': malloc_junk = 0; break;
			case 'J': malloc_junk = 1; break;
#ifdef HAS_UTRACE
			case 'u': malloc_utrace = 0; break;
			case 'U': malloc_utrace = 1; break;
#endif
			case 'v': malloc_sysv = 0; break;
			case 'V': malloc_sysv = 1; break;
			case 'x': malloc_xmalloc = 0; break;
			case 'X': malloc_xmalloc = 1; break;
			case 'z': malloc_zero = 0; break;
			case 'Z': malloc_zero = 1; break;
			default:
				_malloc_message(getprogname(), malloc_func,
				    " warning: ", "unknown char in MALLOC_OPTIONS\n");
				break;
			}
		}
	}

	UTRACE(0, 0, 0);

	/*
	 * We want junk in the entire allocation, and zero only in the part
	 * the user asked for.
	 */
	if (malloc_zero)
		malloc_junk = 1;

	/* Allocate one page for the page directory */
	page_dir = MMAP(malloc_pagesize);

	if (page_dir == MAP_FAILED)
		wrterror("mmap(2) failed, check limits.\n");

	/*
	 * We need a maximum of malloc_pageshift buckets, steal these from the
	 * front of the page_directory.
	 */
	malloc_origo = pageround((size_t)(uintptr_t)sbrk((intptr_t)0))
	    >> malloc_pageshift;
	malloc_origo -= malloc_pageshift;

	malloc_ninfo = malloc_pagesize / sizeof *page_dir;

	/* Recalculate the cache size in bytes, and make sure it's nonzero */

	if (!malloc_cache)
		malloc_cache++;

	malloc_cache <<= malloc_pageshift;

	/*
	 * This is a nice hack from Kaleb Keithly (kaleb@x.org).
	 * We can sbrk(2) further back when we keep this on a low address.
	 */
	px = imalloc(sizeof *px);

	errno = serrno;
}
560
/*
 * Allocate a number of complete pages: first from the free-page
 * cache, otherwise freshly mapped via map_pages().  "size" is rounded
 * up to a page multiple.  Returns NULL on overflow or exhaustion.
 */
static void *
malloc_pages(size_t size)
{
	void *p, *delay_free = NULL;
	size_t i;
	struct pgfree *pf;
	size_t idx;

	idx = pageround(size);
	if (idx < size) {
		/* pageround() wrapped around: request too large. */
		errno = ENOMEM;
		return NULL;
	} else
		size = idx;

	p = NULL;

	/* Look for free pages before asking for more */
	for(pf = free_list.next; pf; pf = pf->next) {

#ifdef MALLOC_EXTRA_SANITY
		if (pf->size & malloc_pagemask)
			wrterror("(ES): junk length entry on free_list.\n");
		if (!pf->size)
			wrterror("(ES): zero length entry on free_list.\n");
		if (pf->page == pf->end)
			wrterror("(ES): zero entry on free_list.\n");
		if (pf->page > pf->end)
			wrterror("(ES): sick entry on free_list.\n");
		if ((void*)pf->page >= (void*)sbrk(0))
			wrterror("(ES): entry on free_list past brk.\n");
		if (page_dir[ptr2idx(pf->page)] != MALLOC_FREE)
			wrterror("(ES): non-free first page on free-list.\n");
		if (page_dir[ptr2idx(pf->end)-1] != MALLOC_FREE)
			wrterror("(ES): non-free last page on free-list.\n");
#endif /* MALLOC_EXTRA_SANITY */

		if (pf->size < size)
			continue;

		if (pf->size == size) {
			/* Exact fit: unlink run, recycle its holder below. */
			p = pf->page;
			if (pf->next != NULL)
				pf->next->prev = pf->prev;
			pf->prev->next = pf->next;
			delay_free = pf;
			break;
		}

		/* Larger run: carve our pages off the front. */
		p = pf->page;
		pf->page = (char *)pf->page + size;
		pf->size -= size;
		break;
	}

#ifdef MALLOC_EXTRA_SANITY
	if (p != NULL && page_dir[ptr2idx(p)] != MALLOC_FREE)
		wrterror("(ES): allocated non-free page on free-list.\n");
#endif /* MALLOC_EXTRA_SANITY */

	size >>= malloc_pageshift;	/* now a page count */

	/* Map new pages */
	if (p == NULL)
		p = map_pages(size);

	if (p != NULL) {

		/* First page is MALLOC_FIRST, the rest MALLOC_FOLLOW. */
		idx = ptr2idx(p);
		page_dir[idx] = MALLOC_FIRST;
		for (i=1;i<size;i++)
			page_dir[idx+i] = MALLOC_FOLLOW;

		if (malloc_junk)
			memset(p, SOME_JUNK, size << malloc_pageshift);
	}

	if (delay_free) {
		/* Refill the one-slot holder cache, or free the holder. */
		if (px == NULL)
			px = delay_free;
		else
			ifree(delay_free);
	}

	return p;
}
650
/*
 * Allocate a page of fragments: set up one page of 2^bits-byte chunks
 * and put its pginfo at the head of bucket list page_dir[bits].
 * Returns 1 on success, 0 on failure.
 */

static inline int
malloc_make_chunks(int bits)
{
	struct pginfo *bp;
	void *pp;
	int i, k;
	long l;

	/* Allocate a new bucket */
	pp = malloc_pages(malloc_pagesize);
	if (pp == NULL)
		return 0;

	/* Find length of admin structure (header plus bitmap words) */
	l = (long)offsetof(struct pginfo, bits[0]);
	l += (long)sizeof bp->bits[0] *
	    (((malloc_pagesize >> bits)+MALLOC_BITS-1) / MALLOC_BITS);

	/*
	 * Don't waste more than two chunks on this: if the admin
	 * structure costs less than two chunks, keep it inside the
	 * page itself, otherwise allocate it separately.
	 */
	if ((1<<(bits)) <= l+l) {
		bp = (struct pginfo *)pp;
	} else {
		bp = imalloc((size_t)l);
		if (bp == NULL) {
			ifree(pp);
			return 0;
		}
	}

	bp->size = (1<<bits);
	bp->shift = bits;
	bp->total = bp->free = (u_short)(malloc_pagesize >> bits);
	bp->page = pp;

	/* set all valid bits in the bitmap (1 = chunk free) */
	k = bp->total;
	i = 0;

	/* Do a bunch at a time */
	for(;k-i >= MALLOC_BITS; i += MALLOC_BITS)
		bp->bits[i / MALLOC_BITS] = ~0U;

	for(; i < k; i++)
		bp->bits[i/MALLOC_BITS] |= 1<<(i%MALLOC_BITS);

	if (bp == bp->page) {
		/* Mark the ones we stole for ourselves */
		for(i = 0; l > 0; i++) {
			bp->bits[i / MALLOC_BITS] &= ~(1 << (i % MALLOC_BITS));
			bp->free--;
			bp->total--;
			l -= (long)(1 << bits);
		}
	}

	/* MALLOC_LOCK */

	page_dir[ptr2idx(pp)] = bp;

	bp->next = page_dir[bits];
	page_dir[bits] = bp;

	/* MALLOC_UNLOCK */

	return 1;
}
721
/*
 * Allocate a fragment: find the bucket for "size" (rounded up to the
 * next power of two), making a fresh page of chunks if the bucket is
 * empty, and claim the first free bit in its bitmap.
 */
static void *
malloc_bytes(size_t size)
{
	size_t i;
	int j;
	u_int u;
	struct pginfo *bp;
	size_t k;
	u_int *lp;

	/* Don't bother with anything less than this */
	if (size < malloc_minsize)
		size = malloc_minsize;


	/* Find the right bucket: j = ceil(log2(size)) */
	j = 1;
	i = size-1;
	while (i >>= 1)
		j++;

	/* If it's empty, make a page more of that size chunks */
	if (page_dir[j] == NULL && !malloc_make_chunks(j))
		return NULL;

	bp = page_dir[j];

	/* Find first word of bitmap which isn't empty */
	for (lp = bp->bits; !*lp; lp++)
		;

	/* Find that bit, and tweak it */
	u = 1;
	k = 0;
	while (!(*lp & u)) {
		u += u;
		k++;
	}
	*lp ^= u;	/* clear the bit: chunk is now in use */

	/* If there are no more free, remove from free-list */
	if (!--bp->free) {
		page_dir[j] = bp->next;
		bp->next = NULL;
	}

	/* Adjust to the real offset of that chunk */
	k += (lp-bp->bits)*MALLOC_BITS;
	k <<= bp->shift;

	if (malloc_junk)
		memset((u_char*)bp->page + k, SOME_JUNK, (size_t)bp->size);

	return (u_char *)bp->page + k;
}
780
781 /*
782 * Allocate a piece of memory
783 */
784 static void *
imalloc(size_t size)785 imalloc(size_t size)
786 {
787 void *result;
788
789 if (suicide)
790 abort();
791
792 if ((size + malloc_pagesize) < size) /* Check for overflow */
793 result = NULL;
794 else if ((size + malloc_pagesize) >= (uintptr_t)page_dir)
795 result = NULL;
796 else if (size <= malloc_maxsize)
797 result = malloc_bytes(size);
798 else
799 result = malloc_pages(size);
800
801 if (malloc_abort && result == NULL)
802 wrterror("allocation failed.\n");
803
804 if (malloc_zero && result != NULL)
805 memset(result, 0, size);
806
807 return result;
808 }
809
/*
 * Change the size of an allocation.  Returns the (possibly moved)
 * pointer, or NULL on failure or when "ptr" fails validation.
 */
static void *
irealloc(void *ptr, size_t size)
{
	void *p;
	size_t osize, idx;
	struct pginfo **mp;
	size_t i;

	if (suicide)
		abort();

	idx = ptr2idx(ptr);

	if (idx < malloc_pageshift) {
		wrtwarning("junk pointer, too low to make sense.\n");
		return 0;
	}

	if (idx > last_idx) {
		wrtwarning("junk pointer, too high to make sense.\n");
		return 0;
	}

	mp = &page_dir[idx];

	if (*mp == MALLOC_FIRST) {		/* Page allocation */

		/* Check the pointer */
		if ((size_t)(uintptr_t)ptr & malloc_pagemask) {
			wrtwarning("modified (page-) pointer.\n");
			return NULL;
		}

		/* Find the old size by counting MALLOC_FOLLOW pages. */
		for (osize = malloc_pagesize; *++mp == MALLOC_FOLLOW;)
			osize += malloc_pagesize;

		if (!malloc_realloc &&			/* unless we have to, */
		    size <= osize &&			/* .. or are too small, */
		    size > (osize - malloc_pagesize)) {	/* .. or can free a page, */
			if (malloc_junk)
				memset((u_char *)ptr + size, SOME_JUNK, osize-size);
			return ptr;		/* don't do anything. */
		}

	} else if (*mp >= MALLOC_MAGIC) {	/* Chunk allocation */

		/* Check the pointer for sane values (chunk alignment) */
		if (((size_t)(uintptr_t)ptr & ((*mp)->size-1))) {
			wrtwarning("modified (chunk-) pointer.\n");
			return NULL;
		}

		/* Find the chunk index in the page */
		i = ((size_t)(uintptr_t)ptr & malloc_pagemask) >> (*mp)->shift;

		/* Verify that it isn't a free chunk already */
		if ((*mp)->bits[i/MALLOC_BITS] & (1UL << (i % MALLOC_BITS))) {
			wrtwarning("chunk is already free.\n");
			return NULL;
		}

		osize = (*mp)->size;

		if (!malloc_realloc &&		/* Unless we have to, */
		    size <= osize &&		/* ..or are too small, */
		    (size > osize / 2 ||	/* ..or could use a smaller size, */
		    osize == malloc_minsize)) {	/* ..(if there is one) */
			if (malloc_junk)
				memset((u_char *)ptr + size, SOME_JUNK, osize-size);
			return ptr;		/* ..Don't do anything */
		}

	} else {
		wrtwarning("pointer to wrong page.\n");
		return NULL;
	}

	/* Fall through: allocate anew, copy, free the old block. */
	p = imalloc(size);

	if (p != NULL) {
		/* copy the lesser of the two sizes, and free the old one */
		if (!size || !osize)
			;
		else if (osize < size)
			memcpy(p, ptr, osize);
		else
			memcpy(p, ptr, size);
		ifree(ptr);
	}
	return p;
}
905
/*
 * Free a sequence of pages: mark them MALLOC_FREE in the page
 * directory, merge the run into the address-ordered free list, and
 * possibly return memory at the top of the heap to the OS.
 */

static inline void
free_pages(void *ptr, size_t idx, struct pginfo *info)
{
	size_t i;
	struct pgfree *pf, *pt=NULL;
	size_t l;
	void *tail;

	if (info == MALLOC_FREE) {
		wrtwarning("page is already free.\n");
		return;
	}

	if (info != MALLOC_FIRST) {
		wrtwarning("pointer to wrong page.\n");
		return;
	}

	if ((size_t)(uintptr_t)ptr & malloc_pagemask) {
		wrtwarning("modified (page-) pointer.\n");
		return;
	}

	/* Count how many pages and mark them free at the same time */
	page_dir[idx] = MALLOC_FREE;
	for (i = 1; page_dir[idx+i] == MALLOC_FOLLOW; i++)
		page_dir[idx + i] = MALLOC_FREE;

	l = i << malloc_pageshift;	/* run length in bytes */

	if (malloc_junk)
		memset(ptr, SOME_JUNK, l);

	if (malloc_hint)
		madvise(ptr, l, MADV_FREE);

	tail = (char *)ptr+l;

	/* add to free-list, using the pre-allocated holder "px" */
	if (px == NULL)
		px = imalloc(sizeof *px);	/* This cannot fail... */
	px->page = ptr;
	px->end = tail;
	px->size = l;
	if (free_list.next == NULL) {

		/* Nothing on free list, put this at head */
		px->next = free_list.next;
		px->prev = &free_list;
		free_list.next = px;
		pf = px;
		px = NULL;

	} else {

		/* Find the right spot, leave pf pointing to the modified entry. */
		tail = (char *)ptr+l;

		for(pf = free_list.next; pf->end < ptr && pf->next != NULL;
		    pf = pf->next)
			; /* Race ahead here */

		if (pf->page > tail) {
			/* Insert before entry */
			px->next = pf;
			px->prev = pf->prev;
			pf->prev = px;
			px->prev->next = px;
			pf = px;
			px = NULL;
		} else if (pf->end == ptr ) {
			/* Append to the previous entry */
			pf->end = (char *)pf->end + l;
			pf->size += l;
			if (pf->next != NULL && pf->end == pf->next->page ) {
				/* And collapse the next too. */
				pt = pf->next;
				pf->end = pt->end;
				pf->size += pt->size;
				pf->next = pt->next;
				if (pf->next != NULL)
					pf->next->prev = pf;
			}
		} else if (pf->page == tail) {
			/* Prepend to entry */
			pf->size += l;
			pf->page = ptr;
		} else if (pf->next == NULL) {
			/* Append at tail of chain */
			px->next = NULL;
			px->prev = pf;
			pf->next = px;
			pf = px;
			px = NULL;
		} else {
			wrterror("freelist is destroyed.\n");
		}
	}

	/* Return something to OS ? */
	if (pf->next == NULL &&			/* If we're the last one, */
	    pf->size > malloc_cache &&		/* ..and the cache is full, */
	    pf->end == malloc_brk &&		/* ..and none behind us, */
	    malloc_brk == sbrk((intptr_t)0)) {	/* ..and it's OK to do... */

		/*
		 * Keep the cache intact.  Notice that the '>' above guarantees that
		 * the pf will always have at least one page afterwards.
		 */
		pf->end = (char *)pf->page + malloc_cache;
		pf->size = malloc_cache;

		brk(pf->end);
		malloc_brk = pf->end;

		idx = ptr2idx(pf->end);

		/* Disown the pages we handed back. */
		for(i=idx;i <= last_idx;)
			page_dir[i++] = MALLOC_NOT_MINE;

		last_idx = idx - 1;

		/* XXX: We could realloc/shrink the pagedir here I guess. */
	}
	/* Free any holder made redundant by coalescing, after the walk. */
	if (pt != NULL)
		ifree(pt);
}
1037
1038 /*
1039 * Free a chunk, and possibly the page it's on, if the page becomes empty.
1040 */
1041
1042 static inline void
free_bytes(void * ptr,size_t idx,struct pginfo * info)1043 free_bytes(void *ptr, size_t idx, struct pginfo *info)
1044 {
1045 size_t i;
1046 struct pginfo **mp;
1047 void *vp;
1048
1049 /* Find the chunk number on the page */
1050 i = ((size_t)(uintptr_t)ptr & malloc_pagemask) >> info->shift;
1051
1052 if (((size_t)(uintptr_t)ptr & (info->size-1))) {
1053 wrtwarning("modified (chunk-) pointer.\n");
1054 return;
1055 }
1056
1057 if (info->bits[i/MALLOC_BITS] & (1UL << (i % MALLOC_BITS))) {
1058 wrtwarning("chunk is already free.\n");
1059 return;
1060 }
1061
1062 if (malloc_junk)
1063 memset(ptr, SOME_JUNK, (size_t)info->size);
1064
1065 info->bits[i/MALLOC_BITS] |= (u_int)(1UL << (i % MALLOC_BITS));
1066 info->free++;
1067
1068 mp = page_dir + info->shift;
1069
1070 if (info->free == 1) {
1071
1072 /* Page became non-full */
1073
1074 mp = page_dir + info->shift;
1075 /* Insert in address order */
1076 while (*mp && (*mp)->next && (*mp)->next->page < info->page)
1077 mp = &(*mp)->next;
1078 info->next = *mp;
1079 *mp = info;
1080 return;
1081 }
1082
1083 if (info->free != info->total)
1084 return;
1085
1086 /* Find & remove this page in the queue */
1087 while (*mp != info) {
1088 mp = &((*mp)->next);
1089 #ifdef MALLOC_EXTRA_SANITY
1090 if (!*mp)
1091 wrterror("(ES): Not on queue.\n");
1092 #endif /* MALLOC_EXTRA_SANITY */
1093 }
1094 *mp = info->next;
1095
1096 /* Free the page & the info structure if need be */
1097 page_dir[idx] = MALLOC_FIRST;
1098 vp = info->page; /* Order is important ! */
1099 if(vp != (void*)info)
1100 ifree(info);
1101 ifree(vp);
1102 }
1103
1104 static void
ifree(void * ptr)1105 ifree(void *ptr)
1106 {
1107 struct pginfo *info;
1108 size_t idx;
1109
1110 /* This is legal */
1111 if (ptr == NULL)
1112 return;
1113
1114 /* If we're already sinking, don't make matters any worse. */
1115 if (suicide)
1116 return;
1117
1118 idx = ptr2idx(ptr);
1119
1120 if (idx < malloc_pageshift) {
1121 wrtwarning("junk pointer, too low to make sense.\n");
1122 return;
1123 }
1124
1125 if (idx > last_idx) {
1126 wrtwarning("junk pointer, too high to make sense.\n");
1127 return;
1128 }
1129
1130 info = page_dir[idx];
1131
1132 if (info < MALLOC_MAGIC)
1133 free_pages(ptr, idx, info);
1134 else
1135 free_bytes(ptr, idx, info);
1136 return;
1137 }
1138
static int malloc_active;	/* Recursion flag for public interface. */
static unsigned malloc_started;	/* Set when initialization has been done */

/*
 * Common implementation behind malloc/realloc/free/calloc: serializes
 * access, performs lazy initialization, and dispatches on (ptr, size).
 * "func" names the public caller for error messages.
 */
static void *
pubrealloc(void *ptr, size_t size, const char *func)
{
	void *r;
	int err = 0;

	/*
	 * If a thread is inside our code with a functional lock held, and then
	 * catches a signal which calls us again, we would get a deadlock if the
	 * lock is not of a recursive type.
	 */
	_MALLOC_LOCK();
	malloc_func = func;
	if (malloc_active > 0) {
		if (malloc_active == 1) {
			wrtwarning("recursive call\n");
			malloc_active = 2;	/* warn only once */
		}
		_MALLOC_UNLOCK();
		errno = EINVAL;
		return (NULL);
	}
	malloc_active = 1;

	if (!malloc_started) {
		/* Freeing/resizing before any allocation is nonsense. */
		if (ptr != NULL) {
			wrtwarning("malloc() has never been called\n");
			malloc_active = 0;
			_MALLOC_UNLOCK();
			errno = EINVAL;
			return (NULL);
		}
		malloc_init();
		malloc_started = 1;
	}

	/* The zero-size token never points at real storage. */
	if (ptr == ZEROSIZEPTR)
		ptr = NULL;
	if (malloc_sysv && !size) {
		/* SysV semantics: size 0 frees and returns NULL. */
		if (ptr != NULL)
			ifree(ptr);
		r = NULL;
	} else if (!size) {
		/* Size 0: free and hand back the unique zero-size token. */
		if (ptr != NULL)
			ifree(ptr);
		r = ZEROSIZEPTR;
	} else if (ptr == NULL) {
		r = imalloc(size);
		err = (r == NULL);
	} else {
		r = irealloc(ptr, size);
		err = (r == NULL);
	}
	UTRACE(ptr, size, r);
	malloc_active = 0;
	_MALLOC_UNLOCK();
	if (malloc_xmalloc && err)
		wrterror("out of memory\n");
	if (err)
		errno = ENOMEM;
	return (r);
}
1204
/*
 * These are the public exported interface routines.
 */

void *
malloc(size_t size)
{

	/* All public entry points funnel through pubrealloc(). */
	return pubrealloc(NULL, size, " in malloc():");
}
1215
/*
 * POSIX aligned allocation.  Returns 0 and stores the pointer in
 * *memptr, or EINVAL/ENOMEM without touching *memptr.
 */
int
posix_memalign(void **memptr, size_t alignment, size_t size)
{
	int err;
	void *result;

	/*
	 * NOTE(review): unlike pubrealloc(), this initializes the
	 * allocator without taking _MALLOC_LOCK() -- confirm whether
	 * first use is guaranteed single-threaded.
	 */
	if (!malloc_started) {
		malloc_init();
		malloc_started = 1;
	}
	/* Make sure that alignment is a large enough power of 2. */
	if (((alignment - 1) & alignment) != 0 || alignment < sizeof(void *) ||
	    alignment > malloc_pagesize)
		return EINVAL;

	/*
	 * (size | alignment) is enough to assure the requested alignment, since
	 * the allocator always allocates power-of-two blocks.
	 */
	err = errno; /* Protect errno against changes in pubrealloc(). */
	result = pubrealloc(NULL, (size | alignment), " in posix_memalign()");
	errno = err;

	if (result == NULL)
		return ENOMEM;

	*memptr = result;
	return 0;
}
1245
/*
 * Allocate zero-initialized storage for "num" elements of "size"
 * bytes each, refusing requests whose byte count overflows size_t.
 */
void *
calloc(size_t num, size_t size)
{
	size_t total = num * size;
	void *p;

	/* Detect size_t overflow in the multiplication above. */
	if (size != 0 && total / size != num) {
		errno = ENOMEM;
		return (NULL);
	}

	p = pubrealloc(NULL, total, " in calloc():");
	if (p != NULL)
		memset(p, 0, total);

	return p;
}
1264
/* Release storage; size 0 through pubrealloc() performs the free. */
void
free(void *ptr)
{

	pubrealloc(ptr, 0, " in free():");
}
1271
/* Resize an allocation via the common pubrealloc() path. */
void *
realloc(void *ptr, size_t size)
{

	return pubrealloc(ptr, size, " in realloc():");
}
1278
1279 /*
1280 * Begin library-private functions, used by threading libraries for protection
1281 * of malloc during fork(). These functions are only called if the program is
1282 * running in threaded mode, so there is no need to check whether the program
1283 * is threaded here.
1284 */
1285
void
_malloc_prefork(void)
{

	/* Hold the malloc lock across fork() so no thread forks mid-update. */
	_MALLOC_LOCK();
}
1292
void
_malloc_postfork(void)
{

	/* Parent side: release the lock taken in _malloc_prefork(). */
	_MALLOC_UNLOCK();
}
1299
void
_malloc_postfork_child(void)
{

	/* Child side: release the lock taken in _malloc_prefork(). */
	_MALLOC_UNLOCK();
}
1306