/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*	Copyright (c) 1988 AT&T	*/
/*	  All Rights Reserved	*/

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/types.h>

#ifndef debug
#define	NDEBUG
#endif

#include <stdlib.h>
#include <string.h>
#include "assert.h"
#include "malloc.h"
#include "mallint.h"
#include <thread.h>
#include <pthread.h>
#include <synch.h>
#include <unistd.h>
#include <limits.h>

static mutex_t mlock = DEFAULTMUTEX;
static ssize_t freespace(struct holdblk *);
static void *malloc_unlocked(size_t, int);
static void *realloc_unlocked(void *, size_t);
static void free_unlocked(void *);
static void *morecore(size_t);

/*
 * user-level memory allocator (malloc, free, realloc)
 *
 *	-malloc, free, realloc and mallopt form a memory allocator
 *	similar to the original malloc, free, and realloc.  The routines
 *	here are much faster than the original, with slightly worse
 *	space usage (a few percent difference on most input).  They
 *	do not have the property that data in freed blocks is left
 *	untouched until the space is reallocated.
 *
 *	-Memory is kept in the "arena", a singly linked list of blocks.
 *	These blocks are of 3 types.
 *		1. A free block.  This is a block not in use by the
 *		   user.  It has a 3 word header.  (See description
 *		   of the free queue.)
 *		2. An allocated block.  This is a block the user has
 *		   requested.  It has only a 1 word header, pointing
 *		   to the next block of any sort.
 *		3. A permanently allocated block.  This covers space
 *		   acquired by the user directly through sbrk().  It
 *		   has a 1 word header, as does 2.
 *	Blocks of type 1 have the lower bit of the pointer to the
 *	nextblock = 0.  Blocks of type 2 and 3 have that bit set,
 *	to mark them busy.
 *
 *	-Unallocated blocks are kept on an unsorted doubly linked
 *	free list.
 *
 *	-Memory is allocated in blocks, with sizes specified by the
 *	user.  A circular first-fit strategy is used, with a roving
 *	head of the free queue, which prevents bunching of small
 *	blocks at the head of the queue.
 *
 *	-Compaction is performed at free time of any blocks immediately
 *	following the freed block.  The freed block will be combined
 *	with a preceding block during the search phase of malloc.
 *	Since a freed block is added at the front of the free queue,
 *	which is moved to the end of the queue if considered and
 *	rejected during the search, fragmentation only occurs if
 *	a block with a contiguous preceding block that is free is
 *	freed and reallocated on the next call to malloc.  The
 *	time savings of this strategy is judged to be worth the
 *	occasional waste of memory.
 *
 *	-Small blocks (of size < MAXSIZE) are not allocated directly.
 *	A large "holding" block is allocated via a recursive call to
 *	malloc.  This block contains a header and numlblks small blocks.
 *	Holding blocks for a given size of small block (rounded to the
 *	nearest ALIGNSZ bytes) are kept on a queue with the property that any
 *	holding block with an unused small block is in front of any without.
 *	A list of free blocks is kept within the holding block.
 */
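
/*
 * Illustrative sketch (editor's addition, not part of the original
 * implementation): the "busy" marking described above works because every
 * block header is at least word aligned, so the low bit of a nextblk
 * pointer is always 0 and can carry a flag.  The TAG_BUSY/IS_BUSY/CLR_TAG
 * macros below are hypothetical stand-ins for the SETBUSY/TESTBUSY/CLRBUSY
 * macros defined in mallint.h.  Kept under #if 0 so it is not compiled.
 */
#if 0
#include <assert.h>
#include <stdint.h>

#define	TAG_BUSY(p)	((void *)((uintptr_t)(p) | 1))
#define	IS_BUSY(p)	(((uintptr_t)(p)) & 1)
#define	CLR_TAG(p)	((void *)((uintptr_t)(p) & ~(uintptr_t)1))

static void
tag_demo(void)
{
	int word;			/* any aligned object */
	void *p = &word;		/* aligned, so low bit is 0 */
	void *busy = TAG_BUSY(p);	/* mark "allocated" */

	assert(IS_BUSY(busy));		/* flag is visible */
	assert(CLR_TAG(busy) == p);	/* original pointer recoverable */
}
#endif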

/*
 * description of arena, free queue, holding blocks etc.
 *
 * New compilers and linkers do not guarantee the order of initialized data.
 * Define freeptr as arena[2-3] to guarantee it follows arena in memory.
 * Later code depends on this order.
 */

static struct header arena[4] = {
	{0, 0, 0},
	{0, 0, 0},
	{0, 0, 0},
	{0, 0, 0}
};
				/*
				 * the second word is a minimal block to
				 * start the arena.  The first is a busy
				 * block to be pointed to by the last block.
				 */

#define	freeptr	(arena + 2)
				/* first and last entry in free list */
static struct header *arenaend;	/* ptr to block marking high end of arena */
static struct header *lastblk;	/* the highest block in the arena */
static struct holdblk **holdhead;	/* pointer to array of head pointers */
					/* to holding block chains */
/*
 * In order to save time calculating indices, the array is 1 too
 * large, and the first element is unused
 *
 * Variables controlling algorithm, esp. how holding blocks are used
 */

static int numlblks = NUMLBLKS;
static int minhead = MINHEAD;
static int change = 0;	/* != 0, once param changes are no longer allowed */
static int fastct = FASTCT;
static unsigned int maxfast = MAXFAST;
/* number of small block sizes to map to one size */
static int grain = ALIGNSZ;

#ifdef debug
static int case1count = 0;

static void
checkq(void)
{
	register struct header *p;

	p = &freeptr[0];

	/* check forward */
	/*CSTYLED*/
	while (p != &freeptr[1]) {
		p = p->nextfree;
		assert(p->prevfree->nextfree == p);
	}

	/* check backward */
	/*CSTYLED*/
	while (p != &freeptr[0]) {
		p = p->prevfree;
		assert(p->nextfree->prevfree == p);
	}
}
#endif


/*
 * malloc(nbytes) - give a user nbytes to use
 */

void *
malloc(size_t nbytes)
{
	void *ret;

	(void) mutex_lock(&mlock);
	ret = malloc_unlocked(nbytes, 0);
	(void) mutex_unlock(&mlock);
	return (ret);
}

/*
 * Use malloc_unlocked() to get the address to start with; given this
 * address, find the closest address that satisfies the requested
 * alignment and return it after doing some housekeeping (refer to the
 * ascii art below).
 */
void *
memalign(size_t alignment, size_t size)
{
	void *alloc_buf;
	struct header *hd;
	size_t alloc_size;
	uintptr_t fr;
	static int realloc_cnt;	/* times the double-sized retry was taken */

	if (size == 0 || alignment == 0 ||
	    (alignment & (alignment - 1)) != 0) {
		return (NULL);
	}
	if (alignment <= ALIGNSZ)
		return (malloc(size));

	alloc_size = size + alignment;
	if (alloc_size < size) { /* overflow */
		return (NULL);
	}

	(void) mutex_lock(&mlock);
	alloc_buf = malloc_unlocked(alloc_size, 1);
	(void) mutex_unlock(&mlock);

	if (alloc_buf == NULL)
		return (NULL);
	fr = (uintptr_t)alloc_buf;

	/* round up to the requested alignment boundary */
	fr = (fr + alignment - 1) / alignment * alignment;

	if (fr == (uintptr_t)alloc_buf)
		return (alloc_buf);

	if ((fr - (uintptr_t)alloc_buf) <= HEADSZ) {
		/*
		 * We hit an edge case, where the space ahead of the aligned
		 * address is not sufficient to hold 'header' and hence we
		 * can't free it.  So double the allocation request.
		 */
		realloc_cnt++;
		free(alloc_buf);
		alloc_size = size + alignment * 2;
		if (alloc_size < size) {
			return (NULL);
		}

		(void) mutex_lock(&mlock);
		alloc_buf = malloc_unlocked(alloc_size, 1);
		(void) mutex_unlock(&mlock);

		if (alloc_buf == NULL)
			return (NULL);
		fr = (uintptr_t)alloc_buf;

		fr = (fr + alignment - 1) / alignment * alignment;
		if (fr == (uintptr_t)alloc_buf)
			return (alloc_buf);
		if ((fr - (uintptr_t)alloc_buf) <= HEADSZ) {
			fr = fr + alignment;
		}
	}

	/*
	 *	    +-------+                +-------+
	 *	+---|  <a>  |                |  <a>  |---+
	 *	|   +-------+<--alloc_buf--->+-------+   |
	 *	|   |       |                |       |   |
	 *	|   |       |                |       |   |
	 *	|   |       |                |       |   |
	 *	|   |       |         hd---> +-------+   |
	 *	|   |       |            +---|  <b>  |<--+
	 *	|   |       |            |   +-------+<--- fr
	 *	|   |       |            |   |       |
	 *	|   |       |            |   |       |
	 *	|   |       |            |   |       |
	 *	|   |       |            |   |       |
	 *	|   |       |            |   |       |
	 *	|   |       |            |   |       |
	 *	|   +-------+            |   +-------+
	 *	+-->|  next |            +-->|  next |
	 *	    +-------+                +-------+
	 *
	 */
	hd = (struct header *)((char *)fr - minhead);
	(void) mutex_lock(&mlock);
	hd->nextblk = ((struct header *)((char *)alloc_buf - minhead))->nextblk;
	((struct header *)((char *)alloc_buf - minhead))->nextblk = SETBUSY(hd);
	(void) mutex_unlock(&mlock);
	free(alloc_buf);
	CHECKQ
	return ((void *)fr);
}
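
/*
 * Usage sketch (editor's addition): memalign() hands back an address that
 * is a multiple of a power-of-two alignment; a non-power-of-two alignment
 * or a zero size yields NULL.  The sizes below are arbitrary examples.
 * Kept under #if 0 so it is not compiled into the library.
 */
#if 0
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static void
memalign_demo(void)
{
	void *p = memalign(64, 100);	/* 100 bytes, 64-byte aligned */

	if (p != NULL && (uintptr_t)p % 64 == 0)
		(void) printf("aligned block at %p\n", p);
	free(p);			/* aligned blocks free normally */

	/* non-power-of-two alignment is rejected */
	if (memalign(24, 100) == NULL)
		(void) printf("alignment must be a power of two\n");
}
#endif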

void *
valloc(size_t size)
{
	static unsigned pagesize;

	if (size == 0)
		return (NULL);

	if (!pagesize)
		pagesize = sysconf(_SC_PAGESIZE);

	return (memalign(pagesize, size));
}

/*
 * malloc_unlocked(nbytes, nosmall) - Do the real work for malloc
 */

static void *
malloc_unlocked(size_t nbytes, int nosmall)
{
	struct header *blk;
	size_t nb;	/* size of entire block we need */

	/* on first call, initialize */
	if (freeptr[0].nextfree == GROUND) {
		/* initialize arena */
		arena[1].nextblk = (struct header *)BUSY;
		arena[0].nextblk = (struct header *)BUSY;
		lastblk = arenaend = &(arena[1]);
		/* initialize free queue */
		freeptr[0].nextfree = &(freeptr[1]);
		freeptr[1].nextblk = &(arena[0]);
		freeptr[1].prevfree = &(freeptr[0]);
		/* mark that small blocks not init yet */
	}
	if (nbytes == 0)
		return (NULL);

	if (nbytes <= maxfast && !nosmall) {
		/*
		 * We can allocate out of a holding block
		 */
		struct holdblk *holdblk;	/* head of right sized queue */
		struct lblk *lblk;		/* pointer to a little block */
		struct holdblk *newhold;

		if (!change) {
			int i;
			/*
			 * This allocates space for hold block
			 * pointers by calling malloc recursively.
			 * Maxfast is temporarily set to 0, to
			 * avoid infinite recursion.  allocate
			 * space for an extra ptr so that an index
			 * is just ->blksz/grain, with the first
			 * ptr unused.
			 */
			change = 1;	/* change to algorithm params */
					/* no longer allowed */
			/*
			 * temporarily alter maxfast, to avoid
			 * infinite recursion
			 */
			maxfast = 0;
			holdhead = (struct holdblk **)
			    malloc_unlocked(sizeof (struct holdblk *) *
			    (fastct + 1), 0);
			if (holdhead == NULL)
				return (malloc_unlocked(nbytes, 0));
			for (i = 1; i <= fastct; i++) {
				holdhead[i] = HGROUND;
			}
			maxfast = fastct * grain;
		}
		/*
		 * Note that this uses the absolute min header size (MINHEAD)
		 * unlike the large block case which uses minhead
		 *
		 * round up to nearest multiple of grain
		 * code assumes grain is a multiple of MINHEAD
		 */
		/* round up to grain */
		nb = (nbytes + grain - 1) / grain * grain;
		holdblk = holdhead[nb / grain];
		nb = nb + MINHEAD;
		/*
		 * look for space in the holding block.  Blocks with
		 * space will be in front of those without
		 */
		if ((holdblk != HGROUND) && (holdblk->lfreeq != LGROUND)) {
			/* there is space */
			lblk = holdblk->lfreeq;

			/*
			 * Now make lfreeq point to a free block.
			 * If lblk has been previously allocated and
			 * freed, it has a valid pointer to use.
			 * Otherwise, lblk is at the beginning of
			 * the unallocated blocks at the end of
			 * the holding block, so, if there is room, take
			 * the next space.  If not, mark holdblk full,
			 * and move holdblk to the end of the queue
			 */
			if (lblk < holdblk->unused) {
				/* move to next holdblk, if this one full */
				if ((holdblk->lfreeq =
				    CLRSMAL(lblk->header.nextfree)) ==
				    LGROUND) {
					holdhead[(nb-MINHEAD) / grain] =
					    holdblk->nexthblk;
				}
			} else if (((char *)holdblk->unused + nb) <
			    ((char *)holdblk + HOLDSZ(nb))) {
				holdblk->unused = (struct lblk *)
				    ((char *)holdblk->unused + nb);
				holdblk->lfreeq = holdblk->unused;
			} else {
				holdblk->unused = (struct lblk *)
				    ((char *)holdblk->unused + nb);
				holdblk->lfreeq = LGROUND;
				holdhead[(nb-MINHEAD)/grain] =
				    holdblk->nexthblk;
			}
			/* mark as busy and small */
			lblk->header.holder = (struct holdblk *)SETALL(holdblk);
		} else {
			/* we need a new holding block */
			newhold = (struct holdblk *)
			    malloc_unlocked(HOLDSZ(nb), 0);
			if (newhold == NULL) {
				return (NULL);
			}
			/* add to head of free queue */
			if (holdblk != HGROUND) {
				newhold->nexthblk = holdblk;
				newhold->prevhblk = holdblk->prevhblk;
				holdblk->prevhblk = newhold;
				newhold->prevhblk->nexthblk = newhold;
			} else {
				newhold->nexthblk = newhold->prevhblk = newhold;
			}
			holdhead[(nb-MINHEAD)/grain] = newhold;
			/* set up newhold */
			lblk = (struct lblk *)(newhold->space);
			newhold->lfreeq = newhold->unused =
			    (struct lblk *)((char *)newhold->space + nb);
			lblk->header.holder = (struct holdblk *)SETALL(newhold);
			newhold->blksz = nb - MINHEAD;
		}
#ifdef debug
		assert(((struct holdblk *)CLRALL(lblk->header.holder))->blksz >=
		    nbytes);
#endif /* debug */
		return ((char *)lblk + MINHEAD);
	} else {
		/*
		 * We need an ordinary block
		 */
		struct header *newblk;	/* used for creating a block */

		/* get number of bytes we need */
		nb = nbytes + minhead;
		nb = (nb + ALIGNSZ - 1) / ALIGNSZ * ALIGNSZ;	/* align */
		nb = (nb > MINBLKSZ) ? nb : MINBLKSZ;
		/*
		 * see if there is a big enough block
		 * If none exists, you will get to freeptr[1].
		 * freeptr[1].next = &arena[0], so when you do the test,
		 * the result is a large positive number, since arena[0]
		 * comes before all blocks.  Arena[0] is marked busy so
		 * that it will not be compacted.  This kludge is for the
		 * sake of the almighty efficiency.
		 */
		/* check that a very large request won't cause an inf. loop */

		if ((freeptr[1].nextblk - &(freeptr[1])) < nb) {
			return (NULL);
		} else {
			struct header *next;		/* following block */
			struct header *nextnext;	/* block after next */

			blk = freeptr;
			do {
				blk = blk->nextfree;
				/* see if we can compact */
				next = blk->nextblk;
				if (!TESTBUSY(nextnext = next->nextblk)) {
					do {
						DELFREEQ(next);
						next = nextnext;
						nextnext = next->nextblk;
					} while (!TESTBUSY(nextnext));
					/*
					 * next will be at most == to lastblk,
					 * but I think the >= test is faster
					 */
					if (next >= arenaend)
						lastblk = blk;
					blk->nextblk = next;
				}
			} while (((char *)(next) - (char *)blk) < nb);
		}
		/*
		 * if we didn't find a block, get more memory
		 */
		if (blk == &(freeptr[1])) {
			/*
			 * careful coding could likely replace
			 * newend with arenaend
			 */
			struct header *newend;	/* new end of arena */
			ssize_t nget;	/* number of words to get */

			/*
			 * Three cases - 1. There is space between arenaend
			 *		    and the break value that will become
			 *		    a permanently allocated block.
			 *		 2. Case 1 is not true, and the last
			 *		    block is allocated.
			 *		 3. Case 1 is not true, and the last
			 *		    block is free
			 */
			if ((newblk = (struct header *)sbrk(0)) !=
			    (struct header *)((char *)arenaend + HEADSZ)) {
				/* case 1 */
#ifdef debug
				if (case1count++ > 0)
					(void) write(2, "Case 1 hit more than once."
					    " brk or sbrk?\n", 41);
#endif
				/* get size to fetch */
				nget = nb + HEADSZ;
				/* round up to a block */
				nget = (nget + BLOCKSZ - 1) / BLOCKSZ * BLOCKSZ;
				assert((uintptr_t)newblk % ALIGNSZ == 0);
				/* get memory */
				if (morecore(nget) == (void *)-1)
					return (NULL);
				/* add to arena */
				newend = (struct header *)((char *)newblk + nget
				    - HEADSZ);
				assert((uintptr_t)newblk % ALIGNSZ == 0);
				newend->nextblk = SETBUSY(&(arena[1]));
/* ???  newblk ?? */
				newblk->nextblk = newend;

				/*
				 * space becomes a permanently allocated block.
				 * This is likely not mt-safe as lock is not
				 * shared with brk or sbrk
				 */
				arenaend->nextblk = SETBUSY(newblk);
				/* adjust other pointers */
				arenaend = newend;
				lastblk = newblk;
				blk = newblk;
			} else if (TESTBUSY(lastblk->nextblk)) {
				/* case 2 */
				nget = (nb + BLOCKSZ - 1) / BLOCKSZ * BLOCKSZ;
				if (morecore(nget) == (void *)-1)
					return (NULL);
				/* block must be word aligned */
				assert(((uintptr_t)newblk % ALIGNSZ) == 0);
				/*
				 * stub at old arenaend becomes first word
				 * in blk
				 */
/* ???  newblk = arenaend; */

				newend =
				    (struct header *)((char *)arenaend + nget);
				newend->nextblk = SETBUSY(&(arena[1]));
				arenaend->nextblk = newend;
				lastblk = blk = arenaend;
				arenaend = newend;
			} else {
				/* case 3 */
				/*
				 * last block in arena is at end of memory and
				 * is free
				 */
				/* 1.7 had this backward without cast */
				nget = nb -
				    ((char *)arenaend - (char *)lastblk);
				nget = (nget + (BLOCKSZ - 1)) /
				    BLOCKSZ * BLOCKSZ;
				assert(((uintptr_t)newblk % ALIGNSZ) == 0);
				if (morecore(nget) == (void *)-1)
					return (NULL);
				/* combine with last block, put in arena */
				newend = (struct header *)
				    ((char *)arenaend + nget);
				arenaend = lastblk->nextblk = newend;
				newend->nextblk = SETBUSY(&(arena[1]));
				/* set which block to use */
				blk = lastblk;
				DELFREEQ(blk);
			}
		} else {
			struct header *nblk;	/* next block */

			/* take block found off free queue */
			DELFREEQ(blk);
			/*
			 * make head of free queue immediately follow blk,
			 * unless blk was at the end of the queue
			 */
			nblk = blk->nextfree;
			if (nblk != &(freeptr[1])) {
				MOVEHEAD(nblk);
			}
		}
		/* blk now points to an adequate block */
		if (((char *)blk->nextblk - (char *)blk) - nb >= MINBLKSZ) {
			/* carve out the right size block */
			/* newblk will be the remainder */
			newblk = (struct header *)((char *)blk + nb);
			newblk->nextblk = blk->nextblk;
			/* mark the block busy */
			blk->nextblk = SETBUSY(newblk);
			ADDFREEQ(newblk);
			/* if blk was lastblk, make newblk lastblk */
			if (blk == lastblk)
				lastblk = newblk;
		} else {
			/* just mark the block busy */
			blk->nextblk = SETBUSY(blk->nextblk);
		}
	}
	CHECKQ
	assert((char *)CLRALL(blk->nextblk) -
	    ((char *)blk + minhead) >= nbytes);
	assert((char *)CLRALL(blk->nextblk) -
	    ((char *)blk + minhead) < nbytes + MINBLKSZ);
	return ((char *)blk + minhead);
}
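
/*
 * Worked example (editor's addition): malloc_unlocked() repeatedly rounds
 * a size up to a multiple of some granule with the idiom
 * (x + g - 1) / g * g.  The checks below show the arithmetic; the granule
 * and sizes are arbitrary.  Kept under #if 0 so it is not compiled.
 */
#if 0
#include <assert.h>
#include <stddef.h>

static size_t
roundup_demo(void)
{
	size_t g = 16;		/* granule, e.g. ALIGNSZ or grain */

	assert((1 + g - 1) / g * g == 16);	/* 1 rounds up to 16 */
	assert((16 + g - 1) / g * g == 16);	/* multiples unchanged */
	assert((17 + g - 1) / g * g == 32);	/* 17 rounds up to 32 */
	return ((100 + g - 1) / g * g);		/* 100 -> 112 */
}
#endif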

/*
 * free(ptr) - free block that user thinks starts at ptr
 *
 *	input - ptr-1 contains the block header.
 *		If the header points forward, we have a normal
 *			block pointing to the next block
 *		if the header points backward, we have a small
 *			block from a holding block.
 *		In both cases, the busy bit must be set
 */

void
free(void *ptr)
{
	(void) mutex_lock(&mlock);
	free_unlocked(ptr);
	(void) mutex_unlock(&mlock);
}

/*
 * free_unlocked(ptr) - Do the real work for free()
 */

static void
free_unlocked(void *ptr)
{
	struct holdblk *holdblk;	/* block holding blk */
	struct holdblk *oldhead;	/* former head of the hold block */
					/* queue containing blk's holder */

	if (ptr == NULL)
		return;
	if (TESTSMAL(((struct header *)((char *)ptr - MINHEAD))->nextblk)) {
		struct lblk *lblk;	/* pointer to freed block */
		ssize_t offset;		/* choice of header lists */

		lblk = (struct lblk *)CLRBUSY((char *)ptr - MINHEAD);
		assert((struct header *)lblk < arenaend);
		assert((struct header *)lblk > arena);
		/* allow twits (e.g. awk) to free a block twice */
		holdblk = lblk->header.holder;
		if (!TESTBUSY(holdblk))
			return;
		holdblk = (struct holdblk *)CLRALL(holdblk);
		/* put lblk on its hold block's free list */
		lblk->header.nextfree = SETSMAL(holdblk->lfreeq);
		holdblk->lfreeq = lblk;
		/* move holdblk to head of queue, if it's not already there */
		offset = holdblk->blksz / grain;
		oldhead = holdhead[offset];
		if (oldhead != holdblk) {
			/* first take out of current spot */
			holdhead[offset] = holdblk;
			holdblk->nexthblk->prevhblk = holdblk->prevhblk;
			holdblk->prevhblk->nexthblk = holdblk->nexthblk;
			/* now add at front */
			holdblk->nexthblk = oldhead;
			holdblk->prevhblk = oldhead->prevhblk;
			oldhead->prevhblk = holdblk;
			holdblk->prevhblk->nexthblk = holdblk;
		}
	} else {
		struct header *blk;	/* real start of block */
		struct header *next;	/* next = blk->nextblk */
		struct header *nextnext;	/* block after next */

		blk = (struct header *)((char *)ptr - minhead);
		next = blk->nextblk;
		/* take care of twits (e.g. awk) who return blocks twice */
		if (!TESTBUSY(next))
			return;
		blk->nextblk = next = CLRBUSY(next);
		ADDFREEQ(blk);
		/* see if we can compact */
		if (!TESTBUSY(nextnext = next->nextblk)) {
			do {
				DELFREEQ(next);
				next = nextnext;
			} while (!TESTBUSY(nextnext = next->nextblk));
			if (next == arenaend)
				lastblk = blk;
			blk->nextblk = next;
		}
	}
	CHECKQ
}


/*
 * realloc(ptr, size) - give the user a block of size "size", with
 *			the contents pointed to by ptr.  Free ptr.
 */

void *
realloc(void *ptr, size_t size)
{
	void *retval;

	(void) mutex_lock(&mlock);
	retval = realloc_unlocked(ptr, size);
	(void) mutex_unlock(&mlock);
	return (retval);
}


/*
 * realloc_unlocked(ptr, size) - Do the real work for realloc()
 */

static void *
realloc_unlocked(void *ptr, size_t size)
{
	struct header *blk;	/* block ptr is contained in */
	size_t trusize;	/* block size as allocator sees it */
	char *newptr;	/* pointer to user's new block */
	size_t cpysize;	/* amount to copy */
	struct header *next;	/* block after blk */

	if (ptr == NULL)
		return (malloc_unlocked(size, 0));

	if (size == 0) {
		free_unlocked(ptr);
		return (NULL);
	}

	if (TESTSMAL(((struct lblk *)((char *)ptr - MINHEAD))->
	    header.holder)) {
		/*
		 * we have a special small block which can't be expanded
		 *
		 * This makes the assumption that even if the user is
		 * reallocating a free block, malloc doesn't alter the contents
		 * of small blocks
		 */
		newptr = malloc_unlocked(size, 0);
		if (newptr == NULL)
			return (NULL);
		/* this isn't to save time--it's to protect the twits */
		if ((char *)ptr != newptr) {
			struct lblk *lblk;

			lblk = (struct lblk *)((char *)ptr - MINHEAD);
			cpysize = ((struct holdblk *)
			    CLRALL(lblk->header.holder))->blksz;
			cpysize = (size > cpysize) ? cpysize : size;
			(void) memcpy(newptr, ptr, cpysize);
			free_unlocked(ptr);
		}
	} else {
		blk = (struct header *)((char *)ptr - minhead);
		next = blk->nextblk;
		/*
		 * deal with twits who reallocate free blocks
		 *
		 * if they haven't set minhead via mallopt (M_KEEP),
		 * that's their problem
		 */
		if (!TESTBUSY(next)) {
			DELFREEQ(blk);
			blk->nextblk = SETBUSY(next);
		}
		next = CLRBUSY(next);
		/* make blk as big as possible */
		if (!TESTBUSY(next->nextblk)) {
			do {
				DELFREEQ(next);
				next = next->nextblk;
			} while (!TESTBUSY(next->nextblk));
			blk->nextblk = SETBUSY(next);
			if (next >= arenaend)
				lastblk = blk;
		}
		/* get size we really need */
		trusize = size + minhead;
		trusize = (trusize + ALIGNSZ - 1) / ALIGNSZ * ALIGNSZ;
		trusize = (trusize >= MINBLKSZ) ? trusize : MINBLKSZ;
		/* see if we have enough */
		/* this isn't really the copy size, but I need a register */
		cpysize = (char *)next - (char *)blk;
		if (cpysize >= trusize) {
			/* carve out the size we need */
			struct header *newblk;	/* remainder */

			if (cpysize - trusize >= MINBLKSZ) {
				/*
				 * carve out the right size block
				 * newblk will be the remainder
				 */
				newblk = (struct header *)((char *)blk +
				    trusize);
				newblk->nextblk = next;
				blk->nextblk = SETBUSY(newblk);
				/* at this point, next is invalid */
				ADDFREEQ(newblk);
				/* if blk was lastblk, make newblk lastblk */
				if (blk == lastblk)
					lastblk = newblk;
			}
			newptr = ptr;
		} else {
			/* bite the bullet, and call malloc */
			cpysize = (size > cpysize) ? cpysize : size;
			newptr = malloc_unlocked(size, 0);
			if (newptr == NULL)
				return (NULL);
			(void) memcpy(newptr, ptr, cpysize);
			free_unlocked(ptr);
		}
	}
	return (newptr);
}
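
/*
 * Usage sketch (editor's addition): realloc() preserves the old contents
 * up to the smaller of the old and new sizes, whether it grows the block
 * in place or falls back on malloc-and-copy.  Kept under #if 0 so it is
 * not compiled into the library.
 */
#if 0
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void
realloc_demo(void)
{
	char *p = malloc(8);
	char *q;

	if (p == NULL)
		return;
	(void) strcpy(p, "abcdefg");
	q = realloc(p, 1024);		/* contents survive the move */
	if (q == NULL) {
		free(p);		/* on failure, p is still valid */
		return;
	}
	if (strcmp(q, "abcdefg") == 0)
		(void) printf("contents preserved\n");
	free(q);
}
#endif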


/*
 * calloc - allocate and clear memory block
 */

void *
calloc(size_t num, size_t size)
{
	char *mp;
	size_t total;

	/* guard against overflow in num * size */
	if (size != 0 && num > (size_t)-1 / size)
		return (NULL);
	total = num * size;
	mp = malloc(total);
	if (mp == NULL)
		return (NULL);
	(void) memset(mp, 0, total);
	return (mp);
}
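
/*
 * Illustrative check (editor's addition): the overflow guard in calloc()
 * above matters because num * size wraps modulo SIZE_MAX + 1.  With a
 * 32-bit size_t, calloc(0x40000001, 4) would otherwise compute a 4-byte
 * request and return a block far smaller than the caller expects.  A
 * caller-side version of the same guard, kept under #if 0:
 */
#if 0
#include <stddef.h>

static int
mul_would_overflow(size_t num, size_t size)
{
	/* (size_t)-1 is the maximum value a size_t can hold */
	return (size != 0 && num > (size_t)-1 / size);
}
#endif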


/*
 * Mallopt - set options for allocation
 *
 *	Mallopt provides for control over the allocation algorithm.
 *	The cmds available are:
 *
 *	M_MXFAST Set maxfast to value.  Maxfast is the size of the
 *		 largest small, quickly allocated block.  Maxfast
 *		 may be set to 0 to disable fast allocation entirely.
 *
 *	M_NLBLKS Set numlblks to value.  Numlblks is the number of
 *		 small blocks per holding block.  Value must be
 *		 greater than 1.
 *
 *	M_GRAIN	 Set grain to value.  The sizes of all blocks
 *		 smaller than maxfast are considered to be rounded
 *		 up to the nearest multiple of grain.  The default
 *		 value of grain is the smallest number of bytes
 *		 which will allow alignment of any data type.  Grain
 *		 will be rounded up to a multiple of its default,
 *		 and maxsize will be rounded up to a multiple of
 *		 grain.  Value must be greater than 0.
 *
 *	M_KEEP	 Retain data in freed block until the next malloc,
 *		 realloc, or calloc.  Value is ignored.
 *		 This option is provided only for compatibility with
 *		 the old version of malloc, and is not recommended.
 *
 *	returns - 0, upon successful completion
 *		  1, if malloc has previously been called or
 *		     if value or cmd have illegal values
 */

int
mallopt(int cmd, int value)
{
	/* disallow changes once a small block is allocated */
	(void) mutex_lock(&mlock);
	if (change) {
		(void) mutex_unlock(&mlock);
		return (1);
	}
	switch (cmd) {
	case M_MXFAST:
		if (value < 0) {
			(void) mutex_unlock(&mlock);
			return (1);
		}
		fastct = (value + grain - 1) / grain;
		maxfast = grain * fastct;
		break;
	case M_NLBLKS:
		if (value <= 1) {
			(void) mutex_unlock(&mlock);
			return (1);
		}
		numlblks = value;
		break;
	case M_GRAIN:
		if (value <= 0) {
			(void) mutex_unlock(&mlock);
			return (1);
		}

		/* round grain up to a multiple of ALIGNSZ */
		grain = (value + ALIGNSZ - 1) / ALIGNSZ * ALIGNSZ;

		/* reduce fastct appropriately */
		fastct = (maxfast + grain - 1) / grain;
		maxfast = grain * fastct;
		break;
	case M_KEEP:
		if (change && holdhead != NULL) {
			(void) mutex_unlock(&mlock);
			return (1);
		}
		minhead = HEADSZ;
		break;
	default:
		(void) mutex_unlock(&mlock);
		return (1);
	}
	(void) mutex_unlock(&mlock);
	return (0);
}
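
/*
 * Usage sketch (editor's addition): mallopt() only succeeds before the
 * first small block is allocated, so tuning calls belong at program
 * start.  M_MXFAST and M_NLBLKS come from malloc.h; the parameter values
 * are arbitrary examples.  Kept under #if 0 so it is not compiled.
 */
#if 0
#include <malloc.h>
#include <stdio.h>
#include <stdlib.h>

static void
mallopt_demo(void)
{
	/* serve requests up to 64 bytes from holding blocks */
	if (mallopt(M_MXFAST, 64) != 0)
		(void) fprintf(stderr, "too late: malloc already used\n");
	/* 200 small blocks per holding block */
	(void) mallopt(M_NLBLKS, 200);
	(void) malloc(32);	/* after this, further tuning fails */
}
#endif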
943
944 /*
945 * mallinfo-provide information about space usage
946 *
947 * input - max; mallinfo will return the size of the
948 * largest block < max.
949 *
950 * output - a structure containing a description of
951 * of space usage, defined in malloc.h
952 */
953
954 struct mallinfo
mallinfo(void)955 mallinfo(void)
956 {
957 struct header *blk, *next; /* ptr to ordinary blocks */
958 struct holdblk *hblk; /* ptr to holding blocks */
959 struct mallinfo inf; /* return value */
960 int i; /* the ubiquitous counter */
961 ssize_t size; /* size of a block */
962 ssize_t fsp; /* free space in 1 hold block */
963
964 (void) mutex_lock(&mlock);
965 (void) memset(&inf, 0, sizeof (struct mallinfo));
966 if (freeptr[0].nextfree == GROUND) {
967 (void) mutex_unlock(&mlock);
968 return (inf);
969 }
970 blk = CLRBUSY(arena[1].nextblk);
971 /* return total space used */
972 inf.arena = (char *)arenaend - (char *)blk;
973
974 /*
975 * loop through arena, counting # of blocks, and
976 * and space used by blocks
977 */
978 next = CLRBUSY(blk->nextblk);
979 while (next != &(arena[1])) {
980 inf.ordblks++;
981 size = (char *)next - (char *)blk;
982 if (TESTBUSY(blk->nextblk)) {
983 inf.uordblks += size;
984 inf.keepcost += HEADSZ-MINHEAD;
985 } else {
986 inf.fordblks += size;
987 }
988 blk = next;
989 next = CLRBUSY(blk->nextblk);
990 }
991
992 /*
993 * if any holding block have been allocated
994 * then examine space in holding blks
995 */
996 if (change && holdhead != NULL) {
997 for (i = fastct; i > 0; i--) { /* loop thru ea. chain */
998 hblk = holdhead[i];
999 /* do only if chain not empty */
1000 if (hblk != HGROUND) {
1001 size = hblk->blksz +
1002 sizeof (struct lblk) - sizeof (int);
1003 do { /* loop thru 1 hold blk chain */
1004 inf.hblks++;
1005 fsp = freespace(hblk);
1006 inf.fsmblks += fsp;
1007 inf.usmblks += numlblks*size - fsp;
1008 inf.smblks += numlblks;
1009 hblk = hblk->nexthblk;
1010 } while (hblk != holdhead[i]);
1011 }
1012 }
1013 }
1014 inf.hblkhd = (inf.smblks / numlblks) * sizeof (struct holdblk);
1015 /* holding block were counted in ordblks, so subtract off */
1016 inf.ordblks -= inf.hblks;
1017 inf.uordblks -= inf.hblkhd + inf.usmblks + inf.fsmblks;
1018 inf.keepcost -= inf.hblks*(HEADSZ - MINHEAD);
1019 (void) mutex_unlock(&mlock);
1020 return (inf);
1021 }
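
/*
 * Usage sketch (editor's addition): mallinfo() returns the struct by
 * value; the fields printed here are ones this implementation fills in
 * (see malloc.h for the full struct).  Kept under #if 0 so it is not
 * compiled into the library.
 */
#if 0
#include <malloc.h>
#include <stdio.h>

static void
mallinfo_demo(void)
{
	struct mallinfo mi = mallinfo();

	(void) printf("arena %lu bytes, %lu ordinary blocks "
	    "(%lu bytes busy, %lu bytes free)\n",
	    (unsigned long)mi.arena, (unsigned long)mi.ordblks,
	    (unsigned long)mi.uordblks, (unsigned long)mi.fordblks);
}
#endif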


/*
 * freespace - calc. how much space is used in the free
 *	small blocks in a given holding block
 *
 *	input - hblk = given holding block
 *
 *	returns space used in free small blocks of hblk
 */

static ssize_t
freespace(struct holdblk *holdblk)
{
	struct lblk *lblk;
	ssize_t space = 0;
	ssize_t size;
	struct lblk *unused;

	lblk = CLRSMAL(holdblk->lfreeq);
	size = holdblk->blksz + sizeof (struct lblk) - sizeof (int);
	unused = CLRSMAL(holdblk->unused);
	/* follow free chain */
	while ((lblk != LGROUND) && (lblk != unused)) {
		space += size;
		lblk = CLRSMAL(lblk->header.nextfree);
	}
	space += ((char *)holdblk + HOLDSZ(size)) - (char *)unused;
	return (space);
}

static void *
morecore(size_t bytes)
{
	void *ret;

	if (bytes > LONG_MAX) {
		intptr_t wad;
		/*
		 * The request size is too big for sbrk's signed
		 * argument.  We need to do this in chunks.
		 */
		if (bytes == ULONG_MAX)
			return ((void *)-1);

		ret = sbrk(0);
		wad = LONG_MAX;
		while (wad > 0) {
			if (sbrk(wad) == (void *)-1) {
				if (ret != sbrk(0))
					(void) sbrk(-LONG_MAX);
				return ((void *)-1);
			}
			bytes -= LONG_MAX;
			wad = bytes;
		}
	} else
		ret = sbrk(bytes);

	return (ret);
}

#ifdef debug
int
check_arena(void)
{
	struct header *blk, *prev, *next;	/* ptr to ordinary blocks */

	(void) mutex_lock(&mlock);
	if (freeptr[0].nextfree == GROUND) {
		(void) mutex_unlock(&mlock);
		return (-1);
	}
	blk = arena + 1;

	/* loop through arena, checking */
	blk = (struct header *)CLRALL(blk->nextblk);
	next = (struct header *)CLRALL(blk->nextblk);
	while (next != arena + 1) {
		assert(blk >= arena + 1);
		assert(blk <= lastblk);
		assert(next >= blk + 1);
		assert(((uintptr_t)((struct header *)blk->nextblk) &
		    (4 | SMAL)) == 0);

		if (TESTBUSY(blk->nextblk) == 0) {
			assert(blk->nextfree >= freeptr);
			assert(blk->prevfree >= freeptr);
			assert(blk->nextfree <= lastblk);
			assert(blk->prevfree <= lastblk);
			assert(((uintptr_t)((struct header *)blk->nextfree) &
			    7) == 0);
			assert(((uintptr_t)((struct header *)blk->prevfree) &
			    7) == 0 || blk->prevfree == freeptr);
		}
		blk = next;
		next = CLRBUSY(blk->nextblk);
	}
	(void) mutex_unlock(&mlock);
	return (0);
}

#define	RSTALLOC	1
#endif

#ifdef RSTALLOC
/*
 * rstalloc - reset alloc routines
 *
 *	description - return allocated memory and reset
 *		allocation pointers.
 *
 *	Warning - This is for debugging purposes only.
 *		It will return all memory allocated after
 *		the first call to malloc, even if some
 *		of it was fetched by a user's sbrk().
 */

void
rstalloc(void)
{
	(void) mutex_lock(&mlock);
	minhead = MINHEAD;
	grain = ALIGNSZ;
	numlblks = NUMLBLKS;
	fastct = FASTCT;
	maxfast = MAXFAST;
	change = 0;
	if (freeptr[0].nextfree == GROUND) {
		(void) mutex_unlock(&mlock);
		return;
	}
	(void) brk(CLRBUSY(arena[1].nextblk));
	freeptr[0].nextfree = GROUND;
#ifdef debug
	case1count = 0;
#endif
	(void) mutex_unlock(&mlock);
}
#endif	/* RSTALLOC */

/*
 * cfree is an undocumented, obsolete function
 */

/* ARGSUSED1 */
void
cfree(void *p, size_t num, size_t size)
{
	free(p);
}

static void
malloc_prepare(void)
{
	(void) mutex_lock(&mlock);
}

static void
malloc_release(void)
{
	(void) mutex_unlock(&mlock);
}

#pragma init(malloc_init)
static void
malloc_init(void)
{
	(void) pthread_atfork(malloc_prepare, malloc_release, malloc_release);
}

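
/*
 * Illustrative sketch (editor's addition): the prepare/parent/child
 * handlers registered above follow the standard pthread_atfork() pattern.
 * Holding mlock across fork() guarantees no thread is midway through the
 * allocator when the address space is copied, so the child's heap state
 * is consistent.  The same idea for any lock, kept under #if 0:
 */
#if 0
#include <pthread.h>

static pthread_mutex_t lk = PTHREAD_MUTEX_INITIALIZER;

static void
prep(void)
{
	(void) pthread_mutex_lock(&lk);	/* runs just before fork() */
}

static void
rel(void)
{
	(void) pthread_mutex_unlock(&lk); /* runs in parent and child */
}

static void
atfork_demo(void)
{
	(void) pthread_atfork(prep, rel, rel);
}
#endif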