10Sstevel@tonic-gate /*
20Sstevel@tonic-gate * CDDL HEADER START
30Sstevel@tonic-gate *
40Sstevel@tonic-gate * The contents of this file are subject to the terms of the
52658Sraf * Common Development and Distribution License (the "License").
62658Sraf * You may not use this file except in compliance with the License.
70Sstevel@tonic-gate *
80Sstevel@tonic-gate * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
90Sstevel@tonic-gate * or http://www.opensolaris.org/os/licensing.
100Sstevel@tonic-gate * See the License for the specific language governing permissions
110Sstevel@tonic-gate * and limitations under the License.
120Sstevel@tonic-gate *
130Sstevel@tonic-gate * When distributing Covered Code, include this CDDL HEADER in each
140Sstevel@tonic-gate * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
150Sstevel@tonic-gate * If applicable, add the following below this CDDL HEADER, with the
160Sstevel@tonic-gate * fields enclosed by brackets "[]" replaced with your own identifying
170Sstevel@tonic-gate * information: Portions Copyright [yyyy] [name of copyright owner]
180Sstevel@tonic-gate *
190Sstevel@tonic-gate * CDDL HEADER END
200Sstevel@tonic-gate */
212658Sraf
223866Sraf /*
23*6812Sraf * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
243866Sraf * Use is subject to license terms.
253866Sraf */
263866Sraf
270Sstevel@tonic-gate /* Copyright (c) 1988 AT&T */
280Sstevel@tonic-gate /* All Rights Reserved */
290Sstevel@tonic-gate
300Sstevel@tonic-gate #pragma ident "%Z%%M% %I% %E% SMI"
310Sstevel@tonic-gate
320Sstevel@tonic-gate #include <sys/types.h>
330Sstevel@tonic-gate
340Sstevel@tonic-gate #ifndef debug
350Sstevel@tonic-gate #define NDEBUG
360Sstevel@tonic-gate #endif
370Sstevel@tonic-gate
380Sstevel@tonic-gate #include <stdlib.h>
390Sstevel@tonic-gate #include <string.h>
400Sstevel@tonic-gate #include "assert.h"
410Sstevel@tonic-gate #include "malloc.h"
420Sstevel@tonic-gate #include "mallint.h"
430Sstevel@tonic-gate #include <thread.h>
443866Sraf #include <pthread.h>
450Sstevel@tonic-gate #include <synch.h>
460Sstevel@tonic-gate #include <unistd.h>
470Sstevel@tonic-gate #include <limits.h>
480Sstevel@tonic-gate
/* single lock serializing every entry point into the allocator */
static mutex_t mlock = DEFAULTMUTEX;
/* internal workers; callers must hold mlock (except freespace/morecore) */
static ssize_t freespace(struct holdblk *);
static void *malloc_unlocked(size_t, int);
static void *realloc_unlocked(void *, size_t);
static void free_unlocked(void *);
static void *morecore(size_t);
550Sstevel@tonic-gate
560Sstevel@tonic-gate /*
 * user-level memory allocator (malloc, free, realloc)
580Sstevel@tonic-gate *
590Sstevel@tonic-gate * -malloc, free, realloc and mallopt form a memory allocator
600Sstevel@tonic-gate * similar to malloc, free, and realloc. The routines
610Sstevel@tonic-gate * here are much faster than the original, with slightly worse
620Sstevel@tonic-gate * space usage (a few percent difference on most input). They
630Sstevel@tonic-gate * do not have the property that data in freed blocks is left
640Sstevel@tonic-gate * untouched until the space is reallocated.
650Sstevel@tonic-gate *
660Sstevel@tonic-gate * -Memory is kept in the "arena", a singly linked list of blocks.
670Sstevel@tonic-gate * These blocks are of 3 types.
680Sstevel@tonic-gate * 1. A free block. This is a block not in use by the
690Sstevel@tonic-gate * user. It has a 3 word header. (See description
700Sstevel@tonic-gate * of the free queue.)
710Sstevel@tonic-gate * 2. An allocated block. This is a block the user has
720Sstevel@tonic-gate * requested. It has only a 1 word header, pointing
730Sstevel@tonic-gate * to the next block of any sort.
740Sstevel@tonic-gate * 3. A permanently allocated block. This covers space
 * acquired by the user directly through sbrk(). It
760Sstevel@tonic-gate * has a 1 word header, as does 2.
770Sstevel@tonic-gate * Blocks of type 1 have the lower bit of the pointer to the
780Sstevel@tonic-gate * nextblock = 0. Blocks of type 2 and 3 have that bit set,
790Sstevel@tonic-gate * to mark them busy.
800Sstevel@tonic-gate *
810Sstevel@tonic-gate * -Unallocated blocks are kept on an unsorted doubly linked
820Sstevel@tonic-gate * free list.
830Sstevel@tonic-gate *
840Sstevel@tonic-gate * -Memory is allocated in blocks, with sizes specified by the
 * user. A circular first-fit strategy is used, with a roving
860Sstevel@tonic-gate * head of the free queue, which prevents bunching of small
870Sstevel@tonic-gate * blocks at the head of the queue.
880Sstevel@tonic-gate *
890Sstevel@tonic-gate * -Compaction is performed at free time of any blocks immediately
900Sstevel@tonic-gate * following the freed block. The freed block will be combined
910Sstevel@tonic-gate * with a preceding block during the search phase of malloc.
920Sstevel@tonic-gate * Since a freed block is added at the front of the free queue,
930Sstevel@tonic-gate * which is moved to the end of the queue if considered and
940Sstevel@tonic-gate * rejected during the search, fragmentation only occurs if
 * a block with a contiguous preceding block that is free is
960Sstevel@tonic-gate * freed and reallocated on the next call to malloc. The
970Sstevel@tonic-gate * time savings of this strategy is judged to be worth the
980Sstevel@tonic-gate * occasional waste of memory.
990Sstevel@tonic-gate *
1000Sstevel@tonic-gate * -Small blocks (of size < MAXSIZE) are not allocated directly.
1010Sstevel@tonic-gate * A large "holding" block is allocated via a recursive call to
 * malloc. This block contains a header and numlblks small blocks.
1030Sstevel@tonic-gate * Holding blocks for a given size of small block (rounded to the
1040Sstevel@tonic-gate * nearest ALIGNSZ bytes) are kept on a queue with the property that any
1050Sstevel@tonic-gate * holding block with an unused small block is in front of any without.
1060Sstevel@tonic-gate * A list of free blocks is kept within the holding block.
1070Sstevel@tonic-gate */
1080Sstevel@tonic-gate
1090Sstevel@tonic-gate /*
1100Sstevel@tonic-gate * description of arena, free queue, holding blocks etc.
1110Sstevel@tonic-gate *
 * Newer compilers and linkers do not guarantee the order of initialized data.
 * Define freeptr as arena[2-3] to guarantee it follows arena in memory.
1140Sstevel@tonic-gate * Later code depends on this order.
1150Sstevel@tonic-gate */
1160Sstevel@tonic-gate
/*
 * arena[0-1] seed the block list, arena[2-3] (aliased as freeptr below)
 * seed the free queue; the two pairs must stay adjacent in memory.
 */
static struct header arena[4] = {
	{0, 0, 0},
	{0, 0, 0},
	{0, 0, 0},
	{0, 0, 0}
};
/*
 * the second word is a minimal block to
 * start the arena. The first is a busy
 * block to be pointed to by the last block.
 */

#define	freeptr (arena + 2)
/* first and last entry in free list */
static struct header *arenaend;	/* ptr to block marking high end of arena */
static struct header *lastblk;	/* the highest block in the arena */
static struct holdblk **holdhead;	/* pointer to array of head pointers */
					/* to holding block chains */
/*
 * In order to save time calculating indices, the array is 1 too
 * large, and the first element is unused
 *
 * Variables controlling algorithm, esp. how holding blocks are used
 */
static int numlblks = NUMLBLKS;	/* little blocks per holding block */
static int minhead = MINHEAD;	/* header size for ordinary blocks */
static int change = 0;	/* != 0, once param changes are no longer allowed */
static int fastct = FASTCT;
static unsigned int maxfast = MAXFAST;	/* largest "small block" request */
/* number of small block sizes to map to one size */

static int grain = ALIGNSZ;
1490Sstevel@tonic-gate
#ifdef debug
static int case1count = 0;	/* times the sbrk "case 1" path was taken */

/*
 * checkq() - debug-only sanity check that the doubly linked free
 * queue is self-consistent in both directions.
 */
static void
checkq(void)
{
	register struct header *p;

	p = &freeptr[0];

	/* check forward */
	/*CSTYLED*/
	while (p != &freeptr[1]) {
		p = p->nextfree;
		assert(p->prevfree->nextfree == p);
	}

	/* check backward */
	/*CSTYLED*/
	while (p != &freeptr[0]) {
		p = p->prevfree;
		assert(p->nextfree->prevfree == p);
	}
}
#endif
1750Sstevel@tonic-gate
1760Sstevel@tonic-gate
1770Sstevel@tonic-gate /*
1780Sstevel@tonic-gate * malloc(nbytes) - give a user nbytes to use
1790Sstevel@tonic-gate */
1800Sstevel@tonic-gate
1810Sstevel@tonic-gate void *
malloc(size_t nbytes)1820Sstevel@tonic-gate malloc(size_t nbytes)
1830Sstevel@tonic-gate {
1840Sstevel@tonic-gate void *ret;
1850Sstevel@tonic-gate
1860Sstevel@tonic-gate (void) mutex_lock(&mlock);
1870Sstevel@tonic-gate ret = malloc_unlocked(nbytes, 0);
1880Sstevel@tonic-gate (void) mutex_unlock(&mlock);
1890Sstevel@tonic-gate return (ret);
1900Sstevel@tonic-gate }
1910Sstevel@tonic-gate
1920Sstevel@tonic-gate /*
1930Sstevel@tonic-gate * Use malloc_unlocked() to get the address to start with; Given this
1940Sstevel@tonic-gate * address, find out the closest address that aligns with the request
1950Sstevel@tonic-gate * and return that address after doing some house keeping (refer to the
1960Sstevel@tonic-gate * ascii art below).
1970Sstevel@tonic-gate */
void *
memalign(size_t alignment, size_t size)
{
	void *alloc_buf;	/* raw over-sized allocation */
	struct header *hd;	/* header synthesized before aligned addr */
	size_t alloc_size;	/* size actually requested from malloc */
	uintptr_t fr;		/* the aligned address to return */
	static int realloc;	/* count of edge-case retries (diagnostic) */

	/* alignment must be a nonzero power of two */
	if (size == 0 || alignment == 0 ||
	    (alignment & (alignment - 1)) != 0) {
		return (NULL);
	}
	if (alignment <= ALIGNSZ)
		return (malloc(size));

	/* over-allocate so an aligned address must fall inside the block */
	alloc_size = size + alignment;
	if (alloc_size < size) { /* overflow */
		return (NULL);
	}

	(void) mutex_lock(&mlock);
	alloc_buf = malloc_unlocked(alloc_size, 1);
	(void) mutex_unlock(&mlock);

	if (alloc_buf == NULL)
		return (NULL);
	fr = (uintptr_t)alloc_buf;

	/* round up to the next multiple of alignment */
	fr = (fr + alignment - 1) / alignment * alignment;

	if (fr == (uintptr_t)alloc_buf)
		return (alloc_buf);

	if ((fr - (uintptr_t)alloc_buf) <= HEADSZ) {
		/*
		 * we hit an edge case, where the space ahead of aligned
		 * address is not sufficient to hold 'header' and hence we
		 * can't free it. So double the allocation request.
		 */
		realloc++;
		free(alloc_buf);
		alloc_size = size + alignment*2;
		if (alloc_size < size) {
			return (NULL);
		}

		(void) mutex_lock(&mlock);
		alloc_buf = malloc_unlocked(alloc_size, 1);
		(void) mutex_unlock(&mlock);

		if (alloc_buf == NULL)
			return (NULL);
		fr = (uintptr_t)alloc_buf;

		fr = (fr + alignment - 1) / alignment * alignment;
		if (fr == (uintptr_t)alloc_buf)
			return (alloc_buf);
		/* skip to the next aligned slot; now there is header room */
		if ((fr - (uintptr_t)alloc_buf) <= HEADSZ) {
			fr = fr + alignment;
		}
	}

	/*
	 * Split the raw block <a> in two: the leading fragment keeps the
	 * original header and is freed; the aligned tail gets a new
	 * header <b> placed minhead bytes before fr.
	 *
	 *	+-------+		+-------+
	 * +---| <a>   |		| <a>   |--+
	 * |   +-------+<--alloc_buf-->+-------+  |
	 * |   |	|		|	|  |
	 * |   |	|		|	|  |
	 * |   |	|		|	|  |
	 * |   |	|	 hd-->	+-------+  |
	 * |   |	|	    +---| <b>	|<-+
	 * |   |	|	    |	+-------+<--- fr
	 * |   |	|	    |	|	|
	 * |   |	|	    |	|	|
	 * |   |	|	    |	|	|
	 * |   |	|	    |	|	|
	 * |   |	|	    |	|	|
	 * |   |	|	    |	|	|
	 * |   +-------+	    |	+-------+
	 * +-->| next	|	    +-->| next	|
	 *	+-------+		+-------+
	 *
	 */
	hd = (struct header *)((char *)fr - minhead);
	(void) mutex_lock(&mlock);
	/* <b> inherits <a>'s successor; <a> now points (busy) at <b> */
	hd->nextblk = ((struct header *)((char *)alloc_buf - minhead))->nextblk;
	((struct header *)((char *)alloc_buf - minhead))->nextblk = SETBUSY(hd);
	(void) mutex_unlock(&mlock);
	/* release the leading fragment; the aligned tail stays busy */
	free(alloc_buf);
	CHECKQ
	return ((void *)fr);
}
2910Sstevel@tonic-gate
/*
 * valloc(size) - allocate size bytes aligned on a page boundary.
 *
 * Returns NULL for a zero-size request, if the page size cannot be
 * determined, or if memalign() fails.
 */
void *
valloc(size_t size)
{
	/* cached page size; 0 means "not fetched yet" */
	static unsigned pagesize;

	if (size == 0)
		return (NULL);

	if (!pagesize) {
		long ps = sysconf(_SC_PAGESIZE);

		/*
		 * Previously a sysconf() failure (-1) was cached as a
		 * huge unsigned "page size" forever; fail cleanly and
		 * leave the cache empty so a later call may retry.
		 */
		if (ps <= 0)
			return (NULL);
		pagesize = (unsigned)ps;
	}

	return (memalign(pagesize, size));
}
3040Sstevel@tonic-gate
3050Sstevel@tonic-gate /*
3060Sstevel@tonic-gate * malloc_unlocked(nbytes, nosmall) - Do the real work for malloc
3070Sstevel@tonic-gate */
3080Sstevel@tonic-gate
/*
 * Caller must hold mlock.  nosmall != 0 bypasses the holding-block
 * (small allocation) path so the result always has a real arena
 * header in front of it (required by memalign()).
 */
static void *
malloc_unlocked(size_t nbytes, int nosmall)
{
	struct header *blk;
	size_t nb;	/* size of entire block we need */

	/* on first call, initialize */
	if (freeptr[0].nextfree == GROUND) {
		/* initialize arena */
		arena[1].nextblk = (struct header *)BUSY;
		arena[0].nextblk = (struct header *)BUSY;
		lastblk = arenaend = &(arena[1]);
		/* initialize free queue */
		freeptr[0].nextfree = &(freeptr[1]);
		freeptr[1].nextblk = &(arena[0]);
		freeptr[1].prevfree = &(freeptr[0]);
		/* mark that small blocks not init yet */
	}
	if (nbytes == 0)
		return (NULL);

	if (nbytes <= maxfast && !nosmall) {
		/*
		 * We can allocate out of a holding block
		 */
		struct holdblk *holdblk; /* head of right sized queue */
		struct lblk *lblk;	 /* pointer to a little block */
		struct holdblk *newhold;

		if (!change) {
			int i;
			/*
			 * This allocates space for hold block
			 * pointers by calling malloc recursively.
			 * Maxfast is temporarily set to 0, to
			 * avoid infinite recursion.  allocate
			 * space for an extra ptr so that an index
			 * is just ->blksz/grain, with the first
			 * ptr unused.
			 */
			change = 1;	/* change to algorithm params */
					/* no longer allowed */
			/*
			 * temporarily alter maxfast, to avoid
			 * infinite recursion
			 */
			maxfast = 0;
			holdhead = (struct holdblk **)
			    malloc_unlocked(sizeof (struct holdblk *) *
			    (fastct + 1), 0);
			if (holdhead == NULL)
				return (malloc_unlocked(nbytes, 0));
			for (i = 1; i <= fastct; i++) {
				holdhead[i] = HGROUND;
			}
			maxfast = fastct * grain;
		}
		/*
		 * Note that this uses the absolute min header size (MINHEAD)
		 * unlike the large block case which uses minhead
		 *
		 * round up to nearest multiple of grain
		 * code assumes grain is a multiple of MINHEAD
		 */
		/* round up to grain */
		nb = (nbytes + grain - 1) / grain * grain;
		holdblk = holdhead[nb / grain];
		nb = nb + MINHEAD;
		/*
		 * look for space in the holding block.  Blocks with
		 * space will be in front of those without
		 */
		if ((holdblk != HGROUND) && (holdblk->lfreeq != LGROUND)) {
			/* there is space */
			lblk = holdblk->lfreeq;

			/*
			 * Now make lfreeq point to a free block.
			 * If lblk has been previously allocated and
			 * freed, it has a valid pointer to use.
			 * Otherwise, lblk is at the beginning of
			 * the unallocated blocks at the end of
			 * the holding block, so, if there is room, take
			 * the next space.  If not, mark holdblk full,
			 * and move holdblk to the end of the queue
			 */
			if (lblk < holdblk->unused) {
				/* move to next holdblk, if this one full */
				if ((holdblk->lfreeq =
				    CLRSMAL(lblk->header.nextfree)) ==
				    LGROUND) {
					holdhead[(nb-MINHEAD) / grain] =
					    holdblk->nexthblk;
				}
			} else if (((char *)holdblk->unused + nb) <
			    ((char *)holdblk + HOLDSZ(nb))) {
				holdblk->unused = (struct lblk *)
				    ((char *)holdblk->unused+nb);
				holdblk->lfreeq = holdblk->unused;
			} else {
				holdblk->unused = (struct lblk *)
				    ((char *)holdblk->unused+nb);
				holdblk->lfreeq = LGROUND;
				holdhead[(nb-MINHEAD)/grain] =
				    holdblk->nexthblk;
			}
			/* mark as busy and small */
			lblk->header.holder = (struct holdblk *)SETALL(holdblk);
		} else {
			/* we need a new holding block */
			newhold = (struct holdblk *)
			    malloc_unlocked(HOLDSZ(nb), 0);
			if ((char *)newhold == NULL) {
				return (NULL);
			}
			/* add to head of free queue */
			if (holdblk != HGROUND) {
				newhold->nexthblk = holdblk;
				newhold->prevhblk = holdblk->prevhblk;
				holdblk->prevhblk = newhold;
				newhold->prevhblk->nexthblk = newhold;
			} else {
				newhold->nexthblk = newhold->prevhblk = newhold;
			}
			holdhead[(nb-MINHEAD)/grain] = newhold;
			/* set up newhold */
			lblk = (struct lblk *)(newhold->space);
			newhold->lfreeq = newhold->unused =
			    (struct lblk *)((char *)newhold->space+nb);
			lblk->header.holder = (struct holdblk *)SETALL(newhold);
			newhold->blksz = nb-MINHEAD;
		}
#ifdef debug
		assert(((struct holdblk *)CLRALL(lblk->header.holder))->blksz >=
		    nbytes);
#endif /* debug */
		return ((char *)lblk + MINHEAD);
	} else {
		/*
		 * We need an ordinary block
		 */
		struct header *newblk;	/* used for creating a block */

		/* get number of bytes we need */
		nb = nbytes + minhead;
		nb = (nb + ALIGNSZ - 1) / ALIGNSZ * ALIGNSZ;	/* align */
		nb = (nb > MINBLKSZ) ? nb : MINBLKSZ;
		/*
		 * see if there is a big enough block
		 * If none exists, you will get to freeptr[1].
		 * freeptr[1].next = &arena[0], so when you do the test,
		 * the result is a large positive number, since arena[0]
		 * comes before all blocks.  Arena[0] is marked busy so
		 * that it will not be compacted.  This kludge is for the
		 * sake of the almighty efficiency.
		 */
		/* check that a very large request won't cause an inf. loop */

		if ((freeptr[1].nextblk-&(freeptr[1])) < nb) {
			return (NULL);
		} else {
			struct header *next;		/* following block */
			struct header *nextnext;	/* block after next */

			/* roving first-fit scan of the free queue */
			blk = freeptr;
			do {
				blk = blk->nextfree;
				/* see if we can compact */
				next = blk->nextblk;
				if (!TESTBUSY(nextnext = next->nextblk)) {
					/* absorb following free blocks */
					do {
						DELFREEQ(next);
						next = nextnext;
						nextnext = next->nextblk;
					} while (!TESTBUSY(nextnext));
					/*
					 * next will be at most == to lastblk,
					 * but I think the >= test is faster
					 */
					if (next >= arenaend)
						lastblk = blk;
					blk->nextblk = next;
				}
			} while (((char *)(next) - (char *)blk) < nb);
		}
		/*
		 * if we didn't find a block, get more memory
		 */
		if (blk == &(freeptr[1])) {
			/*
			 * careful coding could likely replace
			 * newend with arenaend
			 */
			struct header *newend;	/* new end of arena */
			ssize_t nget;	/* number of words to get */

			/*
			 * Three cases - 1. There is space between arenaend
			 *		    and the break value that will become
			 *		    a permanently allocated block.
			 *		 2. Case 1 is not true, and the last
			 *		    block is allocated.
			 *		 3. Case 1 is not true, and the last
			 *		    block is free
			 */
			if ((newblk = (struct header *)sbrk(0)) !=
			    (struct header *)((char *)arenaend + HEADSZ)) {
				/* case 1 */
#ifdef debug
				if (case1count++ > 0)
					(void) write(2, "Case 1 hit more that once."
					    " brk or sbrk?\n", 41);
#endif
				/* get size to fetch */
				nget = nb + HEADSZ;
				/* round up to a block */
				nget = (nget + BLOCKSZ - 1)/BLOCKSZ * BLOCKSZ;
				assert((uintptr_t)newblk % ALIGNSZ == 0);
				/* get memory */
				if (morecore(nget) == (void *)-1)
					return (NULL);
				/* add to arena */
				newend = (struct header *)((char *)newblk + nget
				    - HEADSZ);
				assert((uintptr_t)newblk % ALIGNSZ == 0);
				newend->nextblk = SETBUSY(&(arena[1]));
/* ??? newblk ?? */
				newblk->nextblk = newend;

				/*
				 * space becomes a permanently allocated block.
				 * This is likely not mt-safe as lock is not
				 * shared with brk or sbrk
				 */
				arenaend->nextblk = SETBUSY(newblk);
				/* adjust other pointers */
				arenaend = newend;
				lastblk = newblk;
				blk = newblk;
			} else if (TESTBUSY(lastblk->nextblk)) {
				/* case 2 */
				nget = (nb + BLOCKSZ - 1) / BLOCKSZ * BLOCKSZ;
				if (morecore(nget) == (void *)-1)
					return (NULL);
				/* block must be word aligned */
				assert(((uintptr_t)newblk%ALIGNSZ) == 0);
				/*
				 * stub at old arenaend becomes first word
				 * in blk
				 */
/* ??? newblk = arenaend; */

				newend =
				    (struct header *)((char *)arenaend+nget);
				newend->nextblk = SETBUSY(&(arena[1]));
				arenaend->nextblk = newend;
				lastblk = blk = arenaend;
				arenaend = newend;
			} else {
				/* case 3 */
				/*
				 * last block in arena is at end of memory and
				 * is free
				 */
				/* 1.7 had this backward without cast */
				nget = nb -
				    ((char *)arenaend - (char *)lastblk);
				nget = (nget + (BLOCKSZ - 1)) /
				    BLOCKSZ * BLOCKSZ;
				assert(((uintptr_t)newblk % ALIGNSZ) == 0);
				if (morecore(nget) == (void *)-1)
					return (NULL);
				/* combine with last block, put in arena */
				newend = (struct header *)
				    ((char *)arenaend + nget);
				arenaend = lastblk->nextblk = newend;
				newend->nextblk = SETBUSY(&(arena[1]));
				/* set which block to use */
				blk = lastblk;
				DELFREEQ(blk);
			}
		} else {
			struct header *nblk;	/* next block */

			/* take block found off the free queue */
			DELFREEQ(blk);
			/*
			 * make head of free queue immediately follow blk,
			 * unless blk was at the end of the queue
			 */
			nblk = blk->nextfree;
			if (nblk != &(freeptr[1])) {
				MOVEHEAD(nblk);
			}
		}
		/* blk now points to an adequate block */
		if (((char *)blk->nextblk - (char *)blk) - nb >= MINBLKSZ) {
			/* carve out the right size block */
			/* newblk will be the remainder */
			newblk = (struct header *)((char *)blk + nb);
			newblk->nextblk = blk->nextblk;
			/* mark the block busy */
			blk->nextblk = SETBUSY(newblk);
			ADDFREEQ(newblk);
			/* if blk was lastblk, make newblk lastblk */
			if (blk == lastblk)
				lastblk = newblk;
		} else {
			/* just mark the block busy */
			blk->nextblk = SETBUSY(blk->nextblk);
		}
	}
	CHECKQ
	assert((char *)CLRALL(blk->nextblk) -
	    ((char *)blk + minhead) >= nbytes);
	assert((char *)CLRALL(blk->nextblk) -
	    ((char *)blk + minhead) < nbytes + MINBLKSZ);
	return ((char *)blk + minhead);
}
6280Sstevel@tonic-gate
6290Sstevel@tonic-gate /*
6300Sstevel@tonic-gate * free(ptr) - free block that user thinks starts at ptr
6310Sstevel@tonic-gate *
6320Sstevel@tonic-gate * input - ptr-1 contains the block header.
6330Sstevel@tonic-gate * If the header points forward, we have a normal
6340Sstevel@tonic-gate * block pointing to the next block
6350Sstevel@tonic-gate * if the header points backward, we have a small
6360Sstevel@tonic-gate * block from a holding block.
6370Sstevel@tonic-gate * In both cases, the busy bit must be set
6380Sstevel@tonic-gate */
6390Sstevel@tonic-gate
/*
 * Public entry point: take the allocator lock and defer to
 * free_unlocked(), which handles both small and ordinary blocks.
 */
void
free(void *ptr)
{
	(void) mutex_lock(&mlock);
	free_unlocked(ptr);
	(void) mutex_unlock(&mlock);
}
6470Sstevel@tonic-gate
/*
 * free_unlocked(ptr) - do the real work for free(); caller holds mlock
 */

void
free_unlocked(void *ptr)
{
	struct holdblk *holdblk;	/* block holding blk */
	struct holdblk *oldhead;	/* former head of the hold block */
					/* queue containing blk's holder */

	/* free(NULL) is a no-op */
	if (ptr == NULL)
		return;
	if (TESTSMAL(((struct header *)((char *)ptr - MINHEAD))->nextblk)) {
		/* small block: its header points back at its holding block */
		struct lblk *lblk;	/* pointer to freed block */
		ssize_t offset;		/* choice of header lists */

		lblk = (struct lblk *)CLRBUSY((char *)ptr - MINHEAD);
		assert((struct header *)lblk < arenaend);
		assert((struct header *)lblk > arena);
		/* allow twits (e.g. awk) to free a block twice */
		holdblk = lblk->header.holder;
		if (!TESTBUSY(holdblk))
			return;
		holdblk = (struct holdblk *)CLRALL(holdblk);
		/* put lblk on its hold block's free list */
		lblk->header.nextfree = SETSMAL(holdblk->lfreeq);
		holdblk->lfreeq = lblk;
		/* move holdblk to head of queue, if it's not already there */
		offset = holdblk->blksz / grain;
		oldhead = holdhead[offset];
		if (oldhead != holdblk) {
			/* first take out of current spot */
			holdhead[offset] = holdblk;
			holdblk->nexthblk->prevhblk = holdblk->prevhblk;
			holdblk->prevhblk->nexthblk = holdblk->nexthblk;
			/* now add at front */
			holdblk->nexthblk = oldhead;
			holdblk->prevhblk = oldhead->prevhblk;
			oldhead->prevhblk = holdblk;
			holdblk->prevhblk->nexthblk = holdblk;
		}
	} else {
		/* ordinary block */
		struct header *blk;	/* real start of block */
		struct header *next;	/* next = blk->nextblk */
		struct header *nextnext;	/* block after next */

		blk = (struct header *)((char *)ptr - minhead);
		next = blk->nextblk;
		/* take care of twits (e.g. awk) who return blocks twice */
		if (!TESTBUSY(next))
			return;
		blk->nextblk = next = CLRBUSY(next);
		ADDFREEQ(blk);
		/*
		 * Coalesce: unlink each immediately-following free block
		 * from the free queue and absorb it into blk.
		 */
		if (!TESTBUSY(nextnext = next->nextblk)) {
			do {
				DELFREEQ(next);
				next = nextnext;
			} while (!TESTBUSY(nextnext = next->nextblk));
			/* absorbed the final block; blk is now last */
			if (next == arenaend) lastblk = blk;
			blk->nextblk = next;
		}
	}
	CHECKQ
}
7140Sstevel@tonic-gate
7150Sstevel@tonic-gate
7160Sstevel@tonic-gate /*
7170Sstevel@tonic-gate * realloc(ptr, size) - give the user a block of size "size", with
7180Sstevel@tonic-gate * the contents pointed to by ptr. Free ptr.
7190Sstevel@tonic-gate */
7200Sstevel@tonic-gate
7210Sstevel@tonic-gate void *
realloc(void * ptr,size_t size)7220Sstevel@tonic-gate realloc(void *ptr, size_t size)
7230Sstevel@tonic-gate {
7240Sstevel@tonic-gate void *retval;
7250Sstevel@tonic-gate
7260Sstevel@tonic-gate (void) mutex_lock(&mlock);
7270Sstevel@tonic-gate retval = realloc_unlocked(ptr, size);
7280Sstevel@tonic-gate (void) mutex_unlock(&mlock);
7290Sstevel@tonic-gate return (retval);
7300Sstevel@tonic-gate }
7310Sstevel@tonic-gate
7320Sstevel@tonic-gate
/*
 * realloc_unlocked(ptr) - do the real work for realloc(); caller holds mlock
 */

static void *
realloc_unlocked(void *ptr, size_t size)
{
	struct header *blk;	/* block ptr is contained in */
	size_t trusize;	/* block size as allocater sees it */
	char *newptr;	/* pointer to user's new block */
	size_t cpysize;	/* amount to copy */
	struct header *next;	/* block after blk */

	/* realloc(NULL, size) behaves like malloc(size) */
	if (ptr == NULL)
		return (malloc_unlocked(size, 0));

	/* realloc(ptr, 0) behaves like free(ptr) */
	if (size == 0) {
		free_unlocked(ptr);
		return (NULL);
	}

	if (TESTSMAL(((struct lblk *)((char *)ptr - MINHEAD))->
	    header.holder)) {
		/*
		 * we have a special small block which can't be expanded
		 *
		 * This makes the assumption that even if the user is
		 * reallocating a free block, malloc doesn't alter the contents
		 * of small blocks
		 */
		newptr = malloc_unlocked(size, 0);
		if (newptr == NULL)
			return (NULL);
		/* this isn't to save time--its to protect the twits */
		if ((char *)ptr != newptr) {
			struct lblk *lblk;
			lblk = (struct lblk *)((char *)ptr - MINHEAD);
			/* copy at most the old small-block payload size */
			cpysize = ((struct holdblk *)
			    CLRALL(lblk->header.holder))->blksz;
			cpysize = (size > cpysize) ? cpysize : size;
			(void) memcpy(newptr, ptr, cpysize);
			free_unlocked(ptr);
		}
	} else {
		/* ordinary block */
		blk = (struct header *)((char *)ptr - minhead);
		next = blk->nextblk;
		/*
		 * deal with twits who reallocate free blocks
		 *
		 * if they haven't enabled M_KEEP via mallopt (which makes
		 * freed-block contents survive), that's their problem
		 */
		if (!TESTBUSY(next)) {
			DELFREEQ(blk);
			blk->nextblk = SETBUSY(next);
		}
		next = CLRBUSY(next);
		/* make blk as big as possible by absorbing free successors */
		if (!TESTBUSY(next->nextblk)) {
			do {
				DELFREEQ(next);
				next = next->nextblk;
			} while (!TESTBUSY(next->nextblk));
			blk->nextblk = SETBUSY(next);
			if (next >= arenaend) lastblk = blk;
		}
		/* get size we really need: header + alignment + minimum */
		trusize = size+minhead;
		trusize = (trusize + ALIGNSZ - 1)/ALIGNSZ*ALIGNSZ;
		trusize = (trusize >= MINBLKSZ) ? trusize : MINBLKSZ;
		/* see if we have enough */
		/* this isn't really the copy size, but I need a register */
		cpysize = (char *)next - (char *)blk;
		if (cpysize >= trusize) {
			/* carve out the size we need */
			struct header *newblk;	/* remainder */

			if (cpysize - trusize >= MINBLKSZ) {
				/*
				 * carve out the right size block
				 * newblk will be the remainder
				 */
				newblk = (struct header *)((char *)blk +
				    trusize);
				newblk->nextblk = next;
				blk->nextblk = SETBUSY(newblk);
				/* at this point, next is invalid */
				ADDFREEQ(newblk);
				/* if blk was lastblk, make newblk lastblk */
				if (blk == lastblk)
					lastblk = newblk;
			}
			newptr = ptr;
		} else {
			/* bite the bullet, and call malloc */
			cpysize = (size > cpysize) ? cpysize : size;
			newptr = malloc_unlocked(size, 0);
			if (newptr == NULL)
				return (NULL);
			(void) memcpy(newptr, ptr, cpysize);
			free_unlocked(ptr);
		}
	}
	return (newptr);
}
8380Sstevel@tonic-gate
8390Sstevel@tonic-gate
/*
 * calloc - allocate and clear memory block
 *
 * Returns num * size bytes of zeroed memory, or NULL if the
 * multiplication would overflow size_t or the allocation fails.
 */

void *
calloc(size_t num, size_t size)
{
	size_t total;
	char *mp;

	/*
	 * Guard against overflow in num * size; the old code let the
	 * product wrap and could hand back a short block for a huge
	 * request.  (size_t)-1 is SIZE_MAX without needing <stdint.h>.
	 */
	if (size != 0 && num > (size_t)-1 / size)
		return (NULL);
	total = num * size;
	mp = malloc(total);
	if (mp == NULL)
		return (NULL);
	(void) memset(mp, 0, total);
	return (mp);
}
8560Sstevel@tonic-gate
8570Sstevel@tonic-gate
8580Sstevel@tonic-gate /*
8590Sstevel@tonic-gate * Mallopt - set options for allocation
8600Sstevel@tonic-gate *
8610Sstevel@tonic-gate * Mallopt provides for control over the allocation algorithm.
8620Sstevel@tonic-gate * The cmds available are:
8630Sstevel@tonic-gate *
8640Sstevel@tonic-gate * M_MXFAST Set maxfast to value. Maxfast is the size of the
8650Sstevel@tonic-gate * largest small, quickly allocated block. Maxfast
8660Sstevel@tonic-gate * may be set to 0 to disable fast allocation entirely.
8670Sstevel@tonic-gate *
8680Sstevel@tonic-gate * M_NLBLKS Set numlblks to value. Numlblks is the number of
8690Sstevel@tonic-gate * small blocks per holding block. Value must be
8700Sstevel@tonic-gate * greater than 0.
8710Sstevel@tonic-gate *
8720Sstevel@tonic-gate * M_GRAIN Set grain to value. The sizes of all blocks
8730Sstevel@tonic-gate * smaller than maxfast are considered to be rounded
8740Sstevel@tonic-gate * up to the nearest multiple of grain. The default
8750Sstevel@tonic-gate * value of grain is the smallest number of bytes
8760Sstevel@tonic-gate * which will allow alignment of any data type. Grain
8770Sstevel@tonic-gate * will be rounded up to a multiple of its default,
8780Sstevel@tonic-gate * and maxsize will be rounded up to a multiple of
8790Sstevel@tonic-gate * grain. Value must be greater than 0.
8800Sstevel@tonic-gate *
8810Sstevel@tonic-gate * M_KEEP Retain data in freed block until the next malloc,
8820Sstevel@tonic-gate * realloc, or calloc. Value is ignored.
8830Sstevel@tonic-gate * This option is provided only for compatibility with
8840Sstevel@tonic-gate * the old version of malloc, and is not recommended.
8850Sstevel@tonic-gate *
8860Sstevel@tonic-gate * returns - 0, upon successful completion
8870Sstevel@tonic-gate * 1, if malloc has previously been called or
8880Sstevel@tonic-gate * if value or cmd have illegal values
8890Sstevel@tonic-gate */
8900Sstevel@tonic-gate
8910Sstevel@tonic-gate int
mallopt(int cmd,int value)8922658Sraf mallopt(int cmd, int value)
8930Sstevel@tonic-gate {
8940Sstevel@tonic-gate /* disallow changes once a small block is allocated */
8950Sstevel@tonic-gate (void) mutex_lock(&mlock);
8960Sstevel@tonic-gate if (change) {
8970Sstevel@tonic-gate (void) mutex_unlock(&mlock);
8980Sstevel@tonic-gate return (1);
8990Sstevel@tonic-gate }
9000Sstevel@tonic-gate switch (cmd) {
9010Sstevel@tonic-gate case M_MXFAST:
9020Sstevel@tonic-gate if (value < 0) {
9030Sstevel@tonic-gate (void) mutex_unlock(&mlock);
9040Sstevel@tonic-gate return (1);
9050Sstevel@tonic-gate }
9060Sstevel@tonic-gate fastct = (value + grain - 1) / grain;
9070Sstevel@tonic-gate maxfast = grain*fastct;
9080Sstevel@tonic-gate break;
9090Sstevel@tonic-gate case M_NLBLKS:
9100Sstevel@tonic-gate if (value <= 1) {
9110Sstevel@tonic-gate (void) mutex_unlock(&mlock);
9120Sstevel@tonic-gate return (1);
9130Sstevel@tonic-gate }
9140Sstevel@tonic-gate numlblks = value;
9150Sstevel@tonic-gate break;
9160Sstevel@tonic-gate case M_GRAIN:
9170Sstevel@tonic-gate if (value <= 0) {
9180Sstevel@tonic-gate (void) mutex_unlock(&mlock);
9190Sstevel@tonic-gate return (1);
9200Sstevel@tonic-gate }
9210Sstevel@tonic-gate
9220Sstevel@tonic-gate /* round grain up to a multiple of ALIGNSZ */
9230Sstevel@tonic-gate grain = (value + ALIGNSZ - 1)/ALIGNSZ*ALIGNSZ;
9240Sstevel@tonic-gate
9250Sstevel@tonic-gate /* reduce fastct appropriately */
9260Sstevel@tonic-gate fastct = (maxfast + grain - 1) / grain;
9270Sstevel@tonic-gate maxfast = grain * fastct;
9280Sstevel@tonic-gate break;
9290Sstevel@tonic-gate case M_KEEP:
9300Sstevel@tonic-gate if (change && holdhead != NULL) {
931*6812Sraf (void) mutex_unlock(&mlock);
9320Sstevel@tonic-gate return (1);
9330Sstevel@tonic-gate }
9340Sstevel@tonic-gate minhead = HEADSZ;
9350Sstevel@tonic-gate break;
9360Sstevel@tonic-gate default:
9370Sstevel@tonic-gate (void) mutex_unlock(&mlock);
9380Sstevel@tonic-gate return (1);
9390Sstevel@tonic-gate }
9400Sstevel@tonic-gate (void) mutex_unlock(&mlock);
9410Sstevel@tonic-gate return (0);
9420Sstevel@tonic-gate }
9430Sstevel@tonic-gate
/*
 * mallinfo - provide information about space usage
 *
 * input - none
 *
 * output - a structure containing a description of
 * space usage, defined in malloc.h
 */

struct mallinfo
mallinfo(void)
{
	struct header *blk, *next;	/* ptr to ordinary blocks */
	struct holdblk *hblk;	/* ptr to holding blocks */
	struct mallinfo inf;	/* return value */
	int i;	/* the ubiquitous counter */
	ssize_t size;	/* size of a block */
	ssize_t fsp;	/* free space in 1 hold block */

	(void) mutex_lock(&mlock);
	(void) memset(&inf, 0, sizeof (struct mallinfo));
	if (freeptr[0].nextfree == GROUND) {
		/* nothing has ever been allocated; report all zeros */
		(void) mutex_unlock(&mlock);
		return (inf);
	}
	blk = CLRBUSY(arena[1].nextblk);
	/* return total space used */
	inf.arena = (char *)arenaend - (char *)blk;

	/*
	 * loop through arena, counting # of blocks, and
	 * space used by blocks
	 */
	next = CLRBUSY(blk->nextblk);
	while (next != &(arena[1])) {
		inf.ordblks++;
		size = (char *)next - (char *)blk;
		if (TESTBUSY(blk->nextblk)) {
			inf.uordblks += size;
			inf.keepcost += HEADSZ-MINHEAD;
		} else {
			inf.fordblks += size;
		}
		blk = next;
		next = CLRBUSY(blk->nextblk);
	}

	/*
	 * if any holding blocks have been allocated
	 * then examine space in holding blks
	 */
	if (change && holdhead != NULL) {
		for (i = fastct; i > 0; i--) { /* loop thru ea. chain */
			hblk = holdhead[i];
			/* do only if chain not empty */
			if (hblk != HGROUND) {
				/* small-block size incl. its header word */
				size = hblk->blksz +
				    sizeof (struct lblk) - sizeof (int);
				do { /* loop thru 1 hold blk chain */
					inf.hblks++;
					fsp = freespace(hblk);
					inf.fsmblks += fsp;
					inf.usmblks += numlblks*size - fsp;
					inf.smblks += numlblks;
					hblk = hblk->nexthblk;
				} while (hblk != holdhead[i]);
			}
		}
	}
	inf.hblkhd = (inf.smblks / numlblks) * sizeof (struct holdblk);
	/* holding blocks were counted in ordblks, so subtract off */
	inf.ordblks -= inf.hblks;
	inf.uordblks -= inf.hblkhd + inf.usmblks + inf.fsmblks;
	inf.keepcost -= inf.hblks*(HEADSZ - MINHEAD);
	(void) mutex_unlock(&mlock);
	return (inf);
}
10220Sstevel@tonic-gate
10230Sstevel@tonic-gate
10240Sstevel@tonic-gate /*
10250Sstevel@tonic-gate * freespace - calc. how much space is used in the free
10260Sstevel@tonic-gate * small blocks in a given holding block
10270Sstevel@tonic-gate *
10280Sstevel@tonic-gate * input - hblk = given holding block
10290Sstevel@tonic-gate *
10300Sstevel@tonic-gate * returns space used in free small blocks of hblk
10310Sstevel@tonic-gate */
10320Sstevel@tonic-gate
10330Sstevel@tonic-gate static ssize_t
freespace(struct holdblk * holdblk)10340Sstevel@tonic-gate freespace(struct holdblk *holdblk)
10350Sstevel@tonic-gate {
10360Sstevel@tonic-gate struct lblk *lblk;
10370Sstevel@tonic-gate ssize_t space = 0;
10380Sstevel@tonic-gate ssize_t size;
10390Sstevel@tonic-gate struct lblk *unused;
10400Sstevel@tonic-gate
10410Sstevel@tonic-gate lblk = CLRSMAL(holdblk->lfreeq);
10420Sstevel@tonic-gate size = holdblk->blksz + sizeof (struct lblk) - sizeof (int);
10430Sstevel@tonic-gate unused = CLRSMAL(holdblk->unused);
10440Sstevel@tonic-gate /* follow free chain */
10450Sstevel@tonic-gate while ((lblk != LGROUND) && (lblk != unused)) {
10460Sstevel@tonic-gate space += size;
10470Sstevel@tonic-gate lblk = CLRSMAL(lblk->header.nextfree);
10480Sstevel@tonic-gate }
10490Sstevel@tonic-gate space += ((char *)holdblk + HOLDSZ(size)) - (char *)unused;
10500Sstevel@tonic-gate return (space);
10510Sstevel@tonic-gate }
10520Sstevel@tonic-gate
/*
 * morecore - grow the heap by "bytes" via sbrk()
 *
 * returns the old break on success, (void *)-1 on failure,
 * mirroring sbrk()'s own convention.
 */
static void *
morecore(size_t bytes)
{
	void * ret;

	if (bytes > LONG_MAX) {
		intptr_t wad;
		/*
		 * The request size is too big. We need to do this in
		 * chunks. Sbrk only takes an int for an arg.
		 */
		if (bytes == ULONG_MAX)
			return ((void *)-1);

		ret = sbrk(0);
		wad = LONG_MAX;
		while (wad > 0) {
			if (sbrk(wad) == (void *)-1) {
				/*
				 * NOTE(review): on a mid-sequence failure
				 * this appears to roll back only the most
				 * recent LONG_MAX chunk, not all of them.
				 */
				if (ret != sbrk(0))
					(void) sbrk(-LONG_MAX);
				return ((void *)-1);
			}
			bytes -= LONG_MAX;
			wad = bytes;
		}
	} else
		ret = sbrk(bytes);

	return (ret);
}
10830Sstevel@tonic-gate
10840Sstevel@tonic-gate #ifdef debug
10850Sstevel@tonic-gate int
check_arena(void)10860Sstevel@tonic-gate check_arena(void)
10870Sstevel@tonic-gate {
10880Sstevel@tonic-gate struct header *blk, *prev, *next; /* ptr to ordinary blocks */
10890Sstevel@tonic-gate
10900Sstevel@tonic-gate (void) mutex_lock(&mlock);
10910Sstevel@tonic-gate if (freeptr[0].nextfree == GROUND) {
10920Sstevel@tonic-gate (void) mutex_unlock(&mlock);
10930Sstevel@tonic-gate return (-1);
10940Sstevel@tonic-gate }
10950Sstevel@tonic-gate blk = arena + 1;
10960Sstevel@tonic-gate
10970Sstevel@tonic-gate /* loop through arena, checking */
10980Sstevel@tonic-gate blk = (struct header *)CLRALL(blk->nextblk);
10990Sstevel@tonic-gate next = (struct header *)CLRALL(blk->nextblk);
11000Sstevel@tonic-gate while (next != arena + 1) {
11010Sstevel@tonic-gate assert(blk >= arena + 1);
11020Sstevel@tonic-gate assert(blk <= lastblk);
11030Sstevel@tonic-gate assert(next >= blk + 1);
11040Sstevel@tonic-gate assert(((uintptr_t)((struct header *)blk->nextblk) &
11050Sstevel@tonic-gate (4 | SMAL)) == 0);
11060Sstevel@tonic-gate
11070Sstevel@tonic-gate if (TESTBUSY(blk->nextblk) == 0) {
11080Sstevel@tonic-gate assert(blk->nextfree >= freeptr);
11090Sstevel@tonic-gate assert(blk->prevfree >= freeptr);
11100Sstevel@tonic-gate assert(blk->nextfree <= lastblk);
11110Sstevel@tonic-gate assert(blk->prevfree <= lastblk);
11120Sstevel@tonic-gate assert(((uintptr_t)((struct header *)blk->nextfree) &
11130Sstevel@tonic-gate 7) == 0);
11140Sstevel@tonic-gate assert(((uintptr_t)((struct header *)blk->prevfree) &
11150Sstevel@tonic-gate 7) == 0 || blk->prevfree == freeptr);
11160Sstevel@tonic-gate }
11170Sstevel@tonic-gate blk = next;
11180Sstevel@tonic-gate next = CLRBUSY(blk->nextblk);
11190Sstevel@tonic-gate }
11200Sstevel@tonic-gate (void) mutex_unlock(&mlock);
11210Sstevel@tonic-gate return (0);
11220Sstevel@tonic-gate }
11230Sstevel@tonic-gate
11240Sstevel@tonic-gate #define RSTALLOC 1
11250Sstevel@tonic-gate #endif
11260Sstevel@tonic-gate
11270Sstevel@tonic-gate #ifdef RSTALLOC
11280Sstevel@tonic-gate /*
11290Sstevel@tonic-gate * rstalloc - reset alloc routines
11300Sstevel@tonic-gate *
11310Sstevel@tonic-gate * description - return allocated memory and reset
11320Sstevel@tonic-gate * allocation pointers.
11330Sstevel@tonic-gate *
11340Sstevel@tonic-gate * Warning - This is for debugging purposes only.
11350Sstevel@tonic-gate * It will return all memory allocated after
11360Sstevel@tonic-gate * the first call to malloc, even if some
11370Sstevel@tonic-gate * of it was fetched by a user's sbrk().
11380Sstevel@tonic-gate */
11390Sstevel@tonic-gate
11400Sstevel@tonic-gate void
rstalloc(void)11410Sstevel@tonic-gate rstalloc(void)
11420Sstevel@tonic-gate {
11430Sstevel@tonic-gate (void) mutex_lock(&mlock);
11440Sstevel@tonic-gate minhead = MINHEAD;
11450Sstevel@tonic-gate grain = ALIGNSZ;
11460Sstevel@tonic-gate numlblks = NUMLBLKS;
11470Sstevel@tonic-gate fastct = FASTCT;
11480Sstevel@tonic-gate maxfast = MAXFAST;
11490Sstevel@tonic-gate change = 0;
11500Sstevel@tonic-gate if (freeptr[0].nextfree == GROUND) {
11510Sstevel@tonic-gate (void) mutex_unlock(&mlock);
11520Sstevel@tonic-gate return;
11530Sstevel@tonic-gate }
11540Sstevel@tonic-gate brk(CLRBUSY(arena[1].nextblk));
11550Sstevel@tonic-gate freeptr[0].nextfree = GROUND;
11560Sstevel@tonic-gate #ifdef debug
11570Sstevel@tonic-gate case1count = 0;
11580Sstevel@tonic-gate #endif
11590Sstevel@tonic-gate (void) mutex_unlock(&mlock);
11600Sstevel@tonic-gate }
11610Sstevel@tonic-gate #endif /* RSTALLOC */
11620Sstevel@tonic-gate
/*
 * cfree is an undocumented, obsolete companion to calloc; the element
 * count and size arguments are accepted for compatibility and ignored.
 */

/* ARGSUSED1 */
void
cfree(void *p, size_t num, size_t size)
{
	free(p);
}
11733866Sraf
11743866Sraf static void
malloc_prepare()11753866Sraf malloc_prepare()
11763866Sraf {
11773866Sraf (void) mutex_lock(&mlock);
11783866Sraf }
11793866Sraf
11803866Sraf static void
malloc_release()11813866Sraf malloc_release()
11823866Sraf {
11833866Sraf (void) mutex_unlock(&mlock);
11843866Sraf }
11853866Sraf
#pragma init(malloc_init)
/* library-load initializer: register fork handlers around mlock */
static void
malloc_init(void)
{
	(void) pthread_atfork(malloc_prepare, malloc_release, malloc_release);
}
1192