/***********************************************************************
*                                                                      *
*               This software is part of the ast package               *
*          Copyright (c) 1985-2010 AT&T Intellectual Property          *
*                      and is licensed under the                       *
*                  Common Public License, Version 1.0                  *
*                    by AT&T Intellectual Property                     *
*                                                                      *
*                A copy of the License is available at                 *
*            http://www.opensource.org/licenses/cpl1.0.txt             *
*         (with md5 checksum 059e8cd6165cb4c31e351f2b69388fd9)         *
*                                                                      *
*              Information and Software Systems Research               *
*                            AT&T Research                             *
*                           Florham Park NJ                            *
*                                                                      *
*                 Glenn Fowler <gsf@research.att.com>                  *
*                  David Korn <dgk@research.att.com>                   *
*                   Phong Vo <kpv@research.att.com>                    *
*                                                                      *
***********************************************************************/
#if defined(_UWIN) && defined(_BLD_ast)

void _STUB_vmbest(){}

#else

#include	"vmhdr.h"

/*	Best-fit allocation method. This is based on a best-fit strategy
**	using a splay tree for storage of lists of free blocks of the same
**	size. Recent free blocks may be cached for fast reuse.
**
**	Written by Kiem-Phong Vo, kpv@research.att.com, 01/16/94.
*/
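
/*	A minimal usage sketch (illustrative only, assuming the public
**	vmalloc API declared in <vmalloc.h>): open a region using this
**	method, allocate from it, then dispose of everything at once.
**
**		#include <vmalloc.h>
**
**		Vmalloc_t* vm = vmopen(Vmdcsbrk, Vmbest, 0);
**		if(vm)
**		{	char* p = (char*)vmalloc(vm, 100);
**			...
**			vmfree(vm, p);	// or let vmclose() reclaim it
**			vmclose(vm);	// releases all region memory
**		}
*/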

#ifdef DEBUG
static int	N_free;		/* # of free calls			*/
static int	N_alloc;	/* # of alloc calls			*/
static int	N_resize;	/* # of resize calls			*/
static int	N_wild;		/* # allocated from the wild block	*/
static int	N_last;		/* # allocated from last free block	*/
static int	N_reclaim;	/* # of bestreclaim calls		*/

#undef	VM_TRUST		/* always check for locking, etc.	*/
#define	VM_TRUST	0
#endif /*DEBUG*/

#if _BLD_posix
#define logmsg(d,a ...)	logsrc(d,__FILE__,__LINE__,a)

extern int	logsrc(int, const char*, int, const char*, ...);
#endif /*_BLD_posix*/

#define COMPACT		8	/* factor to decide when to compact	*/

/* Check to see if a block is in the free tree */
#if __STD_C
static int vmintree(Block_t* node, Block_t* b)
#else
static int vmintree(node,b)
Block_t*	node;
Block_t*	b;
#endif
{	Block_t*	t;

	for(t = node; t; t = LINK(t))
		if(t == b)
			return 1;
	if(LEFT(node) && vmintree(LEFT(node),b))
		return 1;
	if(RIGHT(node) && vmintree(RIGHT(node),b))
		return 1;
	return 0;
}

#if __STD_C
static int vmonlist(Block_t* list, Block_t* b)
#else
static int vmonlist(list,b)
Block_t*	list;
Block_t*	b;
#endif
{
	for(; list; list = LINK(list))
		if(list == b)
			return 1;
	return 0;
}

/* Check to see if a block is known to be free */
#if __STD_C
static int vmisfree(Vmdata_t* vd, Block_t* b)
#else
static int vmisfree(vd,b)
Vmdata_t*	vd;
Block_t*	b;
#endif
{
	if(SIZE(b) & (BUSY|JUNK|PFREE))
		return 0;

	if(b == vd->wild)
		return 1;

	if(SIZE(b) < MAXTINY)
		return vmonlist(TINY(vd)[INDEX(SIZE(b))], b);

	if(vd->root)
		return vmintree(vd->root, b);

	return 0;
}

/* Check to see if a block is known to be junked */
#if __STD_C
static int vmisjunk(Vmdata_t* vd, Block_t* b)
#else
static int vmisjunk(vd,b)
Vmdata_t*	vd;
Block_t*	b;
#endif
{
	Block_t*	t;

	if((SIZE(b)&BUSY) == 0 || (SIZE(b)&JUNK) == 0)
		return 0;

	if(b == vd->free) /* recently freed */
		return 1;

	/* check the list that b is supposed to be in */
	for(t = CACHE(vd)[C_INDEX(SIZE(b))]; t; t = LINK(t))
		if(t == b)
			return 1;

	/* on occasion, b may be put onto the catch-all list */
	if(C_INDEX(SIZE(b)) < S_CACHE)
		for(t = CACHE(vd)[S_CACHE]; t; t = LINK(t))
			if(t == b)
				return 1;

	return 0;
}

/* check to see if the free tree is in good shape */
#if __STD_C
static int vmchktree(Block_t* node)
#else
static int vmchktree(node)
Block_t*	node;
#endif
{	Block_t*	t;

	if(SIZE(node) & BITS)
		{ /**/ASSERT(0); return -1; }

	for(t = LINK(node); t; t = LINK(t))
		if(SIZE(t) != SIZE(node))
			{ /**/ASSERT(0); return -1; }

	if((t = LEFT(node)) )
	{	if(SIZE(t) >= SIZE(node) )
			{ /**/ASSERT(0); return -1; }
		else	return vmchktree(t);
	}
	if((t = RIGHT(node)) )
	{	if(SIZE(t) <= SIZE(node) )
			{ /**/ASSERT(0); return -1; }
		else	return vmchktree(t);
	}

	return 0;
}

#if __STD_C
int _vmbestcheck(Vmdata_t* vd, Block_t* freeb)
#else
int _vmbestcheck(vd, freeb)
Vmdata_t*	vd;
Block_t*	freeb; /* known to be free but not on any free list */
#endif
{
	reg Seg_t	*seg;
	reg Block_t	*b, *endb, *nextb;
	int		rv = 0;

	if(!CHECK())
		return 0;

	/* make sure the free tree is still in shape */
	if(vd->root && vmchktree(vd->root) < 0 )
		{ rv = -1; /**/ASSERT(0); }

	for(seg = vd->seg; seg && rv == 0; seg = seg->next)
	{	b = SEGBLOCK(seg);
		endb = (Block_t*)(seg->baddr - sizeof(Head_t));
		for(; b < endb && rv == 0; b = nextb)
		{	nextb = (Block_t*)((Vmuchar_t*)DATA(b) + (SIZE(b)&~BITS) );

			if(!ISBUSY(SIZE(b)) ) /* a completely free block */
			{	/* there should be no marked bits of any type */
				if(SIZE(b) & (BUSY|JUNK|PFREE) )
					{ rv = -1; /**/ASSERT(0); }

				/* next block must be busy and marked PFREE */
				if(!ISBUSY(SIZE(nextb)) || !ISPFREE(SIZE(nextb)) )
					{ rv = -1; /**/ASSERT(0); }

				/* must have a self-reference pointer */
				if(*SELF(b) != b)
					{ rv = -1; /**/ASSERT(0); }

				/* segment pointer should be well-defined */
				if(!TINIEST(b) && SEG(b) != seg)
					{ rv = -1; /**/ASSERT(0); }

				/* must be on a free list */
				if(b != freeb && !vmisfree(vd, b) )
					{ rv = -1; /**/ASSERT(0); }
			}
			else
			{	/* segment pointer should be well-defined */
				if(SEG(b) != seg)
					{ rv = -1; /**/ASSERT(0); }

				/* next block should not be marked PFREE */
				if(ISPFREE(SIZE(nextb)) )
					{ rv = -1; /**/ASSERT(0); }

				/* if PFREE, last block should be free */
				if(ISPFREE(SIZE(b)) && LAST(b) != freeb &&
				   !vmisfree(vd, LAST(b)) )
					{ rv = -1; /**/ASSERT(0); }

				/* if free but unreclaimed, should be junk */
				if(ISJUNK(SIZE(b)) && !vmisjunk(vd, b))
					{ rv = -1; /**/ASSERT(0); }
			}
		}
	}

	return rv;
}

/* Tree rotation functions */
#define RROTATE(x,y)	(LEFT(x) = RIGHT(y), RIGHT(y) = (x), (x) = (y))
#define LROTATE(x,y)	(RIGHT(x) = LEFT(y), LEFT(y) = (x), (x) = (y))
#define RLINK(s,x)	((s) = LEFT(s) = (x))
#define LLINK(s,x)	((s) = RIGHT(s) = (x))

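/*	Sketch of RROTATE (a right rotation about x, with y = LEFT(x)):
**
**	        x                  y
**	       / \                / \
**	      y   C     ==>      A   x
**	     / \                    / \
**	    A   B                  B   C
**
**	LROTATE is the mirror image. RLINK/LLINK hang the current root
**	off the growing right/left subtree during the top-down splay
**	search in bestsearch() below.
*/
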
/* Find and delete a suitable element in the free tree. */
#if __STD_C
static Block_t* bestsearch(Vmdata_t* vd, reg size_t size, Block_t* wanted)
#else
static Block_t* bestsearch(vd, size, wanted)
Vmdata_t*	vd;
reg size_t	size;
Block_t*	wanted;
#endif
{
	reg size_t	s;
	reg Block_t	*t, *root, *l, *r;
	Block_t		link;

	/* extracting a tiniest block from its list */
	if((root = wanted) && size == TINYSIZE)
	{	reg Seg_t*	seg;

		l = TLEFT(root);
		if((r = LINK(root)) )
			TLEFT(r) = l;
		if(l)
			LINK(l) = r;
		else	TINY(vd)[0] = r;

		seg = vd->seg;
		if(!seg->next)
			SEG(root) = seg;
		else for(;; seg = seg->next)
		{	if((Vmuchar_t*)root > (Vmuchar_t*)seg->addr &&
			   (Vmuchar_t*)root < seg->baddr)
			{	SEG(root) = seg;
				break;
			}
		}

		return root;
	}

	/**/ASSERT(!vd->root || vmchktree(vd->root) == 0);

	/* find the right one to delete */
	l = r = &link;
	if((root = vd->root) ) do
	{	/**/ ASSERT(!ISBITS(size) && !ISBITS(SIZE(root)));
		if(size == (s = SIZE(root)) )
			break;
		if(size < s)
		{	if((t = LEFT(root)) )
			{	if(size <= (s = SIZE(t)) )
				{	RROTATE(root,t);
					if(size == s)
						break;
					t = LEFT(root);
				}
				else
				{	LLINK(l,t);
					t = RIGHT(t);
				}
			}
			RLINK(r,root);
		}
		else
		{	if((t = RIGHT(root)) )
			{	if(size >= (s = SIZE(t)) )
				{	LROTATE(root,t);
					if(size == s)
						break;
					t = RIGHT(root);
				}
				else
				{	RLINK(r,t);
					t = LEFT(t);
				}
			}
			LLINK(l,root);
		}
		/**/ ASSERT(root != t);
	} while((root = t) );

	if(root)	/* found it, now isolate it */
	{	RIGHT(l) = LEFT(root);
		LEFT(r) = RIGHT(root);
	}
	else		/* nothing exactly fit	*/
	{	LEFT(r) = NIL(Block_t*);
		RIGHT(l) = NIL(Block_t*);

		/* grab the least one from the right tree */
		if((root = LEFT(&link)) )
		{	while((t = LEFT(root)) )
				RROTATE(root,t);
			LEFT(&link) = RIGHT(root);
		}
	}

	if(root && (r = LINK(root)) )
	{	/* head of a linked list; use the next one for the root */
		LEFT(r) = RIGHT(&link);
		RIGHT(r) = LEFT(&link);
	}
	else if(!(r = LEFT(&link)) )
		r = RIGHT(&link);
	else /* graft left tree to right tree */
	{	while((t = LEFT(r)) )
			RROTATE(r,t);
		LEFT(r) = RIGHT(&link);
	}
	vd->root = r; /**/ASSERT(!r || !ISBITS(SIZE(r)));

	/**/ASSERT(!vd->root || vmchktree(vd->root) == 0);
	/**/ASSERT(!wanted || wanted == root);

	return root;
}

/* Reclaim all delayed free blocks into the free tree */
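/* Freed blocks are first parked, marked JUNK, on the CACHE lists:
** CACHE(vd)[0..S_CACHE-1] hold small blocks grouped by size index
** (see bestfree() below) and CACHE(vd)[S_CACHE] is the catch-all
** list. bestreclaim() drains the lists from index S_CACHE down to c,
** coalescing each block with its free neighbors before inserting it
** into the tiny lists, the free tree, or the wild block.
*/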
#if __STD_C
static int bestreclaim(reg Vmdata_t* vd, Block_t* wanted, int c)
#else
static int bestreclaim(vd, wanted, c)
reg Vmdata_t*	vd;
Block_t*	wanted;
int		c;
#endif
{
	reg size_t	size, s;
	reg Block_t	*fp, *np, *t, *list;
	reg int		n, saw_wanted;
	reg Seg_t	*seg;

	/**/COUNT(N_reclaim);
	/**/ASSERT(_vmbestcheck(vd, NIL(Block_t*)) == 0);

	if((fp = vd->free) )
	{	LINK(fp) = CACHE(vd)[S_CACHE]; CACHE(vd)[S_CACHE] = fp;
		vd->free = NIL(Block_t*);
	}

	saw_wanted = wanted ? 0 : 1;
	for(n = S_CACHE; n >= c; --n)
	{	list = CACHE(vd)[n]; CACHE(vd)[n] = NIL(Block_t*);
		while((fp = list) )
		{	/* Note that below here we allow ISJUNK blocks to be
			** forward-merged even though they are not removed from
			** the list immediately. In this way, the list is
			** scanned only once. It works because the LINK and SIZE
			** fields are not destroyed during the merging. This can
			** be seen by observing that a tiniest block has a 2-word
			** header and a 2-word body. Merging a tiniest block
			** (1seg) and the next block (2seg) looks like this:
			**	1seg  size  link  left  2seg size link left ....
			**	1seg  size  link  left  rite xxxx xxxx .... self
			** After the merge, the 2seg word is replaced by the RIGHT
			** pointer of the new block and somewhere beyond the
			** two xxxx fields, the SELF pointer will replace some
			** other word. The important part is that the two xxxx
			** fields are kept intact.
			*/
			list = LINK(list); /**/ASSERT(!vmonlist(list,fp));

			size = SIZE(fp);
			if(!ISJUNK(size))	/* already done */
				continue;

			if(_Vmassert & VM_region)
			{	/* see if this address is from region */
				for(seg = vd->seg; seg; seg = seg->next)
					if(fp >= SEGBLOCK(seg) && fp < (Block_t*)seg->baddr )
						break;
				if(!seg) /* must be a bug in application code! */
				{	/**/ ASSERT(seg != NIL(Seg_t*));
					continue;
				}
			}

			if(ISPFREE(size))	/* backward merge */
			{	fp = LAST(fp);
#if _BLD_posix
				if (fp < (Block_t*)0x00120000)
				{
					logmsg(0, "bestreclaim fp=%p", fp);
					ASSERT(!fp);
				}
#endif
				s = SIZE(fp); /**/ASSERT(!(s&BITS));
				REMOVE(vd,fp,INDEX(s),t,bestsearch);
				size = (size&~BITS) + s + sizeof(Head_t);
			}
			else	size &= ~BITS;

			for(;;)	/* forward merge */
			{	np = (Block_t*)((Vmuchar_t*)fp+size+sizeof(Head_t));
#if _BLD_posix
				if (np < (Block_t*)0x00120000)
				{
					logmsg(0, "bestreclaim np=%p", np);
					ASSERT(!np);
				}
#endif
				s = SIZE(np);	/**/ASSERT(s > 0);
				if(!ISBUSY(s))
				{	/**/ASSERT((s&BITS) == 0);
					if(np == vd->wild)
						vd->wild = NIL(Block_t*);
					else	REMOVE(vd,np,INDEX(s),t,bestsearch);
				}
				else if(ISJUNK(s))
				{	/* reclaim any touched junk list */
					if((int)C_INDEX(s) < c)
						c = C_INDEX(s);
					SIZE(np) = 0;
					CLRBITS(s);
				}
				else	break;
				size += s + sizeof(Head_t);
			}
			SIZE(fp) = size;

			/* tell next block that this one is free */
			np = NEXT(fp);	/**/ASSERT(ISBUSY(SIZE(np)));
					/**/ASSERT(!ISJUNK(SIZE(np)));
			SETPFREE(SIZE(np));
			*(SELF(fp)) = fp;

			if(fp == wanted) /* to be consumed soon */
			{	/**/ASSERT(!saw_wanted); /* should be seen just once */
				saw_wanted = 1;
				continue;
			}

			/* wilderness preservation */
			if(np->body.data >= vd->seg->baddr)
			{	vd->wild = fp;
				continue;
			}

			/* tiny block goes to tiny list */
			if(size < MAXTINY)
			{	s = INDEX(size);
				np = LINK(fp) = TINY(vd)[s];
				if(s == 0)	/* TINIEST block */
				{	if(np)
						TLEFT(np) = fp;
					TLEFT(fp) = NIL(Block_t*);
				}
				else
				{	if(np)
						LEFT(np)  = fp;
					LEFT(fp) = NIL(Block_t*);
					SETLINK(fp);
				}
				TINY(vd)[s] = fp;
				continue;
			}

			LEFT(fp) = RIGHT(fp) = LINK(fp) = NIL(Block_t*);
			if(!(np = vd->root) )	/* inserting into an empty tree	*/
			{	vd->root = fp;
				continue;
			}

			size = SIZE(fp);
			while(1)	/* leaf insertion */
			{	/**/ASSERT(np != fp);
				if((s = SIZE(np)) > size)
				{	if((t = LEFT(np)) )
					{	/**/ ASSERT(np != t);
						np = t;
					}
					else
					{	LEFT(np) = fp;
						break;
					}
				}
				else if(s < size)
				{	if((t = RIGHT(np)) )
					{	/**/ ASSERT(np != t);
						np = t;
					}
					else
					{	RIGHT(np) = fp;
						break;
					}
				}
				else /* s == size */
				{	if((t = LINK(np)) )
					{	LINK(fp) = t;
						LEFT(t) = fp;
					}
					LINK(np) = fp;
					LEFT(fp) = np;
					SETLINK(fp);
					break;
				}
			}
		}
	}

	/**/ASSERT(!wanted || saw_wanted == 1);
	/**/ASSERT(_vmbestcheck(vd, wanted) == 0);
	return saw_wanted;
}

#if __STD_C
static int bestcompact(Vmalloc_t* vm)
#else
static int bestcompact(vm)
Vmalloc_t*	vm;
#endif
{
	reg Seg_t	*seg, *next;
	reg Block_t	*bp, *t;
	reg size_t	size, segsize, round;
	reg int		local, inuse;
	reg Vmdata_t*	vd = vm->data;

	SETINUSE(vd, inuse);

	if(!(local = vd->mode&VM_TRUST) )
	{	GETLOCAL(vd,local);
		if(ISLOCK(vd,local))
		{	CLRINUSE(vd, inuse);
			return -1;
		}
		SETLOCK(vd,local);
	}

	bestreclaim(vd,NIL(Block_t*),0);

	for(seg = vd->seg; seg; seg = next)
	{	next = seg->next;

		bp = BLOCK(seg->baddr);
		if(!ISPFREE(SIZE(bp)) )
			continue;

		bp = LAST(bp);	/**/ASSERT(vmisfree(vd,bp));
		size = SIZE(bp);
		if(bp == vd->wild)
		{	/* During large block allocations, _Vmextend might
			** have enlarged the rounding factor. Reducing it a
			** bit helps avoid grabbing too much raw memory.
			*/
			if((round = vm->disc->round) == 0)
				round = _Vmpagesize;
			if(size > COMPACT*vd->incr && vd->incr > round)
				vd->incr /= 2;

			/* for the bottom segment, we don't necessarily want
			** to return raw memory too early. vd->pool has an
			** approximation of the average size of recently freed
			** blocks. If this is large, the application is managing
			** large blocks so we throttle back memory chopping
			** to avoid thrashing the underlying memory system.
			*/
			if(size <= COMPACT*vd->incr || size <= COMPACT*vd->pool)
				continue;

			vd->wild = NIL(Block_t*);
			vd->pool = 0;
		}
		else	REMOVE(vd,bp,INDEX(size),t,bestsearch);
		CLRPFREE(SIZE(NEXT(bp)));

		if(size < (segsize = seg->size))
			size += sizeof(Head_t);

		if((size = (*_Vmtruncate)(vm,seg,size,0)) > 0)
		{	if(size >= segsize) /* entire segment deleted */
				continue;
			/**/ASSERT(SEG(BLOCK(seg->baddr)) == seg);

			if((size = (seg->baddr - ((Vmuchar_t*)bp) - sizeof(Head_t))) > 0)
				SIZE(bp) = size - sizeof(Head_t);
			else	bp = NIL(Block_t*);
		}

		if(bp)
		{	/**/ ASSERT(SIZE(bp) >= BODYSIZE);
			/**/ ASSERT(SEGWILD(bp));
			/**/ ASSERT(!vd->root || !vmintree(vd->root,bp));
			SIZE(bp) |= BUSY|JUNK;
			LINK(bp) = CACHE(vd)[C_INDEX(SIZE(bp))];
			CACHE(vd)[C_INDEX(SIZE(bp))] = bp;
		}
	}

	if(!local && _Vmtrace && (vd->mode&VM_TRACE) && VMETHOD(vd) == VM_MTBEST)
		(*_Vmtrace)(vm, (Vmuchar_t*)0, (Vmuchar_t*)0, 0, 0);

	CLRLOCK(vd,local); /**/ASSERT(_vmbestcheck(vd, NIL(Block_t*)) == 0);

	CLRINUSE(vd, inuse);
	return 0;
}

#if __STD_C
static Void_t* bestalloc(Vmalloc_t* vm, reg size_t size )
#else
static Void_t* bestalloc(vm,size)
Vmalloc_t*	vm;	/* region allocating from	*/
reg size_t	size;	/* desired block size		*/
#endif
{
	reg Vmdata_t*	vd = vm->data;
	reg size_t	s;
	reg int		n;
	reg Block_t	*tp, *np;
	reg int		local, inuse;
	size_t		orgsize = 0;

	VMOPTIONS();

	/**/COUNT(N_alloc);

	SETINUSE(vd, inuse);

	if(!(local = vd->mode&VM_TRUST))
	{	GETLOCAL(vd,local);	/**/ASSERT(!ISLOCK(vd,local));
		if(ISLOCK(vd,local) )
		{	CLRINUSE(vd, inuse);
			return NIL(Void_t*);
		}
		SETLOCK(vd,local);
		orgsize = size;
	}

	/**/ASSERT(_vmbestcheck(vd, NIL(Block_t*)) == 0);
	/**/ ASSERT(HEADSIZE == sizeof(Head_t));
	/**/ ASSERT(BODYSIZE == sizeof(Body_t));
	/**/ ASSERT((ALIGN%(BITS+1)) == 0 );
	/**/ ASSERT((sizeof(Head_t)%ALIGN) == 0 );
	/**/ ASSERT((sizeof(Body_t)%ALIGN) == 0 );
	/**/ ASSERT((BODYSIZE%ALIGN) == 0 );
	/**/ ASSERT(sizeof(Block_t) == (sizeof(Body_t)+sizeof(Head_t)) );

	/* per the ANSI requirement that malloc(0) return a non-NULL pointer */
	size = size <= BODYSIZE ? BODYSIZE : ROUND(size,ALIGN);
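	/* for example, if ALIGN were 16 and BODYSIZE 16 (both are
	** platform-dependent), then size 1 -> 16, 17 -> 32, 100 -> 112.
	*/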

	if((tp = vd->free) )	/* reuse last free piece if appropriate */
	{	/**/ASSERT(ISBUSY(SIZE(tp)) );
		/**/ASSERT(ISJUNK(SIZE(tp)) );
		/**/COUNT(N_last);

		vd->free = NIL(Block_t*);
		if((s = SIZE(tp)) >= size && s < (size << 1) )
		{	if(s >= size + (sizeof(Head_t)+BODYSIZE) )
			{	SIZE(tp) = size;
				np = NEXT(tp);
				SEG(np) = SEG(tp);
				SIZE(np) = ((s&~BITS) - (size+sizeof(Head_t)))|JUNK|BUSY;
				vd->free = np;
				SIZE(tp) |= s&BITS;
			}
			CLRJUNK(SIZE(tp));
			goto done;
		}

		LINK(tp) = CACHE(vd)[S_CACHE];
		CACHE(vd)[S_CACHE] = tp;
	}

	for(;;)
	{	for(n = S_CACHE; n >= 0; --n)	/* best-fit except for coalescing */
		{	bestreclaim(vd,NIL(Block_t*),n);
			if(vd->root && (tp = bestsearch(vd,size,NIL(Block_t*))) )
				goto got_block;
		}

		/**/ASSERT(!vd->free);
		if((tp = vd->wild) && SIZE(tp) >= size)
		{	/**/COUNT(N_wild);
			vd->wild = NIL(Block_t*);
			goto got_block;
		}

		KPVCOMPACT(vm,bestcompact);
		if((tp = (*_Vmextend)(vm,size,bestsearch)) )
			goto got_block;
		else if(vd->mode&VM_AGAIN)
			vd->mode &= ~VM_AGAIN;
		else
		{	CLRLOCK(vd,local);
			CLRINUSE(vd, inuse);
			return NIL(Void_t*);
		}
	}

got_block:
	/**/ ASSERT(!ISBITS(SIZE(tp)));
	/**/ ASSERT(SIZE(tp) >= size);
	/**/ ASSERT((SIZE(tp)%ALIGN) == 0);
	/**/ ASSERT(!vd->free);

	/* tell next block that we are no longer a free block */
	CLRPFREE(SIZE(NEXT(tp)));	/**/ ASSERT(ISBUSY(SIZE(NEXT(tp))));

	if((s = SIZE(tp)-size) >= (sizeof(Head_t)+BODYSIZE) )
	{	SIZE(tp) = size;

		np = NEXT(tp);
		SEG(np) = SEG(tp);
		SIZE(np) = (s - sizeof(Head_t)) | BUSY|JUNK;

		if(VMWILD(vd,np))
		{	SIZE(np) &= ~BITS;
			*SELF(np) = np; /**/ASSERT(ISBUSY(SIZE(NEXT(np))));
			SETPFREE(SIZE(NEXT(np)));
			vd->wild = np;
		}
		else	vd->free = np;
	}

	SETBUSY(SIZE(tp));

done:
	if(!local && (vd->mode&VM_TRACE) && _Vmtrace && VMETHOD(vd) == VM_MTBEST)
		(*_Vmtrace)(vm,NIL(Vmuchar_t*),(Vmuchar_t*)DATA(tp),orgsize,0);

	/**/ASSERT(_vmbestcheck(vd, NIL(Block_t*)) == 0);
	CLRLOCK(vd,local);
	ANNOUNCE(local, vm, VM_ALLOC, DATA(tp), vm->disc);

	CLRINUSE(vd, inuse);
	return DATA(tp);
}

#if __STD_C
static long bestaddr(Vmalloc_t* vm, Void_t* addr )
#else
static long bestaddr(vm, addr)
Vmalloc_t*	vm;	/* region allocating from	*/
Void_t*		addr;	/* address to check		*/
#endif
{
	reg Seg_t*	seg;
	reg Block_t	*b, *endb;
	reg long	offset;
	reg Vmdata_t*	vd = vm->data;
	reg int		local, inuse;

	SETINUSE(vd, inuse);

	if(!(local = vd->mode&VM_TRUST) )
	{	GETLOCAL(vd,local); /**/ASSERT(!ISLOCK(vd,local));
		if(ISLOCK(vd,local))
		{	CLRINUSE(vd, inuse);
			return -1L;
		}
		SETLOCK(vd,local);
	}

	offset = -1L; b = endb = NIL(Block_t*);
	for(seg = vd->seg; seg; seg = seg->next)
	{	b = SEGBLOCK(seg);
		endb = (Block_t*)(seg->baddr - sizeof(Head_t));
		if((Vmuchar_t*)addr > (Vmuchar_t*)b &&
		   (Vmuchar_t*)addr < (Vmuchar_t*)endb)
			break;
	}

	if(local && !(vd->mode&VM_TRUST) ) /* from bestfree or bestresize */
	{	b = BLOCK(addr);
		if(seg && SEG(b) == seg && ISBUSY(SIZE(b)) && !ISJUNK(SIZE(b)) )
			offset = 0;
		if(offset != 0 && vm->disc->exceptf)
			(void)(*vm->disc->exceptf)(vm,VM_BADADDR,addr,vm->disc);
	}
	else if(seg)
	{	while(b < endb)
		{	reg Vmuchar_t*	data = (Vmuchar_t*)DATA(b);
			reg size_t	size = SIZE(b)&~BITS;

			if((Vmuchar_t*)addr >= data && (Vmuchar_t*)addr < data+size)
			{	if(ISJUNK(SIZE(b)) || !ISBUSY(SIZE(b)))
					offset = -1L;
				else	offset = (Vmuchar_t*)addr - data;
				goto done;
			}

			b = (Block_t*)((Vmuchar_t*)DATA(b) + size);
		}
	}

done:
	CLRLOCK(vd,local);
	CLRINUSE(vd, inuse);
	return offset;
}

#if __STD_C
static int bestfree(Vmalloc_t* vm, Void_t* data )
#else
static int bestfree(vm, data )
Vmalloc_t*	vm;
Void_t*		data;
#endif
{
	reg Vmdata_t*	vd = vm->data;
	reg Block_t	*bp;
	reg size_t	s;
	reg int		local, inuse;

#ifdef DEBUG
	if((local = (int)integralof(data)) >= 0 && local <= 0xf)
	{	int	vmassert = _Vmassert;
		_Vmassert = local ? local : vmassert ? vmassert : (VM_check|VM_abort);
		_vmbestcheck(vd, NIL(Block_t*));
		_Vmassert = local ? local : vmassert;
		return 0;
	}
#endif

	if(!data) /* ANSI-ism */
		return 0;

	/**/COUNT(N_free);

	SETINUSE(vd, inuse);

	if(!(local = vd->mode&VM_TRUST) )
	{	GETLOCAL(vd,local);	/**/ASSERT(!ISLOCK(vd,local));
		if(ISLOCK(vd,local) || KPVADDR(vm,data,bestaddr) != 0 )
		{	CLRINUSE(vd, inuse);
			return -1;
		}
		SETLOCK(vd,local);
	}

	/**/ASSERT(_vmbestcheck(vd, NIL(Block_t*)) == 0);
	bp = BLOCK(data); s = SIZE(bp);

	/* Keep an approximate average free block size.
	** This is used in bestcompact() to decide when to release
	** raw memory back to the underlying memory system.
	*/
	vd->pool = (vd->pool + (s&~BITS))/2;
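	/* (this is an exponential moving average with weight 1/2: each
	** free halves the old estimate and mixes in half the new size)
	*/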

	if(ISBUSY(s) && !ISJUNK(s))
	{	SETJUNK(SIZE(bp));
	        if(s < MAXCACHE)
	        {       /**/ASSERT(!vmonlist(CACHE(vd)[INDEX(s)], bp) );
	                LINK(bp) = CACHE(vd)[INDEX(s)];
	                CACHE(vd)[INDEX(s)] = bp;
	        }
	        else if(!vd->free)
	                vd->free = bp;
	        else
	        {       /**/ASSERT(!vmonlist(CACHE(vd)[S_CACHE], bp) );
	                LINK(bp) = CACHE(vd)[S_CACHE];
	                CACHE(vd)[S_CACHE] = bp;
	        }

		/* coalesce on freeing large blocks to avoid fragmentation */
		if(SIZE(bp) >= 2*vd->incr)
		{	bestreclaim(vd,NIL(Block_t*),0);
			if(vd->wild && SIZE(vd->wild) >= COMPACT*vd->incr)
				KPVCOMPACT(vm,bestcompact);
		}
	}

	if(!local && _Vmtrace && (vd->mode&VM_TRACE) && VMETHOD(vd) == VM_MTBEST )
		(*_Vmtrace)(vm,(Vmuchar_t*)data,NIL(Vmuchar_t*), (s&~BITS), 0);

	/**/ASSERT(_vmbestcheck(vd, NIL(Block_t*)) == 0);
	CLRLOCK(vd,local);
	ANNOUNCE(local, vm, VM_FREE, data, vm->disc);

	CLRINUSE(vd, inuse);
	return 0;
}

#if __STD_C
static Void_t* bestresize(Vmalloc_t* vm, Void_t* data, reg size_t size, int type)
#else
static Void_t* bestresize(vm,data,size,type)
Vmalloc_t*	vm;		/* region allocating from	*/
Void_t*		data;		/* old block of data		*/
reg size_t	size;		/* new size			*/
int		type;		/* !=0 to move, <0 for not copy */
#endif
{
	reg Block_t	*rp, *np, *t;
	int		local, inuse;
	size_t		s, bs, oldsize = 0, orgsize = 0;
	Void_t		*oldd, *orgdata = NIL(Void_t*);
	Vmdata_t	*vd = vm->data;

	/**/COUNT(N_resize);

	SETINUSE(vd, inuse);

	if(!data)
	{	if((data = bestalloc(vm,size)) )
		{	oldsize = 0;
			size = size <= BODYSIZE ? BODYSIZE : ROUND(size,ALIGN);
		}
		goto done;
	}
	if(size == 0)
	{	(void)bestfree(vm,data);
		CLRINUSE(vd, inuse);
		return NIL(Void_t*);
	}

	if(!(local = vd->mode&VM_TRUST) )
	{	GETLOCAL(vd,local); /**/ASSERT(!ISLOCK(vd,local));
		if(ISLOCK(vd,local) || (!local && KPVADDR(vm,data,bestaddr) != 0 ) )
		{	CLRINUSE(vd, inuse);
			return NIL(Void_t*);
		}
		SETLOCK(vd,local);

		orgdata = data;	/* for tracing */
		orgsize = size;
	}

	/**/ASSERT(_vmbestcheck(vd, NIL(Block_t*)) == 0);
	size = size <= BODYSIZE ? BODYSIZE : ROUND(size,ALIGN);
	rp = BLOCK(data);	/**/ASSERT(ISBUSY(SIZE(rp)) && !ISJUNK(SIZE(rp)));
	oldsize = SIZE(rp); CLRBITS(oldsize);
	if(oldsize < size)
	{	np = (Block_t*)((Vmuchar_t*)rp + oldsize + sizeof(Head_t));
		do	/* forward merge as much as possible */
		{	s = SIZE(np); /**/ASSERT(!ISPFREE(s));
			if(np == vd->free)
			{	vd->free = NIL(Block_t*);
				CLRBITS(s);
			}
			else if(ISJUNK(s) )
			{	if(!bestreclaim(vd,np,C_INDEX(s)) )
					/**/ASSERT(0); /* oops: did not see np! */
				s = SIZE(np); /**/ASSERT(s%ALIGN == 0);
			}
			else if(!ISBUSY(s) )
			{	if(np == vd->wild)
					vd->wild = NIL(Block_t*);
				else	REMOVE(vd,np,INDEX(s),t,bestsearch);
			}
			else	break;

			SIZE(rp) += (s += sizeof(Head_t)); /**/ASSERT((s%ALIGN) == 0);
			np = (Block_t*)((Vmuchar_t*)np + s);
			CLRPFREE(SIZE(np));
		} while(SIZE(rp) < size);

		if(SIZE(rp) < size && size > vd->incr && SEGWILD(rp) )
		{	reg Seg_t*	seg;

			s = (size - SIZE(rp)) + sizeof(Head_t); s = ROUND(s,vd->incr);
			seg = SEG(rp);
			if((*vm->disc->memoryf)(vm,seg->addr,seg->extent,seg->extent+s,
				      vm->disc) == seg->addr )
			{	SIZE(rp) += s;
				seg->extent += s;
				seg->size += s;
				seg->baddr += s;
				s  = (SIZE(rp)&~BITS) + sizeof(Head_t);
				np = (Block_t*)((Vmuchar_t*)rp + s);
				SEG(np) = seg;
				SIZE(np) = BUSY;
			}
		}
	}

	if((s = SIZE(rp)) >= (size + (BODYSIZE+sizeof(Head_t))) )
	{	SIZE(rp) = size;
		np = NEXT(rp);
		SEG(np) = SEG(rp);
		SIZE(np) = (((s&~BITS)-size) - sizeof(Head_t))|BUSY|JUNK;
		CPYBITS(SIZE(rp),s);
		rp = np;
		goto do_free;
	}
	else if((bs = s&~BITS) < size)
	{	if(!(type&(VM_RSMOVE|VM_RSCOPY)) )
			data = NIL(Void_t*); /* old data is not moveable */
		else
		{	oldd = data;
			if((data = KPVALLOC(vm,size,bestalloc)) )
			{	if(type&VM_RSCOPY)
					memcpy(data, oldd, bs);

			do_free: /* reclaim these right away */
				SETJUNK(SIZE(rp));
				LINK(rp) = CACHE(vd)[S_CACHE];
				CACHE(vd)[S_CACHE] = rp;
				bestreclaim(vd, NIL(Block_t*), S_CACHE);
			}
		}
	}

	if(!local && _Vmtrace && data && (vd->mode&VM_TRACE) && VMETHOD(vd) == VM_MTBEST)
		(*_Vmtrace)(vm, (Vmuchar_t*)orgdata, (Vmuchar_t*)data, orgsize, 0);

	/**/ASSERT(_vmbestcheck(vd, NIL(Block_t*)) == 0);
	CLRLOCK(vd,local);
	ANNOUNCE(local, vm, VM_RESIZE, data, vm->disc);

done:	if(data && (type&VM_RSZERO) && (size = SIZE(BLOCK(data))&~BITS) > oldsize )
		memset((Void_t*)((Vmuchar_t*)data + oldsize), 0, size-oldsize);

	CLRINUSE(vd, inuse);
	return data;
}

#if __STD_C
static long bestsize(Vmalloc_t* vm, Void_t* addr )
#else
static long bestsize(vm, addr)
Vmalloc_t*	vm;	/* region allocating from	*/
Void_t*		addr;	/* address to check		*/
#endif
{
	reg Seg_t*	seg;
	reg Block_t	*b, *endb;
	reg long	size;
	reg Vmdata_t*	vd = vm->data;
	reg int		inuse;

	SETINUSE(vd, inuse);

	if(!(vd->mode&VM_TRUST) )
	{	if(ISLOCK(vd,0))
		{	CLRINUSE(vd, inuse);
			return -1L;
		}
		SETLOCK(vd,0);
	}

	size = -1L;
	for(seg = vd->seg; seg; seg = seg->next)
	{	b = SEGBLOCK(seg);
		endb = (Block_t*)(seg->baddr - sizeof(Head_t));
		if((Vmuchar_t*)addr <= (Vmuchar_t*)b ||
		   (Vmuchar_t*)addr >= (Vmuchar_t*)endb)
			continue;
		while(b < endb)
		{	if(addr == DATA(b))
			{	if(!ISBUSY(SIZE(b)) || ISJUNK(SIZE(b)) )
					size = -1L;
				else	size = (long)SIZE(b)&~BITS;
				goto done;
			}
			else if((Vmuchar_t*)addr <= (Vmuchar_t*)b)
				break;

			b = (Block_t*)((Vmuchar_t*)DATA(b) + (SIZE(b)&~BITS) );
		}
	}

done:
	CLRLOCK(vd,0);
	CLRINUSE(vd, inuse);
	return size;
}

#if __STD_C
static Void_t* bestalign(Vmalloc_t* vm, size_t size, size_t align)
#else
static Void_t* bestalign(vm, size, align)
Vmalloc_t*	vm;
size_t		size;
size_t		align;
#endif
{
	reg Vmuchar_t	*data;
	reg Block_t	*tp, *np;
	reg Seg_t*	seg;
	reg int		local, inuse;
	reg size_t	s, extra, orgsize = 0, orgalign = 0;
	reg Vmdata_t*	vd = vm->data;

	if(size <= 0 || align <= 0)
		return NIL(Void_t*);

	SETINUSE(vd, inuse);

	if(!(local = vd->mode&VM_TRUST) )
	{	GETLOCAL(vd,local); /**/ASSERT(!ISLOCK(vd,local));
		if(ISLOCK(vd,local) )
		{	CLRINUSE(vd, inuse);
			return NIL(Void_t*);
		}
		SETLOCK(vd,local);
		orgsize = size;
		orgalign = align;
	}

	/**/ASSERT(_vmbestcheck(vd, NIL(Block_t*)) == 0);
	size = size <= BODYSIZE ? BODYSIZE : ROUND(size,ALIGN);
	align = MULTIPLE(align,ALIGN);

	/* hack so that dbalign() can store header data */
	if(VMETHOD(vd) != VM_MTDEBUG)
		extra = 0;
	else
	{	extra = DB_HEAD;
		while(align < extra || (align - extra) < sizeof(Block_t))
			align *= 2;
	}

	/* reclaim all free blocks now to avoid fragmentation */
	bestreclaim(vd,NIL(Block_t*),0);

	s = size + 2*(align+sizeof(Head_t)+extra);
	if(!(data = (Vmuchar_t*)KPVALLOC(vm,s,bestalloc)) )
		goto done;

	tp = BLOCK(data);
	seg = SEG(tp);

	/* get an aligned address that we can live with */
	if((s = (size_t)((VLONG(data)+extra)%align)) != 0)
		data += align-s; /**/ASSERT(((VLONG(data)+extra)%align) == 0);

	if((np = BLOCK(data)) != tp ) /* need to free left part */
	{	if(((Vmuchar_t*)np - (Vmuchar_t*)tp) < (ssize_t)(sizeof(Block_t)+extra) )
		{	data += align;
			np = BLOCK(data);
		} /**/ASSERT(((VLONG(data)+extra)%align) == 0);

		s  = (Vmuchar_t*)np - (Vmuchar_t*)tp;
		SIZE(np) = ((SIZE(tp)&~BITS) - s)|BUSY;
		SEG(np) = seg;

		SIZE(tp) = (s - sizeof(Head_t)) | (SIZE(tp)&BITS) | JUNK;
		/**/ ASSERT(SIZE(tp) >= sizeof(Body_t) );
		LINK(tp) = CACHE(vd)[C_INDEX(SIZE(tp))];
		CACHE(vd)[C_INDEX(SIZE(tp))] = tp;
	}

	/* free left-over if too big */
	if((s = SIZE(np) - size) >= sizeof(Block_t))
	{	SIZE(np) = size;

		tp = NEXT(np);
		SIZE(tp) = ((s & ~BITS) - sizeof(Head_t)) | BUSY | JUNK;
		SEG(tp) = seg;
		LINK(tp) = CACHE(vd)[C_INDEX(SIZE(tp))];
		CACHE(vd)[C_INDEX(SIZE(tp))] = tp;

		SIZE(np) |= s&BITS;
	}

	bestreclaim(vd,NIL(Block_t*),0); /* coalesce all free blocks */

	if(!local && !(vd->mode&VM_TRUST) && _Vmtrace && (vd->mode&VM_TRACE) )
		(*_Vmtrace)(vm,NIL(Vmuchar_t*),data,orgsize,orgalign);

done:
	/**/ASSERT(_vmbestcheck(vd, NIL(Block_t*)) == 0);
	CLRLOCK(vd,local);
	ANNOUNCE(local, vm, VM_ALLOC, (Void_t*)data, vm->disc);

	CLRINUSE(vd, inuse);
	return (Void_t*)data;
}


#if _mem_win32
#if _PACKAGE_ast
#include		<ast_windows.h>
#else
#include		<windows.h>
#endif
#endif /* _mem_win32 */

#if _mem_mmap_anon
#include		<sys/mman.h>
#ifndef MAP_ANON
#define	MAP_ANON	MAP_ANONYMOUS
#endif
#endif /* _mem_mmap_anon */

#if _mem_mmap_zero
#include		<sys/fcntl.h>
#include		<sys/mman.h>
typedef struct _mmapdisc_s
{	Vmdisc_t	disc;
	int		fd;
	off_t		offset;
} Mmapdisc_t;

#ifndef OPEN_MAX
#define	OPEN_MAX	64
#endif
#define OPEN_PRIVATE	(3*OPEN_MAX/4)
#endif /* _mem_mmap_zero */

/* failure mode of mmap, sbrk and brk */
#ifndef MAP_FAILED
#define MAP_FAILED	((Void_t*)(-1))
#endif
#define BRK_FAILED	((Void_t*)(-1))

/* make sure that allocated memory is addressable */

#if _PACKAGE_ast
#include	<sig.h>
#else
#include	<signal.h>
typedef void	(*Sig_handler_t)(int);
#endif

static int	Gotsegv = 0;

#if __STD_C
static void sigsegv(int sig)
#else
static void sigsegv(sig)
int	sig;
#endif
{
	if(sig == SIGSEGV)
		Gotsegv = 1;
}

#if __STD_C
static int okaddr(Void_t* addr, size_t nsize)
#else
static int okaddr(addr, nsize)
Void_t*	addr;
size_t	nsize;
#endif
{
	Sig_handler_t	segv;
	int		rv;

	Gotsegv = 0; /* catch segmentation faults */
	segv = signal(SIGSEGV, sigsegv);

	if(Gotsegv == 0)
		rv = *((char*)addr);
	if(Gotsegv == 0)
		rv += *(((char*)addr)+nsize-1);
	if(Gotsegv == 0)
		rv = rv == 0 ? 0 : 1;
	else	rv = -1;

	signal(SIGSEGV, segv); /* restore signal catcher */
	Gotsegv = 0;

	return rv;
}

/* A discipline to get raw memory using sbrk/VirtualAlloc/mmap */
#if __STD_C
static Void_t* sbrkmem(Vmalloc_t* vm, Void_t* caddr,
			size_t csize, size_t nsize, Vmdisc_t* disc)
#else
static Void_t* sbrkmem(vm, caddr, csize, nsize, disc)
Vmalloc_t*	vm;	/* region doing allocation from		*/
Void_t*		caddr;	/* current address			*/
size_t		csize;	/* current size				*/
size_t		nsize;	/* new size				*/
Vmdisc_t*	disc;	/* discipline structure			*/
#endif
{
#undef _done_sbrkmem

#if !defined(_done_sbrkmem) && defined(_mem_win32)
#define _done_sbrkmem	1
	NOTUSED(vm);
	NOTUSED(disc);
	if(csize == 0)
		return (Void_t*)VirtualAlloc(0,nsize,MEM_COMMIT,PAGE_READWRITE);
	else if(nsize == 0)
		return VirtualFree((LPVOID)caddr,0,MEM_RELEASE) ? caddr : NIL(Void_t*);
	else	return NIL(Void_t*);
#endif /* _mem_win32 */

#if !defined(_done_sbrkmem) && (_mem_sbrk || _mem_mmap_zero || _mem_mmap_anon)
#define _done_sbrkmem	1
	Vmuchar_t	*addr;
#if _mem_mmap_zero
	Mmapdisc_t	*mmdc = (Mmapdisc_t*)disc;
#else
	NOTUSED(disc);
#endif
	NOTUSED(vm);

	if(csize == 0) /* allocating new memory */
	{

#if _mem_sbrk	/* try using sbrk() and brk() */
		if(!(_Vmassert & VM_mmap))
		{
			addr = (Vmuchar_t*)sbrk(0); /* old break value */
			if(addr && addr != (Vmuchar_t*)BRK_FAILED )
			{
				if((addr+nsize) < addr)
					return NIL(Void_t*);
				if(brk(addr+nsize) == 0 )
				{	if(okaddr(addr,nsize) >= 0)
						return addr;
					(void)brk(addr); /* release reserved address */
				}
			}
		}
#endif /* _mem_sbrk */

#if _mem_mmap_anon /* anonymous mmap */
		addr = (Vmuchar_t*)mmap(0, nsize, PROT_READ|PROT_WRITE,
                                        MAP_ANON|MAP_PRIVATE, -1, 0);
		if(addr && addr != (Vmuchar_t*)MAP_FAILED)
		{	if(okaddr(addr,nsize) >= 0)
				return addr;
			(void)munmap((char*)addr, nsize); /* release reserved address */
		}
#endif /* _mem_mmap_anon */

#if _mem_mmap_zero /* mmap from /dev/zero */
		if(mmdc->fd < 0)
		{	int	fd;
			if(mmdc->fd != -1)
				return NIL(Void_t*);
			if((fd = open("/dev/zero", O_RDONLY)) < 0 )
			{	mmdc->fd = -2;
				return NIL(Void_t*);
			}
			if(fd >= OPEN_PRIVATE || (mmdc->fd = dup2(fd,OPEN_PRIVATE)) < 0 )
				mmdc->fd = fd;
			else	close(fd);
#ifdef FD_CLOEXEC
			fcntl(mmdc->fd, F_SETFD, FD_CLOEXEC);
#endif
		}
		addr = (Vmuchar_t*)mmap(0, nsize, PROT_READ|PROT_WRITE,
					MAP_PRIVATE, mmdc->fd, mmdc->offset);
		if(addr && addr != (Vmuchar_t*)MAP_FAILED)
		{	if(okaddr(addr, nsize) >= 0)
			{	mmdc->offset += nsize;
				return addr;
			}
			(void)munmap((char*)addr, nsize); /* release reserved address */
		}
#endif /* _mem_mmap_zero */

		return NIL(Void_t*);
	}
	else
	{

#if _mem_sbrk
		addr = (Vmuchar_t*)sbrk(0);
		if(!addr || addr == (Vmuchar_t*)BRK_FAILED)
			addr = caddr;
		else if(((Vmuchar_t*)caddr+csize) == addr) /* in sbrk-space */
		{	if(nsize <= csize)
				addr -= csize-nsize;
			else if((addr += nsize-csize) < (Vmuchar_t*)caddr)
				return NIL(Void_t*); /* wrapped around address */
			else	return brk(addr) == 0 ? caddr : NIL(Void_t*);
		}
#else
		addr = caddr;
#endif /* _mem_sbrk */

#if _mem_mmap_zero || _mem_mmap_anon
		if(((Vmuchar_t*)caddr+csize) > addr) /* in mmap-space */
			if(nsize == 0 && munmap(caddr,csize) == 0)
				return caddr;
#endif /* _mem_mmap_zero || _mem_mmap_anon */

		return NIL(Void_t*);
	}
#endif /*_done_sbrkmem*/

#if !_done_sbrkmem /* use native malloc/free as a last resort */
	/**/ASSERT(_std_malloc); /* _std_malloc should be well-defined */
	NOTUSED(vm);
	NOTUSED(disc);
	if(csize == 0)
		return (Void_t*)malloc(nsize);
	else if(nsize == 0)
	{	free(caddr);
		return caddr;
	}
	else	return NIL(Void_t*);
#endif /* _done_sbrkmem */
}

#if _mem_mmap_zero
static Mmapdisc_t _Vmdcsbrk = { { sbrkmem, NIL(Vmexcept_f), 64*1024 }, -1, 0 };
#else
static Vmdisc_t _Vmdcsbrk = { sbrkmem, NIL(Vmexcept_f), 0 };
#endif
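
/*	Illustrative sketch of a user-supplied discipline (not part of
**	this file): memoryf has the same signature as sbrkmem() above
**	(csize == 0 asks for new memory, nsize == 0 releases). This
**	hypothetical one carves memory out of a static arena and never
**	returns it, which can be handy for self-contained tests.
**
**		static Vmuchar_t	arena[1024*1024];
**		static size_t		used = 0;
**
**		static Void_t* arenamem(Vmalloc_t* vm, Void_t* caddr,
**					size_t csize, size_t nsize, Vmdisc_t* disc)
**		{
**			if(csize == 0 && used+nsize <= sizeof(arena))
**			{	Void_t* p = (Void_t*)(arena+used);
**				used += nsize;
**				return p;
**			}
**			if(nsize == 0)
**				return caddr;	// pretend to release
**			return NIL(Void_t*);	// no in-place resizing
**		}
**
**		static Vmdisc_t arenadisc = { arenamem, NIL(Vmexcept_f), 0 };
**		// ... vmopen(&arenadisc, Vmbest, 0) ...
*/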

static Vmethod_t _Vmbest =
{
	bestalloc,
	bestresize,
	bestfree,
	bestaddr,
	bestsize,
	bestcompact,
	bestalign,
	VM_MTBEST
};

/* The heap region */
static Vmdata_t	_Vmdata =
{
	VM_MTBEST|VM_TRUST,		/* mode		*/
	0,				/* incr		*/
	0,				/* pool		*/
	NIL(Seg_t*),			/* seg		*/
	NIL(Block_t*),			/* free		*/
	NIL(Block_t*),			/* wild		*/
	NIL(Block_t*),			/* root		*/
};
Vmalloc_t _Vmheap =
{
	{ bestalloc,
	  bestresize,
	  bestfree,
	  bestaddr,
	  bestsize,
	  bestcompact,
	  bestalign,
	  VM_MTBEST
	},
	NIL(char*),			/* file		*/
	0,				/* line		*/
	0,				/* func		*/
	(Vmdisc_t*)(&_Vmdcsbrk),	/* disc		*/
	&_Vmdata,			/* data		*/
	NIL(Vmalloc_t*)			/* next		*/
};

__DEFINE__(Vmalloc_t*, Vmheap, &_Vmheap);
__DEFINE__(Vmalloc_t*, Vmregion, &_Vmheap);
__DEFINE__(Vmethod_t*, Vmbest, &_Vmbest);
__DEFINE__(Vmdisc_t*,  Vmdcsbrk, (Vmdisc_t*)(&_Vmdcsbrk) );

#ifdef NoF
NoF(vmbest)
#endif

#endif