/*
 * Copyright (c) 1982, 1986 Regents of the University of California.
 * All rights reserved.  The Berkeley software License Agreement
 * specifies the terms and conditions for redistribution.
 *
 *	@(#)subr_rmap.c.sav	7.1 (Berkeley) 06/05/86
 */

#include "param.h"
#include "systm.h"
#include "map.h"
#include "dir.h"
#include "user.h"
#include "proc.h"
#include "text.h"
#include "kernel.h"

/*
 * Resource map handling routines.
 *
 * A resource map is an array of structures each
 * of which describes a segment of the address space of an available
 * resource.  The segments are described by their base address and
 * length, and sorted in address order.  Each resource map has a fixed
 * maximum number of segments allowed.  Resources are allocated
 * by taking part or all of one of the segments of the map.
 *
 * Returning of resources will require another segment if
 * the returned resources are not adjacent in the address
 * space to an existing segment.  If the return of a segment
 * would require a slot which is not available, then one of
 * the resource map segments is discarded after a warning is printed.
 * Returning of resources may also cause the map to collapse
 * by coalescing two existing segments and the returned space
 * into a single segment.  In this case the resource map is
 * made smaller by copying the remaining segments together to fill the gap.
 *
 * N.B.: the current implementation uses a dense array and does
 * not admit the value ``0'' as a legal address, since that is used
 * as a delimiter.
 */

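/*
 * Illustrative sketch (not part of the original source; the values are
 * hypothetical).  A map initialized with rminit(mp, 1000, 100, "demo", 6)
 * holds one free segment followed by the zero delimiter:
 *
 *	[addr=100, size=1000] [0,0]
 *
 * rmalloc(mp, 200) then returns 100 and shrinks the segment in place:
 *
 *	[addr=300, size=800] [0,0]
 *
 * and a later rmfree(mp, 200, 100) coalesces the space back into
 *
 *	[addr=100, size=1000] [0,0]
 */
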
/*
 * Initialize map mp to have (mapsize-2) segments
 * and to be called ``name'', which we print if
 * the slots become so fragmented that we lose space.
 * The map itself is initialized with size elements free
 * starting at addr.
 */
rminit(mp, size, addr, name, mapsize)
	register struct map *mp;
	long size, addr;
	char *name;
	int mapsize;
{
	register struct mapent *ep = (struct mapent *)(mp+1);

	mp->m_name = name;
/* N.B.: WE ASSUME HERE THAT sizeof (struct map) == sizeof (struct mapent) */
	/*
	 * One of the mapsize slots is taken by the map structure,
	 * another segment has size 0 and addr 0 and acts as a delimiter.
	 * We ensure that we never use segments past the end of
	 * the array which is given by mp->m_limit.
	 * Instead, when excess segments occur we discard some resources.
	 */
	mp->m_limit = (struct mapent *)&mp[mapsize];
	/*
	 * Simulate a rmfree(), but with the option to
	 * call with size 0 and addr 0 when we just want
	 * to initialize without freeing.
	 */
	ep->m_size = size;
	ep->m_addr = addr;
}

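/*
 * Minimal usage sketch (illustrative only; demomap, _demomap and
 * DEMOMAPSIZE are hypothetical names, not part of the kernel).  A map
 * is declared as an array of mapent-sized slots; rminit() uses the
 * first slot as the struct map header itself, relying on the size
 * equality noted above.
 */
#ifdef notdef
#define	DEMOMAPSIZE	10	/* 1 header + up to 8 segments + delimiter */
struct mapent _demomap[DEMOMAPSIZE];
struct map *demomap = (struct map *)_demomap;

demomapinit()
{

	/* one free segment covering resource addresses [100, 1100) */
	rminit(demomap, (long)1000, (long)100, "demo", DEMOMAPSIZE);
}
#endif
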
/*
 * Allocate 'size' units from the given
 * map. Return the base of the allocated space.
 * In a map, the addresses are increasing and the
 * list is terminated by a 0 size.
 *
 * Algorithm is first-fit.
 *
 * This routine knows about the interleaving of the swapmap
 * and handles that.
 */
long
rmalloc(mp, size)
	register struct map *mp;
	long size;
{
	register struct mapent *ep = (struct mapent *)(mp+1);
	register int addr;
	register struct mapent *bp;
	swblk_t first, rest;

	if (size <= 0 || mp == swapmap && size > dmmax)
		panic("rmalloc");
	/*
	 * Search for a piece of the resource map which has enough
	 * free space to accommodate the request.
	 */
	for (bp = ep; bp->m_size; bp++) {
		if (bp->m_size >= size) {
			/*
			 * If allocating from swapmap,
			 * then have to respect interleaving
			 * boundaries.
			 */
			if (mp == swapmap && nswdev > 1 &&
			    (first = dmmax - bp->m_addr%dmmax) < bp->m_size) {
				if (bp->m_size - first < size)
					continue;
				addr = bp->m_addr + first;
				rest = bp->m_size - first - size;
				bp->m_size = first;
				if (rest)
					rmfree(swapmap, rest, addr+size);
				return (addr);
			}
			/*
			 * Allocate from the map.
			 * If there is no space left of the piece
			 * we allocated from, move the rest of
			 * the pieces to the left.
			 */
			addr = bp->m_addr;
			bp->m_addr += size;
			if ((bp->m_size -= size) == 0) {
				do {
					bp++;
					(bp-1)->m_addr = bp->m_addr;
				} while ((bp-1)->m_size = bp->m_size);
			}
			if (mp == swapmap && addr % CLSIZE)
				panic("rmalloc swapmap");
			return (addr);
		}
	}
	return (0);
}

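/*
 * First-fit sketch (illustrative; the numbers are hypothetical).
 * With the free segments
 *
 *	[addr=10, size=5] [addr=40, size=50] [0,0]
 *
 * rmalloc(mp, 20) skips the first segment (too small), carves the
 * request out of the second, returns 40, and leaves
 *
 *	[addr=10, size=5] [addr=60, size=30] [0,0]
 *
 * Allocations from swapmap are additionally kept within dmmax
 * interleaving boundaries when several swap devices are interleaved,
 * as handled above.
 */
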
/*
 * Free the previously allocated space at addr
 * of size units into the specified map.
 * Sort addr into map and combine on
 * one or both ends if possible.
 */
rmfree(mp, size, addr)
	struct map *mp;
	long size, addr;
{
	struct mapent *firstbp;
	register struct mapent *bp;
	register int t;

	/*
	 * Both address and size must be
	 * positive, or the protocol has broken down.
	 */
	if (addr <= 0 || size <= 0)
		goto badrmfree;
	/*
	 * Locate the piece of the map which starts after the
	 * returned space (or the end of the map).
	 */
	firstbp = bp = (struct mapent *)(mp + 1);
	for (; bp->m_addr <= addr && bp->m_size != 0; bp++)
		continue;
	/*
	 * If the piece on the left abuts us,
	 * then we should combine with it.
	 */
	if (bp > firstbp && (bp-1)->m_addr+(bp-1)->m_size >= addr) {
		/*
		 * Check no overlap (internal error).
		 */
		if ((bp-1)->m_addr+(bp-1)->m_size > addr)
			goto badrmfree;
		/*
		 * Add into piece on the left by increasing its size.
		 */
		(bp-1)->m_size += size;
		/*
		 * If the combined piece abuts the piece on
		 * the right now, compress it in also,
		 * by shifting the remaining pieces of the map over.
		 */
		if (bp->m_addr && addr+size >= bp->m_addr) {
			if (addr+size > bp->m_addr)
				goto badrmfree;
			(bp-1)->m_size += bp->m_size;
			while (bp->m_size) {
				bp++;
				(bp-1)->m_addr = bp->m_addr;
				(bp-1)->m_size = bp->m_size;
			}
		}
		goto done;
	}
	/*
	 * Don't abut on the left, check for abutting on
	 * the right.
	 */
	if (addr+size >= bp->m_addr && bp->m_size) {
		if (addr+size > bp->m_addr)
			goto badrmfree;
		bp->m_addr -= size;
		bp->m_size += size;
		goto done;
	}
	/*
	 * Don't abut at all.  Make a new entry
	 * and check for map overflow.
	 */
	do {
		t = bp->m_addr;
		bp->m_addr = addr;
		addr = t;
		t = bp->m_size;
		bp->m_size = size;
		bp++;
	} while (size = t);
	/*
	 * Segment at bp is to be the delimiter;
	 * If there is not room for it
	 * then the table is too full
	 * and we must discard something.
	 */
	if (bp+1 > mp->m_limit) {
		/*
		 * Back bp up to the last available slot,
		 * which contains a segment already and must
		 * be made into the delimiter.
		 * Discard the second-to-last entry,
		 * since it is presumably smaller than the last,
		 * and move the last entry back one.
		 */
		bp--;
		printf("%s: rmap ovflo, lost [%d,%d)\n", mp->m_name,
		    (bp-1)->m_addr, (bp-1)->m_addr+(bp-1)->m_size);
		bp[-1] = bp[0];
		bp[0].m_size = bp[0].m_addr = 0;
	}
done:
	/*
	 * THIS IS RIDICULOUS... IT DOESN'T BELONG HERE!
	 */
	if ((mp == kernelmap) && kmapwnt) {
		kmapwnt = 0;
		wakeup((caddr_t)kernelmap);
	}
	return;
badrmfree:
	panic("bad rmfree");
}

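/*
 * Coalescing sketch (illustrative; the numbers are hypothetical).
 * Given the free segments
 *
 *	[addr=10, size=20] [addr=50, size=20] [0,0]
 *
 * rmfree(mp, 20, 30) abuts the left segment, and the combined piece
 * then abuts the right one, so all three collapse into
 *
 *	[addr=10, size=60] [0,0]
 *
 * If instead the freed space abutted nothing and no slot remained for
 * a new entry, the second-to-last segment would be discarded with an
 * ``rmap ovflo'' warning, as above.
 */
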
/*
 * Allocate 'size' units from the given map, starting at address 'addr'.
 * Return 'addr' if successful, 0 if not.
 * This may cause the creation or destruction of a resource map segment.
 *
 * This routine will return failure status if there is not enough room
 * for a required additional map segment.
 *
 * An attempt to use this on 'swapmap' will result in
 * a failure return.  This is due mainly to laziness and could be fixed
 * to do the right thing, although it probably will never be used.
 */
rmget(mp, size, addr)
	register struct map *mp;
{
	register struct mapent *ep = (struct mapent *)(mp+1);
	register struct mapent *bp, *bp2;

	if (size <= 0)
		panic("rmget");
	if (mp == swapmap)
		return (0);
	/*
	 * Look for a map segment containing the requested address.
	 * If none found, return failure.
	 */
	for (bp = ep; bp->m_size; bp++)
		if (bp->m_addr <= addr && bp->m_addr + bp->m_size > addr)
			break;
	if (bp->m_size == 0)
		return (0);

	/*
	 * If segment is too small, return failure.
	 * If big enough, allocate the block, compressing or expanding
	 * the map as necessary.
	 */
	if (bp->m_addr + bp->m_size < addr + size)
		return (0);
	if (bp->m_addr == addr)
		if (bp->m_addr + bp->m_size == addr + size) {
			/*
			 * Allocate entire segment and compress map
			 */
			bp2 = bp;
			while (bp2->m_size) {
				bp2++;
				(bp2-1)->m_addr = bp2->m_addr;
				(bp2-1)->m_size = bp2->m_size;
			}
		} else {
			/*
			 * Allocate first part of segment
			 */
			bp->m_addr += size;
			bp->m_size -= size;
		}
	else
		if (bp->m_addr + bp->m_size == addr + size) {
			/*
			 * Allocate last part of segment
			 */
			bp->m_size -= size;
		} else {
			/*
			 * Allocate from middle of segment, but only
			 * if table can be expanded.
			 */
			for (bp2=bp; bp2->m_size; bp2++)
				;
			if (bp2 + 1 >= mp->m_limit)
				return (0);
			while (bp2 > bp) {
				(bp2+1)->m_addr = bp2->m_addr;
				(bp2+1)->m_size = bp2->m_size;
				bp2--;
			}
			(bp+1)->m_addr = addr + size;
			(bp+1)->m_size =
			    bp->m_addr + bp->m_size - (addr + size);
			bp->m_size = addr - bp->m_addr;
		}
	return (addr);
}
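
/*
 * rmget() sketch (illustrative; the numbers are hypothetical).
 * Given a single free segment [100, 200), rmget(mp, 20, 140) returns
 * 140 and splits the segment in two:
 *
 *	[addr=100, size=40] [addr=160, size=40] [0,0]
 *
 * provided a free slot remains for the new entry; otherwise 0 is
 * returned and the map is left unchanged.
 */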