/*	subr_rmap.c.sav	4.6	82/10/10	*/

#include "../h/param.h"
#include "../h/systm.h"
#include "../h/map.h"
#include "../h/dir.h"
#include "../h/user.h"
#include "../h/proc.h"
#include "../h/text.h"

/*
 * Resource map handling routines.
 *
 * A resource map is an array of structures, each
 * of which describes a segment of the address space of an available
 * resource.  The segments are described by their base address and
 * length, and sorted in address order.  Each resource map has a fixed
 * maximum number of segments allowed.  Resources are allocated
 * by taking part or all of one of the segments of the map.
 *
 * Returning resources requires an additional segment if
 * the returned resources are not adjacent in the address
 * space to an existing segment.  If the return of a segment
 * would require a slot which is not available, then one of
 * the resource map segments is discarded after a warning is printed.
 * Returning resources may also cause the map to collapse
 * by coalescing two existing segments and the returned space
 * into a single segment.  In this case the resource map is
 * made smaller by copying the remaining segments together to fill
 * the resultant gap.
 *
 * N.B.: the current implementation uses a dense array and does
 * not admit the value ``0'' as a legal address, since that is used
 * as a delimiter.
 */
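
/*
 * A minimal usage sketch (hypothetical names and sizes; real maps,
 * e.g. the swapmap and kernelmap referenced below, are declared and
 * initialized elsewhere in the kernel).  Since address 0 is not
 * legal, the free space is described starting at 1:
 *
 *	struct map examplemap[EXMAPSIZ];
 *
 *	rminit(examplemap, 1000, 1, "example", EXMAPSIZ);
 *	addr = rmalloc(examplemap, 100);
 *	if (addr != 0) {
 *		... use [addr, addr+100) ...
 *		rmfree(examplemap, 100, addr);
 *	}
 */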

/*
 * Initialize map mp to have (mapsize-2) segments
 * and to be called ``name'', which we print if
 * the slots become so fragmented that we lose space.
 * The map itself is initialized with size elements free
 * starting at addr.
 */
rminit(mp, size, addr, name, mapsize)
	register struct map *mp;
	int size, addr;
	char *name;
	int mapsize;
{
	register struct mapent *ep = (struct mapent *)(mp+1);

	mp->m_name = name;
/* N.B.: WE ASSUME HERE THAT sizeof (struct map) == sizeof (struct mapent) */
	/*
	 * One of the mapsize slots is taken by the map structure itself,
	 * and another by the final segment, which has size 0 and addr 0
	 * and acts as a delimiter.
	 * We ensure that we never use segments past the end of
	 * the array, which is given by mp->m_limit.
	 * Instead, when excess segments occur we discard some resources.
	 */
	mp->m_limit = (struct mapent *)&mp[mapsize];
	/*
	 * Simulate a rmfree(), but with the option to
	 * call with size 0 and addr 0 when we just want
	 * to initialize without freeing.
	 */
	ep->m_size = size;
	ep->m_addr = addr;
}
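
/*
 * Sketch of the resulting layout for a (hypothetical) four-slot map,
 * assuming the array was zero-filled to begin with (as kernel maps
 * normally are, e.g. when allocated in bss):
 *
 *	slot 0:	struct map header (m_name, m_limit)
 *	slot 1:	{ m_addr = addr, m_size = size }	the one free segment
 *	slot 2:	{ 0, 0 }				delimiter
 *	slot 3:	spare, up to m_limit
 */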

/*
 * Allocate 'size' units from the given
 * map.  Return the base of the allocated space.
 * In a map, the addresses are increasing and the
 * list is terminated by a 0 size.
 *
 * Algorithm is first-fit.
 *
 * This routine knows about the interleaving of the swapmap
 * and handles that.
 */
rmalloc(mp, size)
	register struct map *mp;
{
	register struct mapent *ep = (struct mapent *)(mp+1);
	register int addr;
	register struct mapent *bp;
	swblk_t first, rest;

	if (size <= 0 || mp == swapmap && size > DMMAX)
		panic("rmalloc");
	/*
	 * Search for a piece of the resource map which has enough
	 * free space to accommodate the request.
	 */
	for (bp = ep; bp->m_size; bp++) {
		if (bp->m_size >= size) {
			/*
			 * If allocating from swapmap,
			 * then have to respect interleaving
			 * boundaries.
			 */
			if (mp == swapmap &&
			    (first = DMMAX - bp->m_addr%DMMAX) < bp->m_size) {
				if (bp->m_size - first < size)
					continue;
				addr = bp->m_addr + first;
				rest = bp->m_size - first - size;
				bp->m_size = first;
				if (rest)
					rmfree(swapmap, rest, addr+size);
				return (addr);
			}
			/*
			 * Allocate from the map.
			 * If there is no space left in the piece
			 * we allocated from, move the rest of
			 * the pieces one slot to the left.
			 */
			addr = bp->m_addr;
			bp->m_addr += size;
			if ((bp->m_size -= size) == 0) {
				do {
					bp++;
					(bp-1)->m_addr = bp->m_addr;
				} while ((bp-1)->m_size = bp->m_size);
			}
			if (mp == swapmap && addr % CLSIZE)
				panic("rmalloc swapmap");
			return (addr);
		}
	}
	return (0);
}
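
/*
 * Worked example of the swapmap interleaving arithmetic above
 * (numbers hypothetical; DMMAX taken as 512 for illustration).
 * Suppose a free segment has m_addr = 1000 and m_size = 600, and
 * 100 blocks are requested.  Then first = 512 - 1000%512 = 24, so
 * the segment straddles the interleaving boundary at 1024.  Since
 * 600 - 24 >= 100, the request is satisfied at addr = 1024, the
 * piece below the boundary is trimmed to 24 blocks at 1000, and the
 * leftover 476 blocks at 1124 are handed back via rmfree().
 */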

/*
 * Free the previously allocated space at addr
 * of size units into the specified map.
 * Sort addr into map and combine on
 * one or both ends if possible.
 */
rmfree(mp, size, addr)
	struct map *mp;
	register int size, addr;
{
	struct mapent *firstbp;
	register struct mapent *bp;
	register int t;

	/*
	 * Both address and size must be
	 * positive, or the protocol has broken down.
	 */
	if (addr <= 0 || size <= 0)
		goto badrmfree;
	/*
	 * Locate the piece of the map which starts after the
	 * returned space (or the end of the map).
	 */
	firstbp = bp = (struct mapent *)(mp + 1);
	for (; bp->m_addr <= addr && bp->m_size != 0; bp++)
		continue;
	/*
	 * If the piece on the left abuts us,
	 * then we should combine with it.
	 */
	if (bp > firstbp && (bp-1)->m_addr+(bp-1)->m_size >= addr) {
		/*
		 * Check no overlap (internal error).
		 */
		if ((bp-1)->m_addr+(bp-1)->m_size > addr)
			goto badrmfree;
		/*
		 * Add into piece on the left by increasing its size.
		 */
		(bp-1)->m_size += size;
		/*
		 * If the combined piece abuts the piece on
		 * the right now, compress it in also,
		 * by shifting the remaining pieces of the map over.
		 */
		if (bp->m_addr && addr+size >= bp->m_addr) {
			if (addr+size > bp->m_addr)
				goto badrmfree;
			(bp-1)->m_size += bp->m_size;
			while (bp->m_size) {
				bp++;
				(bp-1)->m_addr = bp->m_addr;
				(bp-1)->m_size = bp->m_size;
			}
		}
		goto done;
	}
	/*
	 * Don't abut on the left, check for abutting on
	 * the right.
	 */
	if (addr+size >= bp->m_addr && bp->m_size) {
		if (addr+size > bp->m_addr)
			goto badrmfree;
		bp->m_addr -= size;
		bp->m_size += size;
		goto done;
	}
	/*
	 * Don't abut at all.  Make a new entry
	 * and check for map overflow.
	 */
	do {
		t = bp->m_addr;
		bp->m_addr = addr;
		addr = t;
		t = bp->m_size;
		bp->m_size = size;
		bp++;
	} while (size = t);
	/*
	 * The segment at bp is to be the delimiter; if there is
	 * not room for it, then the table is too full
	 * and we must discard something.
	 */
	if (bp+1 > mp->m_limit) {
		/*
		 * Back bp up to the last available slot, which
		 * already contains a segment and must now be
		 * made into the delimiter.
		 * Discard the second-to-last entry, since it is
		 * presumably smaller than the last,
		 * and move the last entry back one.
		 */
		bp--;
		printf("%s: rmap ovflo, lost [%d,%d)\n", mp->m_name,
		    (bp-1)->m_addr, (bp-1)->m_addr+(bp-1)->m_size);
		bp[-1] = bp[0];
		bp[0].m_size = bp[0].m_addr = 0;
	}
done:
	/*
	 * THIS IS RIDICULOUS... IT DOESN'T BELONG HERE!
	 */
	if ((mp == kernelmap) && kmapwnt) {
		kmapwnt = 0;
		wakeup((caddr_t)kernelmap);
	}
	return;
badrmfree:
	panic("bad rmfree");
}
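
/*
 * Illustration of the non-abutting case above (numbers hypothetical).
 * Freeing [50,60) into a map holding [10,15) and [100,120) coalesces
 * with neither neighbor, so the do/while loop swaps the new piece
 * into the slot where the search stopped and ripples the remaining
 * entries one slot to the right, giving [10,15) [50,60) [100,120)
 * followed by the 0-size delimiter.  Had the new piece abutted a
 * neighbor, it would instead have been merged into that entry
 * without consuming an extra slot.
 */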

/*
 * Allocate 'size' units from the given map, starting at address 'addr'.
 * Return 'addr' if successful, 0 if not.
 * This may cause the creation or destruction of a resource map segment.
 *
 * This routine will return failure status if there is not enough room
 * for a required additional map segment.
 *
 * An attempt to use this on 'swapmap' will result in
 * a failure return.  This is due mainly to laziness and could be fixed
 * to do the right thing, although it probably will never be used.
 */
rmget(mp, size, addr)
	register struct map *mp;
{
	register struct mapent *ep = (struct mapent *)(mp+1);
	register struct mapent *bp, *bp2;

	if (size <= 0)
		panic("rmget");
	if (mp == swapmap)
		return (0);
	/*
	 * Look for a map segment containing the requested address.
	 * If none found, return failure.
	 */
	for (bp = ep; bp->m_size; bp++)
		if (bp->m_addr <= addr && bp->m_addr + bp->m_size > addr)
			break;
	if (bp->m_size == 0)
		return (0);

	/*
	 * If segment is too small, return failure.
	 * If big enough, allocate the block, compressing or expanding
	 * the map as necessary.
	 */
	if (bp->m_addr + bp->m_size < addr + size)
		return (0);
	if (bp->m_addr == addr)
		if (bp->m_addr + bp->m_size == addr + size) {
			/*
			 * Allocate entire segment and compress map
			 */
			bp2 = bp;
			while (bp2->m_size) {
				bp2++;
				(bp2-1)->m_addr = bp2->m_addr;
				(bp2-1)->m_size = bp2->m_size;
			}
		} else {
			/*
			 * Allocate first part of segment
			 */
			bp->m_addr += size;
			bp->m_size -= size;
		}
	else
		if (bp->m_addr + bp->m_size == addr + size) {
			/*
			 * Allocate last part of segment
			 */
			bp->m_size -= size;
		} else {
			/*
			 * Allocate from middle of segment, but only
			 * if table can be expanded.
			 */
			for (bp2=bp; bp2->m_size; bp2++)
				;
			if (bp2 + 1 >= mp->m_limit)
				return (0);
			while (bp2 > bp) {
				(bp2+1)->m_addr = bp2->m_addr;
				(bp2+1)->m_size = bp2->m_size;
				bp2--;
			}
			(bp+1)->m_addr = addr + size;
			(bp+1)->m_size =
			    bp->m_addr + bp->m_size - (addr + size);
			bp->m_size = addr - bp->m_addr;
		}
	return (addr);
}
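
/*
 * Example of the middle-of-segment case (numbers hypothetical):
 * with a free segment [100,150), rmget(mp, 10, 120) carves out
 * [120,130), leaving [100,120) and [130,150), consuming one
 * additional map slot, and returns 120.  Asking for the first or
 * last part of a segment simply trims it, and taking all of it
 * removes the entry and compresses the map.
 */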