xref: /openbsd-src/sys/kern/subr_extent.c (revision 8fe70e864fcc06f2db7e10a8b65d85e6bfdb5a98)
1 /*	$OpenBSD: subr_extent.c,v 1.36 2009/04/10 20:57:04 kettenis Exp $	*/
2 /*	$NetBSD: subr_extent.c,v 1.7 1996/11/21 18:46:34 cgd Exp $	*/
3 
4 /*-
5  * Copyright (c) 1996, 1998 The NetBSD Foundation, Inc.
6  * All rights reserved.
7  *
8  * This code is derived from software contributed to The NetBSD Foundation
9  * by Jason R. Thorpe and Matthias Drochner.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
21  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
24  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30  * POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 /*
34  * General purpose extent manager.
35  */
36 
37 #ifdef _KERNEL
38 #include <sys/param.h>
39 #include <sys/extent.h>
40 #include <sys/malloc.h>
41 #include <sys/time.h>
42 #include <sys/systm.h>
43 #include <sys/proc.h>
44 #include <sys/queue.h>
45 #include <sys/pool.h>
46 #include <ddb/db_output.h>
47 #else
48 /*
49  * user-land definitions, so it can fit into a testing harness.
50  */
51 #include <sys/param.h>
52 #include <sys/pool.h>
53 #include <sys/extent.h>
54 #include <sys/queue.h>
55 #include <errno.h>
56 #include <stdlib.h>
57 #include <stdio.h>
58 
59 #define	malloc(s, t, flags)		malloc(s)
60 #define	free(p, t)			free(p)
61 #define	tsleep(chan, pri, str, timo)	(EWOULDBLOCK)
62 #define	wakeup(chan)			((void)0)
63 #define	pool_get(pool, flags)		malloc((pool)->pr_size, 0, 0)
64 #define	pool_init(a, b, c, d, e, f, g)	(a)->pr_size = (b)
65 #define	pool_put(pool, rp)		free((rp), 0)
66 #define	panic				printf
67 #define	splvm()				(1)
68 #define	splx(s)				((void)(s))
69 #endif
70 
71 #if defined(DIAGNOSTIC) || defined(DDB)
72 void	extent_print1(struct extent *, int (*)(const char *, ...));
73 #endif
74 
75 static	void extent_insert_and_optimize(struct extent *, u_long, u_long,
76 	    struct extent_region *, struct extent_region *);
77 static	struct extent_region *extent_alloc_region_descriptor(struct extent *, int);
78 static	void extent_free_region_descriptor(struct extent *,
79 	    struct extent_region *);
80 
81 /*
82  * Shortcut to align to an arbitrary power-of-two boundary.
83  */
84 static __inline__ u_long
85 extent_align(u_long start, u_long align, u_long skew)
86 {
87 	return ((((start - skew) + (align - 1)) & (-align)) + skew);
88 }
89 
90 
91 #if defined(DIAGNOSTIC) || defined(DDB)
92 /*
93  * Register the extent on a doubly linked list.
94  * Should work, no?
95  */
96 static LIST_HEAD(listhead, extent) ext_list;
97 static	void extent_register(struct extent *);
98 
static void
extent_register(struct extent *ex)
{
#ifdef DIAGNOSTIC
	struct extent *ep;
#endif
	static int initialized;

	/* Lazily initialize the global list the first time through. */
	if (!initialized){
		LIST_INIT(&ext_list);
		initialized = 1;
	}

#ifdef DIAGNOSTIC
	/* Guard against registering the same extent twice. */
	LIST_FOREACH(ep, &ext_list, ex_link) {
		if (ep == ex)
			panic("extent_register: already registered");
	}
#endif

	/* Insert into list */
	LIST_INSERT_HEAD(&ext_list, ex, ex_link);
}
122 #endif	/* DIAGNOSTIC || DDB */
123 
124 struct pool ex_region_pl;
125 
126 static void
127 extent_pool_init(void)
128 {
129 	static int inited;
130 
131 	if (!inited) {
132 		pool_init(&ex_region_pl, sizeof(struct extent_region), 0, 0, 0,
133 		    "extentpl", NULL);
134 		inited = 1;
135 	}
136 }
137 
138 #ifdef DDB
139 /*
140  * Print out all extents registered.  This is used in
141  * DDB show extents
142  */
143 void
144 extent_print_all(void)
145 {
146 	struct extent *ep;
147 
148 	LIST_FOREACH(ep, &ext_list, ex_link) {
149 		extent_print1(ep, db_printf);
150 	}
151 }
152 #endif
153 
/*
 * Allocate and initialize an extent map.
 *
 * "name" is kept by reference, not copied, so it must stay valid for
 * the lifetime of the extent.  [start, end] is the inclusive range
 * managed.  If "storage" is non-NULL, a "fixed" extent is built: the
 * extent header and as many region descriptors as fit are carved out
 * of the caller-supplied buffer of "storagesize" bytes, and nothing
 * is allocated dynamically for the header.  Otherwise the header is
 * malloc'd from type "mtype".
 *
 * Returns the new extent, or NULL if the dynamic allocation failed.
 */
struct extent *
extent_create(char *name, u_long start, u_long end, int mtype, caddr_t storage,
    size_t storagesize, int flags)
{
	struct extent *ex;
	caddr_t cp = storage;
	size_t sz = storagesize;
	struct extent_region *rp;
	int fixed_extent = (storage != NULL);

#ifdef DIAGNOSTIC
	/* Check arguments. */
	if (name == NULL)
		panic("extent_create: name == NULL");
	if (end < start) {
		printf("extent_create: extent `%s', start 0x%lx, end 0x%lx\n",
		    name, start, end);
		panic("extent_create: end < start");
	}
	if (fixed_extent && (storagesize < sizeof(struct extent_fixed)))
		panic("extent_create: fixed extent, bad storagesize 0x%lx",
		    (u_long)storagesize);
	if (fixed_extent == 0 && (storagesize != 0 || storage != NULL))
		panic("extent_create: storage provided for non-fixed");
#endif

	/* Make sure the backing pool for region descriptors exists. */
	extent_pool_init();

	/* Allocate extent descriptor. */
	if (fixed_extent) {
		struct extent_fixed *fex;

		bzero(storage, storagesize);

		/*
		 * Align all descriptors on "long" boundaries.
		 */
		fex = (struct extent_fixed *)cp;
		ex = (struct extent *)fex;
		cp += ALIGN(sizeof(struct extent_fixed));
		sz -= ALIGN(sizeof(struct extent_fixed));
		fex->fex_storage = storage;
		fex->fex_storagesize = storagesize;

		/*
		 * In a fixed extent, we have to pre-allocate region
		 * descriptors and place them in the extent's freelist.
		 */
		LIST_INIT(&fex->fex_freelist);
		while (sz >= ALIGN(sizeof(struct extent_region))) {
			rp = (struct extent_region *)cp;
			cp += ALIGN(sizeof(struct extent_region));
			sz -= ALIGN(sizeof(struct extent_region));
			LIST_INSERT_HEAD(&fex->fex_freelist, rp, er_link);
		}
	} else {
		ex = (struct extent *)malloc(sizeof(struct extent),
		    mtype, (flags & EX_WAITOK) ? M_WAITOK : M_NOWAIT);
		if (ex == NULL)
			return (NULL);
	}

	/* Fill in the extent descriptor and return it to the caller. */
	LIST_INIT(&ex->ex_regions);
	ex->ex_name = name;
	ex->ex_start = start;
	ex->ex_end = end;
	ex->ex_mtype = mtype;
	ex->ex_flags = 0;
	if (fixed_extent)
		ex->ex_flags |= EXF_FIXED;
	if (flags & EX_NOCOALESCE)
		ex->ex_flags |= EXF_NOCOALESCE;

#if defined(DIAGNOSTIC) || defined(DDB)
	extent_register(ex);
#endif
	return (ex);
}
236 
237 /*
238  * Destroy an extent map.
239  */
240 void
241 extent_destroy(struct extent *ex)
242 {
243 	struct extent_region *rp, *orp;
244 
245 #ifdef DIAGNOSTIC
246 	/* Check arguments. */
247 	if (ex == NULL)
248 		panic("extent_destroy: NULL extent");
249 #endif
250 
251 	/* Free all region descriptors in extent. */
252 	for (rp = LIST_FIRST(&ex->ex_regions);
253 	    rp != LIST_END(&ex->ex_regions); ) {
254 		orp = rp;
255 		rp = LIST_NEXT(rp, er_link);
256 		LIST_REMOVE(orp, er_link);
257 		extent_free_region_descriptor(ex, orp);
258 	}
259 
260 #if defined(DIAGNOSTIC) || defined(DDB)
261 	/* Remove from the list of all extents. */
262 	LIST_REMOVE(ex, ex_link);
263 #endif
264 
265 	/* If we're not a fixed extent, free the extent descriptor itself. */
266 	if ((ex->ex_flags & EXF_FIXED) == 0)
267 		free(ex, ex->ex_mtype);
268 }
269 
/*
 * Insert a region descriptor into the sorted region list after the
 * entry "after" or at the head of the list (if "after" is NULL).
 * The region descriptor we insert is passed in "rp".  We must
 * allocate the region descriptor before calling this function!
 * If we don't need the region descriptor, it will be freed here.
 *
 * [start, start + size - 1] is the new region; the caller guarantees
 * it does not overlap any existing region and that "after" is the
 * correct sorted-order predecessor.
 */
static void
extent_insert_and_optimize(struct extent *ex, u_long start, u_long size,
    struct extent_region *after, struct extent_region *rp)
{
	struct extent_region *nextr;
	int appended = 0;

	if (after == NULL) {
		/*
		 * We're the first in the region list.  If there's
		 * a region after us, attempt to coalesce to save
		 * descriptor overhead.
		 */
		if (((ex->ex_flags & EXF_NOCOALESCE) == 0) &&
		    !LIST_EMPTY(&ex->ex_regions) &&
		    ((start + size) == LIST_FIRST(&ex->ex_regions)->er_start)) {
			/*
			 * We can coalesce.  Prepend us to the first region.
			 */
			LIST_FIRST(&ex->ex_regions)->er_start = start;
			extent_free_region_descriptor(ex, rp);
			return;
		}

		/*
		 * Can't coalesce.  Fill in the region descriptor
		 * in, and insert us at the head of the region list.
		 */
		rp->er_start = start;
		rp->er_end = start + (size - 1);
		LIST_INSERT_HEAD(&ex->ex_regions, rp, er_link);
		return;
	}

	/*
	 * If EXF_NOCOALESCE is set, coalescing is disallowed.
	 */
	if (ex->ex_flags & EXF_NOCOALESCE)
		goto cant_coalesce;

	/*
	 * Attempt to coalesce with the region before us.
	 */
	if ((after->er_end + 1) == start) {
		/*
		 * We can coalesce.  Append ourselves and make
		 * note of it.
		 */
		after->er_end = start + (size - 1);
		appended = 1;
	}

	/*
	 * Attempt to coalesce with the region after us.
	 */
	if (LIST_NEXT(after, er_link) != NULL &&
	    ((start + size) == LIST_NEXT(after, er_link)->er_start)) {
		/*
		 * We can coalesce.  Note that if we appended ourselves
		 * to the previous region, we exactly fit the gap, and
		 * can free the "next" region descriptor.
		 */
		if (appended) {
			/*
			 * Yup, we can free it up.
			 */
			after->er_end = LIST_NEXT(after, er_link)->er_end;
			nextr = LIST_NEXT(after, er_link);
			LIST_REMOVE(nextr, er_link);
			extent_free_region_descriptor(ex, nextr);
		} else {
			/*
			 * Nope, just prepend us to the next region.
			 */
			LIST_NEXT(after, er_link)->er_start = start;
		}

		extent_free_region_descriptor(ex, rp);
		return;
	}

	/*
	 * We weren't able to coalesce with the next region, but
	 * we don't need to allocate a region descriptor if we
	 * appended ourselves to the previous region.
	 */
	if (appended) {
		extent_free_region_descriptor(ex, rp);
		return;
	}

 cant_coalesce:

	/*
	 * Fill in the region descriptor and insert ourselves
	 * into the region list.
	 */
	rp->er_start = start;
	rp->er_end = start + (size - 1);
	LIST_INSERT_AFTER(after, rp, er_link);
}
378 
379 /*
380  * Allocate a specific region in an extent map.
381  */
382 int
383 extent_alloc_region(struct extent *ex, u_long start, u_long size, int flags)
384 {
385 	struct extent_region *rp, *last, *myrp;
386 	u_long end = start + (size - 1);
387 	int error;
388 
389 #ifdef DIAGNOSTIC
390 	/* Check arguments. */
391 	if (ex == NULL)
392 		panic("extent_alloc_region: NULL extent");
393 	if (size < 1) {
394 		printf("extent_alloc_region: extent `%s', size 0x%lx\n",
395 		    ex->ex_name, size);
396 		panic("extent_alloc_region: bad size");
397 	}
398 	if (end < start) {
399 		printf(
400 		 "extent_alloc_region: extent `%s', start 0x%lx, size 0x%lx\n",
401 		 ex->ex_name, start, size);
402 		panic("extent_alloc_region: overflow");
403 	}
404 	if ((flags & EX_CONFLICTOK) && (flags & EX_WAITSPACE))
405 		panic("extent_alloc_region: EX_CONFLICTOK and EX_WAITSPACE "
406 		    "are mutually exclusive");
407 #endif
408 
409 	/*
410 	 * Make sure the requested region lies within the
411 	 * extent.
412 	 */
413 	if ((start < ex->ex_start) || (end > ex->ex_end)) {
414 #ifdef DIAGNOSTIC
415 		printf("extent_alloc_region: extent `%s' (0x%lx - 0x%lx)\n",
416 		    ex->ex_name, ex->ex_start, ex->ex_end);
417 		printf("extent_alloc_region: start 0x%lx, end 0x%lx\n",
418 		    start, end);
419 		panic("extent_alloc_region: region lies outside extent");
420 #else
421 		return (EINVAL);
422 #endif
423 	}
424 
425 	/*
426 	 * Allocate the region descriptor.  It will be freed later
427 	 * if we can coalesce with another region.
428 	 */
429 	myrp = extent_alloc_region_descriptor(ex, flags);
430 	if (myrp == NULL) {
431 #ifdef DIAGNOSTIC
432 		printf(
433 		    "extent_alloc_region: can't allocate region descriptor\n");
434 #endif
435 		return (ENOMEM);
436 	}
437 
438  alloc_start:
439 	/*
440 	 * Attempt to place ourselves in the desired area of the
441 	 * extent.  We save ourselves some work by keeping the list sorted.
442 	 * In other words, if the start of the current region is greater
443 	 * than the end of our region, we don't have to search any further.
444 	 */
445 
446 	/*
447 	 * Keep a pointer to the last region we looked at so
448 	 * that we don't have to traverse the list again when
449 	 * we insert ourselves.  If "last" is NULL when we
450 	 * finally insert ourselves, we go at the head of the
451 	 * list.  See extent_insert_and_optimize() for details.
452 	 */
453 	last = NULL;
454 
455 	LIST_FOREACH(rp, &ex->ex_regions, er_link) {
456 		if (rp->er_start > end) {
457 			/*
458 			 * We lie before this region and don't
459 			 * conflict.
460 			 */
461 			break;
462 		}
463 
464 		/*
465 		 * The current region begins before we end.
466 		 * Check for a conflict.
467 		 */
468 		if (rp->er_end >= start) {
469 			/*
470 			 * We conflict.  If we can (and want to) wait,
471 			 * do so.
472 			 */
473 			if (flags & EX_WAITSPACE) {
474 				ex->ex_flags |= EXF_WANTED;
475 				error = tsleep(ex,
476 				    PRIBIO | ((flags & EX_CATCH) ? PCATCH : 0),
477 				    "extnt", 0);
478 				if (error)
479 					return (error);
480 				goto alloc_start;
481 			}
482 
483 			/*
484 			 * If we tolerate conflicts adjust things such
485 			 * that all space in the requested region is
486 			 * allocated.
487 			 */
488 			if (flags & EX_CONFLICTOK) {
489 				/*
490 				 * There are two possibilities:
491 				 *
492 				 * 1. The current region overlaps.
493 				 *    Adjust the requested region to
494 				 *    start at the end of the current
495 				 *    region, and try again.
496 				 *
497 				 * 2. The current region falls
498                                  *    completely within the requested
499                                  *    region.  Free the current region
500                                  *    and try again.
501 				 */
502 				if (rp->er_start <= start) {
503 					start = rp->er_end + 1;
504 					size = end - start + 1;
505 				} else {
506 					LIST_REMOVE(rp, er_link);
507 					extent_free_region_descriptor(ex, rp);
508 				}
509 				goto alloc_start;
510 			}
511 
512 			extent_free_region_descriptor(ex, myrp);
513 			return (EAGAIN);
514 		}
515 		/*
516 		 * We don't conflict, but this region lies before
517 		 * us.  Keep a pointer to this region, and keep
518 		 * trying.
519 		 */
520 		last = rp;
521 	}
522 
523 	/*
524 	 * We don't conflict with any regions.  "last" points
525 	 * to the region we fall after, or is NULL if we belong
526 	 * at the beginning of the region list.  Insert ourselves.
527 	 */
528 	extent_insert_and_optimize(ex, start, size, last, myrp);
529 	return (0);
530 }
531 
/*
 * Macro to check (x + y) <= z.  This check is designed to fail
 * if an overflow occurs: the first comparison detects unsigned
 * wraparound of (x + y), the second is the actual bounds check.
 */
#define LE_OV(x, y, z)	((((x) + (y)) >= (x)) && (((x) + (y)) <= (z)))
537 
538 /*
539  * Allocate a region in an extent map subregion.
540  *
541  * If EX_FAST is specified, we return the first fit in the map.
542  * Otherwise, we try to minimize fragmentation by finding the
543  * smallest gap that will hold the request.
544  *
545  * The allocated region is aligned to "alignment", which must be
546  * a power of 2.
547  */
548 int
549 extent_alloc_subregion(struct extent *ex, u_long substart, u_long subend,
550     u_long size, u_long alignment, u_long skew, u_long boundary, int flags,
551     u_long *result)
552 {
553 	struct extent_region *rp, *myrp, *last, *bestlast;
554 	u_long newstart, newend, exend, beststart, bestovh, ovh;
555 	u_long dontcross;
556 	int error;
557 
558 #ifdef DIAGNOSTIC
559 	/* Check arguments. */
560 	if (ex == NULL)
561 		panic("extent_alloc_subregion: NULL extent");
562 	if (result == NULL)
563 		panic("extent_alloc_subregion: NULL result pointer");
564 	if ((substart < ex->ex_start) || (substart > ex->ex_end) ||
565 	    (subend > ex->ex_end) || (subend < ex->ex_start)) {
566   printf("extent_alloc_subregion: extent `%s', ex_start 0x%lx, ex_end 0x%lx\n",
567 		    ex->ex_name, ex->ex_start, ex->ex_end);
568 		printf("extent_alloc_subregion: substart 0x%lx, subend 0x%lx\n",
569 		    substart, subend);
570 		panic("extent_alloc_subregion: bad subregion");
571 	}
572 	if ((size < 1) || ((size - 1) > (subend - substart))) {
573 		printf("extent_alloc_subregion: extent `%s', size 0x%lx\n",
574 		    ex->ex_name, size);
575 		panic("extent_alloc_subregion: bad size");
576 	}
577 	if (alignment == 0)
578 		panic("extent_alloc_subregion: bad alignment");
579 	if (boundary && (boundary < size)) {
580 		printf(
581 		    "extent_alloc_subregion: extent `%s', size 0x%lx, "
582 		    "boundary 0x%lx\n", ex->ex_name, size, boundary);
583 		panic("extent_alloc_subregion: bad boundary");
584 	}
585 #endif
586 
587 	/*
588 	 * Allocate the region descriptor.  It will be freed later
589 	 * if we can coalesce with another region.
590 	 */
591 	myrp = extent_alloc_region_descriptor(ex, flags);
592 	if (myrp == NULL) {
593 #ifdef DIAGNOSTIC
594 		printf(
595 		 "extent_alloc_subregion: can't allocate region descriptor\n");
596 #endif
597 		return (ENOMEM);
598 	}
599 
600  alloc_start:
601 	/*
602 	 * Keep a pointer to the last region we looked at so
603 	 * that we don't have to traverse the list again when
604 	 * we insert ourselves.  If "last" is NULL when we
605 	 * finally insert ourselves, we go at the head of the
606 	 * list.  See extent_insert_and_optimize() for deatails.
607 	 */
608 	last = NULL;
609 
610 	/*
611 	 * Keep track of size and location of the smallest
612 	 * chunk we fit in.
613 	 *
614 	 * Since the extent can be as large as the numeric range
615 	 * of the CPU (0 - 0xffffffff for 32-bit systems), the
616 	 * best overhead value can be the maximum unsigned integer.
617 	 * Thus, we initialize "bestovh" to 0, since we insert ourselves
618 	 * into the region list immediately on an exact match (which
619 	 * is the only case where "bestovh" would be set to 0).
620 	 */
621 	bestovh = 0;
622 	beststart = 0;
623 	bestlast = NULL;
624 
625 	/*
626 	 * Keep track of end of free region.  This is either the end of extent
627 	 * or the start of a region past the subend.
628 	 */
629 	exend = ex->ex_end;
630 
631 	/*
632 	 * For N allocated regions, we must make (N + 1)
633 	 * checks for unallocated space.  The first chunk we
634 	 * check is the area from the beginning of the subregion
635 	 * to the first allocated region after that point.
636 	 */
637 	newstart = extent_align(substart, alignment, skew);
638 	if (newstart < ex->ex_start) {
639 #ifdef DIAGNOSTIC
640 		printf(
641       "extent_alloc_subregion: extent `%s' (0x%lx - 0x%lx), alignment 0x%lx\n",
642 		 ex->ex_name, ex->ex_start, ex->ex_end, alignment);
643 		panic("extent_alloc_subregion: overflow after alignment");
644 #else
645 		extent_free_region_descriptor(ex, myrp);
646 		return (EINVAL);
647 #endif
648 	}
649 
650 	/*
651 	 * Find the first allocated region that begins on or after
652 	 * the subregion start, advancing the "last" pointer along
653 	 * the way.
654 	 */
655 	LIST_FOREACH(rp, &ex->ex_regions, er_link) {
656 		if (rp->er_start >= newstart)
657 			break;
658 		last = rp;
659 	}
660 
661 	/*
662 	 * Relocate the start of our candidate region to the end of
663 	 * the last allocated region (if there was one overlapping
664 	 * our subrange).
665 	 */
666 	if (last != NULL && last->er_end >= newstart)
667 		newstart = extent_align((last->er_end + 1), alignment, skew);
668 
669 	for (; rp != LIST_END(&ex->ex_regions); rp = LIST_NEXT(rp, er_link)) {
670 		/*
671 		 * If the region pasts the subend, bail out and see
672 		 * if we fit against the subend.
673 		 */
674 		if (rp->er_start > subend) {
675 			exend = rp->er_start;
676 			break;
677 		}
678 
679 		/*
680 		 * Check the chunk before "rp".  Note that our
681 		 * comparison is safe from overflow conditions.
682 		 */
683 		if (LE_OV(newstart, size, rp->er_start)) {
684 			/*
685 			 * Do a boundary check, if necessary.  Note
686 			 * that a region may *begin* on the boundary,
687 			 * but it must end before the boundary.
688 			 */
689 			if (boundary) {
690 				newend = newstart + (size - 1);
691 
692 				/*
693 				 * Calculate the next boundary after the start
694 				 * of this region.
695 				 */
696 				dontcross = extent_align(newstart+1, boundary,
697 				    (flags & EX_BOUNDZERO) ? 0 : ex->ex_start)
698 				    - 1;
699 
700 #if 0
701 				printf("newstart=%lx newend=%lx ex_start=%lx ex_end=%lx boundary=%lx dontcross=%lx\n",
702 				    newstart, newend, ex->ex_start, ex->ex_end,
703 				    boundary, dontcross);
704 #endif
705 
706 				/* Check for overflow */
707 				if (dontcross < ex->ex_start)
708 					dontcross = ex->ex_end;
709 				else if (newend > dontcross) {
710 					/*
711 					 * Candidate region crosses boundary.
712 					 * Throw away the leading part and see
713 					 * if we still fit.
714 					 */
715 					newstart = dontcross + 1;
716 					newend = newstart + (size - 1);
717 					dontcross += boundary;
718 					if (!LE_OV(newstart, size, rp->er_start))
719 						goto skip;
720 				}
721 
722 				/*
723 				 * If we run past the end of
724 				 * the extent or the boundary
725 				 * overflows, then the request
726 				 * can't fit.
727 				 */
728 				if (newstart + size - 1 > ex->ex_end ||
729 				    dontcross < newstart)
730 					goto fail;
731 			}
732 
733 			/*
734 			 * We would fit into this space.  Calculate
735 			 * the overhead (wasted space).  If we exactly
736 			 * fit, or we're taking the first fit, insert
737 			 * ourselves into the region list.
738 			 */
739 			ovh = rp->er_start - newstart - size;
740 			if ((flags & EX_FAST) || (ovh == 0))
741 				goto found;
742 
743 			/*
744 			 * Don't exactly fit, but check to see
745 			 * if we're better than any current choice.
746 			 */
747 			if ((bestovh == 0) || (ovh < bestovh)) {
748 				bestovh = ovh;
749 				beststart = newstart;
750 				bestlast = last;
751 			}
752 		}
753 
754 skip:
755 		/*
756 		 * Skip past the current region and check again.
757 		 */
758 		newstart = extent_align((rp->er_end + 1), alignment, skew);
759 		if (newstart < rp->er_end) {
760 			/*
761 			 * Overflow condition.  Don't error out, since
762 			 * we might have a chunk of space that we can
763 			 * use.
764 			 */
765 			goto fail;
766 		}
767 
768 		last = rp;
769 	}
770 
771 	/*
772 	 * The final check is from the current starting point to the
773 	 * end of the subregion.  If there were no allocated regions,
774 	 * "newstart" is set to the beginning of the subregion, or
775 	 * just past the end of the last allocated region, adjusted
776 	 * for alignment in either case.
777 	 */
778 	if (LE_OV(newstart, (size - 1), subend)) {
779 		/*
780 		 * Do a boundary check, if necessary.  Note
781 		 * that a region may *begin* on the boundary,
782 		 * but it must end before the boundary.
783 		 */
784 		if (boundary) {
785 			newend = newstart + (size - 1);
786 
787 			/*
788 			 * Calculate the next boundary after the start
789 			 * of this region.
790 			 */
791 			dontcross = extent_align(newstart+1, boundary,
792 			    (flags & EX_BOUNDZERO) ? 0 : ex->ex_start)
793 			    - 1;
794 
795 #if 0
796 			printf("newstart=%lx newend=%lx ex_start=%lx ex_end=%lx boundary=%lx dontcross=%lx\n",
797 			    newstart, newend, ex->ex_start, ex->ex_end,
798 			    boundary, dontcross);
799 #endif
800 
801 			/* Check for overflow */
802 			if (dontcross < ex->ex_start)
803 				dontcross = ex->ex_end;
804 			else if (newend > dontcross) {
805 				/*
806 				 * Candidate region crosses boundary.
807 				 * Throw away the leading part and see
808 				 * if we still fit.
809 				 */
810 				newstart = dontcross + 1;
811 				newend = newstart + (size - 1);
812 				dontcross += boundary;
813 				if (!LE_OV(newstart, (size - 1), subend))
814 					goto fail;
815 			}
816 
817 			/*
818 			 * If we run past the end of
819 			 * the extent or the boundary
820 			 * overflows, then the request
821 			 * can't fit.
822 			 */
823 			if (newstart + size - 1 > ex->ex_end ||
824 			    dontcross < newstart)
825 				goto fail;
826 		}
827 
828 		/*
829 		 * We would fit into this space.  Calculate
830 		 * the overhead (wasted space).  If we exactly
831 		 * fit, or we're taking the first fit, insert
832 		 * ourselves into the region list.
833 		 */
834 		ovh = exend - newstart - (size - 1);
835 		if ((flags & EX_FAST) || (ovh == 0))
836 			goto found;
837 
838 		/*
839 		 * Don't exactly fit, but check to see
840 		 * if we're better than any current choice.
841 		 */
842 		if ((bestovh == 0) || (ovh < bestovh)) {
843 			bestovh = ovh;
844 			beststart = newstart;
845 			bestlast = last;
846 		}
847 	}
848 
849  fail:
850 	/*
851 	 * One of the following two conditions have
852 	 * occurred:
853 	 *
854 	 *	There is no chunk large enough to hold the request.
855 	 *
856 	 *	If EX_FAST was not specified, there is not an
857 	 *	exact match for the request.
858 	 *
859 	 * Note that if we reach this point and EX_FAST is
860 	 * set, then we know there is no space in the extent for
861 	 * the request.
862 	 */
863 	if (((flags & EX_FAST) == 0) && (bestovh != 0)) {
864 		/*
865 		 * We have a match that's "good enough".
866 		 */
867 		newstart = beststart;
868 		last = bestlast;
869 		goto found;
870 	}
871 
872 	/*
873 	 * No space currently available.  Wait for it to free up,
874 	 * if possible.
875 	 */
876 	if (flags & EX_WAITSPACE) {
877 		ex->ex_flags |= EXF_WANTED;
878 		error = tsleep(ex,
879 		    PRIBIO | ((flags & EX_CATCH) ? PCATCH : 0), "extnt", 0);
880 		if (error)
881 			return (error);
882 		goto alloc_start;
883 	}
884 
885 	extent_free_region_descriptor(ex, myrp);
886 	return (EAGAIN);
887 
888  found:
889 	/*
890 	 * Insert ourselves into the region list.
891 	 */
892 	extent_insert_and_optimize(ex, newstart, size, last, myrp);
893 	*result = newstart;
894 	return (0);
895 }
896 
/*
 * Free a previously-allocated chunk [start, start + size - 1].
 * The chunk may exactly match an allocated region, or (when
 * coalescing is enabled) trim the front or back of one, or split
 * one in two.  Panics if the chunk is not covered by a region.
 */
int
extent_free(struct extent *ex, u_long start, u_long size, int flags)
{
	struct extent_region *rp, *nrp = NULL;
	u_long end = start + (size - 1);
	int exflags;

#ifdef DIAGNOSTIC
	/* Check arguments. */
	if (ex == NULL)
		panic("extent_free: NULL extent");
	if ((start < ex->ex_start) || (end > ex->ex_end)) {
		extent_print(ex);
		printf("extent_free: extent `%s', start 0x%lx, size 0x%lx\n",
		    ex->ex_name, start, size);
		panic("extent_free: extent `%s', region not within extent",
		    ex->ex_name);
	}
	/* Check for an overflow. */
	if (end < start) {
		extent_print(ex);
		printf("extent_free: extent `%s', start 0x%lx, size 0x%lx\n",
		    ex->ex_name, start, size);
		panic("extent_free: overflow");
	}
#endif

	/*
	 * If we're allowing coalescing, we must allocate a region
	 * descriptor now, since it might block.
	 *
	 * XXX Make a static, create-time flags word, so we don't
	 * XXX have to lock to read it!
	 */
	exflags = ex->ex_flags;

	if ((exflags & EXF_NOCOALESCE) == 0) {
		/* Allocate a region descriptor. */
		nrp = extent_alloc_region_descriptor(ex, flags);
		if (nrp == NULL)
			return (ENOMEM);
	}

	/*
	 * Find region and deallocate.  Several possibilities:
	 *
	 *	1. (start == er_start) && (end == er_end):
	 *	   Free descriptor.
	 *
	 *	2. (start == er_start) && (end < er_end):
	 *	   Adjust er_start.
	 *
	 *	3. (start > er_start) && (end == er_end):
	 *	   Adjust er_end.
	 *
	 *	4. (start > er_start) && (end < er_end):
	 *	   Fragment region.  Requires descriptor alloc.
	 *
	 * Cases 2, 3, and 4 require that the EXF_NOCOALESCE flag
	 * is not set.
	 */
	LIST_FOREACH(rp, &ex->ex_regions, er_link) {
		/*
		 * Save ourselves some comparisons; does the current
		 * region end before chunk to be freed begins?  If so,
		 * then we haven't found the appropriate region descriptor.
		 */
		if (rp->er_end < start)
			continue;

		/*
		 * Save ourselves some traversal; does the current
		 * region begin after the chunk to be freed ends?  If so,
		 * then we've already passed any possible region descriptors
		 * that might have contained the chunk to be freed.
		 */
		if (rp->er_start > end)
			break;

		/* Case 1. */
		if ((start == rp->er_start) && (end == rp->er_end)) {
			LIST_REMOVE(rp, er_link);
			extent_free_region_descriptor(ex, rp);
			goto done;
		}

		/*
		 * The following cases all require that EXF_NOCOALESCE
		 * is not set.
		 */
		if (ex->ex_flags & EXF_NOCOALESCE)
			continue;

		/* Case 2. */
		if ((start == rp->er_start) && (end < rp->er_end)) {
			rp->er_start = (end + 1);
			goto done;
		}

		/* Case 3. */
		if ((start > rp->er_start) && (end == rp->er_end)) {
			rp->er_end = (start - 1);
			goto done;
		}

		/* Case 4. */
		if ((start > rp->er_start) && (end < rp->er_end)) {
			/* Fill in new descriptor. */
			nrp->er_start = end + 1;
			nrp->er_end = rp->er_end;

			/* Adjust current descriptor. */
			rp->er_end = start - 1;

			/* Insert new descriptor after current. */
			LIST_INSERT_AFTER(rp, nrp, er_link);

			/* We used the new descriptor, so don't free it below */
			nrp = NULL;
			goto done;
		}
	}

	/* Region not found, or request otherwise invalid. */
#if defined(DIAGNOSTIC) || defined(DDB)
	extent_print(ex);
#endif
	printf("extent_free: start 0x%lx, end 0x%lx\n", start, end);
	panic("extent_free: region not found");

 done:
	/* Release the spare descriptor if no split consumed it. */
	if (nrp != NULL)
		extent_free_region_descriptor(ex, nrp);
	/* Wake anyone blocked in EX_WAITSPACE waiting for free space. */
	if (ex->ex_flags & EXF_WANTED) {
		ex->ex_flags &= ~EXF_WANTED;
		wakeup(ex);
	}
	return (0);
}
1036 
/*
 * Obtain a region descriptor for "ex".  For fixed extents the
 * descriptor comes from the extent's pre-carved freelist (optionally
 * sleeping for one with EX_WAITOK, or falling through to dynamic
 * allocation with EX_MALLOCOK).  Otherwise it comes from the
 * ex_region_pl pool.  Returns NULL on failure.
 */
static struct extent_region *
extent_alloc_region_descriptor(struct extent *ex, int flags)
{
	struct extent_region *rp;
	int s;

	if (ex->ex_flags & EXF_FIXED) {
		struct extent_fixed *fex = (struct extent_fixed *)ex;

		while (LIST_EMPTY(&fex->fex_freelist)) {
			/* Freelist exhausted: dynamic fallback if allowed. */
			if (flags & EX_MALLOCOK)
				goto alloc;

			if ((flags & EX_WAITOK) == 0)
				return (NULL);
			/* Sleep until a descriptor is returned to the list. */
			ex->ex_flags |= EXF_FLWANTED;
			if (tsleep(&fex->fex_freelist,
			    PRIBIO | ((flags & EX_CATCH) ? PCATCH : 0),
			    "extnt", 0))
				return (NULL);
		}
		rp = LIST_FIRST(&fex->fex_freelist);
		LIST_REMOVE(rp, er_link);

		/*
		 * Don't muck with flags after pulling it off the
		 * freelist; it may be a dynamically allocated
		 * region pointer that was kindly given to us,
		 * and we need to preserve that information.
		 */

		return (rp);
	}

 alloc:
	s = splvm();
	rp = pool_get(&ex_region_pl, (flags & EX_WAITOK) ? PR_WAITOK : 0);
	splx(s);
	if (rp != NULL)
		rp->er_flags = ER_ALLOC;

	return (rp);
}
1080 
/*
 * Return a region descriptor to wherever it came from: the extent's
 * fixed-storage freelist, or the ex_region_pl pool (ER_ALLOC marks
 * pool-allocated descriptors).  Wakes up any thread sleeping on a
 * fixed extent's empty freelist.
 */
static void
extent_free_region_descriptor(struct extent *ex, struct extent_region *rp)
{
	int s;

	if (ex->ex_flags & EXF_FIXED) {
		struct extent_fixed *fex = (struct extent_fixed *)ex;

		/*
		 * If someone's waiting for a region descriptor,
		 * be nice and give them this one, rather than
		 * just free'ing it back to the system.
		 */
		if (rp->er_flags & ER_ALLOC) {
			if (ex->ex_flags & EXF_FLWANTED) {
				/* Clear all but ER_ALLOC flag. */
				rp->er_flags = ER_ALLOC;
				LIST_INSERT_HEAD(&fex->fex_freelist, rp,
				    er_link);
				goto wake_em_up;
			} else {
				s = splvm();
				pool_put(&ex_region_pl, rp);
				splx(s);
			}
		} else {
			/* Clear all flags. */
			rp->er_flags = 0;
			LIST_INSERT_HEAD(&fex->fex_freelist, rp, er_link);
		}

		if (ex->ex_flags & EXF_FLWANTED) {
 wake_em_up:
			ex->ex_flags &= ~EXF_FLWANTED;
			wakeup(&fex->fex_freelist);
		}
		return;
	}

	/*
	 * We know it's dynamically allocated if we get here.
	 */
	s = splvm();
	pool_put(&ex_region_pl, rp);
	splx(s);
}
1127 
1128 
1129 #if defined(DIAGNOSTIC) || defined(DDB) || !defined(_KERNEL)
1130 
/*
 * Print an extent and its allocated regions to the console.
 */
void
extent_print(struct extent *ex)
{
	extent_print1(ex, printf);
}
1136 
1137 void
1138 extent_print1(struct extent *ex, int (*pr)(const char *, ...))
1139 {
1140 	struct extent_region *rp;
1141 
1142 	if (ex == NULL)
1143 		panic("extent_print: NULL extent");
1144 
1145 #ifdef _KERNEL
1146 	(*pr)("extent `%s' (0x%lx - 0x%lx), flags=%b\n", ex->ex_name,
1147 	    ex->ex_start, ex->ex_end, ex->ex_flags, EXF_BITS);
1148 #else
1149 	(*pr)("extent `%s' (0x%lx - 0x%lx), flags = 0x%x\n", ex->ex_name,
1150 	    ex->ex_start, ex->ex_end, ex->ex_flags);
1151 #endif
1152 
1153 	LIST_FOREACH(rp, &ex->ex_regions, er_link)
1154 		(*pr)("     0x%lx - 0x%lx\n", rp->er_start, rp->er_end);
1155 }
1156 #endif
1157