/*	$NetBSD: subr_extent.c,v 1.75 2012/01/29 11:14:49 para Exp $	*/

/*-
 * Copyright (c) 1996, 1998, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe and Matthias Drochner.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * General purpose extent manager.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_extent.c,v 1.75 2012/01/29 11:14:49 para Exp $");

#ifdef _KERNEL
#include "opt_lockdebug.h"

#include <sys/param.h>
#include <sys/extent.h>
#include <sys/kmem.h>
#include <sys/pool.h>
#include <sys/time.h>
#include <sys/systm.h>
#include <sys/proc.h>

#include <uvm/uvm_extern.h>

#elif defined(_EXTENT_TESTING)
/*
 * User-land definitions, so this file can be built in a testing harness.
 */
#include <sys/param.h>
#include <sys/pool.h>
#include <sys/extent.h>

#include <errno.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>

/*
 * Use multi-line #defines to avoid screwing up the kernel tags file;
 * without this, ctags produces a tags file where panic() shows up
 * in subr_extent.c rather than subr_prf.c.
 */
#define	\
kmem_alloc(s, flags)		malloc(s)
#define	\
kmem_free(p, s)			free(p)
#define	\
cv_wait_sig(cv, lock)		(EWOULDBLOCK)
#define	\
pool_get(pool, flags)		kmem_alloc((pool)->pr_size,0)
#define	\
pool_put(pool, rp)		kmem_free(rp,0)
#define	\
panic(a)			printf(a)
#define	mutex_init(a, b, c)
#define	mutex_destroy(a)
#define	mutex_enter(l)
#define	mutex_exit(l)
#define	cv_wait(cv, lock)
#define	cv_broadcast(cv)
#define	cv_init(a, b)
#define	cv_destroy(a)
#define	KMEM_IS_RUNNING			(1)
#define	IPL_VM				(0)
#define	MUTEX_DEFAULT			(0)
#endif

static struct pool expool;

/*
 * Macro to align to an arbitrary power-of-two boundary.
 */
#define EXTENT_ALIGN(_start, _align, _skew)		\
	(((((_start) - (_skew)) + ((_align) - 1)) & (-(_align))) + (_skew))
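
/*
 * Worked example (illustrative): EXTENT_ALIGN(0x1003, 0x100, 0) rounds
 * up to 0x1100.  With a skew, the result is the next address congruent
 * to the skew modulo the alignment: EXTENT_ALIGN(0x1003, 0x100, 0x20)
 * yields 0x1020.
 */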

/*
 * Create the extent_region pool.
 */
void
extent_init(void)
{

#if defined(_KERNEL)
	pool_init(&expool, sizeof(struct extent_region), 0, 0, 0,
	    "extent", NULL, IPL_VM);
#else
	expool.pr_size = sizeof(struct extent_region);
#endif
}

/*
 * Allocate an extent region descriptor.  EXTENT MUST NOT BE LOCKED.
 * We will handle any locking we may need.
 */
static struct extent_region *
extent_alloc_region_descriptor(struct extent *ex, int flags)
{
	struct extent_region *rp;
	int exflags, error;

	/*
	 * XXX Make a static, create-time flags word, so we don't
	 * XXX have to lock to read it!
	 */
	mutex_enter(&ex->ex_lock);
	exflags = ex->ex_flags;
	mutex_exit(&ex->ex_lock);

	if (exflags & EXF_FIXED) {
		struct extent_fixed *fex = (struct extent_fixed *)ex;

		mutex_enter(&ex->ex_lock);
		for (;;) {
			if ((rp = LIST_FIRST(&fex->fex_freelist)) != NULL) {
				/*
				 * Don't muck with flags after pulling it off
				 * the freelist; it may have been dynamically
				 * allocated, and kindly given to us.  We
				 * need to remember that information.
				 */
				LIST_REMOVE(rp, er_link);
				mutex_exit(&ex->ex_lock);
				return (rp);
			}
			if (flags & EX_MALLOCOK) {
				mutex_exit(&ex->ex_lock);
				goto alloc;
			}
			if ((flags & EX_WAITOK) == 0) {
				mutex_exit(&ex->ex_lock);
				return (NULL);
			}
			ex->ex_flags |= EXF_FLWANTED;
			if ((flags & EX_CATCH) != 0)
				error = cv_wait_sig(&ex->ex_cv, &ex->ex_lock);
			else {
				cv_wait(&ex->ex_cv, &ex->ex_lock);
				error = 0;
			}
			if (error != 0) {
				mutex_exit(&ex->ex_lock);
				return (NULL);
			}
		}
	}

 alloc:
	rp = pool_get(&expool, (flags & EX_WAITOK) ? PR_WAITOK : 0);

	if (rp != NULL)
		rp->er_flags = ER_ALLOC;

	return (rp);
}

/*
 * Free an extent region descriptor.  EXTENT _MUST_ BE LOCKED!
 */
static void
extent_free_region_descriptor(struct extent *ex, struct extent_region *rp)
{

	if (ex->ex_flags & EXF_FIXED) {
		struct extent_fixed *fex = (struct extent_fixed *)ex;

		/*
		 * If someone's waiting for a region descriptor,
		 * be nice and give them this one, rather than
		 * just free'ing it back to the system.
		 */
		if (rp->er_flags & ER_ALLOC) {
			if (ex->ex_flags & EXF_FLWANTED) {
				/* Clear all but ER_ALLOC flag. */
				rp->er_flags = ER_ALLOC;
				LIST_INSERT_HEAD(&fex->fex_freelist, rp,
				    er_link);
				goto wake_em_up;
			} else
				pool_put(&expool, rp);
		} else {
			/* Clear all flags. */
			rp->er_flags = 0;
			LIST_INSERT_HEAD(&fex->fex_freelist, rp, er_link);
		}

 wake_em_up:
		ex->ex_flags &= ~EXF_FLWANTED;
		cv_broadcast(&ex->ex_cv);
		return;
	}

	/*
	 * We know it's dynamically allocated if we get here.
	 */
	pool_put(&expool, rp);
}

/*
 * Allocate and initialize an extent map.
 */
struct extent *
extent_create(const char *name, u_long start, u_long end,
    void *storage, size_t storagesize, int flags)
{
	struct extent *ex;
	char *cp = storage;
	size_t sz = storagesize;
	struct extent_region *rp;
	int fixed_extent = (storage != NULL);

#ifndef _KERNEL
	extent_init();
#endif

#ifdef DIAGNOSTIC
	/* Check arguments. */
	if (name == NULL)
		panic("extent_create: name == NULL");
	if (end < start) {
		printf("extent_create: extent `%s', start 0x%lx, end 0x%lx\n",
		    name, start, end);
		panic("extent_create: end < start");
	}
	if (fixed_extent && (storagesize < sizeof(struct extent_fixed)))
		panic("extent_create: fixed extent, bad storagesize 0x%lx",
		    (u_long)storagesize);
	if (fixed_extent == 0 && (storagesize != 0 || storage != NULL))
		panic("extent_create: storage provided for non-fixed");
#endif

	/* Allocate extent descriptor. */
	if (fixed_extent) {
		struct extent_fixed *fex;

		memset(storage, 0, storagesize);

		/*
		 * Align all descriptors on "long" boundaries.
		 */
		fex = (struct extent_fixed *)cp;
		ex = (struct extent *)fex;
		cp += ALIGN(sizeof(struct extent_fixed));
		sz -= ALIGN(sizeof(struct extent_fixed));
		fex->fex_storage = storage;
		fex->fex_storagesize = storagesize;

		/*
		 * In a fixed extent, we have to pre-allocate region
		 * descriptors and place them in the extent's freelist.
		 */
		LIST_INIT(&fex->fex_freelist);
		while (sz >= ALIGN(sizeof(struct extent_region))) {
			rp = (struct extent_region *)cp;
			cp += ALIGN(sizeof(struct extent_region));
			sz -= ALIGN(sizeof(struct extent_region));
			LIST_INSERT_HEAD(&fex->fex_freelist, rp, er_link);
		}
	} else {
		ex = (struct extent *)kmem_alloc(sizeof(struct extent),
		    (flags & EX_WAITOK) ? KM_SLEEP : KM_NOSLEEP);
		if (ex == NULL)
			return (NULL);
	}

	/* Fill in the extent descriptor and return it to the caller. */
	mutex_init(&ex->ex_lock, MUTEX_DEFAULT, IPL_VM);
	cv_init(&ex->ex_cv, "extent");
	LIST_INIT(&ex->ex_regions);
	ex->ex_name = name;
	ex->ex_start = start;
	ex->ex_end = end;
	ex->ex_flags = 0;
	if (fixed_extent)
		ex->ex_flags |= EXF_FIXED;
	if (flags & EX_NOCOALESCE)
		ex->ex_flags |= EXF_NOCOALESCE;
	return (ex);
}
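
#if 0	/* illustrative sketch, not compiled */
/*
 * A fixed extent backed by static storage can be created before the
 * kernel allocators are running, since its region descriptors are
 * carved out of the caller's storage.  EXTENT_FIXED_STORAGE_SIZE()
 * (from <sys/extent.h>) sizes the storage for a given number of
 * descriptors; the names below are hypothetical.
 */
static char iomem_storage[EXTENT_FIXED_STORAGE_SIZE(16)];
static struct extent *iomem_ex;

void
iomem_example_init(void)
{

	iomem_ex = extent_create("iomem", 0x0, 0xffffffff, iomem_storage,
	    sizeof(iomem_storage), EX_NOWAIT | EX_NOCOALESCE);
}
#endif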

/*
 * Destroy an extent map.
 * Since we're freeing the data, there can't be any references
 * so we don't need any locking.
 */
void
extent_destroy(struct extent *ex)
{
	struct extent_region *rp, *orp;

#ifdef DIAGNOSTIC
	/* Check arguments. */
	if (ex == NULL)
		panic("extent_destroy: NULL extent");
#endif

	/* Free all region descriptors in extent. */
	for (rp = LIST_FIRST(&ex->ex_regions); rp != NULL; ) {
		orp = rp;
		rp = LIST_NEXT(rp, er_link);
		LIST_REMOVE(orp, er_link);
		extent_free_region_descriptor(ex, orp);
	}

	cv_destroy(&ex->ex_cv);
	mutex_destroy(&ex->ex_lock);

	/* If we're not a fixed extent, free the extent descriptor itself. */
	if ((ex->ex_flags & EXF_FIXED) == 0)
		kmem_free(ex, sizeof(*ex));
}

/*
 * Insert a region descriptor into the sorted region list after the
 * entry "after" or at the head of the list (if "after" is NULL).
 * The region descriptor we insert is passed in "rp".  We must
 * allocate the region descriptor before calling this function!
 * If we don't need the region descriptor, it will be freed here.
 */
static void
extent_insert_and_optimize(struct extent *ex, u_long start, u_long size,
    int flags, struct extent_region *after, struct extent_region *rp)
{
	struct extent_region *nextr;
	int appended = 0;

	if (after == NULL) {
		/*
		 * We're the first in the region list.  If there's
		 * a region after us, attempt to coalesce to save
		 * descriptor overhead.
		 */
		if (((ex->ex_flags & EXF_NOCOALESCE) == 0) &&
		    (LIST_FIRST(&ex->ex_regions) != NULL) &&
		    ((start + size) == LIST_FIRST(&ex->ex_regions)->er_start)) {
			/*
			 * We can coalesce.  Prepend us to the first region.
			 */
			LIST_FIRST(&ex->ex_regions)->er_start = start;
			extent_free_region_descriptor(ex, rp);
			return;
		}

		/*
		 * Can't coalesce.  Fill in the region descriptor
		 * and insert us at the head of the region list.
		 */
		rp->er_start = start;
		rp->er_end = start + (size - 1);
		LIST_INSERT_HEAD(&ex->ex_regions, rp, er_link);
		return;
	}

	/*
	 * If EXF_NOCOALESCE is set, coalescing is disallowed.
	 */
	if (ex->ex_flags & EXF_NOCOALESCE)
		goto cant_coalesce;

	/*
	 * Attempt to coalesce with the region before us.
	 */
	if ((after->er_end + 1) == start) {
		/*
		 * We can coalesce.  Append ourselves and make
		 * note of it.
		 */
		after->er_end = start + (size - 1);
		appended = 1;
	}

	/*
	 * Attempt to coalesce with the region after us.
	 */
	if ((LIST_NEXT(after, er_link) != NULL) &&
	    ((start + size) == LIST_NEXT(after, er_link)->er_start)) {
		/*
		 * We can coalesce.  Note that if we appended ourselves
		 * to the previous region, we exactly fit the gap, and
		 * can free the "next" region descriptor.
		 */
		if (appended) {
			/*
			 * Yup, we can free it up.
			 */
			after->er_end = LIST_NEXT(after, er_link)->er_end;
			nextr = LIST_NEXT(after, er_link);
			LIST_REMOVE(nextr, er_link);
			extent_free_region_descriptor(ex, nextr);
		} else {
			/*
			 * Nope, just prepend us to the next region.
			 */
			LIST_NEXT(after, er_link)->er_start = start;
		}

		extent_free_region_descriptor(ex, rp);
		return;
	}

	/*
	 * We weren't able to coalesce with the next region, but
	 * we don't need to allocate a region descriptor if we
	 * appended ourselves to the previous region.
	 */
	if (appended) {
		extent_free_region_descriptor(ex, rp);
		return;
	}

 cant_coalesce:

	/*
	 * Fill in the region descriptor and insert ourselves
	 * into the region list.
	 */
	rp->er_start = start;
	rp->er_end = start + (size - 1);
	LIST_INSERT_AFTER(after, rp, er_link);
}
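
/*
 * Worked example (illustrative): with allocated regions [0x00-0x0f] and
 * [0x20-0x2f], inserting start 0x10, size 0x10 after the first region
 * appends to it (er_end becomes 0x1f), then coalesces with the second,
 * leaving a single region [0x00-0x2f]; both the second region's
 * descriptor and the caller-supplied "rp" are freed.
 */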

/*
 * Allocate a specific region in an extent map.
 */
int
extent_alloc_region(struct extent *ex, u_long start, u_long size, int flags)
{
	struct extent_region *rp, *last, *myrp;
	u_long end = start + (size - 1);
	int error;

#ifdef DIAGNOSTIC
	/* Check arguments. */
	if (ex == NULL)
		panic("extent_alloc_region: NULL extent");
	if (size < 1) {
		printf("extent_alloc_region: extent `%s', size 0x%lx\n",
		    ex->ex_name, size);
		panic("extent_alloc_region: bad size");
	}
	if (end < start) {
		printf(
		 "extent_alloc_region: extent `%s', start 0x%lx, size 0x%lx\n",
		 ex->ex_name, start, size);
		panic("extent_alloc_region: overflow");
	}
#endif
#ifdef LOCKDEBUG
	if (flags & EX_WAITSPACE) {
		ASSERT_SLEEPABLE();
	}
#endif

	/*
	 * Make sure the requested region lies within the
	 * extent.
	 *
	 * We don't lock to check the range, because those values
	 * are never modified, and if another thread deletes the
	 * extent, we're screwed anyway.
	 */
	if ((start < ex->ex_start) || (end > ex->ex_end)) {
#ifdef DIAGNOSTIC
		printf("extent_alloc_region: extent `%s' (0x%lx - 0x%lx)\n",
		    ex->ex_name, ex->ex_start, ex->ex_end);
		printf("extent_alloc_region: start 0x%lx, end 0x%lx\n",
		    start, end);
		panic("extent_alloc_region: region lies outside extent");
#else
		return (EINVAL);
#endif
	}

	/*
	 * Allocate the region descriptor.  It will be freed later
	 * if we can coalesce with another region.  Don't lock before
	 * here!  This could block.
	 */
	myrp = extent_alloc_region_descriptor(ex, flags);
	if (myrp == NULL) {
#ifdef DIAGNOSTIC
		printf(
		    "extent_alloc_region: can't allocate region descriptor\n");
#endif
		return (ENOMEM);
	}

	mutex_enter(&ex->ex_lock);
 alloc_start:

	/*
	 * Attempt to place ourselves in the desired area of the
	 * extent.  We save ourselves some work by keeping the list sorted.
	 * In other words, if the start of the current region is greater
	 * than the end of our region, we don't have to search any further.
	 */

	/*
	 * Keep a pointer to the last region we looked at so
	 * that we don't have to traverse the list again when
	 * we insert ourselves.  If "last" is NULL when we
	 * finally insert ourselves, we go at the head of the
	 * list.  See extent_insert_and_optimize() for details.
	 */
	last = NULL;

	LIST_FOREACH(rp, &ex->ex_regions, er_link) {
		if (rp->er_start > end) {
			/*
			 * We lie before this region and don't
			 * conflict.
			 */
			break;
		}

		/*
		 * The current region begins before we end.
		 * Check for a conflict.
		 */
		if (rp->er_end >= start) {
			/*
			 * We conflict.  If we can (and want to) wait,
			 * do so.
			 */
			if (flags & EX_WAITSPACE) {
				if ((flags & EX_CATCH) != 0)
					error = cv_wait_sig(&ex->ex_cv,
					    &ex->ex_lock);
				else {
					cv_wait(&ex->ex_cv, &ex->ex_lock);
					error = 0;
				}
				if (error == 0)
					goto alloc_start;
				mutex_exit(&ex->ex_lock);
			} else {
				mutex_exit(&ex->ex_lock);
				error = EAGAIN;
			}
			extent_free_region_descriptor(ex, myrp);
			return error;
		}
		/*
		 * We don't conflict, but this region lies before
		 * us.  Keep a pointer to this region, and keep
		 * trying.
		 */
		last = rp;
	}

	/*
	 * We don't conflict with any regions.  "last" points
	 * to the region we fall after, or is NULL if we belong
	 * at the beginning of the region list.  Insert ourselves.
	 */
	extent_insert_and_optimize(ex, start, size, flags, last, myrp);
	mutex_exit(&ex->ex_lock);
	return (0);
}

/*
 * Macro to check (x + y) <= z.  This check is designed to fail
 * if an overflow occurs.
 */
#define LE_OV(x, y, z)	((((x) + (y)) >= (x)) && (((x) + (y)) <= (z)))
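
/*
 * Example (illustrative): with a 32-bit u_long, LE_OV(0xfffffff0, 0x20,
 * 0xffffffff) is false: the sum wraps to 0x10, which is less than "x",
 * so the first clause catches the overflow instead of passing a bogus
 * small sum to the "<= z" test.
 */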

/*
 * Allocate a region in an extent map subregion.
 *
 * If EX_FAST is specified, we return the first fit in the map.
 * Otherwise, we try to minimize fragmentation by finding the
 * smallest gap that will hold the request.
 *
 * The allocated region is aligned to "alignment", which must be
 * a power of 2.
 */
int
extent_alloc_subregion1(struct extent *ex, u_long substart, u_long subend,
    u_long size, u_long alignment, u_long skew, u_long boundary,
    int flags, u_long *result)
{
	struct extent_region *rp, *myrp, *last, *bestlast;
	u_long newstart, newend, exend, beststart, bestovh, ovh;
	u_long dontcross;
	int error;

#ifdef DIAGNOSTIC
	/*
	 * Check arguments.
	 *
	 * We don't lock to check these, because these values
	 * are never modified, and if another thread deletes the
	 * extent, we're screwed anyway.
	 */
	if (ex == NULL)
		panic("extent_alloc_subregion: NULL extent");
	if (result == NULL)
		panic("extent_alloc_subregion: NULL result pointer");
	if ((substart < ex->ex_start) || (substart > ex->ex_end) ||
	    (subend > ex->ex_end) || (subend < ex->ex_start)) {
  printf("extent_alloc_subregion: extent `%s', ex_start 0x%lx, ex_end 0x%lx\n",
		    ex->ex_name, ex->ex_start, ex->ex_end);
		printf("extent_alloc_subregion: substart 0x%lx, subend 0x%lx\n",
		    substart, subend);
		panic("extent_alloc_subregion: bad subregion");
	}
	if ((size < 1) || ((size - 1) > (subend - substart))) {
		printf("extent_alloc_subregion: extent `%s', size 0x%lx\n",
		    ex->ex_name, size);
		panic("extent_alloc_subregion: bad size");
	}
	if (alignment == 0)
		panic("extent_alloc_subregion: bad alignment");
	if (boundary && (boundary < size)) {
		printf(
		    "extent_alloc_subregion: extent `%s', size 0x%lx, "
		    "boundary 0x%lx\n", ex->ex_name, size, boundary);
		panic("extent_alloc_subregion: bad boundary");
	}
#endif
#ifdef LOCKDEBUG
	if (flags & EX_WAITSPACE) {
		ASSERT_SLEEPABLE();
	}
#endif

	/*
	 * Allocate the region descriptor.  It will be freed later
	 * if we can coalesce with another region.  Don't lock before
	 * here!  This could block.
	 */
	myrp = extent_alloc_region_descriptor(ex, flags);
	if (myrp == NULL) {
#ifdef DIAGNOSTIC
		printf(
		 "extent_alloc_subregion: can't allocate region descriptor\n");
#endif
		return (ENOMEM);
	}

 alloc_start:
	mutex_enter(&ex->ex_lock);

	/*
	 * Keep a pointer to the last region we looked at so
	 * that we don't have to traverse the list again when
	 * we insert ourselves.  If "last" is NULL when we
	 * finally insert ourselves, we go at the head of the
	 * list.  See extent_insert_and_optimize() for details.
	 */
	last = NULL;

	/*
	 * Keep track of size and location of the smallest
	 * chunk we fit in.
	 *
	 * Since the extent can be as large as the numeric range
	 * of the CPU (0 - 0xffffffff for 32-bit systems), the
	 * best overhead value can be the maximum unsigned integer.
	 * Thus, we initialize "bestovh" to 0, since we insert ourselves
	 * into the region list immediately on an exact match (which
	 * is the only case where "bestovh" would be set to 0).
	 */
	bestovh = 0;
	beststart = 0;
	bestlast = NULL;

	/*
	 * Keep track of end of free region.  This is either the end of extent
	 * or the start of a region past the subend.
	 */
	exend = ex->ex_end;

	/*
	 * For N allocated regions, we must make (N + 1)
	 * checks for unallocated space.  The first chunk we
	 * check is the area from the beginning of the subregion
	 * to the first allocated region after that point.
	 */
	newstart = EXTENT_ALIGN(substart, alignment, skew);
	if (newstart < ex->ex_start) {
#ifdef DIAGNOSTIC
		printf(
      "extent_alloc_subregion: extent `%s' (0x%lx - 0x%lx), alignment 0x%lx\n",
		 ex->ex_name, ex->ex_start, ex->ex_end, alignment);
		mutex_exit(&ex->ex_lock);
		panic("extent_alloc_subregion: overflow after alignment");
#else
		extent_free_region_descriptor(ex, myrp);
		mutex_exit(&ex->ex_lock);
		return (EINVAL);
#endif
	}

	/*
	 * Find the first allocated region that begins on or after
	 * the subregion start, advancing the "last" pointer along
	 * the way.
	 */
	LIST_FOREACH(rp, &ex->ex_regions, er_link) {
		if (rp->er_start >= newstart)
			break;
		last = rp;
	}

	/*
	 * Relocate the start of our candidate region to the end of
	 * the last allocated region (if there was one overlapping
	 * our subrange).
	 */
	if (last != NULL && last->er_end >= newstart)
		newstart = EXTENT_ALIGN((last->er_end + 1), alignment, skew);

	for (; rp != NULL; rp = LIST_NEXT(rp, er_link)) {
		/*
		 * If the region starts past the subend, bail out and see
		 * if we fit against the subend.
		 */
		if (rp->er_start > subend) {
			exend = rp->er_start;
			break;
		}

		/*
		 * Check the chunk before "rp".  Note that our
		 * comparison is safe from overflow conditions.
		 */
		if (LE_OV(newstart, size, rp->er_start)) {
			/*
			 * Do a boundary check, if necessary.  Note
			 * that a region may *begin* on the boundary,
			 * but it must end before the boundary.
			 */
			if (boundary) {
				newend = newstart + (size - 1);

				/*
				 * Calculate the next boundary after the start
				 * of this region.
				 */
				dontcross = EXTENT_ALIGN(newstart+1, boundary,
				    (flags & EX_BOUNDZERO) ? 0 : ex->ex_start)
				    - 1;

#if 0
				printf("newstart=%lx newend=%lx ex_start=%lx ex_end=%lx boundary=%lx dontcross=%lx\n",
				    newstart, newend, ex->ex_start, ex->ex_end,
				    boundary, dontcross);
#endif

				/* Check for overflow */
				if (dontcross < ex->ex_start)
					dontcross = ex->ex_end;
				else if (newend > dontcross) {
					/*
					 * Candidate region crosses boundary.
					 * Throw away the leading part and see
					 * if we still fit.
					 */
					newstart = dontcross + 1;
					newend = newstart + (size - 1);
					dontcross += boundary;
					if (!LE_OV(newstart, size, rp->er_start))
						goto skip;
				}

				/*
				 * If we run past the end of
				 * the extent or the boundary
				 * overflows, then the request
				 * can't fit.
				 */
				if (newstart + size - 1 > ex->ex_end ||
				    dontcross < newstart)
					goto fail;
			}

			/*
			 * We would fit into this space.  Calculate
			 * the overhead (wasted space).  If we exactly
			 * fit, or we're taking the first fit, insert
			 * ourselves into the region list.
			 */
			ovh = rp->er_start - newstart - size;
			if ((flags & EX_FAST) || (ovh == 0))
				goto found;

			/*
			 * Don't exactly fit, but check to see
			 * if we're better than any current choice.
			 */
			if ((bestovh == 0) || (ovh < bestovh)) {
				bestovh = ovh;
				beststart = newstart;
				bestlast = last;
			}
		}

skip:
		/*
		 * Skip past the current region and check again.
		 */
		newstart = EXTENT_ALIGN((rp->er_end + 1), alignment, skew);
		if (newstart < rp->er_end) {
			/*
			 * Overflow condition.  Don't error out, since
			 * we might have a chunk of space that we can
			 * use.
			 */
			goto fail;
		}

		last = rp;
	}

	/*
	 * The final check is from the current starting point to the
	 * end of the subregion.  If there were no allocated regions,
	 * "newstart" is set to the beginning of the subregion, or
	 * just past the end of the last allocated region, adjusted
	 * for alignment in either case.
	 */
	if (LE_OV(newstart, (size - 1), subend)) {
		/*
		 * Do a boundary check, if necessary.  Note
		 * that a region may *begin* on the boundary,
		 * but it must end before the boundary.
		 */
		if (boundary) {
			newend = newstart + (size - 1);

			/*
			 * Calculate the next boundary after the start
			 * of this region.
			 */
			dontcross = EXTENT_ALIGN(newstart+1, boundary,
			    (flags & EX_BOUNDZERO) ? 0 : ex->ex_start)
			    - 1;

#if 0
			printf("newstart=%lx newend=%lx ex_start=%lx ex_end=%lx boundary=%lx dontcross=%lx\n",
			    newstart, newend, ex->ex_start, ex->ex_end,
			    boundary, dontcross);
#endif

			/* Check for overflow */
			if (dontcross < ex->ex_start)
				dontcross = ex->ex_end;
			else if (newend > dontcross) {
				/*
				 * Candidate region crosses boundary.
				 * Throw away the leading part and see
				 * if we still fit.
				 */
				newstart = dontcross + 1;
				newend = newstart + (size - 1);
				dontcross += boundary;
				if (!LE_OV(newstart, (size - 1), subend))
					goto fail;
			}

			/*
			 * If we run past the end of
			 * the extent or the boundary
			 * overflows, then the request
			 * can't fit.
			 */
			if (newstart + size - 1 > ex->ex_end ||
			    dontcross < newstart)
				goto fail;
		}

		/*
		 * We would fit into this space.  Calculate
		 * the overhead (wasted space).  If we exactly
		 * fit, or we're taking the first fit, insert
		 * ourselves into the region list.
		 */
		ovh = exend - newstart - (size - 1);
		if ((flags & EX_FAST) || (ovh == 0))
			goto found;

		/*
		 * Don't exactly fit, but check to see
		 * if we're better than any current choice.
		 */
		if ((bestovh == 0) || (ovh < bestovh)) {
			bestovh = ovh;
			beststart = newstart;
			bestlast = last;
		}
	}

 fail:
	/*
	 * One of the following two conditions has
	 * occurred:
	 *
	 *	There is no chunk large enough to hold the request.
	 *
	 *	If EX_FAST was not specified, there is not an
	 *	exact match for the request.
	 *
	 * Note that if we reach this point and EX_FAST is
	 * set, then we know there is no space in the extent for
	 * the request.
	 */
	if (((flags & EX_FAST) == 0) && (bestovh != 0)) {
		/*
		 * We have a match that's "good enough".
		 */
		newstart = beststart;
		last = bestlast;
		goto found;
	}

	/*
	 * No space currently available.  Wait for it to free up,
	 * if possible.
	 */
	if (flags & EX_WAITSPACE) {
		if ((flags & EX_CATCH) != 0) {
			error = cv_wait_sig(&ex->ex_cv, &ex->ex_lock);
		} else {
			cv_wait(&ex->ex_cv, &ex->ex_lock);
			error = 0;
		}
		if (error == 0)
			goto alloc_start;
		mutex_exit(&ex->ex_lock);
	} else {
		mutex_exit(&ex->ex_lock);
		error = EAGAIN;
	}

	extent_free_region_descriptor(ex, myrp);
	return error;

 found:
	/*
	 * Insert ourselves into the region list.
	 */
	extent_insert_and_optimize(ex, newstart, size, flags, last, myrp);
	mutex_exit(&ex->ex_lock);
	*result = newstart;
	return (0);
}
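
/*
 * Illustrative example: with free gaps of 0x40 and 0x10 bytes and a
 * request of size 0x10, EX_FAST takes the first gap it finds (the 0x40
 * one), while the default best-fit policy keeps scanning and takes the
 * 0x10 gap, which it fills exactly (ovh == 0).
 */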

int
extent_alloc_subregion(struct extent *ex, u_long start, u_long end, u_long size,
    u_long alignment, u_long boundary, int flags, u_long *result)
{

	return (extent_alloc_subregion1(ex, start, end, size, alignment,
					0, boundary, flags, result));
}

int
extent_alloc(struct extent *ex, u_long size, u_long alignment, u_long boundary,
    int flags, u_long *result)
{

	return (extent_alloc_subregion1(ex, ex->ex_start, ex->ex_end,
					size, alignment, 0, boundary,
					flags, result));
}

int
extent_alloc1(struct extent *ex, u_long size, u_long alignment, u_long skew,
    u_long boundary, int flags, u_long *result)
{

	return (extent_alloc_subregion1(ex, ex->ex_start, ex->ex_end,
					size, alignment, skew, boundary,
					flags, result));
}

int
extent_free(struct extent *ex, u_long start, u_long size, int flags)
{
	struct extent_region *rp, *nrp = NULL;
	u_long end = start + (size - 1);
	int coalesce;

#ifdef DIAGNOSTIC
	/*
	 * Check arguments.
	 *
	 * We don't lock to check these, because these values
	 * are never modified, and if another thread deletes the
	 * extent, we're screwed anyway.
	 */
	if (ex == NULL)
		panic("extent_free: NULL extent");
	if ((start < ex->ex_start) || (end > ex->ex_end)) {
		extent_print(ex);
		printf("extent_free: extent `%s', start 0x%lx, size 0x%lx\n",
		    ex->ex_name, start, size);
		panic("extent_free: extent `%s', region not within extent",
		    ex->ex_name);
	}
	/* Check for an overflow. */
	if (end < start) {
		extent_print(ex);
		printf("extent_free: extent `%s', start 0x%lx, size 0x%lx\n",
		    ex->ex_name, start, size);
		panic("extent_free: overflow");
	}
#endif

	/*
	 * If we're allowing coalescing, we must allocate a region
	 * descriptor now, since it might block.
	 *
	 * XXX Make a static, create-time flags word, so we don't
	 * XXX have to lock to read it!
	 */
	mutex_enter(&ex->ex_lock);
	coalesce = (ex->ex_flags & EXF_NOCOALESCE) == 0;
	mutex_exit(&ex->ex_lock);

	if (coalesce) {
		/* Allocate a region descriptor. */
		nrp = extent_alloc_region_descriptor(ex, flags);
		if (nrp == NULL)
			return (ENOMEM);
	}

	mutex_enter(&ex->ex_lock);

	/*
	 * Find region and deallocate.  Several possibilities:
	 *
	 *	1. (start == er_start) && (end == er_end):
	 *	   Free descriptor.
	 *
	 *	2. (start == er_start) && (end < er_end):
	 *	   Adjust er_start.
	 *
	 *	3. (start > er_start) && (end == er_end):
	 *	   Adjust er_end.
	 *
	 *	4. (start > er_start) && (end < er_end):
	 *	   Fragment region.  Requires descriptor alloc.
	 *
	 * Cases 2, 3, and 4 require that the EXF_NOCOALESCE flag
	 * is not set.
	 */
	LIST_FOREACH(rp, &ex->ex_regions, er_link) {
		/*
		 * Save ourselves some comparisons; does the current
		 * region end before chunk to be freed begins?  If so,
		 * then we haven't found the appropriate region descriptor.
		 */
		if (rp->er_end < start)
			continue;

		/*
		 * Save ourselves some traversal; does the current
		 * region begin after the chunk to be freed ends?  If so,
		 * then we've already passed any possible region descriptors
		 * that might have contained the chunk to be freed.
		 */
		if (rp->er_start > end)
			break;

		/* Case 1. */
		if ((start == rp->er_start) && (end == rp->er_end)) {
			LIST_REMOVE(rp, er_link);
			extent_free_region_descriptor(ex, rp);
			goto done;
		}

		/*
		 * The following cases all require that EXF_NOCOALESCE
		 * is not set.
		 */
		if (!coalesce)
			continue;

		/* Case 2. */
		if ((start == rp->er_start) && (end < rp->er_end)) {
			rp->er_start = (end + 1);
			goto done;
		}

		/* Case 3. */
		if ((start > rp->er_start) && (end == rp->er_end)) {
			rp->er_end = (start - 1);
			goto done;
		}

		/* Case 4. */
		if ((start > rp->er_start) && (end < rp->er_end)) {
			/* Fill in new descriptor. */
			nrp->er_start = end + 1;
			nrp->er_end = rp->er_end;

			/* Adjust current descriptor. */
			rp->er_end = start - 1;

			/* Insert new descriptor after current. */
			LIST_INSERT_AFTER(rp, nrp, er_link);

			/* We used the new descriptor, so don't free it below */
			nrp = NULL;
			goto done;
		}
	}

	/* Region not found, or request otherwise invalid. */
	mutex_exit(&ex->ex_lock);
	extent_print(ex);
	printf("extent_free: start 0x%lx, end 0x%lx\n", start, end);
	panic("extent_free: region not found");

 done:
	if (nrp != NULL)
		extent_free_region_descriptor(ex, nrp);
	cv_broadcast(&ex->ex_cv);
	mutex_exit(&ex->ex_lock);
	return (0);
}
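
#if 0	/* illustrative usage sketch, not compiled */
/*
 * A dynamically allocated extent managing a hypothetical 16-bit I/O
 * port space.  The names here are made up; the calls are the public
 * API defined in this file and <sys/extent.h>.
 */
void
ioport_example(void)
{
	struct extent *ioport_ex;
	u_long addr;

	ioport_ex = extent_create("ioport", 0x0, 0xffff, NULL, 0,
	    EX_WAITOK);

	/* Reserve a specific, known range (e.g. a probed device). */
	(void) extent_alloc_region(ioport_ex, 0x3f8, 8, EX_NOWAIT);

	/* Ask for any 32-byte-aligned run of 32 ports. */
	if (extent_alloc(ioport_ex, 32, 32, 0, EX_NOWAIT, &addr) == 0)
		extent_free(ioport_ex, addr, 32, EX_NOWAIT);

	extent_destroy(ioport_ex);
}
#endif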

void
extent_print(struct extent *ex)
{
	struct extent_region *rp;

	if (ex == NULL)
		panic("extent_print: NULL extent");

	mutex_enter(&ex->ex_lock);

	printf("extent `%s' (0x%lx - 0x%lx), flags = 0x%x\n", ex->ex_name,
	    ex->ex_start, ex->ex_end, ex->ex_flags);

	LIST_FOREACH(rp, &ex->ex_regions, er_link)
		printf("     0x%lx - 0x%lx\n", rp->er_start, rp->er_end);

	mutex_exit(&ex->ex_lock);
}
1169