/*	$NetBSD: subr_extent.c,v 1.20 1998/09/12 17:20:02 christos Exp $	*/

/*-
 * Copyright (c) 1996, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe and Matthias Drochner.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * General purpose extent manager.
 */

#ifdef _KERNEL
#include <sys/param.h>
#include <sys/extent.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/time.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/lock.h>
#elif defined(_EXTENT_TESTING)
/*
 * User-land definitions, so the extent manager can be built into a
 * testing harness.
 */
#include <sys/param.h>
#include <sys/pool.h>
#include <sys/extent.h>
#include <errno.h>
#include <stdlib.h>
#include <stdio.h>

#define	malloc(s, t, flags)		malloc(s)
#define	free(p, t)			free(p)
#define	tsleep(chan, pri, str, timo)	(EWOULDBLOCK)
#define	wakeup(chan)			((void)0)
#define	pool_get(pool, flags)		malloc((pool)->pr_size, 0, 0)
#define	pool_put(pool, rp)		free((rp), 0)
/*
 * panic() is invoked with printf-style format arguments below, so it
 * must map directly to printf; a one-parameter macro would not expand
 * for those calls.
 */
#define	panic				printf
#endif

static	pool_handle_t expool_create __P((void));
static	void extent_insert_and_optimize __P((struct extent *, u_long, u_long,
	    int, struct extent_region *, struct extent_region *));
static	struct extent_region *extent_alloc_region_descriptor
	    __P((struct extent *, int));
static	void extent_free_region_descriptor __P((struct extent *,
	    struct extent_region *));

static pool_handle_t expool;

/*
 * Macro to align to an arbitrary power-of-two boundary.
 */
#define	EXTENT_ALIGN(_start, _align, _skew)		\
	(((((_start) - (_skew)) + ((_align) - 1)) & (-(_align))) + (_skew))
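/*
 * Illustrative note (not in the original): with a power-of-two _align,
 * ANDing with -(_align) clears the low bits, so the macro rounds
 * (_start - _skew) up to the next multiple of _align and then re-applies
 * the skew.  For example, EXTENT_ALIGN(0x1003, 0x100, 0) yields 0x1100,
 * while EXTENT_ALIGN(0x1003, 0x100, 3) yields 0x1003, since 0x1000 is
 * already a multiple of 0x100 and the skew of 3 is added back.
 */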
/*
 * Create the extent_region pool.
 * (This is deferred until one of our callers thinks we can malloc()).
 */
static pool_handle_t
expool_create()
{

#if defined(_KERNEL)
	expool = pool_create(sizeof(struct extent_region), 0, 0,
	    0, "extent", 0, 0, 0, 0);
#else
	expool = (pool_handle_t)malloc(sizeof(*expool), 0, 0);
	expool->pr_size = sizeof(struct extent_region);
#endif
	return (expool);
}

/*
 * Allocate and initialize an extent map.
 */
struct extent *
extent_create(name, start, end, mtype, storage, storagesize, flags)
	const char *name;
	u_long start, end;
	int mtype;
	caddr_t storage;
	size_t storagesize;
	int flags;
{
	struct extent *ex;
	caddr_t cp = storage;
	size_t sz = storagesize;
	struct extent_region *rp;
	int fixed_extent = (storage != NULL);

#ifdef DIAGNOSTIC
	/* Check arguments. */
	if (name == NULL)
		panic("extent_create: name == NULL");
	if (end < start) {
		printf("extent_create: extent `%s', start 0x%lx, end 0x%lx\n",
		    name, start, end);
		panic("extent_create: end < start");
	}
	if (fixed_extent && (storagesize < sizeof(struct extent_fixed)))
		panic("extent_create: fixed extent, bad storagesize 0x%lx",
		    (u_long)storagesize);
	if (fixed_extent == 0 && (storagesize != 0 || storage != NULL))
		panic("extent_create: storage provided for non-fixed");
#endif

	/* Allocate extent descriptor. */
	if (fixed_extent) {
		struct extent_fixed *fex;

		memset(storage, 0, storagesize);

		/*
		 * Align all descriptors on "long" boundaries.
		 */
		fex = (struct extent_fixed *)cp;
		ex = (struct extent *)fex;
		cp += ALIGN(sizeof(struct extent_fixed));
		sz -= ALIGN(sizeof(struct extent_fixed));
		fex->fex_storage = storage;
		fex->fex_storagesize = storagesize;

		/*
		 * In a fixed extent, we have to pre-allocate region
		 * descriptors and place them in the extent's freelist.
		 */
		LIST_INIT(&fex->fex_freelist);
		while (sz >= ALIGN(sizeof(struct extent_region))) {
			rp = (struct extent_region *)cp;
			cp += ALIGN(sizeof(struct extent_region));
			sz -= ALIGN(sizeof(struct extent_region));
			LIST_INSERT_HEAD(&fex->fex_freelist, rp, er_link);
		}
	} else {
		if ((expool == NULL) && !expool_create())
			return (NULL);

		ex = (struct extent *)malloc(sizeof(struct extent),
		    mtype, (flags & EX_WAITOK) ? M_WAITOK : M_NOWAIT);
		if (ex == NULL)
			return (NULL);
	}

	/* Fill in the extent descriptor and return it to the caller. */
	simple_lock_init(&ex->ex_slock);
	LIST_INIT(&ex->ex_regions);
	ex->ex_name = name;
	ex->ex_start = start;
	ex->ex_end = end;
	ex->ex_mtype = mtype;
	ex->ex_flags = 0;
	if (fixed_extent)
		ex->ex_flags |= EXF_FIXED;
	if (flags & EX_NOCOALESCE)
		ex->ex_flags |= EXF_NOCOALESCE;
	return (ex);
}
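/*
 * Illustrative sketch (not in the original): a fixed extent supplies
 * its own storage, so extent_create() can be used before malloc() is
 * available.  The "iomem" names are hypothetical;
 * EXTENT_FIXED_STORAGE_SIZE() is the sizing helper from <sys/extent.h>.
 *
 *	static long iomem_storage[EXTENT_FIXED_STORAGE_SIZE(8) /
 *	    sizeof(long)];
 *	struct extent *iomem_ex;
 *
 *	iomem_ex = extent_create("iomem", 0x0, 0xffffff, M_DEVBUF,
 *	    (caddr_t)iomem_storage, sizeof(iomem_storage),
 *	    EX_NOWAIT | EX_NOCOALESCE);
 */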
/*
 * Destroy an extent map.
 */
void
extent_destroy(ex)
	struct extent *ex;
{
	struct extent_region *rp, *orp;

#ifdef DIAGNOSTIC
	/* Check arguments. */
	if (ex == NULL)
		panic("extent_destroy: NULL extent");
#endif

	simple_lock(&ex->ex_slock);

	/* Free all region descriptors in extent. */
	for (rp = ex->ex_regions.lh_first; rp != NULL; ) {
		orp = rp;
		rp = rp->er_link.le_next;
		LIST_REMOVE(orp, er_link);
		extent_free_region_descriptor(ex, orp);
	}

	/* If we're not a fixed extent, free the extent descriptor itself. */
	if ((ex->ex_flags & EXF_FIXED) == 0)
		free(ex, ex->ex_mtype);
}

/*
 * Insert a region descriptor into the sorted region list after the
 * entry "after" or at the head of the list (if "after" is NULL).
 * The region descriptor we insert is passed in "rp".  We must
 * allocate the region descriptor before calling this function!
 * If we don't need the region descriptor, it will be freed here.
 */
static void
extent_insert_and_optimize(ex, start, size, flags, after, rp)
	struct extent *ex;
	u_long start, size;
	int flags;
	struct extent_region *after, *rp;
{
	struct extent_region *nextr;
	int appended = 0;

	if (after == NULL) {
		/*
		 * We're the first in the region list.  If there's
		 * a region after us, attempt to coalesce to save
		 * descriptor overhead.
		 */
		if (((ex->ex_flags & EXF_NOCOALESCE) == 0) &&
		    (ex->ex_regions.lh_first != NULL) &&
		    ((start + size) == ex->ex_regions.lh_first->er_start)) {
			/*
			 * We can coalesce.  Prepend us to the first region.
			 */
			ex->ex_regions.lh_first->er_start = start;
			extent_free_region_descriptor(ex, rp);
			return;
		}

		/*
		 * Can't coalesce.  Fill in the region descriptor
		 * and insert us at the head of the region list.
		 */
		rp->er_start = start;
		rp->er_end = start + (size - 1);
		LIST_INSERT_HEAD(&ex->ex_regions, rp, er_link);
		return;
	}

	/*
	 * If EXF_NOCOALESCE is set, coalescing is disallowed.
	 */
	if (ex->ex_flags & EXF_NOCOALESCE)
		goto cant_coalesce;

	/*
	 * Attempt to coalesce with the region before us.
	 */
	if ((after->er_end + 1) == start) {
		/*
		 * We can coalesce.  Append ourselves and make
		 * note of it.
		 */
		after->er_end = start + (size - 1);
		appended = 1;
	}

	/*
	 * Attempt to coalesce with the region after us.
	 */
	if ((after->er_link.le_next != NULL) &&
	    ((start + size) == after->er_link.le_next->er_start)) {
		/*
		 * We can coalesce.  Note that if we appended ourselves
		 * to the previous region, we exactly fit the gap, and
		 * can free the "next" region descriptor.
		 */
		if (appended) {
			/*
			 * Yup, we can free it up.
			 */
			after->er_end = after->er_link.le_next->er_end;
			nextr = after->er_link.le_next;
			LIST_REMOVE(nextr, er_link);
			extent_free_region_descriptor(ex, nextr);
		} else {
			/*
			 * Nope, just prepend us to the next region.
			 */
			after->er_link.le_next->er_start = start;
		}

		extent_free_region_descriptor(ex, rp);
		return;
	}

	/*
	 * We weren't able to coalesce with the next region, but
	 * we don't need to allocate a region descriptor if we
	 * appended ourselves to the previous region.
	 */
	if (appended) {
		extent_free_region_descriptor(ex, rp);
		return;
	}

 cant_coalesce:

	/*
	 * Fill in the region descriptor and insert ourselves
	 * into the region list.
	 */
	rp->er_start = start;
	rp->er_end = start + (size - 1);
	LIST_INSERT_AFTER(after, rp, er_link);
}
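/*
 * Illustrative note (not in the original): given existing regions
 * [0x1000-0x1fff] and [0x3000-0x3fff], inserting [0x2000-0x2fff]
 * exactly fills the gap, so both neighbors coalesce into a single
 * [0x1000-0x3fff] descriptor and two descriptors get freed: the
 * caller's "rp" and the descriptor of the old "next" region.
 */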
/*
 * Allocate a specific region in an extent map.
 */
int
extent_alloc_region(ex, start, size, flags)
	struct extent *ex;
	u_long start, size;
	int flags;
{
	struct extent_region *rp, *last, *myrp;
	u_long end = start + (size - 1);
	int error;

#ifdef DIAGNOSTIC
	/* Check arguments. */
	if (ex == NULL)
		panic("extent_alloc_region: NULL extent");
	if (size < 1) {
		printf("extent_alloc_region: extent `%s', size 0x%lx\n",
		    ex->ex_name, size);
		panic("extent_alloc_region: bad size");
	}
	if (end < start) {
		printf(
		    "extent_alloc_region: extent `%s', start 0x%lx, size 0x%lx\n",
		    ex->ex_name, start, size);
		panic("extent_alloc_region: overflow");
	}
#endif

	/*
	 * Make sure the requested region lies within the
	 * extent.
	 *
	 * We don't lock to check the range, because those values
	 * are never modified, and if another thread deletes the
	 * extent, we're screwed anyway.
	 */
	if ((start < ex->ex_start) || (end > ex->ex_end)) {
#ifdef DIAGNOSTIC
		printf("extent_alloc_region: extent `%s' (0x%lx - 0x%lx)\n",
		    ex->ex_name, ex->ex_start, ex->ex_end);
		printf("extent_alloc_region: start 0x%lx, end 0x%lx\n",
		    start, end);
		panic("extent_alloc_region: region lies outside extent");
#else
		return (EINVAL);
#endif
	}

	/*
	 * Allocate the region descriptor.  It will be freed later
	 * if we can coalesce with another region.  Don't lock before
	 * here!  This could block.
	 */
	myrp = extent_alloc_region_descriptor(ex, flags);
	if (myrp == NULL) {
#ifdef DIAGNOSTIC
		printf(
		    "extent_alloc_region: can't allocate region descriptor\n");
#endif
		return (ENOMEM);
	}

 alloc_start:
	simple_lock(&ex->ex_slock);

	/*
	 * Attempt to place ourselves in the desired area of the
	 * extent.  We save ourselves some work by keeping the list sorted.
	 * In other words, if the start of the current region is greater
	 * than the end of our region, we don't have to search any further.
	 */

	/*
	 * Keep a pointer to the last region we looked at so
	 * that we don't have to traverse the list again when
	 * we insert ourselves.  If "last" is NULL when we
	 * finally insert ourselves, we go at the head of the
	 * list.  See extent_insert_and_optimize() for details.
	 */
	last = NULL;

	for (rp = ex->ex_regions.lh_first; rp != NULL;
	    rp = rp->er_link.le_next) {
		if (rp->er_start > end) {
			/*
			 * We lie before this region and don't
			 * conflict.
			 */
			break;
		}

		/*
		 * The current region begins before we end.
		 * Check for a conflict.
		 */
		if (rp->er_end >= start) {
			/*
			 * We conflict.  If we can (and want to) wait,
			 * do so.
			 */
			if (flags & EX_WAITSPACE) {
				ex->ex_flags |= EXF_WANTED;
				simple_unlock(&ex->ex_slock);
				error = tsleep(ex,
				    PRIBIO | ((flags & EX_CATCH) ? PCATCH : 0),
				    "extnt", 0);
				if (error)
					return (error);
				goto alloc_start;
			}
			extent_free_region_descriptor(ex, myrp);
			simple_unlock(&ex->ex_slock);
			return (EAGAIN);
		}
		/*
		 * We don't conflict, but this region lies before
		 * us.  Keep a pointer to this region, and keep
		 * trying.
		 */
		last = rp;
	}

	/*
	 * We don't conflict with any regions.  "last" points
	 * to the region we fall after, or is NULL if we belong
	 * at the beginning of the region list.  Insert ourselves.
	 */
	extent_insert_and_optimize(ex, start, size, flags, last, myrp);
	simple_unlock(&ex->ex_slock);
	return (0);
}
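/*
 * Illustrative sketch (not in the original): reserving a known, fixed
 * range, e.g. a legacy device's registers, from the hypothetical
 * "iomem_ex" extent above:
 *
 *	if (extent_alloc_region(iomem_ex, 0x3f8, 8, EX_NOWAIT))
 *		printf("ports 0x3f8-0x3ff are already in use\n");
 */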
/*
 * Macro to check (x + y) <= z.  This check is designed to fail
 * if an overflow occurs.
 */
#define	LE_OV(x, y, z)	((((x) + (y)) >= (x)) && (((x) + (y)) <= (z)))
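/*
 * Illustrative note (not in the original): on a 32-bit u_long,
 * LE_OV(0xfffffff0, 0x20, 0xffffffff) is false because the sum wraps
 * to 0x10, which fails the (x + y) >= x test; a naive "x + y <= z"
 * comparison would wrongly succeed after the wrap.
 */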
/*
 * Allocate a region in an extent map subregion.
 *
 * If EX_FAST is specified, we return the first fit in the map.
 * Otherwise, we try to minimize fragmentation by finding the
 * smallest gap that will hold the request.
 *
 * The allocated region is aligned to "alignment", which must be
 * a power of 2.
 */
int
extent_alloc_subregion1(ex, substart, subend, size, alignment, skew, boundary,
    flags, result)
	struct extent *ex;
	u_long substart, subend, size, alignment, skew, boundary;
	int flags;
	u_long *result;
{
	struct extent_region *rp, *myrp, *last, *bestlast;
	u_long newstart, newend, beststart, bestovh, ovh;
	u_long dontcross, odontcross;
	int error;

#ifdef DIAGNOSTIC
	/*
	 * Check arguments.
	 *
	 * We don't lock to check these, because these values
	 * are never modified, and if another thread deletes the
	 * extent, we're screwed anyway.
	 */
	if (ex == NULL)
		panic("extent_alloc_subregion: NULL extent");
	if (result == NULL)
		panic("extent_alloc_subregion: NULL result pointer");
	if ((substart < ex->ex_start) || (substart > ex->ex_end) ||
	    (subend > ex->ex_end) || (subend < ex->ex_start)) {
		printf(
		    "extent_alloc_subregion: extent `%s', ex_start 0x%lx, ex_end 0x%lx\n",
		    ex->ex_name, ex->ex_start, ex->ex_end);
		printf(
		    "extent_alloc_subregion: substart 0x%lx, subend 0x%lx\n",
		    substart, subend);
		panic("extent_alloc_subregion: bad subregion");
	}
	if ((size < 1) || ((size - 1) > (subend - substart))) {
		printf("extent_alloc_subregion: extent `%s', size 0x%lx\n",
		    ex->ex_name, size);
		panic("extent_alloc_subregion: bad size");
	}
	if (alignment == 0)
		panic("extent_alloc_subregion: bad alignment");
	if (boundary && (boundary < size)) {
		printf(
		    "extent_alloc_subregion: extent `%s', size 0x%lx, "
		    "boundary 0x%lx\n", ex->ex_name, size, boundary);
		panic("extent_alloc_subregion: bad boundary");
	}
#endif

	/*
	 * Allocate the region descriptor.  It will be freed later
	 * if we can coalesce with another region.  Don't lock before
	 * here!  This could block.
	 */
	myrp = extent_alloc_region_descriptor(ex, flags);
	if (myrp == NULL) {
#ifdef DIAGNOSTIC
		printf(
		    "extent_alloc_subregion: can't allocate region descriptor\n");
#endif
		return (ENOMEM);
	}

 alloc_start:
	simple_lock(&ex->ex_slock);

	/*
	 * Keep a pointer to the last region we looked at so
	 * that we don't have to traverse the list again when
	 * we insert ourselves.  If "last" is NULL when we
	 * finally insert ourselves, we go at the head of the
	 * list.  See extent_insert_and_optimize() for details.
	 */
	last = NULL;

	/*
	 * Initialize the "don't cross" boundary, a.k.a. a line
	 * that a region should not cross.  If the boundary lies
	 * before the region starts, we add the "boundary" argument
	 * until we get a meaningful comparison.
	 *
	 * Start the boundary lines at 0 if the caller requests it.
	 */
	dontcross = 0;
	if (boundary) {
		dontcross =
		    ((flags & EX_BOUNDZERO) ? 0 : ex->ex_start) + boundary;
		while (dontcross < substart)
			dontcross += boundary;
	}
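	/*
	 * Illustrative note (not in the original): with ex_start 0x1000,
	 * boundary 0x1000, and substart 0x2500, "dontcross" starts at
	 * 0x2000 and is advanced to 0x3000, the first boundary line past
	 * the subregion start that a candidate region must not cross.
	 */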
	/*
	 * Keep track of size and location of the smallest
	 * chunk we fit in.
	 *
	 * Since the extent can be as large as the numeric range
	 * of the CPU (0 - 0xffffffff for 32-bit systems), the
	 * best overhead value can be the maximum unsigned integer.
	 * Thus, we initialize "bestovh" to 0, since we insert ourselves
	 * into the region list immediately on an exact match (which
	 * is the only case where "bestovh" would be set to 0).
	 */
	bestovh = 0;
	beststart = 0;
	bestlast = NULL;

	/*
	 * For N allocated regions, we must make (N + 1)
	 * checks for unallocated space.  The first chunk we
	 * check is the area from the beginning of the subregion
	 * to the first allocated region after that point.
	 */
	newstart = EXTENT_ALIGN(substart, alignment, skew);
	if (newstart < ex->ex_start) {
#ifdef DIAGNOSTIC
		printf(
		    "extent_alloc_subregion: extent `%s' (0x%lx - 0x%lx), alignment 0x%lx\n",
		    ex->ex_name, ex->ex_start, ex->ex_end, alignment);
		simple_unlock(&ex->ex_slock);
		panic("extent_alloc_subregion: overflow after alignment");
#else
		extent_free_region_descriptor(ex, myrp);
		simple_unlock(&ex->ex_slock);
		return (EINVAL);
#endif
	}

	/*
	 * Find the first allocated region that begins on or after
	 * the subregion start, advancing the "last" pointer along
	 * the way.
	 */
	for (rp = ex->ex_regions.lh_first; rp != NULL;
	    rp = rp->er_link.le_next) {
		if (rp->er_start >= newstart)
			break;
		last = rp;
	}

	/*
	 * If there are no allocated regions beyond where we want to be,
	 * relocate the start of our candidate region to the end of
	 * the last allocated region (if there was one).
	 */
	if (rp == NULL && last != NULL)
		newstart = EXTENT_ALIGN((last->er_end + 1), alignment, skew);

	for (; rp != NULL; rp = rp->er_link.le_next) {
		/*
		 * Check the chunk before "rp".  Note that our
		 * comparison is safe from overflow conditions.
		 */
		if (LE_OV(newstart, size, rp->er_start)) {
			/*
			 * Do a boundary check, if necessary.  Note
			 * that a region may *begin* on the boundary,
			 * but it must end before the boundary.
			 */
			if (boundary) {
				newend = newstart + (size - 1);

				/*
				 * Adjust boundary for a meaningful
				 * comparison.
				 */
				while (dontcross <= newstart) {
					odontcross = dontcross;
					dontcross += boundary;

					/*
					 * If we run past the end of
					 * the extent or the boundary
					 * overflows, then the request
					 * can't fit.
					 */
					if ((dontcross > ex->ex_end) ||
					    (dontcross < odontcross))
						goto fail;
				}

				/* Do the boundary check. */
				if (newend >= dontcross) {
					/*
					 * Candidate region crosses
					 * boundary.  Try again.
					 */
					continue;
				}
			}

			/*
			 * We would fit into this space.  Calculate
			 * the overhead (wasted space).  If we exactly
			 * fit, or we're taking the first fit, insert
			 * ourselves into the region list.
			 */
			ovh = rp->er_start - newstart - size;
			if ((flags & EX_FAST) || (ovh == 0))
				goto found;

			/*
			 * Don't exactly fit, but check to see
			 * if we're better than any current choice.
			 */
			if ((bestovh == 0) || (ovh < bestovh)) {
				bestovh = ovh;
				beststart = newstart;
				bestlast = last;
			}
		}

		/*
		 * Skip past the current region and check again.
		 */
		newstart = EXTENT_ALIGN((rp->er_end + 1), alignment, skew);
		if (newstart < rp->er_end) {
			/*
			 * Overflow condition.  Don't error out, since
			 * we might have a chunk of space that we can
			 * use.
			 */
			goto fail;
		}

		last = rp;
	}

	/*
	 * The final check is from the current starting point to the
	 * end of the subregion.  If there were no allocated regions,
	 * "newstart" is set to the beginning of the subregion, or
	 * just past the end of the last allocated region, adjusted
	 * for alignment in either case.
	 */
	if (LE_OV(newstart, (size - 1), subend)) {
		/*
		 * We would fit into this space.  Calculate
		 * the overhead (wasted space).  If we exactly
		 * fit, or we're taking the first fit, insert
		 * ourselves into the region list.
		 */
		ovh = ex->ex_end - newstart - (size - 1);
		if ((flags & EX_FAST) || (ovh == 0))
			goto found;

		/*
		 * Don't exactly fit, but check to see
		 * if we're better than any current choice.
		 */
		if ((bestovh == 0) || (ovh < bestovh)) {
			bestovh = ovh;
			beststart = newstart;
			bestlast = last;
		}
	}

 fail:
	/*
	 * One of the following two conditions has occurred:
	 *
	 *	There is no chunk large enough to hold the request.
	 *
	 *	If EX_FAST was not specified, there is not an
	 *	exact match for the request.
	 *
	 * Note that if we reach this point and EX_FAST is
	 * set, then we know there is no space in the extent for
	 * the request.
	 */
	if (((flags & EX_FAST) == 0) && (bestovh != 0)) {
		/*
		 * We have a match that's "good enough".
		 */
		newstart = beststart;
		last = bestlast;
		goto found;
	}

	/*
	 * No space currently available.  Wait for it to free up,
	 * if possible.
	 */
	if (flags & EX_WAITSPACE) {
		ex->ex_flags |= EXF_WANTED;
		simple_unlock(&ex->ex_slock);
		error = tsleep(ex,
		    PRIBIO | ((flags & EX_CATCH) ? PCATCH : 0), "extnt", 0);
		if (error)
			return (error);
		goto alloc_start;
	}

	extent_free_region_descriptor(ex, myrp);
	simple_unlock(&ex->ex_slock);
	return (EAGAIN);

 found:
	/*
	 * Insert ourselves into the region list.
	 */
	extent_insert_and_optimize(ex, newstart, size, flags, last, myrp);
	simple_unlock(&ex->ex_slock);
	*result = newstart;
	return (0);
}
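/*
 * Illustrative sketch (not in the original): asking for 0x100 bytes
 * anywhere in 0x4000-0x7fff, aligned to 0x10, with no skew and no
 * boundary restriction, again using the hypothetical "iomem_ex":
 *
 *	u_long addr;
 *
 *	if (extent_alloc_subregion1(iomem_ex, 0x4000, 0x7fff, 0x100,
 *	    0x10, 0, 0, EX_NOWAIT, &addr) == 0)
 *		printf("allocated at 0x%lx\n", addr);
 */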
int
extent_free(ex, start, size, flags)
	struct extent *ex;
	u_long start, size;
	int flags;
{
	struct extent_region *rp, *nrp = NULL;
	u_long end = start + (size - 1);
	int exflags;

#ifdef DIAGNOSTIC
	/*
	 * Check arguments.
	 *
	 * We don't lock to check these, because these values
	 * are never modified, and if another thread deletes the
	 * extent, we're screwed anyway.
	 */
	if (ex == NULL)
		panic("extent_free: NULL extent");
	if ((start < ex->ex_start) || (start > ex->ex_end)) {
		extent_print(ex);
		printf("extent_free: extent `%s', start 0x%lx, size 0x%lx\n",
		    ex->ex_name, start, size);
		panic("extent_free: extent `%s', region not within extent",
		    ex->ex_name);
	}
	/* Check for an overflow. */
	if (end < start) {
		extent_print(ex);
		printf("extent_free: extent `%s', start 0x%lx, size 0x%lx\n",
		    ex->ex_name, start, size);
		panic("extent_free: overflow");
	}
#endif

	/*
	 * If we're allowing coalescing, we must allocate a region
	 * descriptor now, since it might block.
	 *
	 * XXX Make a static, create-time flags word, so we don't
	 * XXX have to lock to read it!
	 */
	simple_lock(&ex->ex_slock);
	exflags = ex->ex_flags;
	simple_unlock(&ex->ex_slock);

	if ((exflags & EXF_NOCOALESCE) == 0) {
		/* Allocate a region descriptor. */
		nrp = extent_alloc_region_descriptor(ex, flags);
		if (nrp == NULL)
			return (ENOMEM);
	}

	simple_lock(&ex->ex_slock);

	/*
	 * Find region and deallocate.  Several possibilities:
	 *
	 *	1. (start == er_start) && (end == er_end):
	 *	   Free descriptor.
	 *
	 *	2. (start == er_start) && (end < er_end):
	 *	   Adjust er_start.
	 *
	 *	3. (start > er_start) && (end == er_end):
	 *	   Adjust er_end.
	 *
	 *	4. (start > er_start) && (end < er_end):
	 *	   Fragment region.  Requires descriptor alloc.
	 *
	 * Cases 2, 3, and 4 require that the EXF_NOCOALESCE flag
	 * is not set.
	 */
	for (rp = ex->ex_regions.lh_first; rp != NULL;
	    rp = rp->er_link.le_next) {
		/*
		 * Save ourselves some comparisons; does the current
		 * region end before the chunk to be freed begins?  If so,
		 * then we haven't found the appropriate region descriptor.
		 */
		if (rp->er_end < start)
			continue;

		/*
		 * Save ourselves some traversal; does the current
		 * region begin after the chunk to be freed ends?  If so,
		 * then we've already passed any possible region descriptors
		 * that might have contained the chunk to be freed.
		 */
		if (rp->er_start > end)
			break;

		/* Case 1. */
		if ((start == rp->er_start) && (end == rp->er_end)) {
			LIST_REMOVE(rp, er_link);
			extent_free_region_descriptor(ex, rp);
			goto done;
		}

		/*
		 * The following cases all require that EXF_NOCOALESCE
		 * is not set.
		 */
		if (ex->ex_flags & EXF_NOCOALESCE)
			continue;

		/* Case 2. */
		if ((start == rp->er_start) && (end < rp->er_end)) {
			rp->er_start = (end + 1);
			goto done;
		}

		/* Case 3. */
		if ((start > rp->er_start) && (end == rp->er_end)) {
			rp->er_end = (start - 1);
			goto done;
		}

		/* Case 4. */
		if ((start > rp->er_start) && (end < rp->er_end)) {
			/* Fill in new descriptor. */
			nrp->er_start = end + 1;
			nrp->er_end = rp->er_end;

			/* Adjust current descriptor. */
			rp->er_end = start - 1;

			/* Insert new descriptor after current. */
			LIST_INSERT_AFTER(rp, nrp, er_link);

			/* We used the new descriptor, so don't free it below. */
			nrp = NULL;
			goto done;
		}
	}

	/* Region not found, or request otherwise invalid. */
	simple_unlock(&ex->ex_slock);
	extent_print(ex);
	printf("extent_free: start 0x%lx, end 0x%lx\n", start, end);
	panic("extent_free: region not found");

 done:
	if (nrp != NULL)
		extent_free_region_descriptor(ex, nrp);
	if (ex->ex_flags & EXF_WANTED) {
		ex->ex_flags &= ~EXF_WANTED;
		wakeup(ex);
	}
	simple_unlock(&ex->ex_slock);
	return (0);
}
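/*
 * Illustrative note (not in the original): freeing [0x2400-0x24ff]
 * from an allocated region [0x2000-0x2fff] is case 4 above: the
 * region is split into [0x2000-0x23ff] and [0x2500-0x2fff], which is
 * why a spare descriptor ("nrp") must be allocated up front, before
 * the extent is locked.
 */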
/*
 * Allocate an extent region descriptor.  EXTENT MUST NOT BE LOCKED,
 * AS THIS FUNCTION MAY BLOCK!  We will handle any locking we may need.
 */
static struct extent_region *
extent_alloc_region_descriptor(ex, flags)
	struct extent *ex;
	int flags;
{
	struct extent_region *rp;
	int exflags;

	/*
	 * XXX Make a static, create-time flags word, so we don't
	 * XXX have to lock to read it!
	 */
	simple_lock(&ex->ex_slock);
	exflags = ex->ex_flags;
	simple_unlock(&ex->ex_slock);

	if (exflags & EXF_FIXED) {
		struct extent_fixed *fex = (struct extent_fixed *)ex;

		for (;;) {
			simple_lock(&ex->ex_slock);
			if ((rp = fex->fex_freelist.lh_first) != NULL) {
				/*
				 * Don't muck with flags after pulling it off
				 * the freelist; it may have been dynamically
				 * allocated, and kindly given to us.  We
				 * need to remember that information.
				 */
				LIST_REMOVE(rp, er_link);
				simple_unlock(&ex->ex_slock);
				return (rp);
			}
			if (flags & EX_MALLOCOK) {
				simple_unlock(&ex->ex_slock);
				goto alloc;
			}
			if ((flags & EX_WAITOK) == 0) {
				simple_unlock(&ex->ex_slock);
				return (NULL);
			}
			ex->ex_flags |= EXF_FLWANTED;
			simple_unlock(&ex->ex_slock);
			if (tsleep(&fex->fex_freelist,
			    PRIBIO | ((flags & EX_CATCH) ? PCATCH : 0),
			    "extnt", 0))
				return (NULL);
		}
	}

 alloc:
	if ((expool == NULL) && !expool_create())
		return (NULL);

	rp = pool_get(expool, (flags & EX_WAITOK) ? PR_WAITOK : 0);

	if (rp != NULL)
		rp->er_flags = ER_ALLOC;

	return (rp);
}

/*
 * Free an extent region descriptor.  EXTENT _MUST_ BE LOCKED!  This
 * is safe as we do not block here.
 */
static void
extent_free_region_descriptor(ex, rp)
	struct extent *ex;
	struct extent_region *rp;
{

	if (ex->ex_flags & EXF_FIXED) {
		struct extent_fixed *fex = (struct extent_fixed *)ex;

		/*
		 * If someone's waiting for a region descriptor,
		 * be nice and give them this one, rather than
		 * just free'ing it back to the system.
		 */
		if (rp->er_flags & ER_ALLOC) {
			if (ex->ex_flags & EXF_FLWANTED) {
				/* Clear all but ER_ALLOC flag. */
				rp->er_flags = ER_ALLOC;
				LIST_INSERT_HEAD(&fex->fex_freelist, rp,
				    er_link);
				goto wake_em_up;
			} else {
				pool_put(expool, rp);
			}
		} else {
			/* Clear all flags. */
			rp->er_flags = 0;
			LIST_INSERT_HEAD(&fex->fex_freelist, rp, er_link);
		}

		if (ex->ex_flags & EXF_FLWANTED) {
 wake_em_up:
			ex->ex_flags &= ~EXF_FLWANTED;
			wakeup(&fex->fex_freelist);
		}
		return;
	}

	/*
	 * We know it's dynamically allocated if we get here.
	 */
	pool_put(expool, rp);
}

void
extent_print(ex)
	struct extent *ex;
{
	struct extent_region *rp;

	if (ex == NULL)
		panic("extent_print: NULL extent");

	simple_lock(&ex->ex_slock);

	printf("extent `%s' (0x%lx - 0x%lx), flags = 0x%x\n", ex->ex_name,
	    ex->ex_start, ex->ex_end, ex->ex_flags);

	for (rp = ex->ex_regions.lh_first; rp != NULL;
	    rp = rp->er_link.le_next)
		printf("     0x%lx - 0x%lx\n", rp->er_start, rp->er_end);

	simple_unlock(&ex->ex_slock);
}
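#ifdef _EXTENT_TESTING
/*
 * Illustrative sketch (not in the original): a minimal harness entry
 * point showing the expected call sequence end to end.  Guarded by a
 * hypothetical EXTENT_EXAMPLE define so it is never built by default;
 * it assumes the user-land stubs above and in <sys/extent.h> cover the
 * locking primitives.
 */
#ifdef EXTENT_EXAMPLE
int
main()
{
	struct extent *ex;
	u_long addr;

	/* Non-fixed extent covering 0x1000-0xffff; mtype unused here. */
	ex = extent_create("demo", 0x1000, 0xffff, 0, NULL, 0, 0);
	if (ex == NULL)
		return (1);

	/* Reserve a specific range, then a best-fit aligned range. */
	if (extent_alloc_region(ex, 0x2000, 0x1000, 0) != 0)
		return (1);
	if (extent_alloc_subregion1(ex, 0x1000, 0xffff, 0x100,
	    0x10, 0, 0, 0, &addr) != 0)
		return (1);

	extent_print(ex);
	extent_free(ex, 0x2000, 0x1000, 0);
	extent_free(ex, addr, 0x100, 0);
	extent_destroy(ex);
	return (0);
}
#endif /* EXTENT_EXAMPLE */
#endif /* _EXTENT_TESTING */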