/*	$OpenBSD: subr_extent.c,v 1.40 2009/09/18 22:16:28 kettenis Exp $	*/
/*	$NetBSD: subr_extent.c,v 1.7 1996/11/21 18:46:34 cgd Exp $	*/

/*-
 * Copyright (c) 1996, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe and Matthias Drochner.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * General purpose extent manager.
 */

#ifdef _KERNEL
#include <sys/param.h>
#include <sys/extent.h>
#include <sys/malloc.h>
#include <sys/time.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/pool.h>
#include <ddb/db_output.h>
#else
/*
 * user-land definitions, so it can fit into a testing harness.
 */
#include <sys/param.h>
#include <sys/pool.h>
#include <sys/extent.h>
#include <sys/queue.h>
#include <errno.h>
#include <stdlib.h>
#include <stdio.h>

#define	malloc(s, t, flags)		malloc(s)
#define	free(p, t)			free(p)
#define	tsleep(chan, pri, str, timo)	(EWOULDBLOCK)
#define	wakeup(chan)			((void)0)
#define	pool_get(pool, flags)		malloc((pool)->pr_size, 0, 0)
#define	pool_init(a, b, c, d, e, f, g)	(a)->pr_size = (b)
#define	pool_setipl(pool, ipl)		/* nothing */
#define	pool_put(pool, rp)		free((rp), 0)
#define	panic				printf
#endif

#if defined(DIAGNOSTIC) || defined(DDB)
void	extent_print1(struct extent *, int (*)(const char *, ...));
#endif

static	void extent_insert_and_optimize(struct extent *, u_long, u_long,
	    struct extent_region *, struct extent_region *);
static	struct extent_region *extent_alloc_region_descriptor(struct extent *, int);
static	void extent_free_region_descriptor(struct extent *,
	    struct extent_region *);

/*
 * Shortcut to align to an arbitrary power-of-two boundary.
 */
static __inline__ u_long
extent_align(u_long start, u_long align, u_long skew)
{
	return ((((start - skew) + (align - 1)) & (-align)) + skew);
}

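/*
 * For example, extent_align(0x1003, 0x100, 0) rounds up to 0x1100,
 * while a skew shifts the boundaries themselves:
 * extent_align(0x1003, 0x100, 0x10) yields 0x1010.
 */
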
#if defined(DIAGNOSTIC) || defined(DDB)
/*
 * Register the extent on a doubly linked list.
 * Should work, no?
 */
static LIST_HEAD(listhead, extent) ext_list;
static void extent_register(struct extent *);

static void
extent_register(struct extent *ex)
{
#ifdef DIAGNOSTIC
	struct extent *ep;
#endif
	static int initialized;

	if (!initialized){
		LIST_INIT(&ext_list);
		initialized = 1;
	}

#ifdef DIAGNOSTIC
	LIST_FOREACH(ep, &ext_list, ex_link) {
		if (ep == ex)
			panic("extent_register: already registered");
	}
#endif

	/* Insert into list */
	LIST_INSERT_HEAD(&ext_list, ex, ex_link);
}
#endif	/* DIAGNOSTIC || DDB */

struct pool ex_region_pl;

static void
extent_pool_init(void)
{
	static int inited;

	if (!inited) {
		pool_init(&ex_region_pl, sizeof(struct extent_region), 0, 0, 0,
		    "extentpl", NULL);
		pool_setipl(&ex_region_pl, IPL_VM);
		inited = 1;
	}
}

#ifdef DDB
/*
 * Print out all extents registered.  This is used in
 * DDB show extents
 */
void
extent_print_all(void)
{
	struct extent *ep;

	LIST_FOREACH(ep, &ext_list, ex_link) {
		extent_print1(ep, db_printf);
	}
}
#endif

/*
 * Allocate and initialize an extent map.
 */
struct extent *
extent_create(char *name, u_long start, u_long end, int mtype, caddr_t storage,
    size_t storagesize, int flags)
{
	struct extent *ex;
	caddr_t cp = storage;
	size_t sz = storagesize;
	struct extent_region *rp;
	int fixed_extent = (storage != NULL);

#ifdef DIAGNOSTIC
	/* Check arguments. */
	if (name == NULL)
		panic("extent_create: name == NULL");
	if (end < start) {
		printf("extent_create: extent `%s', start 0x%lx, end 0x%lx\n",
		    name, start, end);
		panic("extent_create: end < start");
	}
	if (fixed_extent && (storagesize < sizeof(struct extent_fixed)))
		panic("extent_create: fixed extent, bad storagesize 0x%lx",
		    (u_long)storagesize);
	if (fixed_extent == 0 && (storagesize != 0 || storage != NULL))
		panic("extent_create: storage provided for non-fixed");
#endif

	extent_pool_init();

	/* Allocate extent descriptor. */
	if (fixed_extent) {
		struct extent_fixed *fex;

		bzero(storage, storagesize);

		/*
		 * Align all descriptors on "long" boundaries.
		 */
		fex = (struct extent_fixed *)cp;
		ex = (struct extent *)fex;
		cp += ALIGN(sizeof(struct extent_fixed));
		sz -= ALIGN(sizeof(struct extent_fixed));
		fex->fex_storage = storage;
		fex->fex_storagesize = storagesize;

		/*
		 * In a fixed extent, we have to pre-allocate region
		 * descriptors and place them in the extent's freelist.
		 */
		LIST_INIT(&fex->fex_freelist);
		while (sz >= ALIGN(sizeof(struct extent_region))) {
			rp = (struct extent_region *)cp;
			cp += ALIGN(sizeof(struct extent_region));
			sz -= ALIGN(sizeof(struct extent_region));
			LIST_INSERT_HEAD(&fex->fex_freelist, rp, er_link);
		}
	} else {
		ex = (struct extent *)malloc(sizeof(struct extent),
		    mtype, (flags & EX_WAITOK) ? M_WAITOK : M_NOWAIT);
		if (ex == NULL)
			return (NULL);
	}

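	/*
	 * Illustrative note (not from the original source): callers of
	 * the fixed-extent form typically size "storage" with the
	 * EXTENT_FIXED_STORAGE_SIZE() macro from <sys/extent.h>, e.g.
	 *
	 *	static long ex_storage[EXTENT_FIXED_STORAGE_SIZE(8) /
	 *	    sizeof(long)];
	 *
	 * which reserves room for the extent header plus eight region
	 * descriptors for the freelist carved out above.
	 */
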
	/* Fill in the extent descriptor and return it to the caller. */
	LIST_INIT(&ex->ex_regions);
	ex->ex_name = name;
	ex->ex_start = start;
	ex->ex_end = end;
	ex->ex_mtype = mtype;
	ex->ex_flags = 0;
	if (fixed_extent)
		ex->ex_flags |= EXF_FIXED;
	if (flags & EX_NOCOALESCE)
		ex->ex_flags |= EXF_NOCOALESCE;

	if (flags & EX_FILLED) {
		rp = extent_alloc_region_descriptor(ex, flags);
		if (rp == NULL) {
			if (!fixed_extent)
				free(ex, mtype);
			return (NULL);
		}
		rp->er_start = start;
		rp->er_end = end;
		LIST_INSERT_HEAD(&ex->ex_regions, rp, er_link);
	}

#if defined(DIAGNOSTIC) || defined(DDB)
	extent_register(ex);
#endif
	return (ex);
}

/*
 * Destroy an extent map.
 */
void
extent_destroy(struct extent *ex)
{
	struct extent_region *rp, *orp;

#ifdef DIAGNOSTIC
	/* Check arguments. */
	if (ex == NULL)
		panic("extent_destroy: NULL extent");
#endif

	/* Free all region descriptors in extent. */
	for (rp = LIST_FIRST(&ex->ex_regions);
	    rp != LIST_END(&ex->ex_regions); ) {
		orp = rp;
		rp = LIST_NEXT(rp, er_link);
		LIST_REMOVE(orp, er_link);
		extent_free_region_descriptor(ex, orp);
	}

#if defined(DIAGNOSTIC) || defined(DDB)
	/* Remove from the list of all extents. */
	LIST_REMOVE(ex, ex_link);
#endif

	/* If we're not a fixed extent, free the extent descriptor itself. */
	if ((ex->ex_flags & EXF_FIXED) == 0)
		free(ex, ex->ex_mtype);
}

/*
 * Insert a region descriptor into the sorted region list after the
 * entry "after" or at the head of the list (if "after" is NULL).
 * The region descriptor we insert is passed in "rp".  We must
 * allocate the region descriptor before calling this function!
 * If we don't need the region descriptor, it will be freed here.
 */
static void
extent_insert_and_optimize(struct extent *ex, u_long start, u_long size,
    struct extent_region *after, struct extent_region *rp)
{
	struct extent_region *nextr;
	int appended = 0;

	if (after == NULL) {
		/*
		 * We're the first in the region list.  If there's
		 * a region after us, attempt to coalesce to save
		 * descriptor overhead.
		 */
		if (((ex->ex_flags & EXF_NOCOALESCE) == 0) &&
		    !LIST_EMPTY(&ex->ex_regions) &&
		    ((start + size) == LIST_FIRST(&ex->ex_regions)->er_start)) {
			/*
			 * We can coalesce.  Prepend us to the first region.
			 */
			LIST_FIRST(&ex->ex_regions)->er_start = start;
			extent_free_region_descriptor(ex, rp);
			return;
		}

		/*
		 * Can't coalesce.  Fill in the region descriptor
		 * and insert us at the head of the region list.
		 */
		rp->er_start = start;
		rp->er_end = start + (size - 1);
		LIST_INSERT_HEAD(&ex->ex_regions, rp, er_link);
		return;
	}

	/*
	 * If EXF_NOCOALESCE is set, coalescing is disallowed.
	 */
	if (ex->ex_flags & EXF_NOCOALESCE)
		goto cant_coalesce;

	/*
	 * Attempt to coalesce with the region before us.
	 */
	if ((after->er_end + 1) == start) {
		/*
		 * We can coalesce.  Append ourselves and make
		 * note of it.
		 */
		after->er_end = start + (size - 1);
		appended = 1;
	}

	/*
	 * Attempt to coalesce with the region after us.
	 */
	if (LIST_NEXT(after, er_link) != NULL &&
	    ((start + size) == LIST_NEXT(after, er_link)->er_start)) {
		/*
		 * We can coalesce.
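		 * (Example: with regions [0x00-0x0f] and [0x20-0x2f]
		 * already present, inserting [0x10-0x1f] collapses the
		 * three into a single region [0x00-0x2f].)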
		 * Note that if we appended ourselves to the previous
		 * region, we exactly fit the gap, and can free the
		 * "next" region descriptor.
		 */
		if (appended) {
			/*
			 * Yup, we can free it up.
			 */
			after->er_end = LIST_NEXT(after, er_link)->er_end;
			nextr = LIST_NEXT(after, er_link);
			LIST_REMOVE(nextr, er_link);
			extent_free_region_descriptor(ex, nextr);
		} else {
			/*
			 * Nope, just prepend us to the next region.
			 */
			LIST_NEXT(after, er_link)->er_start = start;
		}

		extent_free_region_descriptor(ex, rp);
		return;
	}

	/*
	 * We weren't able to coalesce with the next region, but
	 * we don't need to allocate a region descriptor if we
	 * appended ourselves to the previous region.
	 */
	if (appended) {
		extent_free_region_descriptor(ex, rp);
		return;
	}

 cant_coalesce:

	/*
	 * Fill in the region descriptor and insert ourselves
	 * into the region list.
	 */
	rp->er_start = start;
	rp->er_end = start + (size - 1);
	LIST_INSERT_AFTER(after, rp, er_link);
}

/*
 * Allocate a specific region in an extent map.
 */
int
extent_alloc_region(struct extent *ex, u_long start, u_long size, int flags)
{
	struct extent_region *rp, *last, *myrp;
	u_long end = start + (size - 1);
	int error;

#ifdef DIAGNOSTIC
	/* Check arguments. */
	if (ex == NULL)
		panic("extent_alloc_region: NULL extent");
	if (size < 1) {
		printf("extent_alloc_region: extent `%s', size 0x%lx\n",
		    ex->ex_name, size);
		panic("extent_alloc_region: bad size");
	}
	if (end < start) {
		printf(
		    "extent_alloc_region: extent `%s', start 0x%lx, size 0x%lx\n",
		    ex->ex_name, start, size);
		panic("extent_alloc_region: overflow");
	}
	if ((flags & EX_CONFLICTOK) && (flags & EX_WAITSPACE))
		panic("extent_alloc_region: EX_CONFLICTOK and EX_WAITSPACE "
		    "are mutually exclusive");
#endif

	/*
	 * Make sure the requested region lies within the
	 * extent.
	 */
	if ((start < ex->ex_start) || (end > ex->ex_end)) {
#ifdef DIAGNOSTIC
		printf("extent_alloc_region: extent `%s' (0x%lx - 0x%lx)\n",
		    ex->ex_name, ex->ex_start, ex->ex_end);
		printf("extent_alloc_region: start 0x%lx, end 0x%lx\n",
		    start, end);
		panic("extent_alloc_region: region lies outside extent");
#else
		return (EINVAL);
#endif
	}

	/*
	 * Allocate the region descriptor.  It will be freed later
	 * if we can coalesce with another region.
	 */
	myrp = extent_alloc_region_descriptor(ex, flags);
	if (myrp == NULL) {
#ifdef DIAGNOSTIC
		printf(
		    "extent_alloc_region: can't allocate region descriptor\n");
#endif
		return (ENOMEM);
	}

 alloc_start:
	/*
	 * Attempt to place ourselves in the desired area of the
	 * extent.  We save ourselves some work by keeping the list sorted.
	 * In other words, if the start of the current region is greater
	 * than the end of our region, we don't have to search any further.
	 */

	/*
	 * Keep a pointer to the last region we looked at so
	 * that we don't have to traverse the list again when
	 * we insert ourselves.  If "last" is NULL when we
	 * finally insert ourselves, we go at the head of the
	 * list.  See extent_insert_and_optimize() for details.
	 */
	last = NULL;

	LIST_FOREACH(rp, &ex->ex_regions, er_link) {
		if (rp->er_start > end) {
			/*
			 * We lie before this region and don't
			 * conflict.
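			 * Because the list is sorted by er_start, no
			 * later region can conflict either, so we can
			 * stop searching here.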
			 */
			break;
		}

		/*
		 * The current region begins before we end.
		 * Check for a conflict.
		 */
		if (rp->er_end >= start) {
			/*
			 * We conflict.  If we can (and want to) wait,
			 * do so.
			 */
			if (flags & EX_WAITSPACE) {
				ex->ex_flags |= EXF_WANTED;
				error = tsleep(ex,
				    PRIBIO | ((flags & EX_CATCH) ? PCATCH : 0),
				    "extnt", 0);
				if (error)
					return (error);
				goto alloc_start;
			}

			/*
			 * If we tolerate conflicts, adjust things such
			 * that all space in the requested region is
			 * allocated.
			 */
			if (flags & EX_CONFLICTOK) {
				/*
				 * There are four possibilities:
				 *
				 * 1. The current region overlaps with
				 *    the start of the requested region.
				 *    Adjust the requested region to
				 *    start at the end of the current
				 *    region and try again.
				 *
				 * 2. The current region falls
				 *    completely within the requested
				 *    region.  Free the current region
				 *    and try again.
				 *
				 * 3. The current region overlaps with
				 *    the end of the requested region.
				 *    Adjust the requested region to
				 *    end at the start of the current
				 *    region and try again.
				 *
				 * 4. The requested region falls
				 *    completely within the current
				 *    region.  We're done.
				 */
				if (rp->er_start <= start) {
					start = rp->er_end + 1;
					size = end - start + 1;
					goto alloc_start;
				} else if (rp->er_end < end) {
					LIST_REMOVE(rp, er_link);
					extent_free_region_descriptor(ex, rp);
					goto alloc_start;
				} else if (rp->er_start < end) {
					end = rp->er_start - 1;
					size = end - start + 1;
					goto alloc_start;
				}
				return (0);
			}

			extent_free_region_descriptor(ex, myrp);
			return (EAGAIN);
		}
		/*
		 * We don't conflict, but this region lies before
		 * us.  Keep a pointer to this region, and keep
		 * trying.
		 */
		last = rp;
	}

	/*
	 * We don't conflict with any regions.  "last" points
	 * to the region we fall after, or is NULL if we belong
	 * at the beginning of the region list.  Insert ourselves.
	 */
	extent_insert_and_optimize(ex, start, size, last, myrp);
	return (0);
}

/*
 * Macro to check (x + y) <= z.  This check is designed to fail
 * if an overflow occurs.
 */
#define LE_OV(x, y, z)	((((x) + (y)) >= (x)) && (((x) + (y)) <= (z)))

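/*
 * For example, on a 32-bit u_long, LE_OV(0xfffffff0, 0x20, 0xffffffff)
 * is false because the sum wraps around to 0x10; a naive
 * "(x + y) <= z" test would incorrectly accept it.
 */
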
/*
 * Allocate a region in an extent map subregion.
 *
 * If EX_FAST is specified, we return the first fit in the map.
 * Otherwise, we try to minimize fragmentation by finding the
 * smallest gap that will hold the request.
 *
 * The allocated region is aligned to "alignment", which must be
 * a power of 2.
 */
int
extent_alloc_subregion(struct extent *ex, u_long substart, u_long subend,
    u_long size, u_long alignment, u_long skew, u_long boundary, int flags,
    u_long *result)
{
	struct extent_region *rp, *myrp, *last, *bestlast;
	u_long newstart, newend, exend, beststart, bestovh, ovh;
	u_long dontcross;
	int error;

#ifdef DIAGNOSTIC
	/* Check arguments. */
	if (ex == NULL)
		panic("extent_alloc_subregion: NULL extent");
	if (result == NULL)
		panic("extent_alloc_subregion: NULL result pointer");
	if ((substart < ex->ex_start) || (substart > ex->ex_end) ||
	    (subend > ex->ex_end) || (subend < ex->ex_start)) {
		printf("extent_alloc_subregion: extent `%s', ex_start 0x%lx, ex_end 0x%lx\n",
		    ex->ex_name, ex->ex_start, ex->ex_end);
		printf("extent_alloc_subregion: substart 0x%lx, subend 0x%lx\n",
		    substart, subend);
		panic("extent_alloc_subregion: bad subregion");
	}
	if ((size < 1) || ((size - 1) > (subend - substart))) {
		printf("extent_alloc_subregion: extent `%s', size 0x%lx\n",
		    ex->ex_name, size);
		panic("extent_alloc_subregion: bad size");
	}
	if (alignment == 0)
		panic("extent_alloc_subregion: bad alignment");
	if (boundary && (boundary < size)) {
		printf(
		    "extent_alloc_subregion: extent `%s', size 0x%lx, "
		    "boundary 0x%lx\n", ex->ex_name, size, boundary);
		panic("extent_alloc_subregion: bad boundary");
	}
#endif

	/*
	 * Allocate the region descriptor.  It will be freed later
	 * if we can coalesce with another region.
	 */
	myrp = extent_alloc_region_descriptor(ex, flags);
	if (myrp == NULL) {
#ifdef DIAGNOSTIC
		printf(
		    "extent_alloc_subregion: can't allocate region descriptor\n");
#endif
		return (ENOMEM);
	}

 alloc_start:
	/*
	 * Keep a pointer to the last region we looked at so
	 * that we don't have to traverse the list again when
	 * we insert ourselves.  If "last" is NULL when we
	 * finally insert ourselves, we go at the head of the
	 * list.  See extent_insert_and_optimize() for details.
	 */
	last = NULL;

	/*
	 * Keep track of size and location of the smallest
	 * chunk we fit in.
	 *
	 * Since the extent can be as large as the numeric range
	 * of the CPU (0 - 0xffffffff for 32-bit systems), the
	 * best overhead value can be the maximum unsigned integer.
	 * Thus, we initialize "bestovh" to 0, since we insert ourselves
	 * into the region list immediately on an exact match (which
	 * is the only case where "bestovh" would be set to 0).
	 */
	bestovh = 0;
	beststart = 0;
	bestlast = NULL;

	/*
	 * Keep track of end of free region.  This is either the end of extent
	 * or the start of a region past the subend.
	 */
	exend = ex->ex_end;

	/*
	 * For N allocated regions, we must make (N + 1)
	 * checks for unallocated space.  The first chunk we
	 * check is the area from the beginning of the subregion
	 * to the first allocated region after that point.
	 */
	newstart = extent_align(substart, alignment, skew);
	if (newstart < ex->ex_start) {
#ifdef DIAGNOSTIC
		printf(
		    "extent_alloc_subregion: extent `%s' (0x%lx - 0x%lx), alignment 0x%lx\n",
		    ex->ex_name, ex->ex_start, ex->ex_end, alignment);
		panic("extent_alloc_subregion: overflow after alignment");
#else
		extent_free_region_descriptor(ex, myrp);
		return (EINVAL);
#endif
	}

	/*
	 * Find the first allocated region that begins on or after
	 * the subregion start, advancing the "last" pointer along
	 * the way.
	 */
	LIST_FOREACH(rp, &ex->ex_regions, er_link) {
		if (rp->er_start >= newstart)
			break;
		last = rp;
	}

	/*
	 * Relocate the start of our candidate region to the end of
	 * the last allocated region (if there was one overlapping
	 * our subrange).
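	 * For example, with alignment 0x1000, skew 0, and
	 * last->er_end == 0x17ff, the candidate start moves up to the
	 * aligned address 0x2000.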
	 */
	if (last != NULL && last->er_end >= newstart)
		newstart = extent_align((last->er_end + 1), alignment, skew);

	for (; rp != LIST_END(&ex->ex_regions); rp = LIST_NEXT(rp, er_link)) {
		/*
		 * If the region is past the subend, bail out and see
		 * if we fit against the subend.
		 */
		if (rp->er_start > subend) {
			exend = rp->er_start;
			break;
		}

		/*
		 * Check the chunk before "rp".  Note that our
		 * comparison is safe from overflow conditions.
		 */
		if (LE_OV(newstart, size, rp->er_start)) {
			/*
			 * Do a boundary check, if necessary.  Note
			 * that a region may *begin* on the boundary,
			 * but it must end before the boundary.
			 */
			if (boundary) {
				newend = newstart + (size - 1);

				/*
				 * Calculate the next boundary after the start
				 * of this region.
				 */
				dontcross = extent_align(newstart+1, boundary,
				    (flags & EX_BOUNDZERO) ? 0 : ex->ex_start)
				    - 1;

#if 0
				printf("newstart=%lx newend=%lx ex_start=%lx ex_end=%lx boundary=%lx dontcross=%lx\n",
				    newstart, newend, ex->ex_start, ex->ex_end,
				    boundary, dontcross);
#endif

				/* Check for overflow */
				if (dontcross < ex->ex_start)
					dontcross = ex->ex_end;
				else if (newend > dontcross) {
					/*
					 * Candidate region crosses boundary.
					 * Throw away the leading part and see
					 * if we still fit.
					 */
					newstart = dontcross + 1;
					newend = newstart + (size - 1);
					dontcross += boundary;
					if (!LE_OV(newstart, size, rp->er_start))
						goto skip;
				}

				/*
				 * If we run past the end of
				 * the extent or the boundary
				 * overflows, then the request
				 * can't fit.
				 */
				if (newstart + size - 1 > ex->ex_end ||
				    dontcross < newstart)
					goto fail;
			}

			/*
			 * We would fit into this space.  Calculate
			 * the overhead (wasted space).  If we exactly
			 * fit, or we're taking the first fit, insert
			 * ourselves into the region list.
			 */
			ovh = rp->er_start - newstart - size;
			if ((flags & EX_FAST) || (ovh == 0))
				goto found;

			/*
			 * Don't exactly fit, but check to see
			 * if we're better than any current choice.
			 */
			if ((bestovh == 0) || (ovh < bestovh)) {
				bestovh = ovh;
				beststart = newstart;
				bestlast = last;
			}
		}

 skip:
		/*
		 * Skip past the current region and check again.
		 */
		newstart = extent_align((rp->er_end + 1), alignment, skew);
		if (newstart < rp->er_end) {
			/*
			 * Overflow condition.  Don't error out, since
			 * we might have a chunk of space that we can
			 * use.
			 */
			goto fail;
		}

		last = rp;
	}

	/*
	 * The final check is from the current starting point to the
	 * end of the subregion.  If there were no allocated regions,
	 * "newstart" is set to the beginning of the subregion, or
	 * just past the end of the last allocated region, adjusted
	 * for alignment in either case.
	 */
	if (LE_OV(newstart, (size - 1), subend)) {
		/*
		 * Do a boundary check, if necessary.  Note
		 * that a region may *begin* on the boundary,
		 * but it must end before the boundary.
		 */
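		/*
		 * Example: with boundary 0x1000 (EX_BOUNDZERO clear,
		 * ex_start 0), a candidate at newstart 0xf00 with size
		 * 0x200 would end at 0x10ff and cross the 0x1000 line;
		 * the leading part is thrown away and the candidate is
		 * restarted at 0x1000.
		 */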
		if (boundary) {
			newend = newstart + (size - 1);

			/*
			 * Calculate the next boundary after the start
			 * of this region.
			 */
			dontcross = extent_align(newstart+1, boundary,
			    (flags & EX_BOUNDZERO) ? 0 : ex->ex_start)
			    - 1;

#if 0
			printf("newstart=%lx newend=%lx ex_start=%lx ex_end=%lx boundary=%lx dontcross=%lx\n",
			    newstart, newend, ex->ex_start, ex->ex_end,
			    boundary, dontcross);
#endif

			/* Check for overflow */
			if (dontcross < ex->ex_start)
				dontcross = ex->ex_end;
			else if (newend > dontcross) {
				/*
				 * Candidate region crosses boundary.
				 * Throw away the leading part and see
				 * if we still fit.
				 */
				newstart = dontcross + 1;
				newend = newstart + (size - 1);
				dontcross += boundary;
				if (!LE_OV(newstart, (size - 1), subend))
					goto fail;
			}

			/*
			 * If we run past the end of
			 * the extent or the boundary
			 * overflows, then the request
			 * can't fit.
			 */
			if (newstart + size - 1 > ex->ex_end ||
			    dontcross < newstart)
				goto fail;
		}

		/*
		 * We would fit into this space.  Calculate
		 * the overhead (wasted space).  If we exactly
		 * fit, or we're taking the first fit, insert
		 * ourselves into the region list.
		 */
		ovh = exend - newstart - (size - 1);
		if ((flags & EX_FAST) || (ovh == 0))
			goto found;

		/*
		 * Don't exactly fit, but check to see
		 * if we're better than any current choice.
		 */
		if ((bestovh == 0) || (ovh < bestovh)) {
			bestovh = ovh;
			beststart = newstart;
			bestlast = last;
		}
	}

 fail:
	/*
	 * One of the following two conditions has
	 * occurred:
	 *
	 * There is no chunk large enough to hold the request.
	 *
	 * If EX_FAST was not specified, there is not an
	 * exact match for the request.
	 *
	 * Note that if we reach this point and EX_FAST is
	 * set, then we know there is no space in the extent for
	 * the request.
	 */
	if (((flags & EX_FAST) == 0) && (bestovh != 0)) {
		/*
		 * We have a match that's "good enough".
		 */
		newstart = beststart;
		last = bestlast;
		goto found;
	}

	/*
	 * No space currently available.  Wait for it to free up,
	 * if possible.
	 */
	if (flags & EX_WAITSPACE) {
		ex->ex_flags |= EXF_WANTED;
		error = tsleep(ex,
		    PRIBIO | ((flags & EX_CATCH) ? PCATCH : 0), "extnt", 0);
		if (error)
			return (error);
		goto alloc_start;
	}

	extent_free_region_descriptor(ex, myrp);
	return (EAGAIN);

 found:
	/*
	 * Insert ourselves into the region list.
	 */
	extent_insert_and_optimize(ex, newstart, size, last, myrp);
	*result = newstart;
	return (0);
}

int
extent_free(struct extent *ex, u_long start, u_long size, int flags)
{
	struct extent_region *rp, *nrp = NULL;
	u_long end = start + (size - 1);
	int exflags;

#ifdef DIAGNOSTIC
	/* Check arguments. */
	if (ex == NULL)
		panic("extent_free: NULL extent");
	if ((start < ex->ex_start) || (end > ex->ex_end)) {
		extent_print(ex);
		printf("extent_free: extent `%s', start 0x%lx, size 0x%lx\n",
		    ex->ex_name, start, size);
		panic("extent_free: extent `%s', region not within extent",
		    ex->ex_name);
	}
	/* Check for an overflow. */
	if (end < start) {
		extent_print(ex);
		printf("extent_free: extent `%s', start 0x%lx, size 0x%lx\n",
		    ex->ex_name, start, size);
		panic("extent_free: overflow");
	}
#endif

	/*
	 * If we're allowing coalescing, we must allocate a region
	 * descriptor now, since it might block.
	 *
	 * XXX Make a static, create-time flags word, so we don't
	 * XXX have to lock to read it!
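	 *
	 * (The new descriptor is only consumed in case 4 below, where
	 * freeing the middle of an existing region splits it in two.)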
	 */
	exflags = ex->ex_flags;

	if ((exflags & EXF_NOCOALESCE) == 0) {
		/* Allocate a region descriptor. */
		nrp = extent_alloc_region_descriptor(ex, flags);
		if (nrp == NULL)
			return (ENOMEM);
	}

	/*
	 * Find region and deallocate.  Several possibilities:
	 *
	 * 1. (start == er_start) && (end == er_end):
	 *    Free descriptor.
	 *
	 * 2. (start == er_start) && (end < er_end):
	 *    Adjust er_start.
	 *
	 * 3. (start > er_start) && (end == er_end):
	 *    Adjust er_end.
	 *
	 * 4. (start > er_start) && (end < er_end):
	 *    Fragment region.  Requires descriptor alloc.
	 *
	 * Cases 2, 3, and 4 require that the EXF_NOCOALESCE flag
	 * is not set.
	 */
	LIST_FOREACH(rp, &ex->ex_regions, er_link) {
		/*
		 * Save ourselves some comparisons; does the current
		 * region end before the chunk to be freed begins?  If so,
		 * then we haven't found the appropriate region descriptor.
		 */
		if (rp->er_end < start)
			continue;

		/*
		 * Save ourselves some traversal; does the current
		 * region begin after the chunk to be freed ends?  If so,
		 * then we've already passed any possible region descriptors
		 * that might have contained the chunk to be freed.
		 */
		if (rp->er_start > end)
			break;

		/* Case 1. */
		if ((start == rp->er_start) && (end == rp->er_end)) {
			LIST_REMOVE(rp, er_link);
			extent_free_region_descriptor(ex, rp);
			goto done;
		}

		/*
		 * The following cases all require that EXF_NOCOALESCE
		 * is not set.
		 */
		if (ex->ex_flags & EXF_NOCOALESCE)
			continue;

		/* Case 2. */
		if ((start == rp->er_start) && (end < rp->er_end)) {
			rp->er_start = (end + 1);
			goto done;
		}

		/* Case 3. */
		if ((start > rp->er_start) && (end == rp->er_end)) {
			rp->er_end = (start - 1);
			goto done;
		}

		/* Case 4. */
		if ((start > rp->er_start) && (end < rp->er_end)) {
			/* Fill in new descriptor. */
			nrp->er_start = end + 1;
			nrp->er_end = rp->er_end;

			/* Adjust current descriptor. */
			rp->er_end = start - 1;

			/* Insert new descriptor after current. */
			LIST_INSERT_AFTER(rp, nrp, er_link);

			/* We used the new descriptor, so don't free it below. */
			nrp = NULL;
			goto done;
		}
	}

	/* Region not found, or request otherwise invalid. */
#if defined(DIAGNOSTIC) || defined(DDB)
	extent_print(ex);
#endif
	printf("extent_free: start 0x%lx, end 0x%lx\n", start, end);
	panic("extent_free: region not found");

 done:
	if (nrp != NULL)
		extent_free_region_descriptor(ex, nrp);
	if (ex->ex_flags & EXF_WANTED) {
		ex->ex_flags &= ~EXF_WANTED;
		wakeup(ex);
	}
	return (0);
}

static struct extent_region *
extent_alloc_region_descriptor(struct extent *ex, int flags)
{
	struct extent_region *rp;

	if (ex->ex_flags & EXF_FIXED) {
		struct extent_fixed *fex = (struct extent_fixed *)ex;

		while (LIST_EMPTY(&fex->fex_freelist)) {
			if (flags & EX_MALLOCOK)
				goto alloc;

			if ((flags & EX_WAITOK) == 0)
				return (NULL);
			ex->ex_flags |= EXF_FLWANTED;
			if (tsleep(&fex->fex_freelist,
			    PRIBIO | ((flags & EX_CATCH) ? PCATCH : 0),
			    "extnt", 0))
				return (NULL);
		}
		rp = LIST_FIRST(&fex->fex_freelist);
		LIST_REMOVE(rp, er_link);

		/*
		 * Don't muck with flags after pulling it off the
		 * freelist; it may be a dynamically allocated
		 * region pointer that was kindly given to us,
		 * and we need to preserve that information.
		 */

		return (rp);
	}

 alloc:
	rp = pool_get(&ex_region_pl, (flags & EX_WAITOK) ? PR_WAITOK : 0);
	if (rp != NULL)
		rp->er_flags = ER_ALLOC;

	return (rp);
}

static void
extent_free_region_descriptor(struct extent *ex, struct extent_region *rp)
{
	if (ex->ex_flags & EXF_FIXED) {
		struct extent_fixed *fex = (struct extent_fixed *)ex;

		/*
		 * If someone's waiting for a region descriptor,
		 * be nice and give them this one, rather than
		 * just free'ing it back to the system.
		 */
		if (rp->er_flags & ER_ALLOC) {
			if (ex->ex_flags & EXF_FLWANTED) {
				/* Clear all but ER_ALLOC flag. */
				rp->er_flags = ER_ALLOC;
				LIST_INSERT_HEAD(&fex->fex_freelist, rp,
				    er_link);
				goto wake_em_up;
			} else {
				pool_put(&ex_region_pl, rp);
			}
		} else {
			/* Clear all flags. */
			rp->er_flags = 0;
			LIST_INSERT_HEAD(&fex->fex_freelist, rp, er_link);
		}

		if (ex->ex_flags & EXF_FLWANTED) {
 wake_em_up:
			ex->ex_flags &= ~EXF_FLWANTED;
			wakeup(&fex->fex_freelist);
		}
		return;
	}

	/*
	 * We know it's dynamically allocated if we get here.
	 */
	pool_put(&ex_region_pl, rp);
}


#if defined(DIAGNOSTIC) || defined(DDB) || !defined(_KERNEL)

void
extent_print(struct extent *ex)
{
	extent_print1(ex, printf);
}

void
extent_print1(struct extent *ex, int (*pr)(const char *, ...))
{
	struct extent_region *rp;

	if (ex == NULL)
		panic("extent_print: NULL extent");

#ifdef _KERNEL
	(*pr)("extent `%s' (0x%lx - 0x%lx), flags=%b\n", ex->ex_name,
	    ex->ex_start, ex->ex_end, ex->ex_flags, EXF_BITS);
#else
	(*pr)("extent `%s' (0x%lx - 0x%lx), flags = 0x%x\n", ex->ex_name,
	    ex->ex_start, ex->ex_end, ex->ex_flags);
#endif

	LIST_FOREACH(rp, &ex->ex_regions, er_link)
		(*pr)("     0x%lx - 0x%lx\n", rp->er_start, rp->er_end);
}
#endif
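
/*
 * Illustrative usage sketch (not part of the original file): a typical
 * caller creates a dynamically allocated extent, reserves a specific
 * region, and releases it again:
 *
 *	struct extent *ex;
 *
 *	ex = extent_create("example", 0x0, 0xffff, M_DEVBUF, NULL, 0,
 *	    EX_NOWAIT);
 *	if (ex == NULL)
 *		return (ENOMEM);
 *	if (extent_alloc_region(ex, 0x100, 0x10, EX_NOWAIT) == 0)
 *		extent_free(ex, 0x100, 0x10, EX_NOWAIT);
 *	extent_destroy(ex);
 */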