/*	$NetBSD: subr_extent.c,v 1.44 2001/11/12 15:25:19 lukem Exp $	*/

/*-
 * Copyright (c) 1996, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe and Matthias Drochner.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * General purpose extent manager.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_extent.c,v 1.44 2001/11/12 15:25:19 lukem Exp $");

#ifdef _KERNEL
#include <sys/param.h>
#include <sys/extent.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/time.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/lock.h>

#include <uvm/uvm_extern.h>

#define	KMEM_IS_RUNNING		(kmem_map != NULL)
#elif defined(_EXTENT_TESTING)
/*
 * User-land definitions, so it can fit into a testing harness.
 */
#include <sys/param.h>
#include <sys/pool.h>
#include <sys/extent.h>
#include <errno.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>

/*
 * Use multi-line #defines to avoid screwing up the kernel tags file;
 * without this, ctags produces a tags file where panic() shows up
 * in subr_extent.c rather than subr_prf.c.
 */
#define	\
malloc(s, t, flags)		malloc(s)
#define	\
free(p, t)			free(p)
#define	\
tsleep(chan, pri, str, timo)	(EWOULDBLOCK)
#define	\
ltsleep(chan,pri,str,timo,lck)	(EWOULDBLOCK)
#define	\
wakeup(chan)			((void)0)
#define	\
pool_get(pool, flags)		malloc((pool)->pr_size,0,0)
#define	\
pool_put(pool, rp)		free(rp,0)
#define	\
panic(a)			printf(a)
#define	\
splhigh()			(1)
#define	\
splx(s)				((void)(s))

#define	\
simple_lock_init(l)		((void)(l))
#define	\
simple_lock(l)			((void)(l))
#define	\
simple_unlock(l)		((void)(l))
#define	KMEM_IS_RUNNING		(1)
#endif

static void extent_insert_and_optimize __P((struct extent *, u_long, u_long,
	    int, struct extent_region *, struct extent_region *));
static struct extent_region *extent_alloc_region_descriptor
	    __P((struct extent *, int));
static void extent_free_region_descriptor __P((struct extent *,
	    struct extent_region *));

static struct pool expool;
static struct simplelock expool_init_slock = SIMPLELOCK_INITIALIZER;
static int expool_initialized;

/*
 * Macro to align to an arbitrary power-of-two boundary.
 */
#define EXTENT_ALIGN(_start, _align, _skew)		\
	(((((_start) - (_skew)) + ((_align) - 1)) & (-(_align))) + (_skew))
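
/*
 * Worked example (illustrative, not from the original source): with no
 * skew, EXTENT_ALIGN(0x1003, 0x10, 0) computes
 * ((0x1003 + 0xf) & ~0xf) == 0x1010, the smallest 0x10-aligned value
 * >= 0x1003.  (For a power-of-two _align, -(_align) equals
 * ~((_align) - 1).)  A skew shifts the alignment lattice:
 * EXTENT_ALIGN(0x1003, 0x10, 4) == 0x1004, the smallest value >= 0x1003
 * that lies 4 bytes past a 0x10 boundary.
 */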

/*
 * Create the extent_region pool.
 * (This is deferred until one of our callers thinks we can malloc()).
 */

static __inline void
expool_init(void)
{

	simple_lock(&expool_init_slock);
	if (expool_initialized) {
		simple_unlock(&expool_init_slock);
		return;
	}

#if defined(_KERNEL)
	pool_init(&expool, sizeof(struct extent_region), 0, 0, 0,
	    "extent", 0, 0, 0, 0);
#else
	expool.pr_size = sizeof(struct extent_region);
#endif

	expool_initialized = 1;
	simple_unlock(&expool_init_slock);
}

/*
 * Allocate and initialize an extent map.
 */
struct extent *
extent_create(name, start, end, mtype, storage, storagesize, flags)
	const char *name;
	u_long start, end;
	int mtype;
	caddr_t storage;
	size_t storagesize;
	int flags;
{
	struct extent *ex;
	caddr_t cp = storage;
	size_t sz = storagesize;
	struct extent_region *rp;
	int fixed_extent = (storage != NULL);
	int s;

#ifdef DIAGNOSTIC
	/* Check arguments. */
	if (name == NULL)
		panic("extent_create: name == NULL");
	if (end < start) {
		printf("extent_create: extent `%s', start 0x%lx, end 0x%lx\n",
		    name, start, end);
		panic("extent_create: end < start");
	}
	if (fixed_extent && (storagesize < sizeof(struct extent_fixed)))
		panic("extent_create: fixed extent, bad storagesize 0x%lx",
		    (u_long)storagesize);
	if (fixed_extent == 0 && (storagesize != 0 || storage != NULL))
		panic("extent_create: storage provided for non-fixed");
#endif

	/* Allocate extent descriptor. */
	if (fixed_extent) {
		struct extent_fixed *fex;

		memset(storage, 0, storagesize);

		/*
		 * Align all descriptors on "long" boundaries.
		 */
		fex = (struct extent_fixed *)cp;
		ex = (struct extent *)fex;
		cp += ALIGN(sizeof(struct extent_fixed));
		sz -= ALIGN(sizeof(struct extent_fixed));
		fex->fex_storage = storage;
		fex->fex_storagesize = storagesize;

		/*
		 * In a fixed extent, we have to pre-allocate region
		 * descriptors and place them in the extent's freelist.
		 */
		LIST_INIT(&fex->fex_freelist);
		while (sz >= ALIGN(sizeof(struct extent_region))) {
			rp = (struct extent_region *)cp;
			cp += ALIGN(sizeof(struct extent_region));
			sz -= ALIGN(sizeof(struct extent_region));
			LIST_INSERT_HEAD(&fex->fex_freelist, rp, er_link);
		}
	} else {
		s = splhigh();
		if (expool_initialized == 0)
			expool_init();
		splx(s);

		ex = (struct extent *)malloc(sizeof(struct extent),
		    mtype, (flags & EX_WAITOK) ? M_WAITOK : M_NOWAIT);
		if (ex == NULL)
			return (NULL);
	}

	/* Fill in the extent descriptor and return it to the caller. */
	simple_lock_init(&ex->ex_slock);
	LIST_INIT(&ex->ex_regions);
	ex->ex_name = name;
	ex->ex_start = start;
	ex->ex_end = end;
	ex->ex_mtype = mtype;
	ex->ex_flags = 0;
	if (fixed_extent)
		ex->ex_flags |= EXF_FIXED;
	if (flags & EX_NOCOALESCE)
		ex->ex_flags |= EXF_NOCOALESCE;
	return (ex);
}
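
/*
 * Usage sketch (illustrative; the names "ioport_ex", "early_ex" and
 * "early_storage" are hypothetical).  A dynamically allocated extent
 * for a 16-bit I/O port space, and a fixed extent whose descriptor
 * storage is supplied by the caller, e.g. so it can be created before
 * malloc() is available:
 *
 *	struct extent *ioport_ex, *early_ex;
 *	static long early_storage[EXTENT_FIXED_STORAGE_SIZE(8) /
 *	    sizeof(long)];
 *
 *	ioport_ex = extent_create("ioport", 0x0, 0xffff, M_DEVBUF,
 *	    NULL, 0, EX_NOWAIT | EX_NOCOALESCE);
 *
 *	early_ex = extent_create("early", 0x0, 0xfffff, M_DEVBUF,
 *	    (caddr_t)early_storage, sizeof(early_storage), EX_NOWAIT);
 */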

/*
 * Destroy an extent map.
 * Since we're freeing the data, there can't be any references
 * so we don't need any locking.
 */
void
extent_destroy(ex)
	struct extent *ex;
{
	struct extent_region *rp, *orp;

#ifdef DIAGNOSTIC
	/* Check arguments. */
	if (ex == NULL)
		panic("extent_destroy: NULL extent");
#endif

	/* Free all region descriptors in extent. */
	for (rp = ex->ex_regions.lh_first; rp != NULL; ) {
		orp = rp;
		rp = rp->er_link.le_next;
		LIST_REMOVE(orp, er_link);
		extent_free_region_descriptor(ex, orp);
	}

	/* If we're not a fixed extent, free the extent descriptor itself. */
	if ((ex->ex_flags & EXF_FIXED) == 0)
		free(ex, ex->ex_mtype);
}

/*
 * Insert a region descriptor into the sorted region list after the
 * entry "after" or at the head of the list (if "after" is NULL).
 * The region descriptor we insert is passed in "rp".  We must
 * allocate the region descriptor before calling this function!
 * If we don't need the region descriptor, it will be freed here.
 */
static void
extent_insert_and_optimize(ex, start, size, flags, after, rp)
	struct extent *ex;
	u_long start, size;
	int flags;
	struct extent_region *after, *rp;
{
	struct extent_region *nextr;
	int appended = 0;

	if (after == NULL) {
		/*
		 * We're the first in the region list.  If there's
		 * a region after us, attempt to coalesce to save
		 * descriptor overhead.
		 */
		if (((ex->ex_flags & EXF_NOCOALESCE) == 0) &&
		    (ex->ex_regions.lh_first != NULL) &&
		    ((start + size) == ex->ex_regions.lh_first->er_start)) {
			/*
			 * We can coalesce.  Prepend us to the first region.
			 */
			ex->ex_regions.lh_first->er_start = start;
			extent_free_region_descriptor(ex, rp);
			return;
		}

		/*
		 * Can't coalesce.  Fill in the region descriptor
		 * and insert us at the head of the region list.
		 */
		rp->er_start = start;
		rp->er_end = start + (size - 1);
		LIST_INSERT_HEAD(&ex->ex_regions, rp, er_link);
		return;
	}

	/*
	 * If EXF_NOCOALESCE is set, coalescing is disallowed.
	 */
	if (ex->ex_flags & EXF_NOCOALESCE)
		goto cant_coalesce;

	/*
	 * Attempt to coalesce with the region before us.
	 */
	if ((after->er_end + 1) == start) {
		/*
		 * We can coalesce.  Append ourselves and make
		 * note of it.
		 */
		after->er_end = start + (size - 1);
		appended = 1;
	}

	/*
	 * Attempt to coalesce with the region after us.
	 */
	if ((after->er_link.le_next != NULL) &&
	    ((start + size) == after->er_link.le_next->er_start)) {
		/*
		 * We can coalesce.  Note that if we appended ourselves
		 * to the previous region, we exactly fit the gap, and
		 * can free the "next" region descriptor.
		 */
		if (appended) {
			/*
			 * Yup, we can free it up.
			 */
			after->er_end = after->er_link.le_next->er_end;
			nextr = after->er_link.le_next;
			LIST_REMOVE(nextr, er_link);
			extent_free_region_descriptor(ex, nextr);
		} else {
			/*
			 * Nope, just prepend us to the next region.
			 */
			after->er_link.le_next->er_start = start;
		}

		extent_free_region_descriptor(ex, rp);
		return;
	}

	/*
	 * We weren't able to coalesce with the next region, but
	 * we don't need to allocate a region descriptor if we
	 * appended ourselves to the previous region.
	 */
	if (appended) {
		extent_free_region_descriptor(ex, rp);
		return;
	}

 cant_coalesce:

	/*
	 * Fill in the region descriptor and insert ourselves
	 * into the region list.
	 */
	rp->er_start = start;
	rp->er_end = start + (size - 1);
	LIST_INSERT_AFTER(after, rp, er_link);
}
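
/*
 * Illustration (informal, added for exposition) of the outcomes handled
 * above, with the new allocation shown as NNN and existing regions as
 * XXX:
 *
 *	append:		[XXX][NNN]		previous er_end grows;
 *						"rp" is freed
 *	prepend:	[NNN][XXX]		next er_start shrinks;
 *						"rp" is freed
 *	bridge:		[XXX][NNN][XXX]		previous region absorbs the
 *						next; both "rp" and the next
 *						region's descriptor are freed
 *	standalone:	[XXX]..[NNN]..[XXX]	"rp" is filled in and linked
 *						into the sorted list
 *
 * The same prepend optimization is applied at the list head when
 * "after" is NULL.
 */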

/*
 * Allocate a specific region in an extent map.
 */
int
extent_alloc_region(ex, start, size, flags)
	struct extent *ex;
	u_long start, size;
	int flags;
{
	struct extent_region *rp, *last, *myrp;
	u_long end = start + (size - 1);
	int error;

#ifdef DIAGNOSTIC
	/* Check arguments. */
	if (ex == NULL)
		panic("extent_alloc_region: NULL extent");
	if (size < 1) {
		printf("extent_alloc_region: extent `%s', size 0x%lx\n",
		    ex->ex_name, size);
		panic("extent_alloc_region: bad size");
	}
	if (end < start) {
		printf(
		 "extent_alloc_region: extent `%s', start 0x%lx, size 0x%lx\n",
		    ex->ex_name, start, size);
		panic("extent_alloc_region: overflow");
	}
#endif
#ifdef LOCKDEBUG
	if (flags & EX_WAITSPACE)
		simple_lock_only_held(NULL,
		    "extent_alloc_region(EX_WAITSPACE)");
#endif

	/*
	 * Make sure the requested region lies within the
	 * extent.
	 *
	 * We don't lock to check the range, because those values
	 * are never modified, and if another thread deletes the
	 * extent, we're screwed anyway.
	 */
	if ((start < ex->ex_start) || (end > ex->ex_end)) {
#ifdef DIAGNOSTIC
		printf("extent_alloc_region: extent `%s' (0x%lx - 0x%lx)\n",
		    ex->ex_name, ex->ex_start, ex->ex_end);
		printf("extent_alloc_region: start 0x%lx, end 0x%lx\n",
		    start, end);
		panic("extent_alloc_region: region lies outside extent");
#else
		return (EINVAL);
#endif
	}

	/*
	 * Allocate the region descriptor.  It will be freed later
	 * if we can coalesce with another region.  Don't lock before
	 * here!  This could block.
	 */
	myrp = extent_alloc_region_descriptor(ex, flags);
	if (myrp == NULL) {
#ifdef DIAGNOSTIC
		printf(
		    "extent_alloc_region: can't allocate region descriptor\n");
#endif
		return (ENOMEM);
	}

 alloc_start:
	simple_lock(&ex->ex_slock);

	/*
	 * Attempt to place ourselves in the desired area of the
	 * extent.  We save ourselves some work by keeping the list sorted.
	 * In other words, if the start of the current region is greater
	 * than the end of our region, we don't have to search any further.
	 */

	/*
	 * Keep a pointer to the last region we looked at so
	 * that we don't have to traverse the list again when
	 * we insert ourselves.  If "last" is NULL when we
	 * finally insert ourselves, we go at the head of the
	 * list.  See extent_insert_and_optimize() for details.
	 */
	last = NULL;

	for (rp = ex->ex_regions.lh_first; rp != NULL;
	    rp = rp->er_link.le_next) {
		if (rp->er_start > end) {
			/*
			 * We lie before this region and don't
			 * conflict.
			 */
			break;
		}

		/*
		 * The current region begins before we end.
		 * Check for a conflict.
		 */
		if (rp->er_end >= start) {
			/*
			 * We conflict.  If we can (and want to) wait,
			 * do so.
			 */
			if (flags & EX_WAITSPACE) {
				ex->ex_flags |= EXF_WANTED;
				error = ltsleep(ex,
				    PNORELOCK | PRIBIO | ((flags & EX_CATCH) ? PCATCH : 0),
				    "extnt", 0, &ex->ex_slock);
				if (error)
					return (error);
				goto alloc_start;
			}
			extent_free_region_descriptor(ex, myrp);
			simple_unlock(&ex->ex_slock);
			return (EAGAIN);
		}
		/*
		 * We don't conflict, but this region lies before
		 * us.  Keep a pointer to this region, and keep
		 * trying.
		 */
		last = rp;
	}

	/*
	 * We don't conflict with any regions.  "last" points
	 * to the region we fall after, or is NULL if we belong
	 * at the beginning of the region list.  Insert ourselves.
	 */
	extent_insert_and_optimize(ex, start, size, flags, last, myrp);
	simple_unlock(&ex->ex_slock);
	return (0);
}
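
/*
 * Usage sketch (illustrative): reserving a fixed, known range, such as
 * a device's registers at ports 0x300-0x31f, from the hypothetical
 * "ioport_ex" extent in the extent_create() sketch above:
 *
 *	int error;
 *
 *	error = extent_alloc_region(ioport_ex, 0x300, 0x20, EX_NOWAIT);
 *	if (error)
 *		return (error);
 *
 * extent_alloc_region() returns EAGAIN if any part of the range is
 * already allocated.  The region is later released with:
 *
 *	extent_free(ioport_ex, 0x300, 0x20, EX_NOWAIT);
 */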

/*
 * Macro to check (x + y) <= z.  This check is designed to fail
 * if an overflow occurs.
 */
#define LE_OV(x, y, z)	((((x) + (y)) >= (x)) && (((x) + (y)) <= (z)))
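
/*
 * Worked example (illustrative): on a 32-bit u_long, a plain
 * (x + y) <= z test would wrap around and falsely succeed for
 * x = 0xfffffff0, y = 0x20, z = 0x100, since x + y wraps to 0x10.
 * The extra ((x) + (y)) >= (x) term catches the wrap, so LE_OV
 * fails as intended.  For non-overflowing inputs, e.g. x = 0x1000,
 * y = 0x10, z = 0x2000, LE_OV reduces to the ordinary comparison.
 */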

/*
 * Allocate a region in an extent map subregion.
 *
 * If EX_FAST is specified, we return the first fit in the map.
 * Otherwise, we try to minimize fragmentation by finding the
 * smallest gap that will hold the request.
 *
 * The allocated region is aligned to "alignment", which must be
 * a power of 2.
 */
int
extent_alloc_subregion1(ex, substart, subend, size, alignment, skew, boundary,
    flags, result)
	struct extent *ex;
	u_long substart, subend, size, alignment, skew, boundary;
	int flags;
	u_long *result;
{
	struct extent_region *rp, *myrp, *last, *bestlast;
	u_long newstart, newend, exend, beststart, bestovh, ovh;
	u_long dontcross;
	int error;

#ifdef DIAGNOSTIC
	/*
	 * Check arguments.
	 *
	 * We don't lock to check these, because these values
	 * are never modified, and if another thread deletes the
	 * extent, we're screwed anyway.
	 */
	if (ex == NULL)
		panic("extent_alloc_subregion: NULL extent");
	if (result == NULL)
		panic("extent_alloc_subregion: NULL result pointer");
	if ((substart < ex->ex_start) || (substart > ex->ex_end) ||
	    (subend > ex->ex_end) || (subend < ex->ex_start)) {
		printf("extent_alloc_subregion: extent `%s', ex_start 0x%lx, ex_end 0x%lx\n",
		    ex->ex_name, ex->ex_start, ex->ex_end);
		printf("extent_alloc_subregion: substart 0x%lx, subend 0x%lx\n",
		    substart, subend);
		panic("extent_alloc_subregion: bad subregion");
	}
	if ((size < 1) || ((size - 1) > (subend - substart))) {
		printf("extent_alloc_subregion: extent `%s', size 0x%lx\n",
		    ex->ex_name, size);
		panic("extent_alloc_subregion: bad size");
	}
	if (alignment == 0)
		panic("extent_alloc_subregion: bad alignment");
	if (boundary && (boundary < size)) {
		printf(
		    "extent_alloc_subregion: extent `%s', size 0x%lx, "
		    "boundary 0x%lx\n", ex->ex_name, size, boundary);
		panic("extent_alloc_subregion: bad boundary");
	}
#endif
#ifdef LOCKDEBUG
	if (flags & EX_WAITSPACE)
		simple_lock_only_held(NULL,
		    "extent_alloc_subregion1(EX_WAITSPACE)");
#endif

	/*
	 * Allocate the region descriptor.  It will be freed later
	 * if we can coalesce with another region.  Don't lock before
	 * here!  This could block.
	 */
	myrp = extent_alloc_region_descriptor(ex, flags);
	if (myrp == NULL) {
#ifdef DIAGNOSTIC
		printf(
		 "extent_alloc_subregion: can't allocate region descriptor\n");
#endif
		return (ENOMEM);
	}

 alloc_start:
	simple_lock(&ex->ex_slock);

	/*
	 * Keep a pointer to the last region we looked at so
	 * that we don't have to traverse the list again when
	 * we insert ourselves.  If "last" is NULL when we
	 * finally insert ourselves, we go at the head of the
	 * list.  See extent_insert_and_optimize() for details.
	 */
	last = NULL;

	/*
	 * Keep track of size and location of the smallest
	 * chunk we fit in.
	 *
	 * Since the extent can be as large as the numeric range
	 * of the CPU (0 - 0xffffffff for 32-bit systems), the
	 * best overhead value can be the maximum unsigned integer.
	 * Thus, we initialize "bestovh" to 0, since we insert ourselves
	 * into the region list immediately on an exact match (which
	 * is the only case where "bestovh" would be set to 0).
	 */
	bestovh = 0;
	beststart = 0;
	bestlast = NULL;

	/*
	 * Keep track of end of free region.  This is either the end of extent
	 * or the start of a region past the subend.
	 */
	exend = ex->ex_end;

	/*
	 * For N allocated regions, we must make (N + 1)
	 * checks for unallocated space.  The first chunk we
	 * check is the area from the beginning of the subregion
	 * to the first allocated region after that point.
	 */
	newstart = EXTENT_ALIGN(substart, alignment, skew);
	if (newstart < ex->ex_start) {
#ifdef DIAGNOSTIC
		printf(
      "extent_alloc_subregion: extent `%s' (0x%lx - 0x%lx), alignment 0x%lx\n",
		    ex->ex_name, ex->ex_start, ex->ex_end, alignment);
		simple_unlock(&ex->ex_slock);
		panic("extent_alloc_subregion: overflow after alignment");
#else
		extent_free_region_descriptor(ex, myrp);
		simple_unlock(&ex->ex_slock);
		return (EINVAL);
#endif
	}

	/*
	 * Find the first allocated region that begins on or after
	 * the subregion start, advancing the "last" pointer along
	 * the way.
	 */
	for (rp = ex->ex_regions.lh_first; rp != NULL;
	    rp = rp->er_link.le_next) {
		if (rp->er_start >= newstart)
			break;
		last = rp;
	}

	/*
	 * Relocate the start of our candidate region to the end of
	 * the last allocated region (if there was one overlapping
	 * our subrange).
	 */
	if (last != NULL && last->er_end >= newstart)
		newstart = EXTENT_ALIGN((last->er_end + 1), alignment, skew);

	for (; rp != NULL; rp = rp->er_link.le_next) {
		/*
		 * If the region starts past the subend, bail out and
		 * see if we fit against the subend.
		 */
		if (rp->er_start >= subend) {
			exend = rp->er_start;
			break;
		}

		/*
		 * Check the chunk before "rp".  Note that our
		 * comparison is safe from overflow conditions.
		 */
		if (LE_OV(newstart, size, rp->er_start)) {
			/*
			 * Do a boundary check, if necessary.  Note
			 * that a region may *begin* on the boundary,
			 * but it must end before the boundary.
			 */
			if (boundary) {
				newend = newstart + (size - 1);

				/*
				 * Calculate the next boundary after the start
				 * of this region.
				 */
				dontcross = EXTENT_ALIGN(newstart+1, boundary,
				    (flags & EX_BOUNDZERO) ? 0 : ex->ex_start)
				    - 1;

#if 0
				printf("newstart=%lx newend=%lx ex_start=%lx ex_end=%lx boundary=%lx dontcross=%lx\n",
				    newstart, newend, ex->ex_start, ex->ex_end,
				    boundary, dontcross);
#endif

				/* Check for overflow */
				if (dontcross < ex->ex_start)
					dontcross = ex->ex_end;
				else if (newend > dontcross) {
					/*
					 * Candidate region crosses boundary.
					 * Throw away the leading part and see
					 * if we still fit.
					 */
					newstart = dontcross + 1;
					newend = newstart + (size - 1);
					dontcross += boundary;
					if (!LE_OV(newstart, size, rp->er_start))
						continue;
				}

				/*
				 * If we run past the end of
				 * the extent or the boundary
				 * overflows, then the request
				 * can't fit.
				 */
				if (newstart + size - 1 > ex->ex_end ||
				    dontcross < newstart)
					goto fail;
			}

			/*
			 * We would fit into this space.  Calculate
			 * the overhead (wasted space).  If we exactly
			 * fit, or we're taking the first fit, insert
			 * ourselves into the region list.
			 */
			ovh = rp->er_start - newstart - size;
			if ((flags & EX_FAST) || (ovh == 0))
				goto found;

			/*
			 * We don't exactly fit, but check to see
			 * if we're better than any current choice.
			 */
			if ((bestovh == 0) || (ovh < bestovh)) {
				bestovh = ovh;
				beststart = newstart;
				bestlast = last;
			}
		}

		/*
		 * Skip past the current region and check again.
		 */
		newstart = EXTENT_ALIGN((rp->er_end + 1), alignment, skew);
		if (newstart < rp->er_end) {
			/*
			 * Overflow condition.  Don't error out, since
			 * we might have a chunk of space that we can
			 * use.
			 */
			goto fail;
		}

		last = rp;
	}

	/*
	 * The final check is from the current starting point to the
	 * end of the subregion.  If there were no allocated regions,
	 * "newstart" is set to the beginning of the subregion, or
	 * just past the end of the last allocated region, adjusted
	 * for alignment in either case.
	 */
	if (LE_OV(newstart, (size - 1), subend)) {
		/*
		 * Do a boundary check, if necessary.  Note
		 * that a region may *begin* on the boundary,
		 * but it must end before the boundary.
		 */
		if (boundary) {
			newend = newstart + (size - 1);

			/*
			 * Calculate the next boundary after the start
			 * of this region.
			 */
			dontcross = EXTENT_ALIGN(newstart+1, boundary,
			    (flags & EX_BOUNDZERO) ? 0 : ex->ex_start)
			    - 1;

#if 0
			printf("newstart=%lx newend=%lx ex_start=%lx ex_end=%lx boundary=%lx dontcross=%lx\n",
			    newstart, newend, ex->ex_start, ex->ex_end,
			    boundary, dontcross);
#endif

			/* Check for overflow */
			if (dontcross < ex->ex_start)
				dontcross = ex->ex_end;
			else if (newend > dontcross) {
				/*
				 * Candidate region crosses boundary.
				 * Throw away the leading part and see
				 * if we still fit.
				 */
				newstart = dontcross + 1;
				newend = newstart + (size - 1);
				dontcross += boundary;
				if (!LE_OV(newstart, (size - 1), subend))
					goto fail;
			}

			/*
			 * If we run past the end of
			 * the extent or the boundary
			 * overflows, then the request
			 * can't fit.
			 */
			if (newstart + size - 1 > ex->ex_end ||
			    dontcross < newstart)
				goto fail;
		}

		/*
		 * We would fit into this space.  Calculate
		 * the overhead (wasted space).  If we exactly
		 * fit, or we're taking the first fit, insert
		 * ourselves into the region list.
		 */
		ovh = exend - newstart - (size - 1);
		if ((flags & EX_FAST) || (ovh == 0))
			goto found;

		/*
		 * We don't exactly fit, but check to see
		 * if we're better than any current choice.
		 */
		if ((bestovh == 0) || (ovh < bestovh)) {
			bestovh = ovh;
			beststart = newstart;
			bestlast = last;
		}
	}

 fail:
	/*
	 * One of the following two conditions has occurred:
	 *
	 *	There is no chunk large enough to hold the request.
	 *
	 *	If EX_FAST was not specified, there is not an
	 *	exact match for the request.
	 *
	 * Note that if we reach this point and EX_FAST is
	 * set, then we know there is no space in the extent for
	 * the request.
	 */
	if (((flags & EX_FAST) == 0) && (bestovh != 0)) {
		/*
		 * We have a match that's "good enough".
		 */
		newstart = beststart;
		last = bestlast;
		goto found;
	}

	/*
	 * No space currently available.  Wait for it to free up,
	 * if possible.
	 */
	if (flags & EX_WAITSPACE) {
		ex->ex_flags |= EXF_WANTED;
		error = ltsleep(ex,
		    PNORELOCK | PRIBIO | ((flags & EX_CATCH) ? PCATCH : 0),
		    "extnt", 0, &ex->ex_slock);
		if (error)
			return (error);
		goto alloc_start;
	}

	extent_free_region_descriptor(ex, myrp);
	simple_unlock(&ex->ex_slock);
	return (EAGAIN);

 found:
	/*
	 * Insert ourselves into the region list.
	 */
	extent_insert_and_optimize(ex, newstart, size, flags, last, myrp);
	simple_unlock(&ex->ex_slock);
	*result = newstart;
	return (0);
}
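
/*
 * Usage sketch (illustrative): asking for 0x100 bytes anywhere in
 * 0x1000-0x1fff, aligned to 0x40, with no skew, and not crossing a
 * 0x400 boundary.  The "ioport_ex" extent and "where" variable are
 * hypothetical.  Callers that don't need a skew would normally use
 * the extent_alloc_subregion()/extent_alloc() wrappers from
 * <sys/extent.h> rather than calling extent_alloc_subregion1()
 * directly.
 *
 *	u_long where;
 *	int error;
 *
 *	error = extent_alloc_subregion1(ioport_ex, 0x1000, 0x1fff,
 *	    0x100, 0x40, 0, 0x400, EX_NOWAIT, &where);
 *
 * On success, [where, where + 0x100 - 1] is reserved and "where" is
 * 0x40-aligned.
 */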

int
extent_free(ex, start, size, flags)
	struct extent *ex;
	u_long start, size;
	int flags;
{
	struct extent_region *rp, *nrp = NULL;
	u_long end = start + (size - 1);
	int exflags;

#ifdef DIAGNOSTIC
	/*
	 * Check arguments.
	 *
	 * We don't lock to check these, because these values
	 * are never modified, and if another thread deletes the
	 * extent, we're screwed anyway.
	 */
	if (ex == NULL)
		panic("extent_free: NULL extent");
	if ((start < ex->ex_start) || (start > ex->ex_end)) {
		extent_print(ex);
		printf("extent_free: extent `%s', start 0x%lx, size 0x%lx\n",
		    ex->ex_name, start, size);
		panic("extent_free: extent `%s', region not within extent",
		    ex->ex_name);
	}
	/* Check for an overflow. */
	if (end < start) {
		extent_print(ex);
		printf("extent_free: extent `%s', start 0x%lx, size 0x%lx\n",
		    ex->ex_name, start, size);
		panic("extent_free: overflow");
	}
#endif

	/*
	 * If we're allowing coalescing, we must allocate a region
	 * descriptor now, since it might block.
	 *
	 * XXX Make a static, create-time flags word, so we don't
	 * XXX have to lock to read it!
	 */
	simple_lock(&ex->ex_slock);
	exflags = ex->ex_flags;
	simple_unlock(&ex->ex_slock);

	if ((exflags & EXF_NOCOALESCE) == 0) {
		/* Allocate a region descriptor. */
		nrp = extent_alloc_region_descriptor(ex, flags);
		if (nrp == NULL)
			return (ENOMEM);
	}

	simple_lock(&ex->ex_slock);

	/*
	 * Find region and deallocate.  Several possibilities:
	 *
	 *	1. (start == er_start) && (end == er_end):
	 *	   Free descriptor.
	 *
	 *	2. (start == er_start) && (end < er_end):
	 *	   Adjust er_start.
	 *
	 *	3. (start > er_start) && (end == er_end):
	 *	   Adjust er_end.
	 *
	 *	4. (start > er_start) && (end < er_end):
	 *	   Fragment region.  Requires descriptor alloc.
	 *
	 * Cases 2, 3, and 4 require that the EXF_NOCOALESCE flag
	 * is not set.
	 */
	for (rp = ex->ex_regions.lh_first; rp != NULL;
	    rp = rp->er_link.le_next) {
		/*
		 * Save ourselves some comparisons; does the current
		 * region end before the chunk to be freed begins?  If so,
		 * then we haven't found the appropriate region descriptor.
		 */
		if (rp->er_end < start)
			continue;

		/*
		 * Save ourselves some traversal; does the current
		 * region begin after the chunk to be freed ends?  If so,
		 * then we've already passed any possible region descriptors
		 * that might have contained the chunk to be freed.
		 */
		if (rp->er_start > end)
			break;

		/* Case 1. */
		if ((start == rp->er_start) && (end == rp->er_end)) {
			LIST_REMOVE(rp, er_link);
			extent_free_region_descriptor(ex, rp);
			goto done;
		}

		/*
		 * The following cases all require that EXF_NOCOALESCE
		 * is not set.
		 */
		if (ex->ex_flags & EXF_NOCOALESCE)
			continue;

		/* Case 2. */
		if ((start == rp->er_start) && (end < rp->er_end)) {
			rp->er_start = (end + 1);
			goto done;
		}

		/* Case 3. */
		if ((start > rp->er_start) && (end == rp->er_end)) {
			rp->er_end = (start - 1);
			goto done;
		}

		/* Case 4. */
		if ((start > rp->er_start) && (end < rp->er_end)) {
			/* Fill in new descriptor. */
			nrp->er_start = end + 1;
			nrp->er_end = rp->er_end;

			/* Adjust current descriptor. */
			rp->er_end = start - 1;

			/* Insert new descriptor after current. */
			LIST_INSERT_AFTER(rp, nrp, er_link);

			/* We used the new descriptor, so don't free it below. */
			nrp = NULL;
			goto done;
		}
	}

	/* Region not found, or request otherwise invalid. */
	simple_unlock(&ex->ex_slock);
	extent_print(ex);
	printf("extent_free: start 0x%lx, end 0x%lx\n", start, end);
	panic("extent_free: region not found");

 done:
	if (nrp != NULL)
		extent_free_region_descriptor(ex, nrp);
	if (ex->ex_flags & EXF_WANTED) {
		ex->ex_flags &= ~EXF_WANTED;
		wakeup(ex);
	}
	simple_unlock(&ex->ex_slock);
	return (0);
}
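
/*
 * Worked example (illustrative) of case 4 above: with an allocated
 * region 0x1000-0x1fff, freeing the middle chunk 0x1400-0x17ff:
 *
 *	extent_free(ex, 0x1400, 0x400, EX_NOWAIT);
 *
 * leaves two regions, 0x1000-0x13ff (the adjusted original descriptor)
 * and 0x1800-0x1fff (the newly allocated "nrp").  This fragmenting
 * case is why the descriptor is allocated up front: acquiring it
 * while holding the extent lock could block.
 */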

/*
 * Allocate an extent region descriptor.  EXTENT MUST NOT BE LOCKED,
 * AS THIS FUNCTION MAY BLOCK!  We will handle any locking we may need.
 */
static struct extent_region *
extent_alloc_region_descriptor(ex, flags)
	struct extent *ex;
	int flags;
{
	struct extent_region *rp;
	int exflags;
	int s;

	/*
	 * If the kernel memory allocator is not yet running, we can't
	 * use it (obviously).
	 */
	if (KMEM_IS_RUNNING == 0)
		flags &= ~EX_MALLOCOK;

	/*
	 * XXX Make a static, create-time flags word, so we don't
	 * XXX have to lock to read it!
	 */
	simple_lock(&ex->ex_slock);
	exflags = ex->ex_flags;
	simple_unlock(&ex->ex_slock);

	if (exflags & EXF_FIXED) {
		struct extent_fixed *fex = (struct extent_fixed *)ex;

		for (;;) {
			simple_lock(&ex->ex_slock);
			if ((rp = fex->fex_freelist.lh_first) != NULL) {
				/*
				 * Don't muck with flags after pulling it off
				 * the freelist; it may have been dynamically
				 * allocated, and kindly given to us.  We
				 * need to remember that information.
				 */
				LIST_REMOVE(rp, er_link);
				simple_unlock(&ex->ex_slock);
				return (rp);
			}
			if (flags & EX_MALLOCOK) {
				simple_unlock(&ex->ex_slock);
				goto alloc;
			}
			if ((flags & EX_WAITOK) == 0) {
				simple_unlock(&ex->ex_slock);
				return (NULL);
			}
			ex->ex_flags |= EXF_FLWANTED;
			if (ltsleep(&fex->fex_freelist,
			    PNORELOCK | PRIBIO | ((flags & EX_CATCH) ? PCATCH : 0),
			    "extnt", 0, &ex->ex_slock))
				return (NULL);
		}
	}

 alloc:
	s = splhigh();
	if (expool_initialized == 0)
		expool_init();
	rp = pool_get(&expool, (flags & EX_WAITOK) ? PR_WAITOK : 0);
	splx(s);

	if (rp != NULL)
		rp->er_flags = ER_ALLOC;

	return (rp);
}

/*
 * Free an extent region descriptor.  EXTENT _MUST_ BE LOCKED!  This
 * is safe as we do not block here.
 */
static void
extent_free_region_descriptor(ex, rp)
	struct extent *ex;
	struct extent_region *rp;
{
	int s;

	if (ex->ex_flags & EXF_FIXED) {
		struct extent_fixed *fex = (struct extent_fixed *)ex;

		/*
		 * If someone's waiting for a region descriptor,
		 * be nice and give them this one, rather than
		 * just free'ing it back to the system.
		 */
		if (rp->er_flags & ER_ALLOC) {
			if (ex->ex_flags & EXF_FLWANTED) {
				/* Clear all but ER_ALLOC flag. */
				rp->er_flags = ER_ALLOC;
				LIST_INSERT_HEAD(&fex->fex_freelist, rp,
				    er_link);
				goto wake_em_up;
			} else {
				s = splhigh();
				pool_put(&expool, rp);
				splx(s);
			}
		} else {
			/* Clear all flags. */
			rp->er_flags = 0;
			LIST_INSERT_HEAD(&fex->fex_freelist, rp, er_link);
		}

		if (ex->ex_flags & EXF_FLWANTED) {
 wake_em_up:
			ex->ex_flags &= ~EXF_FLWANTED;
			wakeup(&fex->fex_freelist);
		}
		return;
	}

	/*
	 * We know it's dynamically allocated if we get here.
	 */
	s = splhigh();
	pool_put(&expool, rp);
	splx(s);
}

void
extent_print(ex)
	struct extent *ex;
{
	struct extent_region *rp;

	if (ex == NULL)
		panic("extent_print: NULL extent");

	simple_lock(&ex->ex_slock);

	printf("extent `%s' (0x%lx - 0x%lx), flags = 0x%x\n", ex->ex_name,
	    ex->ex_start, ex->ex_end, ex->ex_flags);

	for (rp = ex->ex_regions.lh_first; rp != NULL;
	    rp = rp->er_link.le_next)
		printf("     0x%lx - 0x%lx\n", rp->er_start, rp->er_end);

	simple_unlock(&ex->ex_slock);
}
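
/*
 * Sample extent_print() output (illustrative values only), showing an
 * extent header line followed by one line per allocated region:
 *
 *	extent `ioport' (0x0 - 0xffff), flags = 0x0
 *	     0x300 - 0x31f
 *	     0x1000 - 0x10ff
 */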