/*	$NetBSD: subr_extent.c,v 1.89 2019/08/15 09:04:22 skrll Exp $	*/

/*-
 * Copyright (c) 1996, 1998, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe and Matthias Drochner.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * General purpose extent manager.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_extent.c,v 1.89 2019/08/15 09:04:22 skrll Exp $");

#ifdef _KERNEL
#ifdef _KERNEL_OPT
#include "opt_lockdebug.h"
#endif

#include <sys/param.h>
#include <sys/extent.h>
#include <sys/kmem.h>
#include <sys/pool.h>
#include <sys/time.h>
#include <sys/systm.h>
#include <sys/proc.h>

#include <uvm/uvm_extern.h>

#elif defined(_EXTENT_TESTING)

/*
 * user-land definitions, so it can fit into a testing harness.
 */
#include <sys/param.h>
#include <sys/pool.h>
#include <sys/extent.h>

#include <errno.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>

static inline void no_op(void) { return; }

/*
 * Use multi-line #defines to avoid screwing up the kernel tags file;
 * without this, ctags produces a tags file where panic() shows up
 * in subr_extent.c rather than subr_prf.c.
 */
#define	\
kmem_alloc(s, flags)		malloc(s)
#define	\
kmem_free(p, s)			free(p)
#define	\
cv_wait_sig(cv, lock)		(EWOULDBLOCK)
#define	\
pool_get(pool, flags)		kmem_alloc((pool)->pr_size,0)
#define	\
pool_put(pool, rp)		kmem_free(rp,0)
#define	\
panic(a ...)			printf(a)
#define	mutex_init(a, b, c)	no_op()
#define	mutex_destroy(a)	no_op()
#define	mutex_enter(l)		no_op()
#define	mutex_exit(l)		no_op()
#define	cv_wait(cv, lock)	no_op()
#define	cv_broadcast(cv)	no_op()
#define	cv_init(a, b)		no_op()
#define	cv_destroy(a)		no_op()
#define	KMEM_IS_RUNNING		(1)
#define	IPL_VM			(0)
#define	MUTEX_DEFAULT		(0)
#define	KASSERT(exp)
#endif

static struct pool expool;

/*
 * Macro to align to an arbitrary power-of-two boundary.
 */
#define EXTENT_ALIGN(_start, _align, _skew)		\
	(((((_start) - (_skew)) + ((_align) - 1)) & (-(_align))) + (_skew))
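/*
 * Worked example (illustrative, not part of the original source):
 * for a power of two, (-(_align)) equals ~((_align) - 1), so
 * EXTENT_ALIGN(0x1003, 0x10, 0) rounds 0x1003 up to 0x1010.
 * A non-zero skew offsets the alignment grid:
 * EXTENT_ALIGN(0x1003, 0x10, 0x4) yields 0x1004, the smallest
 * address >= 0x1003 that is congruent to 0x4 modulo 0x10.
 */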

/*
 * Create the extent_region pool.
 */
void
extent_init(void)
{

#if defined(_KERNEL)
	pool_init(&expool, sizeof(struct extent_region), 0, 0, 0,
	    "extent", NULL, IPL_VM);
#else
	expool.pr_size = sizeof(struct extent_region);
#endif
}

/*
 * Allocate an extent region descriptor.  EXTENT MUST NOT BE LOCKED.
 * We will handle any locking we may need.
 */
static struct extent_region *
extent_alloc_region_descriptor(struct extent *ex, int flags)
{
	struct extent_region *rp;
	int error;

	if (ex->ex_flags & EXF_FIXED) {
		struct extent_fixed *fex = (struct extent_fixed *)ex;

		if (!(ex->ex_flags & EXF_EARLY))
			mutex_enter(&ex->ex_lock);
		for (;;) {
			if ((rp = LIST_FIRST(&fex->fex_freelist)) != NULL) {
				/*
				 * Don't muck with flags after pulling it off
				 * the freelist; it may have been dynamically
				 * allocated, and kindly given to us.  We
				 * need to remember that information.
				 */
				LIST_REMOVE(rp, er_link);
				if (!(ex->ex_flags & EXF_EARLY))
					mutex_exit(&ex->ex_lock);
				return (rp);
			}
			if (flags & EX_MALLOCOK) {
				if (!(ex->ex_flags & EXF_EARLY))
					mutex_exit(&ex->ex_lock);
				goto alloc;
			}
			if ((flags & EX_WAITOK) == 0) {
				if (!(ex->ex_flags & EXF_EARLY))
					mutex_exit(&ex->ex_lock);
				return (NULL);
			}
			KASSERT(mutex_owned(&ex->ex_lock));
			ex->ex_flwanted = true;
			if ((flags & EX_CATCH) != 0)
				error = cv_wait_sig(&ex->ex_cv, &ex->ex_lock);
			else {
				cv_wait(&ex->ex_cv, &ex->ex_lock);
				error = 0;
			}
			if (error != 0) {
				mutex_exit(&ex->ex_lock);
				return (NULL);
			}
		}
	}

 alloc:
	rp = pool_get(&expool, (flags & EX_WAITOK) ? PR_WAITOK : PR_NOWAIT);

	if (rp != NULL)
		rp->er_flags = ER_ALLOC;

	return (rp);
}

/*
 * Free an extent region descriptor.  EXTENT _MUST_ BE LOCKED!
 */
static void
extent_free_region_descriptor(struct extent *ex, struct extent_region *rp)
{

	if (ex->ex_flags & EXF_FIXED) {
		struct extent_fixed *fex = (struct extent_fixed *)ex;

		/*
		 * If someone's waiting for a region descriptor,
		 * be nice and give them this one, rather than
		 * just free'ing it back to the system.
		 */
		if (rp->er_flags & ER_ALLOC) {
			if (ex->ex_flwanted) {
				/* Clear all but ER_ALLOC flag. */
				rp->er_flags = ER_ALLOC;
				LIST_INSERT_HEAD(&fex->fex_freelist, rp,
				    er_link);
				goto wake_em_up;
			} else
				pool_put(&expool, rp);
		} else {
			/* Clear all flags. */
			rp->er_flags = 0;
			LIST_INSERT_HEAD(&fex->fex_freelist, rp, er_link);
		}

 wake_em_up:
		if (!(ex->ex_flags & EXF_EARLY)) {
			ex->ex_flwanted = false;
			cv_broadcast(&ex->ex_cv);
		}
		return;
	}

	/*
	 * We know it's dynamically allocated if we get here.
	 */
	pool_put(&expool, rp);
}

/*
 * Allocate and initialize an extent map.
 */
struct extent *
extent_create(const char *name, u_long start, u_long end,
    void *storage, size_t storagesize, int flags)
{
	struct extent *ex;
	char *cp = storage;
	size_t sz = storagesize;
	struct extent_region *rp;
	int fixed_extent = (storage != NULL);

#ifndef _KERNEL
	extent_init();
#endif

#ifdef DIAGNOSTIC
	/* Check arguments. */
	if (name == NULL)
		panic("extent_create: name == NULL");
	if (end < start) {
		printf("extent_create: extent `%s', start 0x%lx, end 0x%lx\n",
		    name, start, end);
		panic("extent_create: end < start");
	}
	if (fixed_extent && (storagesize < sizeof(struct extent_fixed)))
		panic("extent_create: fixed extent, bad storagesize 0x%lx",
		    (u_long)storagesize);
	if (fixed_extent == 0 && (storagesize != 0 || storage != NULL))
		panic("extent_create: storage provided for non-fixed");
#endif

	/* Allocate extent descriptor. */
	if (fixed_extent) {
		struct extent_fixed *fex;

		memset(storage, 0, storagesize);

		/*
		 * Align all descriptors on "long" boundaries.
		 */
		fex = (struct extent_fixed *)cp;
		ex = (struct extent *)fex;
		cp += ALIGN(sizeof(struct extent_fixed));
		sz -= ALIGN(sizeof(struct extent_fixed));
		fex->fex_storage = storage;
		fex->fex_storagesize = storagesize;

		/*
		 * In a fixed extent, we have to pre-allocate region
		 * descriptors and place them in the extent's freelist.
		 */
		LIST_INIT(&fex->fex_freelist);
		while (sz >= ALIGN(sizeof(struct extent_region))) {
			rp = (struct extent_region *)cp;
			cp += ALIGN(sizeof(struct extent_region));
			sz -= ALIGN(sizeof(struct extent_region));
			LIST_INSERT_HEAD(&fex->fex_freelist, rp, er_link);
		}
	} else {
		ex = kmem_alloc(sizeof(*ex),
		    (flags & EX_WAITOK) ? KM_SLEEP : KM_NOSLEEP);
		if (ex == NULL)
			return (NULL);
	}

	/* Fill in the extent descriptor and return it to the caller. */
	if ((flags & EX_EARLY) == 0) {
		mutex_init(&ex->ex_lock, MUTEX_DEFAULT, IPL_VM);
		cv_init(&ex->ex_cv, "extent");
	}
	LIST_INIT(&ex->ex_regions);
	ex->ex_name = name;
	ex->ex_start = start;
	ex->ex_end = end;
	ex->ex_flags = 0;
	ex->ex_flwanted = false;
	if (fixed_extent)
		ex->ex_flags |= EXF_FIXED;
	if (flags & EX_NOCOALESCE)
		ex->ex_flags |= EXF_NOCOALESCE;
	if (flags & EX_EARLY)
		ex->ex_flags |= EXF_EARLY;
	return (ex);
}
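
/*
 * Usage sketch (illustrative only; the names are hypothetical).
 * A fixed extent supplies its own static storage, which is how early
 * MD code creates extents before the kmem/pool allocators are running:
 *
 *	static char iomem_storage[EXTENT_FIXED_STORAGE_SIZE(8)];
 *	struct extent *iomem_ex;
 *
 *	iomem_ex = extent_create("iomem", 0x0, 0xffffffff,
 *	    iomem_storage, sizeof(iomem_storage),
 *	    EX_NOCOALESCE | EX_NOWAIT);
 *
 * Passing storage == NULL instead makes a non-fixed extent whose
 * descriptor and region descriptors come from kmem/pool.
 */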

/*
 * Destroy an extent map.
 * Since we're freeing the data, there can't be any references
 * so we don't need any locking.
 */
void
extent_destroy(struct extent *ex)
{
	struct extent_region *rp, *orp;

#ifdef DIAGNOSTIC
	/* Check arguments. */
	if (ex == NULL)
		panic("extent_destroy: NULL extent");
#endif

	/* Free all region descriptors in extent. */
	for (rp = LIST_FIRST(&ex->ex_regions); rp != NULL; ) {
		orp = rp;
		rp = LIST_NEXT(rp, er_link);
		LIST_REMOVE(orp, er_link);
		extent_free_region_descriptor(ex, orp);
	}

	cv_destroy(&ex->ex_cv);
	mutex_destroy(&ex->ex_lock);

	/* If we're not a fixed extent, free the extent descriptor itself. */
	if ((ex->ex_flags & EXF_FIXED) == 0)
		kmem_free(ex, sizeof(*ex));
}

/*
 * Insert a region descriptor into the sorted region list after the
 * entry "after" or at the head of the list (if "after" is NULL).
 * The region descriptor we insert is passed in "rp".  We must
 * allocate the region descriptor before calling this function!
 * If we don't need the region descriptor, it will be freed here.
 */
static void
extent_insert_and_optimize(struct extent *ex, u_long start, u_long size,
    int flags, struct extent_region *after, struct extent_region *rp)
{
	struct extent_region *nextr;
	int appended = 0;

	if (after == NULL) {
		/*
		 * We're the first in the region list.  If there's
		 * a region after us, attempt to coalesce to save
		 * descriptor overhead.
		 */
		if (((ex->ex_flags & EXF_NOCOALESCE) == 0) &&
		    (LIST_FIRST(&ex->ex_regions) != NULL) &&
		    ((start + size) == LIST_FIRST(&ex->ex_regions)->er_start)) {
			/*
			 * We can coalesce.  Prepend us to the first region.
			 */
			LIST_FIRST(&ex->ex_regions)->er_start = start;
			extent_free_region_descriptor(ex, rp);
			return;
		}

		/*
		 * Can't coalesce.  Fill in the region descriptor,
		 * and insert us at the head of the region list.
		 */
		rp->er_start = start;
		rp->er_end = start + (size - 1);
		LIST_INSERT_HEAD(&ex->ex_regions, rp, er_link);
		return;
	}

	/*
	 * If EXF_NOCOALESCE is set, coalescing is disallowed.
	 */
	if (ex->ex_flags & EXF_NOCOALESCE)
		goto cant_coalesce;

	/*
	 * Attempt to coalesce with the region before us.
	 */
	if ((after->er_end + 1) == start) {
		/*
		 * We can coalesce.  Append ourselves and make
		 * note of it.
		 */
		after->er_end = start + (size - 1);
		appended = 1;
	}

	/*
	 * Attempt to coalesce with the region after us.
	 */
	if ((LIST_NEXT(after, er_link) != NULL) &&
	    ((start + size) == LIST_NEXT(after, er_link)->er_start)) {
		/*
		 * We can coalesce.  Note that if we appended ourselves
		 * to the previous region, we exactly fit the gap, and
		 * can free the "next" region descriptor.
		 */
		if (appended) {
			/*
			 * Yup, we can free it up.
			 */
			after->er_end = LIST_NEXT(after, er_link)->er_end;
			nextr = LIST_NEXT(after, er_link);
			LIST_REMOVE(nextr, er_link);
			extent_free_region_descriptor(ex, nextr);
		} else {
			/*
			 * Nope, just prepend us to the next region.
			 */
			LIST_NEXT(after, er_link)->er_start = start;
		}

		extent_free_region_descriptor(ex, rp);
		return;
	}

	/*
	 * We weren't able to coalesce with the next region, but
	 * we don't need to allocate a region descriptor if we
	 * appended ourselves to the previous region.
	 */
	if (appended) {
		extent_free_region_descriptor(ex, rp);
		return;
	}

 cant_coalesce:

	/*
	 * Fill in the region descriptor and insert ourselves
	 * into the region list.
	 */
	rp->er_start = start;
	rp->er_end = start + (size - 1);
	LIST_INSERT_AFTER(after, rp, er_link);
}
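
/*
 * Illustrative example (not part of the original source): with
 * coalescing enabled, allocating [0x100, 0x1ff] and then
 * [0x200, 0x2ff] leaves a single region [0x100, 0x2ff] in the list,
 * using one descriptor.  With EX_NOCOALESCE the two stay separate;
 * note that a no-coalesce extent only permits freeing a whole
 * previously allocated region (case 1 in extent_free() below).
 */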

/*
 * Allocate a specific region in an extent map.
 */
int
extent_alloc_region(struct extent *ex, u_long start, u_long size, int flags)
{
	struct extent_region *rp, *last, *myrp;
	u_long end = start + (size - 1);
	int error;

#ifdef DIAGNOSTIC
	/* Check arguments. */
	if (ex == NULL)
		panic("extent_alloc_region: NULL extent");
	if (size < 1) {
		printf("extent_alloc_region: extent `%s', size 0x%lx\n",
		    ex->ex_name, size);
		panic("extent_alloc_region: bad size");
	}
	if (end < start) {
		printf(
		 "extent_alloc_region: extent `%s', start 0x%lx, size 0x%lx\n",
		    ex->ex_name, start, size);
		panic("extent_alloc_region: overflow");
	}
#endif
#ifdef LOCKDEBUG
	if (flags & EX_WAITSPACE) {
		ASSERT_SLEEPABLE();
	}
#endif

	/*
	 * Make sure the requested region lies within the
	 * extent.
	 *
	 * We don't lock to check the range, because those values
	 * are never modified, and if another thread deletes the
	 * extent, we're screwed anyway.
	 */
	if ((start < ex->ex_start) || (end > ex->ex_end)) {
#ifdef DIAGNOSTIC
		printf("extent_alloc_region: extent `%s' (0x%lx - 0x%lx)\n",
		    ex->ex_name, ex->ex_start, ex->ex_end);
		printf("extent_alloc_region: start 0x%lx, end 0x%lx\n",
		    start, end);
		panic("extent_alloc_region: region lies outside extent");
#else
		return (EINVAL);
#endif
	}

	/*
	 * Allocate the region descriptor.  It will be freed later
	 * if we can coalesce with another region.  Don't lock before
	 * here!  This could block.
	 */
	myrp = extent_alloc_region_descriptor(ex, flags);
	if (myrp == NULL) {
#ifdef DIAGNOSTIC
		printf(
		    "extent_alloc_region: can't allocate region descriptor\n");
#endif
		return (ENOMEM);
	}

	if (!(ex->ex_flags & EXF_EARLY))
		mutex_enter(&ex->ex_lock);
 alloc_start:

	/*
	 * Attempt to place ourselves in the desired area of the
	 * extent.  We save ourselves some work by keeping the list sorted.
	 * In other words, if the start of the current region is greater
	 * than the end of our region, we don't have to search any further.
	 */

	/*
	 * Keep a pointer to the last region we looked at so
	 * that we don't have to traverse the list again when
	 * we insert ourselves.  If "last" is NULL when we
	 * finally insert ourselves, we go at the head of the
	 * list.  See extent_insert_and_optimize() for details.
	 */
	last = NULL;

	LIST_FOREACH(rp, &ex->ex_regions, er_link) {
		if (rp->er_start > end) {
			/*
			 * We lie before this region and don't
			 * conflict.
			 */
			break;
		}

		/*
		 * The current region begins before we end.
		 * Check for a conflict.
		 */
		if (rp->er_end >= start) {
			/*
			 * We conflict.  If we can (and want to) wait,
			 * do so.
			 */
			if (flags & EX_WAITSPACE) {
				KASSERT(!(ex->ex_flags & EXF_EARLY));
				if ((flags & EX_CATCH) != 0)
					error = cv_wait_sig(&ex->ex_cv,
					    &ex->ex_lock);
				else {
					cv_wait(&ex->ex_cv, &ex->ex_lock);
					error = 0;
				}
				if (error == 0)
					goto alloc_start;
				mutex_exit(&ex->ex_lock);
			} else {
				if (!(ex->ex_flags & EXF_EARLY))
					mutex_exit(&ex->ex_lock);
				error = EAGAIN;
			}
			extent_free_region_descriptor(ex, myrp);
			return error;
		}
		/*
		 * We don't conflict, but this region lies before
		 * us.  Keep a pointer to this region, and keep
		 * trying.
		 */
		last = rp;
	}

	/*
	 * We don't conflict with any regions.  "last" points
	 * to the region we fall after, or is NULL if we belong
	 * at the beginning of the region list.  Insert ourselves.
	 */
	extent_insert_and_optimize(ex, start, size, flags, last, myrp);
	if (!(ex->ex_flags & EXF_EARLY))
		mutex_exit(&ex->ex_lock);
	return (0);
}
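
/*
 * Usage sketch (illustrative only; the addresses are hypothetical).
 * extent_alloc_region() reserves a specific, known range, e.g. marking
 * the VGA memory hole as busy in an I/O-memory extent:
 *
 *	error = extent_alloc_region(iomem_ex, 0xa0000, 0x20000,
 *	    EX_NOWAIT);
 *	if (error)
 *		panic("can't reserve VGA memory: %d", error);
 */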

/*
 * Macro to check (x + y) <= z.  This check is designed to fail
 * if an overflow occurs.
 */
#define LE_OV(x, y, z)	((((x) + (y)) >= (x)) && (((x) + (y)) <= (z)))
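
/*
 * Worked example (illustrative, not part of the original source):
 * on a 32-bit u_long, LE_OV(0xfffffff0, 0x20, 0xffffffff) is false,
 * because 0xfffffff0 + 0x20 wraps to 0x10 and fails the first
 * comparison; a plain (x + y <= z) test would wrongly succeed.
 */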

/*
 * Allocate a region in an extent map subregion.
 *
 * If EX_FAST is specified, we return the first fit in the map.
 * Otherwise, we try to minimize fragmentation by finding the
 * smallest gap that will hold the request.
 *
 * The allocated region is aligned to "alignment", which must be
 * a power of 2.
 */
int
extent_alloc_subregion1(struct extent *ex, u_long substart, u_long subend,
    u_long size, u_long alignment, u_long skew, u_long boundary,
    int flags, u_long *result)
{
	struct extent_region *rp, *myrp, *last, *bestlast;
	u_long newstart, newend, exend, beststart, bestovh, ovh;
	u_long dontcross;
	int error;

#ifdef DIAGNOSTIC
	/*
	 * Check arguments.
	 *
	 * We don't lock to check these, because these values
	 * are never modified, and if another thread deletes the
	 * extent, we're screwed anyway.
	 */
	if (ex == NULL)
		panic("extent_alloc_subregion: NULL extent");
	if (result == NULL)
		panic("extent_alloc_subregion: NULL result pointer");
	if ((substart < ex->ex_start) || (substart > ex->ex_end) ||
	    (subend > ex->ex_end) || (subend < ex->ex_start)) {
		printf("extent_alloc_subregion: extent `%s', ex_start 0x%lx, "
		    "ex_end 0x%lx\n", ex->ex_name, ex->ex_start, ex->ex_end);
		printf("extent_alloc_subregion: substart 0x%lx, subend 0x%lx\n",
		    substart, subend);
		panic("extent_alloc_subregion: bad subregion");
	}
	if ((size < 1) || ((size - 1) > (subend - substart))) {
		printf("extent_alloc_subregion: extent `%s', size 0x%lx\n",
		    ex->ex_name, size);
		printf("extent_alloc_subregion: substart 0x%lx, subend 0x%lx\n",
		    substart, subend);
		panic("extent_alloc_subregion: bad size");
	}
	if (alignment == 0)
		panic("extent_alloc_subregion: bad alignment");
	if (boundary && (boundary < size)) {
		printf(
		    "extent_alloc_subregion: extent `%s', size 0x%lx, "
		    "boundary 0x%lx\n", ex->ex_name, size, boundary);
		panic("extent_alloc_subregion: bad boundary");
	}
#endif
#ifdef LOCKDEBUG
	if (flags & EX_WAITSPACE) {
		ASSERT_SLEEPABLE();
	}
#endif

	/*
	 * Allocate the region descriptor.  It will be freed later
	 * if we can coalesce with another region.  Don't lock before
	 * here!  This could block.
	 */
	myrp = extent_alloc_region_descriptor(ex, flags);
	if (myrp == NULL) {
#ifdef DIAGNOSTIC
		printf(
		 "extent_alloc_subregion: can't allocate region descriptor\n");
#endif
		return (ENOMEM);
	}

 alloc_start:
	mutex_enter(&ex->ex_lock);

	/*
	 * Keep a pointer to the last region we looked at so
	 * that we don't have to traverse the list again when
	 * we insert ourselves.  If "last" is NULL when we
	 * finally insert ourselves, we go at the head of the
	 * list.  See extent_insert_and_optimize() for details.
	 */
	last = NULL;

	/*
	 * Keep track of size and location of the smallest
	 * chunk we fit in.
	 *
	 * Since the extent can be as large as the numeric range
	 * of the CPU (0 - 0xffffffff for 32-bit systems), the
	 * best overhead value can be the maximum unsigned integer.
	 * Thus, we initialize "bestovh" to 0, since we insert ourselves
	 * into the region list immediately on an exact match (which
	 * is the only case where "bestovh" would be set to 0).
	 */
	bestovh = 0;
	beststart = 0;
	bestlast = NULL;

	/*
	 * Keep track of end of free region.  This is either the end of extent
	 * or the start of a region past the subend.
	 */
	exend = ex->ex_end;

	/*
	 * For N allocated regions, we must make (N + 1)
	 * checks for unallocated space.  The first chunk we
	 * check is the area from the beginning of the subregion
	 * to the first allocated region after that point.
	 */
	newstart = EXTENT_ALIGN(substart, alignment, skew);
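	/*
	 * Illustrative note (not part of the original source): the skew
	 * offsets the alignment grid, e.g. with alignment 0x100 and
	 * skew 0x40 the candidate starts are 0x40, 0x140, 0x240, ...
	 * rather than 0x0, 0x100, 0x200, ...
	 */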
	if (newstart < ex->ex_start) {
#ifdef DIAGNOSTIC
		printf(
      "extent_alloc_subregion: extent `%s' (0x%lx - 0x%lx), alignment 0x%lx\n",
		    ex->ex_name, ex->ex_start, ex->ex_end, alignment);
		mutex_exit(&ex->ex_lock);
		panic("extent_alloc_subregion: overflow after alignment");
#else
		extent_free_region_descriptor(ex, myrp);
		mutex_exit(&ex->ex_lock);
		return (EINVAL);
#endif
	}

	/*
	 * Find the first allocated region that begins on or after
	 * the subregion start, advancing the "last" pointer along
	 * the way.
	 */
	LIST_FOREACH(rp, &ex->ex_regions, er_link) {
		if (rp->er_start >= newstart)
			break;
		last = rp;
	}

	/*
	 * Relocate the start of our candidate region to the end of
	 * the last allocated region (if there was one overlapping
	 * our subrange).
	 */
	if (last != NULL && last->er_end >= newstart)
		newstart = EXTENT_ALIGN((last->er_end + 1), alignment, skew);

	for (; rp != NULL; rp = LIST_NEXT(rp, er_link)) {
		/*
		 * If the region is past the subend, bail out and see
		 * if we fit against the subend.
		 */
		if (rp->er_start > subend) {
			exend = rp->er_start;
			break;
		}

		/*
		 * Check the chunk before "rp".  Note that our
		 * comparison is safe from overflow conditions.
		 */
		if (LE_OV(newstart, size, rp->er_start)) {
			/*
			 * Do a boundary check, if necessary.  Note
			 * that a region may *begin* on the boundary,
			 * but it must end before the boundary.
			 */
			if (boundary) {
				newend = newstart + (size - 1);

				/*
				 * Calculate the next boundary after the start
				 * of this region.
				 */
				dontcross = EXTENT_ALIGN(newstart+1, boundary,
				    (flags & EX_BOUNDZERO) ? 0 : ex->ex_start)
				    - 1;
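				/*
				 * Illustrative example (not part of the
				 * original source): with ex_start = 0 and
				 * boundary = 0x1000, a candidate at
				 * newstart = 0x800 gets dontcross = 0xfff;
				 * any newend beyond that crosses the
				 * boundary, and the candidate is pushed up
				 * to 0x1000 below.
				 */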

#if 0
				printf("newstart=%lx newend=%lx ex_start=%lx ex_end=%lx boundary=%lx dontcross=%lx\n",
				    newstart, newend, ex->ex_start, ex->ex_end,
				    boundary, dontcross);
#endif

				/* Check for overflow */
				if (dontcross < ex->ex_start)
					dontcross = ex->ex_end;
				else if (newend > dontcross) {
					/*
					 * Candidate region crosses boundary.
					 * Throw away the leading part and see
					 * if we still fit.
					 */
					newstart = dontcross + 1;
					newend = newstart + (size - 1);
					dontcross += boundary;
					if (!LE_OV(newstart, size, rp->er_start))
						goto skip;
				}

				/*
				 * If we run past the end of
				 * the extent or the boundary
				 * overflows, then the request
				 * can't fit.
				 */
				if (newstart + size - 1 > ex->ex_end ||
				    dontcross < newstart)
					goto fail;
			}

			/*
			 * We would fit into this space.  Calculate
			 * the overhead (wasted space).  If we exactly
			 * fit, or we're taking the first fit, insert
			 * ourselves into the region list.
			 */
			ovh = rp->er_start - newstart - size;
			if ((flags & EX_FAST) || (ovh == 0))
				goto found;

			/*
			 * Don't exactly fit, but check to see
			 * if we're better than any current choice.
			 */
			if ((bestovh == 0) || (ovh < bestovh)) {
				bestovh = ovh;
				beststart = newstart;
				bestlast = last;
			}
		}

 skip:
		/*
		 * Skip past the current region and check again.
		 */
		newstart = EXTENT_ALIGN((rp->er_end + 1), alignment, skew);
		if (newstart < rp->er_end) {
			/*
			 * Overflow condition.  Don't error out, since
			 * we might have a chunk of space that we can
			 * use.
			 */
			goto fail;
		}

		last = rp;
	}

	/*
	 * The final check is from the current starting point to the
	 * end of the subregion.  If there were no allocated regions,
	 * "newstart" is set to the beginning of the subregion, or
	 * just past the end of the last allocated region, adjusted
	 * for alignment in either case.
	 */
	if (LE_OV(newstart, (size - 1), subend)) {
		/*
		 * Do a boundary check, if necessary.  Note
		 * that a region may *begin* on the boundary,
		 * but it must end before the boundary.
		 */
		if (boundary) {
			newend = newstart + (size - 1);

			/*
			 * Calculate the next boundary after the start
			 * of this region.
			 */
			dontcross = EXTENT_ALIGN(newstart+1, boundary,
			    (flags & EX_BOUNDZERO) ? 0 : ex->ex_start)
			    - 1;

#if 0
			printf("newstart=%lx newend=%lx ex_start=%lx ex_end=%lx boundary=%lx dontcross=%lx\n",
			    newstart, newend, ex->ex_start, ex->ex_end,
			    boundary, dontcross);
#endif

			/* Check for overflow */
			if (dontcross < ex->ex_start)
				dontcross = ex->ex_end;
			else if (newend > dontcross) {
				/*
				 * Candidate region crosses boundary.
				 * Throw away the leading part and see
				 * if we still fit.
				 */
				newstart = dontcross + 1;
				newend = newstart + (size - 1);
				dontcross += boundary;
				if (!LE_OV(newstart, (size - 1), subend))
					goto fail;
			}

			/*
			 * If we run past the end of
			 * the extent or the boundary
			 * overflows, then the request
			 * can't fit.
			 */
			if (newstart + size - 1 > ex->ex_end ||
			    dontcross < newstart)
				goto fail;
		}

		/*
		 * We would fit into this space.  Calculate
		 * the overhead (wasted space).  If we exactly
		 * fit, or we're taking the first fit, insert
		 * ourselves into the region list.
		 */
		ovh = exend - newstart - (size - 1);
		if ((flags & EX_FAST) || (ovh == 0))
			goto found;

		/*
		 * Don't exactly fit, but check to see
		 * if we're better than any current choice.
		 */
		if ((bestovh == 0) || (ovh < bestovh)) {
			bestovh = ovh;
			beststart = newstart;
			bestlast = last;
		}
	}

 fail:
	/*
	 * One of the following two conditions has
	 * occurred:
	 *
	 *	There is no chunk large enough to hold the request.
	 *
	 *	If EX_FAST was not specified, there is not an
	 *	exact match for the request.
	 *
	 * Note that if we reach this point and EX_FAST is
	 * set, then we know there is no space in the extent for
	 * the request.
	 */
	if (((flags & EX_FAST) == 0) && (bestovh != 0)) {
		/*
		 * We have a match that's "good enough".
		 */
		newstart = beststart;
		last = bestlast;
		goto found;
	}

	/*
	 * No space currently available.  Wait for it to free up,
	 * if possible.
	 */
	if (flags & EX_WAITSPACE) {
		if ((flags & EX_CATCH) != 0) {
			error = cv_wait_sig(&ex->ex_cv, &ex->ex_lock);
		} else {
			cv_wait(&ex->ex_cv, &ex->ex_lock);
			error = 0;
		}
		if (error == 0)
			goto alloc_start;
		mutex_exit(&ex->ex_lock);
	} else {
		mutex_exit(&ex->ex_lock);
		error = EAGAIN;
	}

	extent_free_region_descriptor(ex, myrp);
	return error;

 found:
	/*
	 * Insert ourselves into the region list.
	 */
	extent_insert_and_optimize(ex, newstart, size, flags, last, myrp);
	mutex_exit(&ex->ex_lock);
	*result = newstart;
	return (0);
}

int
extent_alloc_subregion(struct extent *ex, u_long start, u_long end, u_long size,
    u_long alignment, u_long boundary, int flags, u_long *result)
{

	return (extent_alloc_subregion1(ex, start, end, size, alignment,
	    0, boundary, flags, result));
}

int
extent_alloc(struct extent *ex, u_long size, u_long alignment, u_long boundary,
    int flags, u_long *result)
{

	return (extent_alloc_subregion1(ex, ex->ex_start, ex->ex_end,
	    size, alignment, 0, boundary,
	    flags, result));
}

int
extent_alloc1(struct extent *ex, u_long size, u_long alignment, u_long skew,
    u_long boundary, int flags, u_long *result)
{

	return (extent_alloc_subregion1(ex, ex->ex_start, ex->ex_end,
	    size, alignment, skew, boundary,
	    flags, result));
}
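
/*
 * Usage sketch (illustrative only; names and numbers are hypothetical).
 * Carve a 0x2000-byte chunk out of an extent, aligned to 0x1000 and not
 * crossing a 0x10000 boundary -- the typical shape of an ISA-DMA-style
 * constraint:
 *
 *	u_long result;
 *	int error;
 *
 *	error = extent_alloc(ex, 0x2000, 0x1000, 0x10000,
 *	    EX_NOWAIT, &result);
 *	if (error == 0)
 *		... region [result, result + 0x2000 - 1] is ours ...
 */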

int
extent_free(struct extent *ex, u_long start, u_long size, int flags)
{
	struct extent_region *rp, *nrp = NULL;
	u_long end = start + (size - 1);

#ifdef DIAGNOSTIC
	/*
	 * Check arguments.
	 *
	 * We don't lock to check these, because these values
	 * are never modified, and if another thread deletes the
	 * extent, we're screwed anyway.
	 */
	if (ex == NULL)
		panic("extent_free: NULL extent");
	if ((start < ex->ex_start) || (end > ex->ex_end)) {
		extent_print(ex);
		printf("extent_free: extent `%s', start 0x%lx, size 0x%lx\n",
		    ex->ex_name, start, size);
		panic("extent_free: extent `%s', region not within extent",
		    ex->ex_name);
	}
	/* Check for an overflow. */
	if (end < start) {
		extent_print(ex);
		printf("extent_free: extent `%s', start 0x%lx, size 0x%lx\n",
		    ex->ex_name, start, size);
		panic("extent_free: overflow");
	}
#endif

	/*
	 * If we're allowing coalescing, we must allocate a region
	 * descriptor now, since it might block.
	 */
	const bool coalesce = (ex->ex_flags & EXF_NOCOALESCE) == 0;

	if (coalesce) {
		/* Allocate a region descriptor. */
		nrp = extent_alloc_region_descriptor(ex, flags);
		if (nrp == NULL)
			return (ENOMEM);
	}

	if (!(ex->ex_flags & EXF_EARLY))
		mutex_enter(&ex->ex_lock);

	/*
	 * Find region and deallocate.  Several possibilities:
	 *
	 *	1. (start == er_start) && (end == er_end):
	 *	   Free descriptor.
	 *
	 *	2. (start == er_start) && (end < er_end):
	 *	   Adjust er_start.
	 *
	 *	3. (start > er_start) && (end == er_end):
	 *	   Adjust er_end.
	 *
	 *	4. (start > er_start) && (end < er_end):
	 *	   Fragment region.  Requires descriptor alloc.
	 *
	 * Cases 2, 3, and 4 require that the EXF_NOCOALESCE flag
	 * is not set.
	 */
	LIST_FOREACH(rp, &ex->ex_regions, er_link) {
		/*
		 * Save ourselves some comparisons; does the current
		 * region end before chunk to be freed begins?  If so,
		 * then we haven't found the appropriate region descriptor.
		 */
		if (rp->er_end < start)
			continue;

		/*
		 * Save ourselves some traversal; does the current
		 * region begin after the chunk to be freed ends?  If so,
		 * then we've already passed any possible region descriptors
		 * that might have contained the chunk to be freed.
		 */
		if (rp->er_start > end)
			break;

		/* Case 1. */
		if ((start == rp->er_start) && (end == rp->er_end)) {
			LIST_REMOVE(rp, er_link);
			extent_free_region_descriptor(ex, rp);
			goto done;
		}

		/*
		 * The following cases all require that EXF_NOCOALESCE
		 * is not set.
		 */
		if (!coalesce)
			continue;

		/* Case 2. */
		if ((start == rp->er_start) && (end < rp->er_end)) {
			rp->er_start = (end + 1);
			goto done;
		}

		/* Case 3. */
		if ((start > rp->er_start) && (end == rp->er_end)) {
			rp->er_end = (start - 1);
			goto done;
		}

		/* Case 4. */
		if ((start > rp->er_start) && (end < rp->er_end)) {
			/* Fill in new descriptor. */
			nrp->er_start = end + 1;
			nrp->er_end = rp->er_end;

			/* Adjust current descriptor. */
			rp->er_end = start - 1;

			/* Insert new descriptor after current. */
			LIST_INSERT_AFTER(rp, nrp, er_link);

			/* We used the new descriptor, so don't free it below */
			nrp = NULL;
			goto done;
		}
	}

	/* Region not found, or request otherwise invalid. */
	if (!(ex->ex_flags & EXF_EARLY))
		mutex_exit(&ex->ex_lock);
	extent_print(ex);
	printf("extent_free: start 0x%lx, end 0x%lx\n", start, end);
	panic("extent_free: region not found");

 done:
	if (nrp != NULL)
		extent_free_region_descriptor(ex, nrp);
	if (!(ex->ex_flags & EXF_EARLY)) {
		cv_broadcast(&ex->ex_cv);
		mutex_exit(&ex->ex_lock);
	}
	return (0);
}
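
/*
 * Illustrative example of case 4 (not part of the original source):
 * given an allocated region [0x1000, 0x3fff], freeing [0x2000, 0x2fff]
 * truncates the existing descriptor to [0x1000, 0x1fff] and inserts a
 * new one for [0x3000, 0x3fff], which is why a spare descriptor must
 * be allocated up front on extents that allow coalescing.
 */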

void
extent_print(struct extent *ex)
{
	struct extent_region *rp;

	if (ex == NULL)
		panic("extent_print: NULL extent");

	if (!(ex->ex_flags & EXF_EARLY))
		mutex_enter(&ex->ex_lock);

	printf("extent `%s' (0x%lx - 0x%lx), flags = 0x%x\n", ex->ex_name,
	    ex->ex_start, ex->ex_end, ex->ex_flags);

	LIST_FOREACH(rp, &ex->ex_regions, er_link)
		printf("     0x%lx - 0x%lx\n", rp->er_start, rp->er_end);

	if (!(ex->ex_flags & EXF_EARLY))
		mutex_exit(&ex->ex_lock);
}
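
/*
 * Illustrative user-land smoke test (not part of the original source).
 * It relies on the _EXTENT_TESTING stubs above and is disabled, like
 * the debug printf blocks earlier in this file.
 */
#if 0
int
main(void)
{
	struct extent *ex;
	u_long where;
	int error;

	/* Non-fixed extent covering [0x0, 0xffff]. */
	ex = extent_create("test", 0x0, 0xffff, NULL, 0, EX_NOWAIT);

	/* First fit: 0x100 bytes aligned to 0x10, no boundary. */
	error = extent_alloc(ex, 0x100, 0x10, 0, EX_NOWAIT, &where);
	if (error == 0)
		printf("allocated at 0x%lx\n", where);

	extent_print(ex);
	extent_free(ex, where, 0x100, EX_NOWAIT);
	extent_destroy(ex);
	return 0;
}
#endif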