/*	$NetBSD: subr_vmem.c,v 1.97 2018/02/08 09:05:20 dholland Exp $	*/

/*-
 * Copyright (c)2006,2007,2008,2009 YAMAMOTO Takashi,
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * reference:
 * -	Magazines and Vmem: Extending the Slab Allocator
 *	to Many CPUs and Arbitrary Resources
 *	http://www.usenix.org/event/usenix01/bonwick.html
 *
 * locking & the boundary tag pool:
 * -	A pool(9) is used for vmem boundary tags.
 * -	During a pool get call the global vmem_btag_refill_lock is taken,
 *	to serialize access to the allocation reserve, but no other
 *	vmem arena locks.
 * -	During pool_put calls no vmem mutexes are locked.
 * -	pool_drain doesn't hold the pool's mutex while releasing memory to
 *	its backing, so there is no interference with any vmem mutexes.
 * -	The boundary tag pool is forced to put page headers into pool pages
 *	(PR_PHINPAGE) and not off page, to avoid pool recursion.
 *	(due to sizeof(bt_t) it should be the case anyway)
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_vmem.c,v 1.97 2018/02/08 09:05:20 dholland Exp $");

#if defined(_KERNEL) && defined(_KERNEL_OPT)
#include "opt_ddb.h"
#endif /* defined(_KERNEL) && defined(_KERNEL_OPT) */

#include <sys/param.h>
#include <sys/hash.h>
#include <sys/queue.h>
#include <sys/bitops.h>

#if defined(_KERNEL)
#include <sys/systm.h>
#include <sys/kernel.h>	/* hz */
#include <sys/callout.h>
#include <sys/kmem.h>
#include <sys/pool.h>
#include <sys/vmem.h>
#include <sys/vmem_impl.h>
#include <sys/workqueue.h>
#include <sys/atomic.h>
#include <uvm/uvm.h>
#include <uvm/uvm_extern.h>
#include <uvm/uvm_km.h>
#include <uvm/uvm_page.h>
#include <uvm/uvm_pdaemon.h>
#else /* defined(_KERNEL) */
#include <stdio.h>
#include <errno.h>
#include <assert.h>
#include <stdlib.h>
#include <string.h>
#include "../sys/vmem.h"
#include "../sys/vmem_impl.h"
#endif /* defined(_KERNEL) */


#if defined(_KERNEL)
#include <sys/evcnt.h>
#define VMEM_EVCNT_DEFINE(name) \
struct evcnt vmem_evcnt_##name = EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, \
    "vmem", #name); \
EVCNT_ATTACH_STATIC(vmem_evcnt_##name);
#define VMEM_EVCNT_INCR(ev)	vmem_evcnt_##ev.ev_count++
#define VMEM_EVCNT_DECR(ev)	vmem_evcnt_##ev.ev_count--

VMEM_EVCNT_DEFINE(static_bt_count)
VMEM_EVCNT_DEFINE(static_bt_inuse)

#define	VMEM_CONDVAR_INIT(vm, wchan)	cv_init(&vm->vm_cv, wchan)
#define	VMEM_CONDVAR_DESTROY(vm)	cv_destroy(&vm->vm_cv)
#define	VMEM_CONDVAR_WAIT(vm)		cv_wait(&vm->vm_cv, &vm->vm_lock)
#define	VMEM_CONDVAR_BROADCAST(vm)	cv_broadcast(&vm->vm_cv)

#else /* defined(_KERNEL) */

#define VMEM_EVCNT_INCR(ev)	/* nothing */
#define VMEM_EVCNT_DECR(ev)	/* nothing */

#define	VMEM_CONDVAR_INIT(vm, wchan)	/* nothing */
#define	VMEM_CONDVAR_DESTROY(vm)	/* nothing */
#define	VMEM_CONDVAR_WAIT(vm)		/* nothing */
#define	VMEM_CONDVAR_BROADCAST(vm)	/* nothing */

#define	UNITTEST
#define	KASSERT(a)		assert(a)
#define	mutex_init(a, b, c)	/* nothing */
#define	mutex_destroy(a)	/* nothing */
#define	mutex_enter(a)		/* nothing */
#define	mutex_tryenter(a)	true
#define	mutex_exit(a)		/* nothing */
#define	mutex_owned(a)		/* nothing */
#define	ASSERT_SLEEPABLE()	/* nothing */
#define	panic(...)		do { printf(__VA_ARGS__); abort(); } while (0)
#endif /* defined(_KERNEL) */

#if defined(VMEM_SANITY)
static void vmem_check(vmem_t *);
#else /* defined(VMEM_SANITY) */
#define vmem_check(vm)	/* nothing */
#endif /* defined(VMEM_SANITY) */

#define	VMEM_HASHSIZE_MIN	1	/* XXX */
#define	VMEM_HASHSIZE_MAX	65536	/* XXX */
#define	VMEM_HASHSIZE_INIT	1

#define	VM_FITMASK	(VM_BESTFIT | VM_INSTANTFIT)

#if defined(_KERNEL)
static bool vmem_bootstrapped = false;
static kmutex_t vmem_list_lock;
static LIST_HEAD(, vmem) vmem_list = LIST_HEAD_INITIALIZER(vmem_list);
#endif /* defined(_KERNEL) */

/* ---- misc */

#define	VMEM_LOCK(vm)		mutex_enter(&vm->vm_lock)
#define	VMEM_TRYLOCK(vm)	mutex_tryenter(&vm->vm_lock)
#define	VMEM_UNLOCK(vm)		mutex_exit(&vm->vm_lock)
#define	VMEM_LOCK_INIT(vm, ipl)	mutex_init(&vm->vm_lock, MUTEX_DEFAULT, ipl)
#define	VMEM_LOCK_DESTROY(vm)	mutex_destroy(&vm->vm_lock)
#define	VMEM_ASSERT_LOCKED(vm)	KASSERT(mutex_owned(&vm->vm_lock))

/* round addr up to a multiple of align; align must be a power of 2 */
#define	VMEM_ALIGNUP(addr, align) \
	(-(-(addr) & -(align)))

/*
 * true iff [addr1, addr2] crosses a multiple of the power-of-2 boundary
 * (a boundary of 0 means "no boundary" and is never crossed)
 */
#define	VMEM_CROSS_P(addr1, addr2, boundary) \
	((((addr1) ^ (addr2)) & -(boundary)) != 0)

#define	ORDER2SIZE(order)	((vmem_size_t)1 << (order))
#define	SIZE2ORDER(size)	((int)ilog2(size))

#if !defined(_KERNEL)
#define	xmalloc(sz, flags)	malloc(sz)
#define	xfree(p, sz)		free(p)
#define	bt_alloc(vm, flags)	malloc(sizeof(bt_t))
#define	bt_free(vm, bt)		free(bt)
#else /* defined(_KERNEL) */

#define	xmalloc(sz, flags) \
	kmem_alloc(sz, ((flags) & VM_SLEEP) ? KM_SLEEP : KM_NOSLEEP)
#define	xfree(p, sz)	kmem_free(p, sz)

/*
 * BT_RESERVE calculation:
 * we allocate memory for boundary tags with vmem; therefore we have
 * to keep a reserve of bts used to allocate memory for bts.
 * This reserve is 4 for each arena involved in allocating vmem's memory.
 * BT_MAXFREE: don't cache excessive counts of bts in arenas.
 */
#define	STATIC_BT_COUNT	200
#define	BT_MINRESERVE	4
#define	BT_MAXFREE	64

static struct vmem_btag static_bts[STATIC_BT_COUNT];
static int static_bt_count = STATIC_BT_COUNT;

static struct vmem kmem_va_meta_arena_store;
vmem_t *kmem_va_meta_arena;
static struct vmem kmem_meta_arena_store;
vmem_t *kmem_meta_arena = NULL;

static kmutex_t vmem_btag_refill_lock;
static kmutex_t vmem_btag_lock;
static LIST_HEAD(, vmem_btag) vmem_btag_freelist;
static size_t vmem_btag_freelist_count = 0;
static struct pool vmem_btag_pool;

static void
vmem_kick_pdaemon(void)
{
#if defined(_KERNEL)
	mutex_spin_enter(&uvm_fpageqlock);
	uvm_kick_pdaemon();
	mutex_spin_exit(&uvm_fpageqlock);
#endif
}

/* ---- boundary tag */

static int bt_refill(vmem_t *vm);

static void *
pool_page_alloc_vmem_meta(struct pool *pp, int flags)
{
	const vm_flag_t vflags = (flags & PR_WAITOK) ? VM_SLEEP : VM_NOSLEEP;
	vmem_addr_t va;
	int ret;

	ret = vmem_alloc(kmem_meta_arena, pp->pr_alloc->pa_pagesz,
	    (vflags & ~VM_FITMASK) | VM_INSTANTFIT | VM_POPULATING, &va);

	return ret ? NULL : (void *)va;
}
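
/*
 * Sketch of the metadata recursion this allocator breaks (the wiring is
 * established in vmem_subsystem_init() below): getting a new boundary
 * tag may itself need memory, roughly
 *
 *	bt_refill()
 *	  -> pool_get(&vmem_btag_pool, ...)
 *	    -> pool_page_alloc_vmem_meta()
 *	      -> vmem_alloc(kmem_meta_arena, ..., VM_POPULATING, ...)
 *	        -> import from kmem_va_meta_arena via uvm_km_kmem_alloc()
 *
 * VM_POPULATING makes the inner bt_alloc() calls dip into the per-arena
 * reserve (BT_MINRESERVE) instead of recursing into another refill,
 * which is what terminates the recursion.
 */
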
static void
pool_page_free_vmem_meta(struct pool *pp, void *v)
{

	vmem_free(kmem_meta_arena, (vmem_addr_t)v, pp->pr_alloc->pa_pagesz);
}

/* allocator for vmem-pool metadata */
struct pool_allocator pool_allocator_vmem_meta = {
	.pa_alloc = pool_page_alloc_vmem_meta,
	.pa_free = pool_page_free_vmem_meta,
	.pa_pagesz = 0
};

static int
bt_refill(vmem_t *vm)
{
	bt_t *bt;

	VMEM_LOCK(vm);
	if (vm->vm_nfreetags > BT_MINRESERVE) {
		VMEM_UNLOCK(vm);
		return 0;
	}

	mutex_enter(&vmem_btag_lock);
	while (!LIST_EMPTY(&vmem_btag_freelist) &&
	    vm->vm_nfreetags <= BT_MINRESERVE) {
		bt = LIST_FIRST(&vmem_btag_freelist);
		LIST_REMOVE(bt, bt_freelist);
		LIST_INSERT_HEAD(&vm->vm_freetags, bt, bt_freelist);
		vm->vm_nfreetags++;
		vmem_btag_freelist_count--;
		VMEM_EVCNT_INCR(static_bt_inuse);
	}
	mutex_exit(&vmem_btag_lock);

	while (vm->vm_nfreetags <= BT_MINRESERVE) {
		VMEM_UNLOCK(vm);
		mutex_enter(&vmem_btag_refill_lock);
		bt = pool_get(&vmem_btag_pool, PR_NOWAIT);
		mutex_exit(&vmem_btag_refill_lock);
		VMEM_LOCK(vm);
		if (bt == NULL)
			break;
		LIST_INSERT_HEAD(&vm->vm_freetags, bt, bt_freelist);
		vm->vm_nfreetags++;
	}

	if (vm->vm_nfreetags <= BT_MINRESERVE) {
		VMEM_UNLOCK(vm);
		return ENOMEM;
	}

	VMEM_UNLOCK(vm);

	if (kmem_meta_arena != NULL) {
		(void)bt_refill(kmem_arena);
		(void)bt_refill(kmem_va_meta_arena);
		(void)bt_refill(kmem_meta_arena);
	}

	return 0;
}

static bt_t *
bt_alloc(vmem_t *vm, vm_flag_t flags)
{
	bt_t *bt;

	VMEM_LOCK(vm);
	while (vm->vm_nfreetags <= BT_MINRESERVE &&
	    (flags & VM_POPULATING) == 0) {
		VMEM_UNLOCK(vm);
		if (bt_refill(vm)) {
			if ((flags & VM_NOSLEEP) != 0) {
				return NULL;
			}

			/*
			 * It would be nice to wait for something specific here
			 * but there are multiple ways that a retry could
			 * succeed and we can't wait for multiple things
			 * simultaneously.  So we'll just sleep for an arbitrary
			 * short period of time and retry regardless.
			 * This should be a very rare case.
			 */

			vmem_kick_pdaemon();
			kpause("btalloc", false, 1, NULL);
		}
		VMEM_LOCK(vm);
	}
	bt = LIST_FIRST(&vm->vm_freetags);
	LIST_REMOVE(bt, bt_freelist);
	vm->vm_nfreetags--;
	VMEM_UNLOCK(vm);

	return bt;
}

static void
bt_free(vmem_t *vm, bt_t *bt)
{

	VMEM_LOCK(vm);
	LIST_INSERT_HEAD(&vm->vm_freetags, bt, bt_freelist);
	vm->vm_nfreetags++;
	VMEM_UNLOCK(vm);
}

static void
bt_freetrim(vmem_t *vm, int freelimit)
{
	bt_t *t;
	LIST_HEAD(, vmem_btag) tofree;

	LIST_INIT(&tofree);

	VMEM_LOCK(vm);
	while (vm->vm_nfreetags > freelimit) {
		bt_t *bt = LIST_FIRST(&vm->vm_freetags);
		LIST_REMOVE(bt, bt_freelist);
		vm->vm_nfreetags--;
		if (bt >= static_bts
		    && bt < &static_bts[STATIC_BT_COUNT]) {
			mutex_enter(&vmem_btag_lock);
			LIST_INSERT_HEAD(&vmem_btag_freelist, bt, bt_freelist);
			vmem_btag_freelist_count++;
			mutex_exit(&vmem_btag_lock);
			VMEM_EVCNT_DECR(static_bt_inuse);
		} else {
			LIST_INSERT_HEAD(&tofree, bt, bt_freelist);
		}
	}

	VMEM_UNLOCK(vm);
	while (!LIST_EMPTY(&tofree)) {
		t = LIST_FIRST(&tofree);
		LIST_REMOVE(t, bt_freelist);
		pool_put(&vmem_btag_pool, t);
	}
}
#endif /* defined(_KERNEL) */

/*
 * freelist[0] ... [1, 1]
 * freelist[1] ... [2, 3]
 * freelist[2] ... [4, 7]
 * freelist[3] ... [8, 15]
 *  :
 * freelist[n] ... [(1 << n), (1 << (n + 1)) - 1]
 *  :
 */
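
/*
 * Worked example (made-up numbers): with a quantum of 4 KiB
 * (vm_quantum_shift == 12), a 24 KiB free segment has qsize == 6, so
 * bt_freehead_tofree() files it on freelist[2], which holds [4, 7]
 * quanta.  A VM_INSTANTFIT allocation of the same 24 KiB rounds up to
 * freelist[3] ([8, 15] quanta), where every block is guaranteed large
 * enough; a VM_BESTFIT allocation starts searching at freelist[2] and
 * must check bt_size, since that list also holds blocks smaller than
 * 24 KiB.
 */
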
static struct vmem_freelist *
bt_freehead_tofree(vmem_t *vm, vmem_size_t size)
{
	const vmem_size_t qsize = size >> vm->vm_quantum_shift;
	const int idx = SIZE2ORDER(qsize);

	KASSERT(size != 0 && qsize != 0);
	KASSERT((size & vm->vm_quantum_mask) == 0);
	KASSERT(idx >= 0);
	KASSERT(idx < VMEM_MAXORDER);

	return &vm->vm_freelist[idx];
}

/*
 * bt_freehead_toalloc: return the freelist for the given size and allocation
 * strategy.
 *
 * For VM_INSTANTFIT, return the list in which any blocks are large enough
 * for the requested size.  Otherwise, return the list which can have blocks
 * large enough for the requested size.
 */

static struct vmem_freelist *
bt_freehead_toalloc(vmem_t *vm, vmem_size_t size, vm_flag_t strat)
{
	const vmem_size_t qsize = size >> vm->vm_quantum_shift;
	int idx = SIZE2ORDER(qsize);

	KASSERT(size != 0 && qsize != 0);
	KASSERT((size & vm->vm_quantum_mask) == 0);

	if (strat == VM_INSTANTFIT && ORDER2SIZE(idx) != qsize) {
		idx++;
		/* check too large request? */
	}
	KASSERT(idx >= 0);
	KASSERT(idx < VMEM_MAXORDER);

	return &vm->vm_freelist[idx];
}

/* ---- boundary tag hash */

static struct vmem_hashlist *
bt_hashhead(vmem_t *vm, vmem_addr_t addr)
{
	struct vmem_hashlist *list;
	unsigned int hash;

	hash = hash32_buf(&addr, sizeof(addr), HASH32_BUF_INIT);
	list = &vm->vm_hashlist[hash % vm->vm_hashsize];

	return list;
}

static bt_t *
bt_lookupbusy(vmem_t *vm, vmem_addr_t addr)
{
	struct vmem_hashlist *list;
	bt_t *bt;

	list = bt_hashhead(vm, addr);
	LIST_FOREACH(bt, list, bt_hashlist) {
		if (bt->bt_start == addr) {
			break;
		}
	}

	return bt;
}

static void
bt_rembusy(vmem_t *vm, bt_t *bt)
{

	KASSERT(vm->vm_nbusytag > 0);
	vm->vm_inuse -= bt->bt_size;
	vm->vm_nbusytag--;
	LIST_REMOVE(bt, bt_hashlist);
}

static void
bt_insbusy(vmem_t *vm, bt_t *bt)
{
	struct vmem_hashlist *list;

	KASSERT(bt->bt_type == BT_TYPE_BUSY);

	list = bt_hashhead(vm, bt->bt_start);
	LIST_INSERT_HEAD(list, bt, bt_hashlist);
	vm->vm_nbusytag++;
	vm->vm_inuse += bt->bt_size;
}

/* ---- boundary tag list */

static void
bt_remseg(vmem_t *vm, bt_t *bt)
{

	TAILQ_REMOVE(&vm->vm_seglist, bt, bt_seglist);
}

static void
bt_insseg(vmem_t *vm, bt_t *bt, bt_t *prev)
{

	TAILQ_INSERT_AFTER(&vm->vm_seglist, prev, bt, bt_seglist);
}

static void
bt_insseg_tail(vmem_t *vm, bt_t *bt)
{

	TAILQ_INSERT_TAIL(&vm->vm_seglist, bt, bt_seglist);
}

static void
bt_remfree(vmem_t *vm, bt_t *bt)
{

	KASSERT(bt->bt_type == BT_TYPE_FREE);

	LIST_REMOVE(bt, bt_freelist);
}

static void
bt_insfree(vmem_t *vm, bt_t *bt)
{
	struct vmem_freelist *list;

	list = bt_freehead_tofree(vm, bt->bt_size);
	LIST_INSERT_HEAD(list, bt, bt_freelist);
}

/* ---- vmem internal functions */
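
/*
 * Quantum caching (QCACHE) in brief: allocations of at most
 * vm_qcache_max bytes bypass the freelists entirely and are served from
 * per-size pool caches, one for each multiple of the quantum.  For
 * example (made-up numbers), with a 4 KiB quantum and a qcache_max of
 * 32 KiB, an 8 KiB vmem_alloc() turns into a pool_cache_get() from the
 * "<arena>-8192" cache, which in turn refills itself in larger chunks
 * through qc_poolpage_alloc() -> vmem_alloc().
 */
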
#if defined(QCACHE)
static inline vm_flag_t
prf_to_vmf(int prflags)
{
	vm_flag_t vmflags;

	KASSERT((prflags & ~(PR_LIMITFAIL | PR_WAITOK | PR_NOWAIT)) == 0);
	if ((prflags & PR_WAITOK) != 0) {
		vmflags = VM_SLEEP;
	} else {
		vmflags = VM_NOSLEEP;
	}
	return vmflags;
}

static inline int
vmf_to_prf(vm_flag_t vmflags)
{
	int prflags;

	if ((vmflags & VM_SLEEP) != 0) {
		prflags = PR_WAITOK;
	} else {
		prflags = PR_NOWAIT;
	}
	return prflags;
}

static size_t
qc_poolpage_size(size_t qcache_max)
{
	int i;

	for (i = 0; ORDER2SIZE(i) <= qcache_max * 3; i++) {
		/* nothing */
	}
	return ORDER2SIZE(i);
}

static void *
qc_poolpage_alloc(struct pool *pool, int prflags)
{
	qcache_t *qc = QC_POOL_TO_QCACHE(pool);
	vmem_t *vm = qc->qc_vmem;
	vmem_addr_t addr;

	if (vmem_alloc(vm, pool->pr_alloc->pa_pagesz,
	    prf_to_vmf(prflags) | VM_INSTANTFIT, &addr) != 0)
		return NULL;
	return (void *)addr;
}

static void
qc_poolpage_free(struct pool *pool, void *addr)
{
	qcache_t *qc = QC_POOL_TO_QCACHE(pool);
	vmem_t *vm = qc->qc_vmem;

	vmem_free(vm, (vmem_addr_t)addr, pool->pr_alloc->pa_pagesz);
}

static void
qc_init(vmem_t *vm, size_t qcache_max, int ipl)
{
	qcache_t *prevqc;
	struct pool_allocator *pa;
	int qcache_idx_max;
	int i;

	KASSERT((qcache_max & vm->vm_quantum_mask) == 0);
	if (qcache_max > (VMEM_QCACHE_IDX_MAX << vm->vm_quantum_shift)) {
		qcache_max = VMEM_QCACHE_IDX_MAX << vm->vm_quantum_shift;
	}
	vm->vm_qcache_max = qcache_max;
	pa = &vm->vm_qcache_allocator;
	memset(pa, 0, sizeof(*pa));
	pa->pa_alloc = qc_poolpage_alloc;
	pa->pa_free = qc_poolpage_free;
	pa->pa_pagesz = qc_poolpage_size(qcache_max);

	qcache_idx_max = qcache_max >> vm->vm_quantum_shift;
	prevqc = NULL;
	for (i = qcache_idx_max; i > 0; i--) {
		qcache_t *qc = &vm->vm_qcache_store[i - 1];
		size_t size = i << vm->vm_quantum_shift;
		pool_cache_t pc;

		qc->qc_vmem = vm;
		snprintf(qc->qc_name, sizeof(qc->qc_name), "%s-%zu",
		    vm->vm_name, size);

		pc = pool_cache_init(size,
		    ORDER2SIZE(vm->vm_quantum_shift), 0,
		    PR_NOALIGN | PR_NOTOUCH | PR_RECURSIVE /* XXX */,
		    qc->qc_name, pa, ipl, NULL, NULL, NULL);

		KASSERT(pc);

		qc->qc_cache = pc;
		KASSERT(qc->qc_cache != NULL);	/* XXX */
		if (prevqc != NULL &&
		    qc->qc_cache->pc_pool.pr_itemsperpage ==
		    prevqc->qc_cache->pc_pool.pr_itemsperpage) {
			pool_cache_destroy(qc->qc_cache);
			vm->vm_qcache[i - 1] = prevqc;
			continue;
		}
		qc->qc_cache->pc_pool.pr_qcache = qc;
		vm->vm_qcache[i - 1] = qc;
		prevqc = qc;
	}
}

static void
qc_destroy(vmem_t *vm)
{
	const qcache_t *prevqc;
	int i;
	int qcache_idx_max;

	qcache_idx_max = vm->vm_qcache_max >> vm->vm_quantum_shift;
	prevqc = NULL;
	for (i = 0; i < qcache_idx_max; i++) {
		qcache_t *qc = vm->vm_qcache[i];

		if (prevqc == qc) {
			continue;
		}
		pool_cache_destroy(qc->qc_cache);
		prevqc = qc;
	}
}
#endif

#if defined(_KERNEL)
static void
vmem_bootstrap(void)
{

	mutex_init(&vmem_list_lock, MUTEX_DEFAULT, IPL_VM);
	mutex_init(&vmem_btag_lock, MUTEX_DEFAULT, IPL_VM);
	mutex_init(&vmem_btag_refill_lock, MUTEX_DEFAULT, IPL_VM);

	while (static_bt_count-- > 0) {
		bt_t *bt = &static_bts[static_bt_count];
		LIST_INSERT_HEAD(&vmem_btag_freelist, bt, bt_freelist);
		VMEM_EVCNT_INCR(static_bt_count);
		vmem_btag_freelist_count++;
	}
	vmem_bootstrapped = TRUE;
}
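
/*
 * Rough bootstrap ordering (as wired in this file): the first
 * vmem_init() call runs vmem_bootstrap(), which seeds
 * vmem_btag_freelist with the STATIC_BT_COUNT static tags above.
 * Early arenas live off that static reserve until vmem_subsystem_init()
 * below creates kmem_va_meta_arena, kmem_meta_arena and vmem_btag_pool;
 * from then on boundary tags come from the pool, and static tags drift
 * back to the free list via bt_freetrim().
 */
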
void
vmem_subsystem_init(vmem_t *vm)
{

	kmem_va_meta_arena = vmem_init(&kmem_va_meta_arena_store, "vmem-va",
	    0, 0, PAGE_SIZE, vmem_alloc, vmem_free, vm,
	    0, VM_NOSLEEP | VM_BOOTSTRAP | VM_LARGEIMPORT,
	    IPL_VM);

	kmem_meta_arena = vmem_init(&kmem_meta_arena_store, "vmem-meta",
	    0, 0, PAGE_SIZE,
	    uvm_km_kmem_alloc, uvm_km_kmem_free, kmem_va_meta_arena,
	    0, VM_NOSLEEP | VM_BOOTSTRAP, IPL_VM);

	pool_init(&vmem_btag_pool, sizeof(bt_t), 0, 0, PR_PHINPAGE,
	    "vmembt", &pool_allocator_vmem_meta, IPL_VM);
}
#endif /* defined(_KERNEL) */

static int
vmem_add1(vmem_t *vm, vmem_addr_t addr, vmem_size_t size, vm_flag_t flags,
    int spanbttype)
{
	bt_t *btspan;
	bt_t *btfree;

	KASSERT((flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
	KASSERT((~flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
	KASSERT(spanbttype == BT_TYPE_SPAN ||
	    spanbttype == BT_TYPE_SPAN_STATIC);

	btspan = bt_alloc(vm, flags);
	if (btspan == NULL) {
		return ENOMEM;
	}
	btfree = bt_alloc(vm, flags);
	if (btfree == NULL) {
		bt_free(vm, btspan);
		return ENOMEM;
	}

	btspan->bt_type = spanbttype;
	btspan->bt_start = addr;
	btspan->bt_size = size;

	btfree->bt_type = BT_TYPE_FREE;
	btfree->bt_start = addr;
	btfree->bt_size = size;

	VMEM_LOCK(vm);
	bt_insseg_tail(vm, btspan);
	bt_insseg(vm, btfree, btspan);
	bt_insfree(vm, btfree);
	vm->vm_size += size;
	VMEM_UNLOCK(vm);

	return 0;
}

static void
vmem_destroy1(vmem_t *vm)
{

#if defined(QCACHE)
	qc_destroy(vm);
#endif /* defined(QCACHE) */
	if (vm->vm_hashlist != NULL) {
		int i;

		for (i = 0; i < vm->vm_hashsize; i++) {
			bt_t *bt;

			while ((bt = LIST_FIRST(&vm->vm_hashlist[i])) != NULL) {
				KASSERT(bt->bt_type == BT_TYPE_SPAN_STATIC);
				bt_free(vm, bt);
			}
		}
		if (vm->vm_hashlist != &vm->vm_hash0) {
			xfree(vm->vm_hashlist,
			    sizeof(struct vmem_hashlist *) * vm->vm_hashsize);
		}
	}

	bt_freetrim(vm, 0);

	VMEM_CONDVAR_DESTROY(vm);
	VMEM_LOCK_DESTROY(vm);
	xfree(vm, sizeof(*vm));
}

static int
vmem_import(vmem_t *vm, vmem_size_t size, vm_flag_t flags)
{
	vmem_addr_t addr;
	int rc;

	if (vm->vm_importfn == NULL) {
		return EINVAL;
	}

	if (vm->vm_flags & VM_LARGEIMPORT) {
		size *= 16;
	}

	if (vm->vm_flags & VM_XIMPORT) {
		rc = ((vmem_ximport_t *)vm->vm_importfn)(vm->vm_arg, size,
		    &size, flags, &addr);
	} else {
		rc = (vm->vm_importfn)(vm->vm_arg, size, flags, &addr);
	}
	if (rc) {
		return ENOMEM;
	}

	if (vmem_add1(vm, addr, size, flags, BT_TYPE_SPAN) != 0) {
		(*vm->vm_releasefn)(vm->vm_arg, addr, size);
		return ENOMEM;
	}

	return 0;
}
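
/*
 * Illustrative import callback (hypothetical, not part of this file):
 * an arena stacked on a parent arena can use functions with the
 * vmem_import_t/vmem_release_t signatures expected above, e.g.
 *
 *	static int
 *	example_import(vmem_t *parent, vmem_size_t size, vm_flag_t flags,
 *	    vmem_addr_t *addrp)
 *	{
 *
 *		return vmem_alloc(parent, size, flags | VM_INSTANTFIT, addrp);
 *	}
 *
 * vmem_subsystem_init() does exactly this without a wrapper, passing
 * vmem_alloc/vmem_free as the import/release pair for kmem_va_meta_arena.
 */
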
static int
vmem_rehash(vmem_t *vm, size_t newhashsize, vm_flag_t flags)
{
	bt_t *bt;
	int i;
	struct vmem_hashlist *newhashlist;
	struct vmem_hashlist *oldhashlist;
	size_t oldhashsize;

	KASSERT(newhashsize > 0);

	newhashlist =
	    xmalloc(sizeof(struct vmem_hashlist *) * newhashsize, flags);
	if (newhashlist == NULL) {
		return ENOMEM;
	}
	for (i = 0; i < newhashsize; i++) {
		LIST_INIT(&newhashlist[i]);
	}

	if (!VMEM_TRYLOCK(vm)) {
		xfree(newhashlist,
		    sizeof(struct vmem_hashlist *) * newhashsize);
		return EBUSY;
	}
	oldhashlist = vm->vm_hashlist;
	oldhashsize = vm->vm_hashsize;
	vm->vm_hashlist = newhashlist;
	vm->vm_hashsize = newhashsize;
	if (oldhashlist == NULL) {
		VMEM_UNLOCK(vm);
		return 0;
	}
	for (i = 0; i < oldhashsize; i++) {
		while ((bt = LIST_FIRST(&oldhashlist[i])) != NULL) {
			bt_rembusy(vm, bt);	/* XXX */
			bt_insbusy(vm, bt);
		}
	}
	VMEM_UNLOCK(vm);

	if (oldhashlist != &vm->vm_hash0) {
		xfree(oldhashlist,
		    sizeof(struct vmem_hashlist *) * oldhashsize);
	}

	return 0;
}

/*
 * vmem_fit: check if a bt can satisfy the given restrictions.
 *
 * It's the caller's responsibility to ensure the region is big enough
 * before calling us.
 */

static int
vmem_fit(const bt_t *bt, vmem_size_t size, vmem_size_t align,
    vmem_size_t phase, vmem_size_t nocross,
    vmem_addr_t minaddr, vmem_addr_t maxaddr, vmem_addr_t *addrp)
{
	vmem_addr_t start;
	vmem_addr_t end;

	KASSERT(size > 0);
	KASSERT(bt->bt_size >= size); /* caller's responsibility */

	/*
	 * XXX assumption: vmem_addr_t and vmem_size_t are
	 * unsigned integers of the same size.
	 */

	start = bt->bt_start;
	if (start < minaddr) {
		start = minaddr;
	}
	end = BT_END(bt);
	if (end > maxaddr) {
		end = maxaddr;
	}
	if (start > end) {
		return ENOMEM;
	}

	start = VMEM_ALIGNUP(start - phase, align) + phase;
	if (start < bt->bt_start) {
		start += align;
	}
	if (VMEM_CROSS_P(start, start + size - 1, nocross)) {
		KASSERT(align < nocross);
		start = VMEM_ALIGNUP(start - phase, nocross) + phase;
	}
	if (start <= end && end - start >= size - 1) {
		KASSERT((start & (align - 1)) == phase);
		KASSERT(!VMEM_CROSS_P(start, start + size - 1, nocross));
		KASSERT(minaddr <= start);
		KASSERT(maxaddr == 0 || start + size - 1 <= maxaddr);
		KASSERT(bt->bt_start <= start);
		KASSERT(BT_END(bt) - start >= size - 1);
		*addrp = start;
		return 0;
	}
	return ENOMEM;
}

/* ---- vmem API */
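
/*
 * Worked example for vmem_fit() (made-up numbers): asking for
 * size = 0x1000, align = 0x2000, phase = 0x100, nocross = 0 from a free
 * tag spanning [0x4800, 0x8800) with no min/max limits:
 * VMEM_ALIGNUP(0x4800 - 0x100, 0x2000) + 0x100 == 0x6100, i.e. the
 * first address at offset 0x100 past a 0x2000 boundary that lies inside
 * the tag; [0x6100, 0x7100) fits, so 0x6100 is returned.
 */
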
/*
 * vmem_init: creates a vmem arena.
 */

vmem_t *
vmem_init(vmem_t *vm, const char *name,
    vmem_addr_t base, vmem_size_t size, vmem_size_t quantum,
    vmem_import_t *importfn, vmem_release_t *releasefn,
    vmem_t *arg, vmem_size_t qcache_max, vm_flag_t flags, int ipl)
{
	int i;

	KASSERT((flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
	KASSERT((~flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
	KASSERT(quantum > 0);

#if defined(_KERNEL)
	/* XXX: SMP, we get called early... */
	if (!vmem_bootstrapped) {
		vmem_bootstrap();
	}
#endif /* defined(_KERNEL) */

	if (vm == NULL) {
		vm = xmalloc(sizeof(*vm), flags);
	}
	if (vm == NULL) {
		return NULL;
	}

	VMEM_CONDVAR_INIT(vm, "vmem");
	VMEM_LOCK_INIT(vm, ipl);
	vm->vm_flags = flags;
	vm->vm_nfreetags = 0;
	LIST_INIT(&vm->vm_freetags);
	strlcpy(vm->vm_name, name, sizeof(vm->vm_name));
	vm->vm_quantum_mask = quantum - 1;
	vm->vm_quantum_shift = SIZE2ORDER(quantum);
	KASSERT(ORDER2SIZE(vm->vm_quantum_shift) == quantum);
	vm->vm_importfn = importfn;
	vm->vm_releasefn = releasefn;
	vm->vm_arg = arg;
	vm->vm_nbusytag = 0;
	vm->vm_size = 0;
	vm->vm_inuse = 0;
#if defined(QCACHE)
	qc_init(vm, qcache_max, ipl);
#endif /* defined(QCACHE) */

	TAILQ_INIT(&vm->vm_seglist);
	for (i = 0; i < VMEM_MAXORDER; i++) {
		LIST_INIT(&vm->vm_freelist[i]);
	}
	memset(&vm->vm_hash0, 0, sizeof(struct vmem_hashlist));
	vm->vm_hashsize = 1;
	vm->vm_hashlist = &vm->vm_hash0;

	if (size != 0) {
		if (vmem_add(vm, base, size, flags) != 0) {
			vmem_destroy1(vm);
			return NULL;
		}
	}

#if defined(_KERNEL)
	if (flags & VM_BOOTSTRAP) {
		bt_refill(vm);
	}

	mutex_enter(&vmem_list_lock);
	LIST_INSERT_HEAD(&vmem_list, vm, vm_alllist);
	mutex_exit(&vmem_list_lock);
#endif /* defined(_KERNEL) */

	return vm;
}

/*
 * vmem_create: create an arena.
 *
 * => must not be called from interrupt context.
 */

vmem_t *
vmem_create(const char *name, vmem_addr_t base, vmem_size_t size,
    vmem_size_t quantum, vmem_import_t *importfn, vmem_release_t *releasefn,
    vmem_t *source, vmem_size_t qcache_max, vm_flag_t flags, int ipl)
{

	KASSERT((flags & (VM_XIMPORT)) == 0);

	return vmem_init(NULL, name, base, size, quantum,
	    importfn, releasefn, source, qcache_max, flags, ipl);
}

/*
 * vmem_xcreate: create an arena with an alternative import function.
 *
 * => must not be called from interrupt context.
 */

vmem_t *
vmem_xcreate(const char *name, vmem_addr_t base, vmem_size_t size,
    vmem_size_t quantum, vmem_ximport_t *importfn, vmem_release_t *releasefn,
    vmem_t *source, vmem_size_t qcache_max, vm_flag_t flags, int ipl)
{

	KASSERT((flags & (VM_XIMPORT)) == 0);

	return vmem_init(NULL, name, base, size, quantum,
	    (vmem_import_t *)importfn, releasefn, source,
	    qcache_max, flags | VM_XIMPORT, ipl);
}

void
vmem_destroy(vmem_t *vm)
{

#if defined(_KERNEL)
	mutex_enter(&vmem_list_lock);
	LIST_REMOVE(vm, vm_alllist);
	mutex_exit(&vmem_list_lock);
#endif /* defined(_KERNEL) */

	vmem_destroy1(vm);
}

vmem_size_t
vmem_roundup_size(vmem_t *vm, vmem_size_t size)
{

	return (size + vm->vm_quantum_mask) & ~vm->vm_quantum_mask;
}
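
/*
 * Usage sketch (illustrative; mirrors the UNITTEST code at the bottom
 * of this file): create an arena, donate a span to it, then allocate
 * and free from it.
 *
 *	vmem_t *vm;
 *	vmem_addr_t va;
 *
 *	vm = vmem_create("example", 0, 0, 1,
 *	    NULL, NULL, NULL, 0, VM_SLEEP, IPL_NONE);
 *	vmem_add(vm, 0x1000, 0x10000, VM_SLEEP);
 *	if (vmem_alloc(vm, 0x100, VM_SLEEP | VM_INSTANTFIT, &va) == 0)
 *		vmem_free(vm, va, 0x100);
 *
 * The "example" arena and the numbers are made up.
 */
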
/*
 * vmem_alloc: allocate resource from the arena.
 */

int
vmem_alloc(vmem_t *vm, vmem_size_t size, vm_flag_t flags, vmem_addr_t *addrp)
{
	const vm_flag_t strat __diagused = flags & VM_FITMASK;
	int error;

	KASSERT((flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
	KASSERT((~flags & (VM_SLEEP|VM_NOSLEEP)) != 0);

	KASSERT(size > 0);
	KASSERT(strat == VM_BESTFIT || strat == VM_INSTANTFIT);
	if ((flags & VM_SLEEP) != 0) {
		ASSERT_SLEEPABLE();
	}

#if defined(QCACHE)
	if (size <= vm->vm_qcache_max) {
		void *p;
		int qidx = (size + vm->vm_quantum_mask) >> vm->vm_quantum_shift;
		qcache_t *qc = vm->vm_qcache[qidx - 1];

		p = pool_cache_get(qc->qc_cache, vmf_to_prf(flags));
		if (addrp != NULL)
			*addrp = (vmem_addr_t)p;
		error = (p == NULL) ? ENOMEM : 0;
		goto out;
	}
#endif /* defined(QCACHE) */

	error = vmem_xalloc(vm, size, 0, 0, 0, VMEM_ADDR_MIN, VMEM_ADDR_MAX,
	    flags, addrp);
out:
	KASSERT(error == 0 || (flags & VM_SLEEP) == 0);
	return error;
}

int
vmem_xalloc(vmem_t *vm, const vmem_size_t size0, vmem_size_t align,
    const vmem_size_t phase, const vmem_size_t nocross,
    const vmem_addr_t minaddr, const vmem_addr_t maxaddr, const vm_flag_t flags,
    vmem_addr_t *addrp)
{
	struct vmem_freelist *list;
	struct vmem_freelist *first;
	struct vmem_freelist *end;
	bt_t *bt;
	bt_t *btnew;
	bt_t *btnew2;
	const vmem_size_t size = vmem_roundup_size(vm, size0);
	vm_flag_t strat = flags & VM_FITMASK;
	vmem_addr_t start;
	int rc;

	KASSERT(size0 > 0);
	KASSERT(size > 0);
	KASSERT(strat == VM_BESTFIT || strat == VM_INSTANTFIT);
	if ((flags & VM_SLEEP) != 0) {
		ASSERT_SLEEPABLE();
	}
	KASSERT((align & vm->vm_quantum_mask) == 0);
	KASSERT((align & (align - 1)) == 0);
	KASSERT((phase & vm->vm_quantum_mask) == 0);
	KASSERT((nocross & vm->vm_quantum_mask) == 0);
	KASSERT((nocross & (nocross - 1)) == 0);
	KASSERT((align == 0 && phase == 0) || phase < align);
	KASSERT(nocross == 0 || nocross >= size);
	KASSERT(minaddr <= maxaddr);
	KASSERT(!VMEM_CROSS_P(phase, phase + size - 1, nocross));

	if (align == 0) {
		align = vm->vm_quantum_mask + 1;
	}

	/*
	 * allocate boundary tags before acquiring the vmem lock.
	 */
	btnew = bt_alloc(vm, flags);
	if (btnew == NULL) {
		return ENOMEM;
	}
	btnew2 = bt_alloc(vm, flags); /* XXX not necessary if no restrictions */
	if (btnew2 == NULL) {
		bt_free(vm, btnew);
		return ENOMEM;
	}

	/*
	 * choose a free block from which we allocate.
	 */
retry_strat:
	first = bt_freehead_toalloc(vm, size, strat);
	end = &vm->vm_freelist[VMEM_MAXORDER];
retry:
	bt = NULL;
	VMEM_LOCK(vm);
	vmem_check(vm);
	if (strat == VM_INSTANTFIT) {
		/*
		 * just choose the first block which satisfies our restrictions.
		 *
		 * note that we don't need to check the size of the blocks
		 * because any blocks found on these lists should be larger
		 * than the given size.
		 */
		for (list = first; list < end; list++) {
			bt = LIST_FIRST(list);
			if (bt != NULL) {
				rc = vmem_fit(bt, size, align, phase,
				    nocross, minaddr, maxaddr, &start);
				if (rc == 0) {
					goto gotit;
				}
				/*
				 * don't bother to follow the bt_freelist link
				 * here.  the list can be very long and we are
				 * told to run fast.  blocks from the later free
				 * lists are larger and have better chances to
				 * satisfy our restrictions.
				 */
			}
		}
	} else { /* VM_BESTFIT */
		/*
		 * we assume that, for space efficiency, it's better to
		 * allocate from a smaller block.  thus we will start searching
		 * from a lower-order list than VM_INSTANTFIT would.
		 * however, don't bother to find the smallest block in a free
		 * list because the list can be very long.  we can revisit it
		 * if/when it turns out to be a problem.
		 *
		 * note that the 'first' list can contain blocks smaller than
		 * the requested size.  thus we need to check bt_size.
		 */
		for (list = first; list < end; list++) {
			LIST_FOREACH(bt, list, bt_freelist) {
				if (bt->bt_size >= size) {
					rc = vmem_fit(bt, size, align, phase,
					    nocross, minaddr, maxaddr, &start);
					if (rc == 0) {
						goto gotit;
					}
				}
			}
		}
	}
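
	/*
	 * Nothing fit.  The fallback ladder below: retry the whole search
	 * with VM_BESTFIT if we were VM_INSTANTFIT, then try to import a
	 * fresh span from the backing arena, and finally, for VM_SLEEP
	 * callers, wait for a vmem_free() and retry.
	 */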
	VMEM_UNLOCK(vm);
#if 1
	if (strat == VM_INSTANTFIT) {
		strat = VM_BESTFIT;
		goto retry_strat;
	}
#endif
	if (align != vm->vm_quantum_mask + 1 || phase != 0 || nocross != 0) {

		/*
		 * XXX should try to import a region large enough to
		 * satisfy restrictions?
		 */

		goto fail;
	}
	/* XXX eeek, minaddr & maxaddr not respected */
	if (vmem_import(vm, size, flags) == 0) {
		goto retry;
	}
	/* XXX */

	if ((flags & VM_SLEEP) != 0) {
		vmem_kick_pdaemon();
		VMEM_LOCK(vm);
		VMEM_CONDVAR_WAIT(vm);
		VMEM_UNLOCK(vm);
		goto retry;
	}
fail:
	bt_free(vm, btnew);
	bt_free(vm, btnew2);
	return ENOMEM;

gotit:
	KASSERT(bt->bt_type == BT_TYPE_FREE);
	KASSERT(bt->bt_size >= size);
	bt_remfree(vm, bt);
	vmem_check(vm);
	if (bt->bt_start != start) {
		btnew2->bt_type = BT_TYPE_FREE;
		btnew2->bt_start = bt->bt_start;
		btnew2->bt_size = start - bt->bt_start;
		bt->bt_start = start;
		bt->bt_size -= btnew2->bt_size;
		bt_insfree(vm, btnew2);
		bt_insseg(vm, btnew2, TAILQ_PREV(bt, vmem_seglist, bt_seglist));
		btnew2 = NULL;
		vmem_check(vm);
	}
	KASSERT(bt->bt_start == start);
	if (bt->bt_size != size && bt->bt_size - size > vm->vm_quantum_mask) {
		/* split */
		btnew->bt_type = BT_TYPE_BUSY;
		btnew->bt_start = bt->bt_start;
		btnew->bt_size = size;
		bt->bt_start = bt->bt_start + size;
		bt->bt_size -= size;
		bt_insfree(vm, bt);
		bt_insseg(vm, btnew, TAILQ_PREV(bt, vmem_seglist, bt_seglist));
		bt_insbusy(vm, btnew);
		vmem_check(vm);
		VMEM_UNLOCK(vm);
	} else {
		bt->bt_type = BT_TYPE_BUSY;
		bt_insbusy(vm, bt);
		vmem_check(vm);
		VMEM_UNLOCK(vm);
		bt_free(vm, btnew);
		btnew = bt;
	}
	if (btnew2 != NULL) {
		bt_free(vm, btnew2);
	}
	KASSERT(btnew->bt_size >= size);
	btnew->bt_type = BT_TYPE_BUSY;

	if (addrp != NULL)
		*addrp = btnew->bt_start;
	return 0;
}
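
/*
 * Usage sketch for the constrained interface (illustrative numbers,
 * patterned on the UNITTEST code below): allocate 0x100 bytes somewhere
 * in [0xffffff00, 0xffffffff]:
 *
 *	vmem_addr_t p;
 *	int rc = vmem_xalloc(vm, 0x100, 0, 0, 0,
 *	    0xffffff00, 0xffffffff, VM_SLEEP | VM_INSTANTFIT, &p);
 *
 * Such an allocation must be returned with vmem_xfree(), not
 * vmem_free(), so that it bypasses the quantum cache in both directions.
 */
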
/*
 * vmem_free: free the resource to the arena.
 */

void
vmem_free(vmem_t *vm, vmem_addr_t addr, vmem_size_t size)
{

	KASSERT(size > 0);

#if defined(QCACHE)
	if (size <= vm->vm_qcache_max) {
		int qidx = (size + vm->vm_quantum_mask) >> vm->vm_quantum_shift;
		qcache_t *qc = vm->vm_qcache[qidx - 1];

		pool_cache_put(qc->qc_cache, (void *)addr);
		return;
	}
#endif /* defined(QCACHE) */

	vmem_xfree(vm, addr, size);
}

void
vmem_xfree(vmem_t *vm, vmem_addr_t addr, vmem_size_t size)
{
	bt_t *bt;
	bt_t *t;
	LIST_HEAD(, vmem_btag) tofree;

	LIST_INIT(&tofree);

	KASSERT(size > 0);

	VMEM_LOCK(vm);

	bt = bt_lookupbusy(vm, addr);
	KASSERT(bt != NULL);
	KASSERT(bt->bt_start == addr);
	KASSERT(bt->bt_size == vmem_roundup_size(vm, size) ||
	    bt->bt_size - vmem_roundup_size(vm, size) <= vm->vm_quantum_mask);
	KASSERT(bt->bt_type == BT_TYPE_BUSY);
	bt_rembusy(vm, bt);
	bt->bt_type = BT_TYPE_FREE;

	/* coalesce */
	t = TAILQ_NEXT(bt, bt_seglist);
	if (t != NULL && t->bt_type == BT_TYPE_FREE) {
		KASSERT(BT_END(bt) < t->bt_start);	/* YYY */
		bt_remfree(vm, t);
		bt_remseg(vm, t);
		bt->bt_size += t->bt_size;
		LIST_INSERT_HEAD(&tofree, t, bt_freelist);
	}
	t = TAILQ_PREV(bt, vmem_seglist, bt_seglist);
	if (t != NULL && t->bt_type == BT_TYPE_FREE) {
		KASSERT(BT_END(t) < bt->bt_start);	/* YYY */
		bt_remfree(vm, t);
		bt_remseg(vm, t);
		bt->bt_size += t->bt_size;
		bt->bt_start = t->bt_start;
		LIST_INSERT_HEAD(&tofree, t, bt_freelist);
	}

	t = TAILQ_PREV(bt, vmem_seglist, bt_seglist);
	KASSERT(t != NULL);
	KASSERT(BT_ISSPAN_P(t) || t->bt_type == BT_TYPE_BUSY);
	if (vm->vm_releasefn != NULL && t->bt_type == BT_TYPE_SPAN &&
	    t->bt_size == bt->bt_size) {
		vmem_addr_t spanaddr;
		vmem_size_t spansize;

		KASSERT(t->bt_start == bt->bt_start);
		spanaddr = bt->bt_start;
		spansize = bt->bt_size;
		bt_remseg(vm, bt);
		LIST_INSERT_HEAD(&tofree, bt, bt_freelist);
		bt_remseg(vm, t);
		LIST_INSERT_HEAD(&tofree, t, bt_freelist);
		vm->vm_size -= spansize;
		VMEM_CONDVAR_BROADCAST(vm);
		VMEM_UNLOCK(vm);
		(*vm->vm_releasefn)(vm->vm_arg, spanaddr, spansize);
	} else {
		bt_insfree(vm, bt);
		VMEM_CONDVAR_BROADCAST(vm);
		VMEM_UNLOCK(vm);
	}

	while (!LIST_EMPTY(&tofree)) {
		t = LIST_FIRST(&tofree);
		LIST_REMOVE(t, bt_freelist);
		bt_free(vm, t);
	}

	bt_freetrim(vm, BT_MAXFREE);
}

/*
 * vmem_add:
 *
 * => caller must ensure appropriate spl,
 *    if the arena can be accessed from interrupt context.
 */
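
/*
 * E.g. (mirroring the UNITTEST code below) an arena created empty can
 * be seeded with address ranges after the fact:
 *
 *	vmem_add(vm, 500, 1000, VM_SLEEP);
 *
 * Spans added this way get BT_TYPE_SPAN_STATIC tags and are never
 * handed back to the arena's release function.
 */
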
int
vmem_add(vmem_t *vm, vmem_addr_t addr, vmem_size_t size, vm_flag_t flags)
{

	return vmem_add1(vm, addr, size, flags, BT_TYPE_SPAN_STATIC);
}

/*
 * vmem_size: information about an arena's size
 *
 * => return the free/allocated size in the arena
 */
vmem_size_t
vmem_size(vmem_t *vm, int typemask)
{

	switch (typemask) {
	case VMEM_ALLOC:
		return vm->vm_inuse;
	case VMEM_FREE:
		return vm->vm_size - vm->vm_inuse;
	case VMEM_FREE|VMEM_ALLOC:
		return vm->vm_size;
	default:
		panic("vmem_size");
	}
}

/* ---- rehash */

#if defined(_KERNEL)
static struct callout vmem_rehash_ch;
static int vmem_rehash_interval;
static struct workqueue *vmem_rehash_wq;
static struct work vmem_rehash_wk;

static void
vmem_rehash_all(struct work *wk, void *dummy)
{
	vmem_t *vm;

	KASSERT(wk == &vmem_rehash_wk);
	mutex_enter(&vmem_list_lock);
	LIST_FOREACH(vm, &vmem_list, vm_alllist) {
		size_t desired;
		size_t current;

		if (!VMEM_TRYLOCK(vm)) {
			continue;
		}
		desired = vm->vm_nbusytag;
		current = vm->vm_hashsize;
		VMEM_UNLOCK(vm);

		if (desired > VMEM_HASHSIZE_MAX) {
			desired = VMEM_HASHSIZE_MAX;
		} else if (desired < VMEM_HASHSIZE_MIN) {
			desired = VMEM_HASHSIZE_MIN;
		}
		if (desired > current * 2 || desired * 2 < current) {
			vmem_rehash(vm, desired, VM_NOSLEEP);
		}
	}
	mutex_exit(&vmem_list_lock);

	callout_schedule(&vmem_rehash_ch, vmem_rehash_interval);
}
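
/*
 * E.g. (made-up numbers) an arena with 40 busy tags hashed into 16
 * buckets has desired == 40 > current * 2 == 32, so the next pass grows
 * the table to 40 buckets, keeping the bt_lookupbusy() chains short.
 */
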
static void
vmem_rehash_all_kick(void *dummy)
{

	workqueue_enqueue(vmem_rehash_wq, &vmem_rehash_wk, NULL);
}

void
vmem_rehash_start(void)
{
	int error;

	error = workqueue_create(&vmem_rehash_wq, "vmem_rehash",
	    vmem_rehash_all, NULL, PRI_VM, IPL_SOFTCLOCK, WQ_MPSAFE);
	if (error) {
		panic("%s: workqueue_create %d\n", __func__, error);
	}
	callout_init(&vmem_rehash_ch, CALLOUT_MPSAFE);
	callout_setfunc(&vmem_rehash_ch, vmem_rehash_all_kick, NULL);

	vmem_rehash_interval = hz * 10;
	callout_schedule(&vmem_rehash_ch, vmem_rehash_interval);
}
#endif /* defined(_KERNEL) */

/* ---- debug */

#if defined(DDB) || defined(UNITTEST) || defined(VMEM_SANITY)

static void bt_dump(const bt_t *, void (*)(const char *, ...)
    __printflike(1, 2));

static const char *
bt_type_string(int type)
{
	static const char * const table[] = {
		[BT_TYPE_BUSY] = "busy",
		[BT_TYPE_FREE] = "free",
		[BT_TYPE_SPAN] = "span",
		[BT_TYPE_SPAN_STATIC] = "static span",
	};

	if (type >= __arraycount(table)) {
		return "BOGUS";
	}
	return table[type];
}

static void
bt_dump(const bt_t *bt, void (*pr)(const char *, ...))
{

	(*pr)("\t%p: %" PRIu64 ", %" PRIu64 ", %d(%s)\n",
	    bt, (uint64_t)bt->bt_start, (uint64_t)bt->bt_size,
	    bt->bt_type, bt_type_string(bt->bt_type));
}

static void
vmem_dump(const vmem_t *vm, void (*pr)(const char *, ...)
    __printflike(1, 2))
{
	const bt_t *bt;
	int i;

	(*pr)("vmem %p '%s'\n", vm, vm->vm_name);
	TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
		bt_dump(bt, pr);
	}

	for (i = 0; i < VMEM_MAXORDER; i++) {
		const struct vmem_freelist *fl = &vm->vm_freelist[i];

		if (LIST_EMPTY(fl)) {
			continue;
		}

		(*pr)("freelist[%d]\n", i);
		LIST_FOREACH(bt, fl, bt_freelist) {
			bt_dump(bt, pr);
		}
	}
}

#endif /* defined(DDB) || defined(UNITTEST) || defined(VMEM_SANITY) */

#if defined(DDB)
static bt_t *
vmem_whatis_lookup(vmem_t *vm, uintptr_t addr)
{
	bt_t *bt;

	TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
		if (BT_ISSPAN_P(bt)) {
			continue;
		}
		if (bt->bt_start <= addr && addr <= BT_END(bt)) {
			return bt;
		}
	}

	return NULL;
}

void
vmem_whatis(uintptr_t addr, void (*pr)(const char *, ...))
{
	vmem_t *vm;

	LIST_FOREACH(vm, &vmem_list, vm_alllist) {
		bt_t *bt;

		bt = vmem_whatis_lookup(vm, addr);
		if (bt == NULL) {
			continue;
		}
		(*pr)("%p is %p+%zu in VMEM '%s' (%s)\n",
		    (void *)addr, (void *)bt->bt_start,
		    (size_t)(addr - bt->bt_start), vm->vm_name,
		    (bt->bt_type == BT_TYPE_BUSY) ? "allocated" : "free");
	}
}

void
vmem_printall(const char *modif, void (*pr)(const char *, ...))
{
	const vmem_t *vm;

	LIST_FOREACH(vm, &vmem_list, vm_alllist) {
		vmem_dump(vm, pr);
	}
}

void
vmem_print(uintptr_t addr, const char *modif, void (*pr)(const char *, ...))
{
	const vmem_t *vm = (const void *)addr;

	vmem_dump(vm, pr);
}
#endif /* defined(DDB) */

#if defined(_KERNEL)
#define vmem_printf printf
#else
#include <stdio.h>
#include <stdarg.h>

static void
vmem_printf(const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
}
#endif

#if defined(VMEM_SANITY)

static bool
vmem_check_sanity(vmem_t *vm)
{
	const bt_t *bt, *bt2;

	KASSERT(vm != NULL);

	TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
		if (bt->bt_start > BT_END(bt)) {
			printf("corrupted tag\n");
			bt_dump(bt, vmem_printf);
			return false;
		}
	}
	TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
		TAILQ_FOREACH(bt2, &vm->vm_seglist, bt_seglist) {
			if (bt == bt2) {
				continue;
			}
			if (BT_ISSPAN_P(bt) != BT_ISSPAN_P(bt2)) {
				continue;
			}
			if (bt->bt_start <= BT_END(bt2) &&
			    bt2->bt_start <= BT_END(bt)) {
				printf("overlapping tags\n");
				bt_dump(bt, vmem_printf);
				bt_dump(bt2, vmem_printf);
				return false;
			}
		}
	}

	return true;
}

static void
vmem_check(vmem_t *vm)
{

	if (!vmem_check_sanity(vm)) {
		panic("insanity vmem %p", vm);
	}
}

#endif /* defined(VMEM_SANITY) */

#if defined(UNITTEST)
int
main(void)
{
	int rc;
	vmem_t *vm;
	vmem_addr_t p;
	struct reg {
		vmem_addr_t p;
		vmem_size_t sz;
		bool x;
	} *reg = NULL;
	int nreg = 0;
	int nalloc = 0;
	int nfree = 0;
	vmem_size_t total = 0;
#if 1
	vm_flag_t strat = VM_INSTANTFIT;
#else
	vm_flag_t strat = VM_BESTFIT;
#endif

	vm = vmem_create("test", 0, 0, 1, NULL, NULL, NULL, 0, VM_SLEEP,
#ifdef _KERNEL
	    IPL_NONE
#else
	    0
#endif
	    );
	if (vm == NULL) {
		printf("vmem_create\n");
		exit(EXIT_FAILURE);
	}
	vmem_dump(vm, vmem_printf);

	rc = vmem_add(vm, 0, 50, VM_SLEEP);
	assert(rc == 0);
	rc = vmem_add(vm, 100, 200, VM_SLEEP);
	assert(rc == 0);
	rc = vmem_add(vm, 2000, 1, VM_SLEEP);
	assert(rc == 0);
	rc = vmem_add(vm, 40000, 65536, VM_SLEEP);
	assert(rc == 0);
	rc = vmem_add(vm, 10000, 10000, VM_SLEEP);
	assert(rc == 0);
	rc = vmem_add(vm, 500, 1000, VM_SLEEP);
	assert(rc == 0);
	rc = vmem_add(vm, 0xffffff00, 0x100, VM_SLEEP);
	assert(rc == 0);
	rc = vmem_xalloc(vm, 0x101, 0, 0, 0,
	    0xffffff00, 0xffffffff, strat|VM_SLEEP, &p);
	assert(rc != 0);
	rc = vmem_xalloc(vm, 50, 0, 0, 0, 0, 49, strat|VM_SLEEP, &p);
	assert(rc == 0 && p == 0);
	vmem_xfree(vm, p, 50);
	rc = vmem_xalloc(vm, 25, 0, 0, 0, 0, 24, strat|VM_SLEEP, &p);
	assert(rc == 0 && p == 0);
	rc = vmem_xalloc(vm, 0x100, 0, 0, 0,
	    0xffffff01, 0xffffffff, strat|VM_SLEEP, &p);
	assert(rc != 0);
	rc = vmem_xalloc(vm, 0x100, 0, 0, 0,
	    0xffffff00, 0xfffffffe, strat|VM_SLEEP, &p);
	assert(rc != 0);
	rc = vmem_xalloc(vm, 0x100, 0, 0, 0,
	    0xffffff00, 0xffffffff, strat|VM_SLEEP, &p);
	assert(rc == 0);
	vmem_dump(vm, vmem_printf);
	for (;;) {
		struct reg *r;
		int t = rand() % 100;

		if (t > 45) {
			/* alloc */
			vmem_size_t sz = rand() % 500 + 1;
			bool x;
			vmem_size_t align, phase, nocross;
			vmem_addr_t minaddr, maxaddr;

			if (t > 70) {
				x = true;
				/* XXX */
				align = 1 << (rand() % 15);
				phase = rand() % 65536;
				nocross = 1 << (rand() % 15);
				if (align <= phase) {
					phase = 0;
				}
				if (VMEM_CROSS_P(phase, phase + sz - 1,
				    nocross)) {
					nocross = 0;
				}
				do {
					minaddr = rand() % 50000;
					maxaddr = rand() % 70000;
				} while (minaddr > maxaddr);
				printf("=== xalloc %" PRIu64
				    " align=%" PRIu64 ", phase=%" PRIu64
				    ", nocross=%" PRIu64 ", min=%" PRIu64
				    ", max=%" PRIu64 "\n",
				    (uint64_t)sz,
				    (uint64_t)align,
				    (uint64_t)phase,
				    (uint64_t)nocross,
				    (uint64_t)minaddr,
				    (uint64_t)maxaddr);
				rc = vmem_xalloc(vm, sz, align, phase, nocross,
				    minaddr, maxaddr, strat|VM_SLEEP, &p);
			} else {
				x = false;
				printf("=== alloc %" PRIu64 "\n", (uint64_t)sz);
				rc = vmem_alloc(vm, sz, strat|VM_SLEEP, &p);
			}
			printf("-> %" PRIu64 "\n", (uint64_t)p);
			vmem_dump(vm, vmem_printf);
			if (rc != 0) {
				if (x) {
					continue;
				}
				break;
			}
			nreg++;
			reg = realloc(reg, sizeof(*reg) * nreg);
			r = &reg[nreg - 1];
			r->p = p;
			r->sz = sz;
			r->x = x;
			total += sz;
			nalloc++;
		} else if (nreg != 0) {
			/* free */
			r = &reg[rand() % nreg];
			printf("=== free %" PRIu64 ", %" PRIu64 "\n",
			    (uint64_t)r->p, (uint64_t)r->sz);
			if (r->x) {
				vmem_xfree(vm, r->p, r->sz);
			} else {
				vmem_free(vm, r->p, r->sz);
			}
			total -= r->sz;
			vmem_dump(vm, vmem_printf);
			*r = reg[nreg - 1];
			nreg--;
			nfree++;
		}
		printf("total=%" PRIu64 "\n", (uint64_t)total);
	}
	fprintf(stderr, "total=%" PRIu64 ", nalloc=%d, nfree=%d\n",
	    (uint64_t)total, nalloc, nfree);
	exit(EXIT_SUCCESS);
}
#endif /* defined(UNITTEST) */