/*	$NetBSD: subr_vmem.c,v 1.87 2013/11/22 21:04:11 christos Exp $	*/

/*-
 * Copyright (c)2006,2007,2008,2009 YAMAMOTO Takashi,
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * reference:
 * -	Magazines and Vmem: Extending the Slab Allocator
 *	to Many CPUs and Arbitrary Resources
 *	http://www.usenix.org/event/usenix01/bonwick.html
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_vmem.c,v 1.87 2013/11/22 21:04:11 christos Exp $");

#if defined(_KERNEL)
#include "opt_ddb.h"
#endif /* defined(_KERNEL) */

#include <sys/param.h>
#include <sys/hash.h>
#include <sys/queue.h>
#include <sys/bitops.h>

#if defined(_KERNEL)
#include <sys/systm.h>
#include <sys/kernel.h>	/* hz */
#include <sys/callout.h>
#include <sys/kmem.h>
#include <sys/pool.h>
#include <sys/vmem.h>
#include <sys/vmem_impl.h>
#include <sys/workqueue.h>
#include <sys/atomic.h>
#include <uvm/uvm.h>
#include <uvm/uvm_extern.h>
#include <uvm/uvm_km.h>
#include <uvm/uvm_page.h>
#include <uvm/uvm_pdaemon.h>
#else /* defined(_KERNEL) */
#include <stdio.h>
#include <errno.h>
#include <assert.h>
#include <stdlib.h>
#include <string.h>
#include "../sys/vmem.h"
#include "../sys/vmem_impl.h"
#endif /* defined(_KERNEL) */


#if defined(_KERNEL)
#include <sys/evcnt.h>
#define VMEM_EVCNT_DEFINE(name) \
struct evcnt vmem_evcnt_##name = EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, \
    "vmemev", #name); \
EVCNT_ATTACH_STATIC(vmem_evcnt_##name);
#define VMEM_EVCNT_INCR(ev)	vmem_evcnt_##ev.ev_count++
#define VMEM_EVCNT_DECR(ev)	vmem_evcnt_##ev.ev_count--

VMEM_EVCNT_DEFINE(bt_pages)
VMEM_EVCNT_DEFINE(bt_count)
VMEM_EVCNT_DEFINE(bt_inuse)

#define	VMEM_CONDVAR_INIT(vm, wchan)	cv_init(&vm->vm_cv, wchan)
#define	VMEM_CONDVAR_DESTROY(vm)	cv_destroy(&vm->vm_cv)
#define	VMEM_CONDVAR_WAIT(vm)		cv_wait(&vm->vm_cv, &vm->vm_lock)
#define	VMEM_CONDVAR_BROADCAST(vm)	cv_broadcast(&vm->vm_cv)

#else /* defined(_KERNEL) */

#define VMEM_EVCNT_INCR(ev)	/* nothing */
#define VMEM_EVCNT_DECR(ev)	/* nothing */

#define	VMEM_CONDVAR_INIT(vm, wchan)	/* nothing */
#define	VMEM_CONDVAR_DESTROY(vm)	/* nothing */
#define	VMEM_CONDVAR_WAIT(vm)		/* nothing */
#define	VMEM_CONDVAR_BROADCAST(vm)	/* nothing */

#define	UNITTEST
#define	KASSERT(a)		assert(a)
#define	mutex_init(a, b, c)	/* nothing */
#define	mutex_destroy(a)	/* nothing */
#define	mutex_enter(a)		/* nothing */
#define	mutex_tryenter(a)	true
#define	mutex_exit(a)		/* nothing */
#define	mutex_owned(a)		true
#define	ASSERT_SLEEPABLE()	/* nothing */
#define	panic(...)		do { printf(__VA_ARGS__); abort(); } while (0)
#endif /* defined(_KERNEL) */

#if defined(VMEM_SANITY)
static void vmem_check(vmem_t *);
#else /* defined(VMEM_SANITY) */
#define vmem_check(vm)	/* nothing */
#endif /* defined(VMEM_SANITY) */

#define	VMEM_HASHSIZE_MIN	1	/* XXX */
#define	VMEM_HASHSIZE_MAX	65536	/* XXX */
#define	VMEM_HASHSIZE_INIT	1

#define	VM_FITMASK	(VM_BESTFIT | VM_INSTANTFIT)

#if defined(_KERNEL)
static bool vmem_bootstrapped = false;
static kmutex_t vmem_list_lock;
static LIST_HEAD(, vmem) vmem_list = LIST_HEAD_INITIALIZER(vmem_list);
#endif /* defined(_KERNEL) */

/* ---- misc */

#define	VMEM_LOCK(vm)		mutex_enter(&vm->vm_lock)
#define	VMEM_TRYLOCK(vm)	mutex_tryenter(&vm->vm_lock)
#define	VMEM_UNLOCK(vm)		mutex_exit(&vm->vm_lock)
#define	VMEM_LOCK_INIT(vm, ipl)	mutex_init(&vm->vm_lock, MUTEX_DEFAULT, ipl)
#define	VMEM_LOCK_DESTROY(vm)	mutex_destroy(&vm->vm_lock)
#define	VMEM_ASSERT_LOCKED(vm)	KASSERT(mutex_owned(&vm->vm_lock))

#define	VMEM_ALIGNUP(addr, align) \
	(-(-(addr) & -(align)))

#define	VMEM_CROSS_P(addr1, addr2, boundary) \
	((((addr1) ^ (addr2)) & -(boundary)) != 0)

#define	ORDER2SIZE(order)	((vmem_size_t)1 << (order))
#define	SIZE2ORDER(size)	((int)ilog2(size))
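
/*
 * VMEM_ALIGNUP relies on two's complement arithmetic: -(align) is a
 * mask with the low log2(align) bits clear, so -(-(addr) & -(align))
 * rounds addr up to a multiple of align.  For illustration (values
 * truncated to 16 bits for brevity):
 *
 *	VMEM_ALIGNUP(0x123, 0x10)
 *	    = -(-0x123 & -0x10) = -(0xfedd & 0xfff0) = -0xfed0 = 0x130
 *
 * VMEM_CROSS_P(addr1, addr2, boundary) is true iff addr1 and addr2
 * fall in different boundary-aligned blocks: the XOR keeps the bits
 * that differ, and & -(boundary) tests those at or above the
 * boundary bit.
 */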

#if !defined(_KERNEL)
#define	xmalloc(sz, flags)	malloc(sz)
#define	xfree(p, sz)		free(p)
#define	bt_alloc(vm, flags)	malloc(sizeof(bt_t))
#define	bt_free(vm, bt)		free(bt)
#else /* defined(_KERNEL) */

#define	xmalloc(sz, flags) \
	kmem_alloc(sz, ((flags) & VM_SLEEP) ? KM_SLEEP : KM_NOSLEEP)
#define	xfree(p, sz)		kmem_free(p, sz)

/*
 * BT_RESERVE calculation:
 * we allocate memory for boundary tags with vmem; therefore we have
 * to keep a reserve of bts used to allocate memory for bts.
 * This reserve is 4 for each arena involved in allocating vmem's memory.
 * BT_MAXFREE: don't cache excessive counts of bts in arenas.
 */
#define STATIC_BT_COUNT 200
#define BT_MINRESERVE 4
#define BT_MAXFREE 64

static struct vmem_btag static_bts[STATIC_BT_COUNT];
static int static_bt_count = STATIC_BT_COUNT;

static struct vmem kmem_va_meta_arena_store;
vmem_t *kmem_va_meta_arena;
static struct vmem kmem_meta_arena_store;
vmem_t *kmem_meta_arena;

static kmutex_t vmem_refill_lock;
static kmutex_t vmem_btag_lock;
static LIST_HEAD(, vmem_btag) vmem_btag_freelist;
static size_t vmem_btag_freelist_count = 0;
static size_t vmem_btag_count = STATIC_BT_COUNT;

/* ---- boundary tag */

#define	BT_PER_PAGE	(PAGE_SIZE / sizeof(bt_t))

static int bt_refill(vmem_t *vm, vm_flag_t flags);

static int
bt_refillglobal(vm_flag_t flags)
{
	vmem_addr_t va;
	bt_t *btp;
	bt_t *bt;
	int i;

	mutex_enter(&vmem_refill_lock);

	mutex_enter(&vmem_btag_lock);
	if (vmem_btag_freelist_count > 0) {
		mutex_exit(&vmem_btag_lock);
		mutex_exit(&vmem_refill_lock);
		return 0;
	}
	mutex_exit(&vmem_btag_lock);

	if (vmem_alloc(kmem_meta_arena, PAGE_SIZE,
	    (flags & ~VM_FITMASK) | VM_INSTANTFIT | VM_POPULATING, &va) != 0) {
		mutex_exit(&vmem_refill_lock);
		return ENOMEM;
	}
	VMEM_EVCNT_INCR(bt_pages);

	mutex_enter(&vmem_btag_lock);
	btp = (void *) va;
	for (i = 0; i < (BT_PER_PAGE); i++) {
		bt = btp;
		memset(bt, 0, sizeof(*bt));
		LIST_INSERT_HEAD(&vmem_btag_freelist, bt,
		    bt_freelist);
		vmem_btag_freelist_count++;
		vmem_btag_count++;
		VMEM_EVCNT_INCR(bt_count);
		btp++;
	}
	mutex_exit(&vmem_btag_lock);

	bt_refill(kmem_arena, (flags & ~VM_FITMASK)
	    | VM_INSTANTFIT | VM_POPULATING);
	bt_refill(kmem_va_meta_arena, (flags & ~VM_FITMASK)
	    | VM_INSTANTFIT | VM_POPULATING);
	bt_refill(kmem_meta_arena, (flags & ~VM_FITMASK)
	    | VM_INSTANTFIT | VM_POPULATING);

	mutex_exit(&vmem_refill_lock);

	return 0;
}

static int
bt_refill(vmem_t *vm, vm_flag_t flags)
{
	bt_t *bt;

	if (!(flags & VM_POPULATING)) {
		bt_refillglobal(flags);
	}

	VMEM_LOCK(vm);
	mutex_enter(&vmem_btag_lock);
	while (!LIST_EMPTY(&vmem_btag_freelist) &&
	    vm->vm_nfreetags <= BT_MINRESERVE) {
		bt = LIST_FIRST(&vmem_btag_freelist);
		LIST_REMOVE(bt, bt_freelist);
		LIST_INSERT_HEAD(&vm->vm_freetags, bt, bt_freelist);
		vm->vm_nfreetags++;
		vmem_btag_freelist_count--;
	}
	mutex_exit(&vmem_btag_lock);

	if (vm->vm_nfreetags == 0) {
		VMEM_UNLOCK(vm);
		return ENOMEM;
	}
	VMEM_UNLOCK(vm);

	return 0;
}

static inline bt_t *
bt_alloc(vmem_t *vm, vm_flag_t flags)
{
	bt_t *bt;
again:
	VMEM_LOCK(vm);
	if (vm->vm_nfreetags <= BT_MINRESERVE &&
	    (flags & VM_POPULATING) == 0) {
		VMEM_UNLOCK(vm);
		if (bt_refill(vm, VM_NOSLEEP | VM_INSTANTFIT)) {
			return NULL;
		}
		goto again;
	}
	bt = LIST_FIRST(&vm->vm_freetags);
	LIST_REMOVE(bt, bt_freelist);
	vm->vm_nfreetags--;
	VMEM_UNLOCK(vm);
	VMEM_EVCNT_INCR(bt_inuse);

	return bt;
}
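
/*
 * The reserve breaks the recursion inherent in allocating tags from
 * vmem itself: refilling an arena's tag list may allocate a page from
 * kmem_meta_arena, which itself consumes boundary tags.  Allocations
 * made while refilling carry VM_POPULATING, and bt_alloc lets those
 * dip into the BT_MINRESERVE tags that ordinary allocations may not
 * touch.
 */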

static inline void
bt_free(vmem_t *vm, bt_t *bt)
{

	VMEM_LOCK(vm);
	LIST_INSERT_HEAD(&vm->vm_freetags, bt, bt_freelist);
	vm->vm_nfreetags++;
	while (vm->vm_nfreetags > BT_MAXFREE) {
		bt = LIST_FIRST(&vm->vm_freetags);
		LIST_REMOVE(bt, bt_freelist);
		vm->vm_nfreetags--;
		mutex_enter(&vmem_btag_lock);
		LIST_INSERT_HEAD(&vmem_btag_freelist, bt, bt_freelist);
		vmem_btag_freelist_count++;
		mutex_exit(&vmem_btag_lock);
	}
	VMEM_UNLOCK(vm);
	VMEM_EVCNT_DECR(bt_inuse);
}

#endif /* defined(_KERNEL) */

/*
 * freelist[0] ... [1, 1]
 * freelist[1] ... [2, 3]
 * freelist[2] ... [4, 7]
 * freelist[3] ... [8, 15]
 *  :
 * freelist[n] ... [(1 << n), (1 << (n + 1)) - 1]
 *  :
 */

static struct vmem_freelist *
bt_freehead_tofree(vmem_t *vm, vmem_size_t size)
{
	const vmem_size_t qsize = size >> vm->vm_quantum_shift;
	const int idx = SIZE2ORDER(qsize);

	KASSERT(size != 0 && qsize != 0);
	KASSERT((size & vm->vm_quantum_mask) == 0);
	KASSERT(idx >= 0);
	KASSERT(idx < VMEM_MAXORDER);

	return &vm->vm_freelist[idx];
}

/*
 * bt_freehead_toalloc: return the freelist for the given size and allocation
 * strategy.
 *
 * for VM_INSTANTFIT, return the list on which any block is large enough
 * for the requested size.  otherwise, return the list which can contain
 * blocks large enough for the requested size.
 */

static struct vmem_freelist *
bt_freehead_toalloc(vmem_t *vm, vmem_size_t size, vm_flag_t strat)
{
	const vmem_size_t qsize = size >> vm->vm_quantum_shift;
	int idx = SIZE2ORDER(qsize);

	KASSERT(size != 0 && qsize != 0);
	KASSERT((size & vm->vm_quantum_mask) == 0);

	if (strat == VM_INSTANTFIT && ORDER2SIZE(idx) != qsize) {
		idx++;
		/* check too large request? */
	}
	KASSERT(idx >= 0);
	KASSERT(idx < VMEM_MAXORDER);

	return &vm->vm_freelist[idx];
}
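
/*
 * Example: with a quantum of 4096 (quantum_shift 12), a request of
 * 24576 bytes is qsize = 6 quanta, so SIZE2ORDER(6) == 2 and a free
 * tag of that size lives on freelist[2] ([4, 7] quanta).  For an
 * INSTANTFIT allocation of the same size, 6 is not a power of two,
 * so the search starts one list higher, at freelist[3] ([8, 15]
 * quanta), where any block is guaranteed to fit the request.
 */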

/* ---- boundary tag hash */

static struct vmem_hashlist *
bt_hashhead(vmem_t *vm, vmem_addr_t addr)
{
	struct vmem_hashlist *list;
	unsigned int hash;

	hash = hash32_buf(&addr, sizeof(addr), HASH32_BUF_INIT);
	list = &vm->vm_hashlist[hash % vm->vm_hashsize];

	return list;
}

static bt_t *
bt_lookupbusy(vmem_t *vm, vmem_addr_t addr)
{
	struct vmem_hashlist *list;
	bt_t *bt;

	list = bt_hashhead(vm, addr);
	LIST_FOREACH(bt, list, bt_hashlist) {
		if (bt->bt_start == addr) {
			break;
		}
	}

	return bt;
}

static void
bt_rembusy(vmem_t *vm, bt_t *bt)
{

	KASSERT(vm->vm_nbusytag > 0);
	vm->vm_inuse -= bt->bt_size;
	vm->vm_nbusytag--;
	LIST_REMOVE(bt, bt_hashlist);
}

static void
bt_insbusy(vmem_t *vm, bt_t *bt)
{
	struct vmem_hashlist *list;

	KASSERT(bt->bt_type == BT_TYPE_BUSY);

	list = bt_hashhead(vm, bt->bt_start);
	LIST_INSERT_HEAD(list, bt, bt_hashlist);
	vm->vm_nbusytag++;
	vm->vm_inuse += bt->bt_size;
}

/* ---- boundary tag list */

static void
bt_remseg(vmem_t *vm, bt_t *bt)
{

	TAILQ_REMOVE(&vm->vm_seglist, bt, bt_seglist);
}

static void
bt_insseg(vmem_t *vm, bt_t *bt, bt_t *prev)
{

	TAILQ_INSERT_AFTER(&vm->vm_seglist, prev, bt, bt_seglist);
}

static void
bt_insseg_tail(vmem_t *vm, bt_t *bt)
{

	TAILQ_INSERT_TAIL(&vm->vm_seglist, bt, bt_seglist);
}

static void
bt_remfree(vmem_t *vm, bt_t *bt)
{

	KASSERT(bt->bt_type == BT_TYPE_FREE);

	LIST_REMOVE(bt, bt_freelist);
}

static void
bt_insfree(vmem_t *vm, bt_t *bt)
{
	struct vmem_freelist *list;

	list = bt_freehead_tofree(vm, bt->bt_size);
	LIST_INSERT_HEAD(list, bt, bt_freelist);
}

/* ---- vmem internal functions */

#if defined(QCACHE)
static inline vm_flag_t
prf_to_vmf(int prflags)
{
	vm_flag_t vmflags;

	KASSERT((prflags & ~(PR_LIMITFAIL | PR_WAITOK | PR_NOWAIT)) == 0);
	if ((prflags & PR_WAITOK) != 0) {
		vmflags = VM_SLEEP;
	} else {
		vmflags = VM_NOSLEEP;
	}
	return vmflags;
}

static inline int
vmf_to_prf(vm_flag_t vmflags)
{
	int prflags;

	if ((vmflags & VM_SLEEP) != 0) {
		prflags = PR_WAITOK;
	} else {
		prflags = PR_NOWAIT;
	}
	return prflags;
}

static size_t
qc_poolpage_size(size_t qcache_max)
{
	int i;

	for (i = 0; ORDER2SIZE(i) <= qcache_max * 3; i++) {
		/* nothing */
	}
	return ORDER2SIZE(i);
}

static void *
qc_poolpage_alloc(struct pool *pool, int prflags)
{
	qcache_t *qc = QC_POOL_TO_QCACHE(pool);
	vmem_t *vm = qc->qc_vmem;
	vmem_addr_t addr;

	if (vmem_alloc(vm, pool->pr_alloc->pa_pagesz,
	    prf_to_vmf(prflags) | VM_INSTANTFIT, &addr) != 0)
		return NULL;
	return (void *)addr;
}

static void
qc_poolpage_free(struct pool *pool, void *addr)
{
	qcache_t *qc = QC_POOL_TO_QCACHE(pool);
	vmem_t *vm = qc->qc_vmem;

	vmem_free(vm, (vmem_addr_t)addr, pool->pr_alloc->pa_pagesz);
}

static void
qc_init(vmem_t *vm, size_t qcache_max, int ipl)
{
	qcache_t *prevqc;
	struct pool_allocator *pa;
	int qcache_idx_max;
	int i;

	KASSERT((qcache_max & vm->vm_quantum_mask) == 0);
	if (qcache_max > (VMEM_QCACHE_IDX_MAX << vm->vm_quantum_shift)) {
		qcache_max = VMEM_QCACHE_IDX_MAX << vm->vm_quantum_shift;
	}
	vm->vm_qcache_max = qcache_max;
	pa = &vm->vm_qcache_allocator;
	memset(pa, 0, sizeof(*pa));
	pa->pa_alloc = qc_poolpage_alloc;
	pa->pa_free = qc_poolpage_free;
	pa->pa_pagesz = qc_poolpage_size(qcache_max);

	qcache_idx_max = qcache_max >> vm->vm_quantum_shift;
	prevqc = NULL;
	for (i = qcache_idx_max; i > 0; i--) {
		qcache_t *qc = &vm->vm_qcache_store[i - 1];
		size_t size = i << vm->vm_quantum_shift;
		pool_cache_t pc;

		qc->qc_vmem = vm;
		snprintf(qc->qc_name, sizeof(qc->qc_name), "%s-%zu",
		    vm->vm_name, size);

		pc = pool_cache_init(size,
		    ORDER2SIZE(vm->vm_quantum_shift), 0,
		    PR_NOALIGN | PR_NOTOUCH | PR_RECURSIVE /* XXX */,
		    qc->qc_name, pa, ipl, NULL, NULL, NULL);

		KASSERT(pc);

		qc->qc_cache = pc;
		KASSERT(qc->qc_cache != NULL);	/* XXX */
		if (prevqc != NULL &&
		    qc->qc_cache->pc_pool.pr_itemsperpage ==
		    prevqc->qc_cache->pc_pool.pr_itemsperpage) {
			pool_cache_destroy(qc->qc_cache);
			vm->vm_qcache[i - 1] = prevqc;
			continue;
		}
		qc->qc_cache->pc_pool.pr_qcache = qc;
		vm->vm_qcache[i - 1] = qc;
		prevqc = qc;
	}
}

static void
qc_destroy(vmem_t *vm)
{
	const qcache_t *prevqc;
	int i;
	int qcache_idx_max;

	qcache_idx_max = vm->vm_qcache_max >> vm->vm_quantum_shift;
	prevqc = NULL;
	for (i = 0; i < qcache_idx_max; i++) {
		qcache_t *qc = vm->vm_qcache[i];

		if (prevqc == qc) {
			continue;
		}
		pool_cache_destroy(qc->qc_cache);
		prevqc = qc;
	}
}
#endif
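
/*
 * The quantum cache fronts the arena with one pool_cache per multiple
 * of the quantum, up to qcache_max.  E.g. with quantum 4096 and
 * qcache_max 8192, allocations of 1..4096 bytes are served from the
 * "<name>-4096" cache and 4097..8192 from "<name>-8192"; only larger
 * requests (and cache refills via qc_poolpage_alloc) reach the arena
 * proper and take the vmem lock.
 */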

#if defined(_KERNEL)
static void
vmem_bootstrap(void)
{

	mutex_init(&vmem_list_lock, MUTEX_DEFAULT, IPL_VM);
	mutex_init(&vmem_refill_lock, MUTEX_DEFAULT, IPL_VM);
	mutex_init(&vmem_btag_lock, MUTEX_DEFAULT, IPL_VM);

	while (static_bt_count-- > 0) {
		bt_t *bt = &static_bts[static_bt_count];
		LIST_INSERT_HEAD(&vmem_btag_freelist, bt, bt_freelist);
		VMEM_EVCNT_INCR(bt_count);
		vmem_btag_freelist_count++;
	}
	vmem_bootstrapped = true;
}

void
vmem_subsystem_init(vmem_t *vm)
{

	kmem_va_meta_arena = vmem_init(&kmem_va_meta_arena_store, "vmem-va",
	    0, 0, PAGE_SIZE, vmem_alloc, vmem_free, vm,
	    0, VM_NOSLEEP | VM_BOOTSTRAP | VM_LARGEIMPORT,
	    IPL_VM);

	kmem_meta_arena = vmem_init(&kmem_meta_arena_store, "vmem-meta",
	    0, 0, PAGE_SIZE,
	    uvm_km_kmem_alloc, uvm_km_kmem_free, kmem_va_meta_arena,
	    0, VM_NOSLEEP | VM_BOOTSTRAP, IPL_VM);
}
#endif /* defined(_KERNEL) */

static int
vmem_add1(vmem_t *vm, vmem_addr_t addr, vmem_size_t size, vm_flag_t flags,
    int spanbttype)
{
	bt_t *btspan;
	bt_t *btfree;

	KASSERT((flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
	KASSERT((~flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
	KASSERT(spanbttype == BT_TYPE_SPAN ||
	    spanbttype == BT_TYPE_SPAN_STATIC);

	btspan = bt_alloc(vm, flags);
	if (btspan == NULL) {
		return ENOMEM;
	}
	btfree = bt_alloc(vm, flags);
	if (btfree == NULL) {
		bt_free(vm, btspan);
		return ENOMEM;
	}

	btspan->bt_type = spanbttype;
	btspan->bt_start = addr;
	btspan->bt_size = size;

	btfree->bt_type = BT_TYPE_FREE;
	btfree->bt_start = addr;
	btfree->bt_size = size;

	VMEM_LOCK(vm);
	bt_insseg_tail(vm, btspan);
	bt_insseg(vm, btfree, btspan);
	bt_insfree(vm, btfree);
	vm->vm_size += size;
	VMEM_UNLOCK(vm);

	return 0;
}

static void
vmem_destroy1(vmem_t *vm)
{

#if defined(QCACHE)
	qc_destroy(vm);
#endif /* defined(QCACHE) */
	if (vm->vm_hashlist != NULL) {
		int i;

		for (i = 0; i < vm->vm_hashsize; i++) {
			bt_t *bt;

			while ((bt = LIST_FIRST(&vm->vm_hashlist[i])) != NULL) {
				KASSERT(bt->bt_type == BT_TYPE_SPAN_STATIC);
				bt_free(vm, bt);
			}
		}
		if (vm->vm_hashlist != &vm->vm_hash0) {
			xfree(vm->vm_hashlist,
			    sizeof(struct vmem_hashlist *) * vm->vm_hashsize);
		}
	}

	while (vm->vm_nfreetags > 0) {
		bt_t *bt = LIST_FIRST(&vm->vm_freetags);
		LIST_REMOVE(bt, bt_freelist);
		vm->vm_nfreetags--;
		mutex_enter(&vmem_btag_lock);
#if defined (_KERNEL)
		LIST_INSERT_HEAD(&vmem_btag_freelist, bt, bt_freelist);
		vmem_btag_freelist_count++;
#endif /* defined(_KERNEL) */
		mutex_exit(&vmem_btag_lock);
	}

	VMEM_CONDVAR_DESTROY(vm);
	VMEM_LOCK_DESTROY(vm);
	xfree(vm, sizeof(*vm));
}

static int
vmem_import(vmem_t *vm, vmem_size_t size, vm_flag_t flags)
{
	vmem_addr_t addr;
	int rc;

	if (vm->vm_importfn == NULL) {
		return EINVAL;
	}

	if (vm->vm_flags & VM_LARGEIMPORT) {
		size *= 16;
	}

	if (vm->vm_flags & VM_XIMPORT) {
		rc = ((vmem_ximport_t *)vm->vm_importfn)(vm->vm_arg, size,
		    &size, flags, &addr);
	} else {
		rc = (vm->vm_importfn)(vm->vm_arg, size, flags, &addr);
	}
	if (rc) {
		return ENOMEM;
	}

	if (vmem_add1(vm, addr, size, flags, BT_TYPE_SPAN) != 0) {
		(*vm->vm_releasefn)(vm->vm_arg, addr, size);
		return ENOMEM;
	}

	return 0;
}
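
/*
 * Arenas stack through the import callbacks: a child arena with no
 * initial span calls vm_importfn to carve spans out of its source
 * arena, as vmem_subsystem_init does above.  A hypothetical sub-arena
 * drawing from a parent arena would be set up like:
 *
 *	vmem_t *sub = vmem_create("sub", 0, 0, PAGE_SIZE,
 *	    vmem_alloc, vmem_free, parent, 0, VM_NOSLEEP, IPL_VM);
 *
 * ("sub" and parent are made up for illustration.)
 */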

static int
vmem_rehash(vmem_t *vm, size_t newhashsize, vm_flag_t flags)
{
	bt_t *bt;
	int i;
	struct vmem_hashlist *newhashlist;
	struct vmem_hashlist *oldhashlist;
	size_t oldhashsize;

	KASSERT(newhashsize > 0);

	newhashlist =
	    xmalloc(sizeof(struct vmem_hashlist *) * newhashsize, flags);
	if (newhashlist == NULL) {
		return ENOMEM;
	}
	for (i = 0; i < newhashsize; i++) {
		LIST_INIT(&newhashlist[i]);
	}

	if (!VMEM_TRYLOCK(vm)) {
		xfree(newhashlist,
		    sizeof(struct vmem_hashlist *) * newhashsize);
		return EBUSY;
	}
	oldhashlist = vm->vm_hashlist;
	oldhashsize = vm->vm_hashsize;
	vm->vm_hashlist = newhashlist;
	vm->vm_hashsize = newhashsize;
	if (oldhashlist == NULL) {
		VMEM_UNLOCK(vm);
		return 0;
	}
	for (i = 0; i < oldhashsize; i++) {
		while ((bt = LIST_FIRST(&oldhashlist[i])) != NULL) {
			bt_rembusy(vm, bt); /* XXX */
			bt_insbusy(vm, bt);
		}
	}
	VMEM_UNLOCK(vm);

	if (oldhashlist != &vm->vm_hash0) {
		xfree(oldhashlist,
		    sizeof(struct vmem_hashlist *) * oldhashsize);
	}

	return 0;
}

/*
 * vmem_fit: check if a bt can satisfy the given restrictions.
 *
 * it is the caller's responsibility to ensure the region is big enough
 * before calling us.
 */

static int
vmem_fit(const bt_t *bt, vmem_size_t size, vmem_size_t align,
    vmem_size_t phase, vmem_size_t nocross,
    vmem_addr_t minaddr, vmem_addr_t maxaddr, vmem_addr_t *addrp)
{
	vmem_addr_t start;
	vmem_addr_t end;

	KASSERT(size > 0);
	KASSERT(bt->bt_size >= size); /* caller's responsibility */

	/*
	 * XXX assumption: vmem_addr_t and vmem_size_t are
	 * unsigned integer of the same size.
	 */

	start = bt->bt_start;
	if (start < minaddr) {
		start = minaddr;
	}
	end = BT_END(bt);
	if (end > maxaddr) {
		end = maxaddr;
	}
	if (start > end) {
		return ENOMEM;
	}

	start = VMEM_ALIGNUP(start - phase, align) + phase;
	if (start < bt->bt_start) {
		start += align;
	}
	if (VMEM_CROSS_P(start, start + size - 1, nocross)) {
		KASSERT(align < nocross);
		start = VMEM_ALIGNUP(start - phase, nocross) + phase;
	}
	if (start <= end && end - start >= size - 1) {
		KASSERT((start & (align - 1)) == phase);
		KASSERT(!VMEM_CROSS_P(start, start + size - 1, nocross));
		KASSERT(minaddr <= start);
		KASSERT(maxaddr == 0 || start + size - 1 <= maxaddr);
		KASSERT(bt->bt_start <= start);
		KASSERT(BT_END(bt) - start >= size - 1);
		*addrp = start;
		return 0;
	}
	return ENOMEM;
}
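
/*
 * Worked example: a free tag covering [0x1000, 0x2fff], with size
 * 0x100, align 0x200, phase 0x80, nocross 0:
 *
 *	start = VMEM_ALIGNUP(0x1000 - 0x80, 0x200) + 0x80 = 0x1080
 *
 * 0x1080 is the first address >= bt_start with (addr & 0x1ff) == 0x80,
 * and [0x1080, 0x117f] fits inside the tag, so vmem_fit succeeds and
 * returns 0x1080.
 */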

/* ---- vmem API */

/*
 * vmem_init: creates a vmem arena.
 */

vmem_t *
vmem_init(vmem_t *vm, const char *name,
    vmem_addr_t base, vmem_size_t size, vmem_size_t quantum,
    vmem_import_t *importfn, vmem_release_t *releasefn,
    vmem_t *arg, vmem_size_t qcache_max, vm_flag_t flags, int ipl)
{
	int i;

	KASSERT((flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
	KASSERT((~flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
	KASSERT(quantum > 0);

#if defined(_KERNEL)
	/* XXX: SMP, we get called early... */
	if (!vmem_bootstrapped) {
		vmem_bootstrap();
	}
#endif /* defined(_KERNEL) */

	if (vm == NULL) {
		vm = xmalloc(sizeof(*vm), flags);
	}
	if (vm == NULL) {
		return NULL;
	}

	VMEM_CONDVAR_INIT(vm, "vmem");
	VMEM_LOCK_INIT(vm, ipl);
	vm->vm_flags = flags;
	vm->vm_nfreetags = 0;
	LIST_INIT(&vm->vm_freetags);
	strlcpy(vm->vm_name, name, sizeof(vm->vm_name));
	vm->vm_quantum_mask = quantum - 1;
	vm->vm_quantum_shift = SIZE2ORDER(quantum);
	KASSERT(ORDER2SIZE(vm->vm_quantum_shift) == quantum);
	vm->vm_importfn = importfn;
	vm->vm_releasefn = releasefn;
	vm->vm_arg = arg;
	vm->vm_nbusytag = 0;
	vm->vm_size = 0;
	vm->vm_inuse = 0;
#if defined(QCACHE)
	qc_init(vm, qcache_max, ipl);
#endif /* defined(QCACHE) */

	TAILQ_INIT(&vm->vm_seglist);
	for (i = 0; i < VMEM_MAXORDER; i++) {
		LIST_INIT(&vm->vm_freelist[i]);
	}
	memset(&vm->vm_hash0, 0, sizeof(struct vmem_hashlist));
	vm->vm_hashsize = 1;
	vm->vm_hashlist = &vm->vm_hash0;

	if (size != 0) {
		if (vmem_add(vm, base, size, flags) != 0) {
			vmem_destroy1(vm);
			return NULL;
		}
	}

#if defined(_KERNEL)
	if (flags & VM_BOOTSTRAP) {
		bt_refill(vm, VM_NOSLEEP);
	}

	mutex_enter(&vmem_list_lock);
	LIST_INSERT_HEAD(&vmem_list, vm, vm_alllist);
	mutex_exit(&vmem_list_lock);
#endif /* defined(_KERNEL) */

	return vm;
}

/*
 * vmem_create: create an arena.
 *
 * => must not be called from interrupt context.
 */

vmem_t *
vmem_create(const char *name, vmem_addr_t base, vmem_size_t size,
    vmem_size_t quantum, vmem_import_t *importfn, vmem_release_t *releasefn,
    vmem_t *source, vmem_size_t qcache_max, vm_flag_t flags, int ipl)
{

	KASSERT((flags & (VM_XIMPORT)) == 0);

	return vmem_init(NULL, name, base, size, quantum,
	    importfn, releasefn, source, qcache_max, flags, ipl);
}

/*
 * vmem_xcreate: create an arena with an alternative import function,
 * one which can import a larger size than requested.
 *
 * => must not be called from interrupt context.
 */

vmem_t *
vmem_xcreate(const char *name, vmem_addr_t base, vmem_size_t size,
    vmem_size_t quantum, vmem_ximport_t *importfn, vmem_release_t *releasefn,
    vmem_t *source, vmem_size_t qcache_max, vm_flag_t flags, int ipl)
{

	KASSERT((flags & (VM_XIMPORT)) == 0);

	return vmem_init(NULL, name, base, size, quantum,
	    (vmem_import_t *)importfn, releasefn, source,
	    qcache_max, flags | VM_XIMPORT, ipl);
}

void
vmem_destroy(vmem_t *vm)
{

#if defined(_KERNEL)
	mutex_enter(&vmem_list_lock);
	LIST_REMOVE(vm, vm_alllist);
	mutex_exit(&vmem_list_lock);
#endif /* defined(_KERNEL) */

	vmem_destroy1(vm);
}

vmem_size_t
vmem_roundup_size(vmem_t *vm, vmem_size_t size)
{

	return (size + vm->vm_quantum_mask) & ~vm->vm_quantum_mask;
}
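
/*
 * Typical client usage (a sketch; "foo_arena" and the range are made
 * up for illustration):
 *
 *	vmem_addr_t va;
 *	vmem_t *foo_arena = vmem_create("foo", 0x1000, 0x10000, 1,
 *	    NULL, NULL, NULL, 0, VM_SLEEP, IPL_NONE);
 *	if (vmem_alloc(foo_arena, 64, VM_SLEEP | VM_INSTANTFIT, &va) == 0)
 *		vmem_free(foo_arena, va, 64);
 *
 * vmem_alloc rounds the request up to the quantum (vmem_roundup_size),
 * so the matching vmem_free must pass the same size as the allocation.
 */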
992 */ 993 994 int 995 vmem_alloc(vmem_t *vm, vmem_size_t size, vm_flag_t flags, vmem_addr_t *addrp) 996 { 997 const vm_flag_t strat __diagused = flags & VM_FITMASK; 998 999 KASSERT((flags & (VM_SLEEP|VM_NOSLEEP)) != 0); 1000 KASSERT((~flags & (VM_SLEEP|VM_NOSLEEP)) != 0); 1001 1002 KASSERT(size > 0); 1003 KASSERT(strat == VM_BESTFIT || strat == VM_INSTANTFIT); 1004 if ((flags & VM_SLEEP) != 0) { 1005 ASSERT_SLEEPABLE(); 1006 } 1007 1008 #if defined(QCACHE) 1009 if (size <= vm->vm_qcache_max) { 1010 void *p; 1011 int qidx = (size + vm->vm_quantum_mask) >> vm->vm_quantum_shift; 1012 qcache_t *qc = vm->vm_qcache[qidx - 1]; 1013 1014 p = pool_cache_get(qc->qc_cache, vmf_to_prf(flags)); 1015 if (addrp != NULL) 1016 *addrp = (vmem_addr_t)p; 1017 return (p == NULL) ? ENOMEM : 0; 1018 } 1019 #endif /* defined(QCACHE) */ 1020 1021 return vmem_xalloc(vm, size, 0, 0, 0, VMEM_ADDR_MIN, VMEM_ADDR_MAX, 1022 flags, addrp); 1023 } 1024 1025 int 1026 vmem_xalloc(vmem_t *vm, const vmem_size_t size0, vmem_size_t align, 1027 const vmem_size_t phase, const vmem_size_t nocross, 1028 const vmem_addr_t minaddr, const vmem_addr_t maxaddr, const vm_flag_t flags, 1029 vmem_addr_t *addrp) 1030 { 1031 struct vmem_freelist *list; 1032 struct vmem_freelist *first; 1033 struct vmem_freelist *end; 1034 bt_t *bt; 1035 bt_t *btnew; 1036 bt_t *btnew2; 1037 const vmem_size_t size = vmem_roundup_size(vm, size0); 1038 vm_flag_t strat = flags & VM_FITMASK; 1039 vmem_addr_t start; 1040 int rc; 1041 1042 KASSERT(size0 > 0); 1043 KASSERT(size > 0); 1044 KASSERT(strat == VM_BESTFIT || strat == VM_INSTANTFIT); 1045 if ((flags & VM_SLEEP) != 0) { 1046 ASSERT_SLEEPABLE(); 1047 } 1048 KASSERT((align & vm->vm_quantum_mask) == 0); 1049 KASSERT((align & (align - 1)) == 0); 1050 KASSERT((phase & vm->vm_quantum_mask) == 0); 1051 KASSERT((nocross & vm->vm_quantum_mask) == 0); 1052 KASSERT((nocross & (nocross - 1)) == 0); 1053 KASSERT((align == 0 && phase == 0) || phase < align); 1054 KASSERT(nocross == 0 || nocross >= size); 1055 KASSERT(minaddr <= maxaddr); 1056 KASSERT(!VMEM_CROSS_P(phase, phase + size - 1, nocross)); 1057 1058 if (align == 0) { 1059 align = vm->vm_quantum_mask + 1; 1060 } 1061 1062 /* 1063 * allocate boundary tags before acquiring the vmem lock. 1064 */ 1065 btnew = bt_alloc(vm, flags); 1066 if (btnew == NULL) { 1067 return ENOMEM; 1068 } 1069 btnew2 = bt_alloc(vm, flags); /* XXX not necessary if no restrictions */ 1070 if (btnew2 == NULL) { 1071 bt_free(vm, btnew); 1072 return ENOMEM; 1073 } 1074 1075 /* 1076 * choose a free block from which we allocate. 1077 */ 1078 retry_strat: 1079 first = bt_freehead_toalloc(vm, size, strat); 1080 end = &vm->vm_freelist[VMEM_MAXORDER]; 1081 retry: 1082 bt = NULL; 1083 VMEM_LOCK(vm); 1084 vmem_check(vm); 1085 if (strat == VM_INSTANTFIT) { 1086 /* 1087 * just choose the first block which satisfies our restrictions. 1088 * 1089 * note that we don't need to check the size of the blocks 1090 * because any blocks found on these list should be larger than 1091 * the given size. 1092 */ 1093 for (list = first; list < end; list++) { 1094 bt = LIST_FIRST(list); 1095 if (bt != NULL) { 1096 rc = vmem_fit(bt, size, align, phase, 1097 nocross, minaddr, maxaddr, &start); 1098 if (rc == 0) { 1099 goto gotit; 1100 } 1101 /* 1102 * don't bother to follow the bt_freelist link 1103 * here. the list can be very long and we are 1104 * told to run fast. blocks from the later free 1105 * lists are larger and have better chances to 1106 * satisfy our restrictions. 
	} else { /* VM_BESTFIT */
		/*
		 * we assume that, for space efficiency, it's better to
		 * allocate from a smaller block.  thus we will start searching
		 * from a lower-order list than VM_INSTANTFIT does.
		 * however, don't bother to find the smallest block in a free
		 * list because the list can be very long.  we can revisit it
		 * if/when it turns out to be a problem.
		 *
		 * note that the 'first' list can contain blocks smaller than
		 * the requested size.  thus we need to check bt_size.
		 */
		for (list = first; list < end; list++) {
			LIST_FOREACH(bt, list, bt_freelist) {
				if (bt->bt_size >= size) {
					rc = vmem_fit(bt, size, align, phase,
					    nocross, minaddr, maxaddr, &start);
					if (rc == 0) {
						goto gotit;
					}
				}
			}
		}
	}
	VMEM_UNLOCK(vm);
#if 1
	if (strat == VM_INSTANTFIT) {
		strat = VM_BESTFIT;
		goto retry_strat;
	}
#endif
	if (align != vm->vm_quantum_mask + 1 || phase != 0 || nocross != 0) {

		/*
		 * XXX should try to import a region large enough to
		 * satisfy restrictions?
		 */

		goto fail;
	}
	/* XXX eeek, minaddr & maxaddr not respected */
	if (vmem_import(vm, size, flags) == 0) {
		goto retry;
	}
	/* XXX */

	if ((flags & VM_SLEEP) != 0) {
#if defined(_KERNEL) && !defined(_RUMPKERNEL)
		mutex_spin_enter(&uvm_fpageqlock);
		uvm_kick_pdaemon();
		mutex_spin_exit(&uvm_fpageqlock);
#endif
		VMEM_LOCK(vm);
		VMEM_CONDVAR_WAIT(vm);
		VMEM_UNLOCK(vm);
		goto retry;
	}
fail:
	bt_free(vm, btnew);
	bt_free(vm, btnew2);
	return ENOMEM;

gotit:
	KASSERT(bt->bt_type == BT_TYPE_FREE);
	KASSERT(bt->bt_size >= size);
	bt_remfree(vm, bt);
	vmem_check(vm);
	if (bt->bt_start != start) {
		btnew2->bt_type = BT_TYPE_FREE;
		btnew2->bt_start = bt->bt_start;
		btnew2->bt_size = start - bt->bt_start;
		bt->bt_start = start;
		bt->bt_size -= btnew2->bt_size;
		bt_insfree(vm, btnew2);
		bt_insseg(vm, btnew2, TAILQ_PREV(bt, vmem_seglist, bt_seglist));
		btnew2 = NULL;
		vmem_check(vm);
	}
	KASSERT(bt->bt_start == start);
	if (bt->bt_size != size && bt->bt_size - size > vm->vm_quantum_mask) {
		/* split */
		btnew->bt_type = BT_TYPE_BUSY;
		btnew->bt_start = bt->bt_start;
		btnew->bt_size = size;
		bt->bt_start = bt->bt_start + size;
		bt->bt_size -= size;
		bt_insfree(vm, bt);
		bt_insseg(vm, btnew, TAILQ_PREV(bt, vmem_seglist, bt_seglist));
		bt_insbusy(vm, btnew);
		vmem_check(vm);
		VMEM_UNLOCK(vm);
	} else {
		bt->bt_type = BT_TYPE_BUSY;
		bt_insbusy(vm, bt);
		vmem_check(vm);
		VMEM_UNLOCK(vm);
		bt_free(vm, btnew);
		btnew = bt;
	}
	if (btnew2 != NULL) {
		bt_free(vm, btnew2);
	}
	KASSERT(btnew->bt_size >= size);
	btnew->bt_type = BT_TYPE_BUSY;

	if (addrp != NULL)
		*addrp = btnew->bt_start;
	return 0;
}
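
/*
 * The two splits above cover the general case.  E.g. satisfying an
 * aligned request from a free tag [0x1000, 0x1fff] with start chosen
 * at 0x1800 and size 0x200 leaves:
 *
 *	[0x1000, 0x17ff]  free (btnew2, the leading remainder)
 *	[0x1800, 0x19ff]  busy (the allocation)
 *	[0x1a00, 0x1fff]  free (the trailing remainder kept in bt)
 *
 * When start coincides with bt_start, or the trailing remainder is
 * smaller than the quantum, the corresponding tag is not needed and
 * is returned to the per-arena free tag list.
 */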

/*
 * vmem_free: free the resource to the arena.
 */

void
vmem_free(vmem_t *vm, vmem_addr_t addr, vmem_size_t size)
{

	KASSERT(size > 0);

#if defined(QCACHE)
	if (size <= vm->vm_qcache_max) {
		int qidx = (size + vm->vm_quantum_mask) >> vm->vm_quantum_shift;
		qcache_t *qc = vm->vm_qcache[qidx - 1];

		pool_cache_put(qc->qc_cache, (void *)addr);
		return;
	}
#endif /* defined(QCACHE) */

	vmem_xfree(vm, addr, size);
}

void
vmem_xfree(vmem_t *vm, vmem_addr_t addr, vmem_size_t size)
{
	bt_t *bt;
	bt_t *t;
	LIST_HEAD(, vmem_btag) tofree;

	LIST_INIT(&tofree);

	KASSERT(size > 0);

	VMEM_LOCK(vm);

	bt = bt_lookupbusy(vm, addr);
	KASSERT(bt != NULL);
	KASSERT(bt->bt_start == addr);
	KASSERT(bt->bt_size == vmem_roundup_size(vm, size) ||
	    bt->bt_size - vmem_roundup_size(vm, size) <= vm->vm_quantum_mask);
	KASSERT(bt->bt_type == BT_TYPE_BUSY);
	bt_rembusy(vm, bt);
	bt->bt_type = BT_TYPE_FREE;

	/* coalesce */
	t = TAILQ_NEXT(bt, bt_seglist);
	if (t != NULL && t->bt_type == BT_TYPE_FREE) {
		KASSERT(BT_END(bt) < t->bt_start);	/* YYY */
		bt_remfree(vm, t);
		bt_remseg(vm, t);
		bt->bt_size += t->bt_size;
		LIST_INSERT_HEAD(&tofree, t, bt_freelist);
	}
	t = TAILQ_PREV(bt, vmem_seglist, bt_seglist);
	if (t != NULL && t->bt_type == BT_TYPE_FREE) {
		KASSERT(BT_END(t) < bt->bt_start);	/* YYY */
		bt_remfree(vm, t);
		bt_remseg(vm, t);
		bt->bt_size += t->bt_size;
		bt->bt_start = t->bt_start;
		LIST_INSERT_HEAD(&tofree, t, bt_freelist);
	}

	t = TAILQ_PREV(bt, vmem_seglist, bt_seglist);
	KASSERT(t != NULL);
	KASSERT(BT_ISSPAN_P(t) || t->bt_type == BT_TYPE_BUSY);
	if (vm->vm_releasefn != NULL && t->bt_type == BT_TYPE_SPAN &&
	    t->bt_size == bt->bt_size) {
		vmem_addr_t spanaddr;
		vmem_size_t spansize;

		KASSERT(t->bt_start == bt->bt_start);
		spanaddr = bt->bt_start;
		spansize = bt->bt_size;
		bt_remseg(vm, bt);
		LIST_INSERT_HEAD(&tofree, bt, bt_freelist);
		bt_remseg(vm, t);
		LIST_INSERT_HEAD(&tofree, t, bt_freelist);
		vm->vm_size -= spansize;
		VMEM_CONDVAR_BROADCAST(vm);
		VMEM_UNLOCK(vm);
		(*vm->vm_releasefn)(vm->vm_arg, spanaddr, spansize);
	} else {
		bt_insfree(vm, bt);
		VMEM_CONDVAR_BROADCAST(vm);
		VMEM_UNLOCK(vm);
	}

	while (!LIST_EMPTY(&tofree)) {
		t = LIST_FIRST(&tofree);
		LIST_REMOVE(t, bt_freelist);
		bt_free(vm, t);
	}
}
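
/*
 * Coalescing in vmem_xfree keeps the segment list canonical: freeing
 * the middle allocation in
 *
 *	... | free 0x100 | busy 0x200 | free 0x300 | ...
 *
 * merges both neighbours into a single free tag of 0x600.  The two
 * absorbed tags are handed back via the tofree list only after the
 * arena lock is dropped, since bt_free takes the lock itself.  If the
 * merged tag ends up covering its whole imported span, the span is
 * returned to the source arena through vm_releasefn instead of being
 * put on a freelist.
 */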

/*
 * vmem_add:
 *
 * => caller must ensure appropriate spl,
 *    if the arena can be accessed from interrupt context.
 */

int
vmem_add(vmem_t *vm, vmem_addr_t addr, vmem_size_t size, vm_flag_t flags)
{

	return vmem_add1(vm, addr, size, flags, BT_TYPE_SPAN_STATIC);
}

/*
 * vmem_size: report the size of an arena.
 *
 * => return the free and/or allocated size, according to typemask.
 */
vmem_size_t
vmem_size(vmem_t *vm, int typemask)
{

	switch (typemask) {
	case VMEM_ALLOC:
		return vm->vm_inuse;
	case VMEM_FREE:
		return vm->vm_size - vm->vm_inuse;
	case VMEM_FREE|VMEM_ALLOC:
		return vm->vm_size;
	default:
		panic("vmem_size");
	}
}

/* ---- rehash */

#if defined(_KERNEL)
static struct callout vmem_rehash_ch;
static int vmem_rehash_interval;
static struct workqueue *vmem_rehash_wq;
static struct work vmem_rehash_wk;

static void
vmem_rehash_all(struct work *wk, void *dummy)
{
	vmem_t *vm;

	KASSERT(wk == &vmem_rehash_wk);
	mutex_enter(&vmem_list_lock);
	LIST_FOREACH(vm, &vmem_list, vm_alllist) {
		size_t desired;
		size_t current;

		if (!VMEM_TRYLOCK(vm)) {
			continue;
		}
		desired = vm->vm_nbusytag;
		current = vm->vm_hashsize;
		VMEM_UNLOCK(vm);

		if (desired > VMEM_HASHSIZE_MAX) {
			desired = VMEM_HASHSIZE_MAX;
		} else if (desired < VMEM_HASHSIZE_MIN) {
			desired = VMEM_HASHSIZE_MIN;
		}
		if (desired > current * 2 || desired * 2 < current) {
			vmem_rehash(vm, desired, VM_NOSLEEP);
		}
	}
	mutex_exit(&vmem_list_lock);

	callout_schedule(&vmem_rehash_ch, vmem_rehash_interval);
}
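
/*
 * The heuristic above aims at one busy tag per hash bucket on
 * average: e.g. an arena holding 10000 busy tags in a 4096-bucket
 * table gets rehashed to 10000 buckets, and the table is shrunk
 * again once the desired size falls below half of the current one.
 */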

static void
vmem_rehash_all_kick(void *dummy)
{

	workqueue_enqueue(vmem_rehash_wq, &vmem_rehash_wk, NULL);
}

void
vmem_rehash_start(void)
{
	int error;

	error = workqueue_create(&vmem_rehash_wq, "vmem_rehash",
	    vmem_rehash_all, NULL, PRI_VM, IPL_SOFTCLOCK, WQ_MPSAFE);
	if (error) {
		panic("%s: workqueue_create %d\n", __func__, error);
	}
	callout_init(&vmem_rehash_ch, CALLOUT_MPSAFE);
	callout_setfunc(&vmem_rehash_ch, vmem_rehash_all_kick, NULL);

	vmem_rehash_interval = hz * 10;
	callout_schedule(&vmem_rehash_ch, vmem_rehash_interval);
}
#endif /* defined(_KERNEL) */

/* ---- debug */

#if defined(DDB) || defined(UNITTEST) || defined(VMEM_SANITY)

static void bt_dump(const bt_t *, void (*)(const char *, ...)
    __printflike(1, 2));

static const char *
bt_type_string(int type)
{
	static const char * const table[] = {
		[BT_TYPE_BUSY] = "busy",
		[BT_TYPE_FREE] = "free",
		[BT_TYPE_SPAN] = "span",
		[BT_TYPE_SPAN_STATIC] = "static span",
	};

	if (type >= __arraycount(table)) {
		return "BOGUS";
	}
	return table[type];
}

static void
bt_dump(const bt_t *bt, void (*pr)(const char *, ...))
{

	(*pr)("\t%p: %" PRIu64 ", %" PRIu64 ", %d(%s)\n",
	    bt, (uint64_t)bt->bt_start, (uint64_t)bt->bt_size,
	    bt->bt_type, bt_type_string(bt->bt_type));
}

static void
vmem_dump(const vmem_t *vm, void (*pr)(const char *, ...)
    __printflike(1, 2))
{
	const bt_t *bt;
	int i;

	(*pr)("vmem %p '%s'\n", vm, vm->vm_name);
	TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
		bt_dump(bt, pr);
	}

	for (i = 0; i < VMEM_MAXORDER; i++) {
		const struct vmem_freelist *fl = &vm->vm_freelist[i];

		if (LIST_EMPTY(fl)) {
			continue;
		}

		(*pr)("freelist[%d]\n", i);
		LIST_FOREACH(bt, fl, bt_freelist) {
			bt_dump(bt, pr);
		}
	}
}

#endif /* defined(DDB) || defined(UNITTEST) || defined(VMEM_SANITY) */

#if defined(DDB)
static bt_t *
vmem_whatis_lookup(vmem_t *vm, uintptr_t addr)
{
	bt_t *bt;

	TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
		if (BT_ISSPAN_P(bt)) {
			continue;
		}
		if (bt->bt_start <= addr && addr <= BT_END(bt)) {
			return bt;
		}
	}

	return NULL;
}

void
vmem_whatis(uintptr_t addr, void (*pr)(const char *, ...))
{
	vmem_t *vm;

	LIST_FOREACH(vm, &vmem_list, vm_alllist) {
		bt_t *bt;

		bt = vmem_whatis_lookup(vm, addr);
		if (bt == NULL) {
			continue;
		}
		(*pr)("%p is %p+%zu in VMEM '%s' (%s)\n",
		    (void *)addr, (void *)bt->bt_start,
		    (size_t)(addr - bt->bt_start), vm->vm_name,
		    (bt->bt_type == BT_TYPE_BUSY) ? "allocated" : "free");
	}
}

void
vmem_printall(const char *modif, void (*pr)(const char *, ...))
{
	const vmem_t *vm;

	LIST_FOREACH(vm, &vmem_list, vm_alllist) {
		vmem_dump(vm, pr);
	}
}

void
vmem_print(uintptr_t addr, const char *modif, void (*pr)(const char *, ...))
{
	const vmem_t *vm = (const void *)addr;

	vmem_dump(vm, pr);
}
#endif /* defined(DDB) */

#if defined(_KERNEL)
#define vmem_printf printf
#else
#include <stdio.h>
#include <stdarg.h>

static void
vmem_printf(const char *fmt, ...)
{
	va_list ap;
	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
}
#endif

#if defined(VMEM_SANITY)

static bool
vmem_check_sanity(vmem_t *vm)
{
	const bt_t *bt, *bt2;

	KASSERT(vm != NULL);

	TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
		if (bt->bt_start > BT_END(bt)) {
			printf("corrupted tag\n");
			bt_dump(bt, vmem_printf);
			return false;
		}
	}
	TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
		TAILQ_FOREACH(bt2, &vm->vm_seglist, bt_seglist) {
			if (bt == bt2) {
				continue;
			}
			if (BT_ISSPAN_P(bt) != BT_ISSPAN_P(bt2)) {
				continue;
			}
			if (bt->bt_start <= BT_END(bt2) &&
			    bt2->bt_start <= BT_END(bt)) {
				printf("overlapping tags\n");
				bt_dump(bt, vmem_printf);
				bt_dump(bt2, vmem_printf);
				return false;
			}
		}
	}

	return true;
}

static void
vmem_check(vmem_t *vm)
{

	if (!vmem_check_sanity(vm)) {
		panic("insanity vmem %p", vm);
	}
}

#endif /* defined(VMEM_SANITY) */

#if defined(UNITTEST)
int
main(void)
{
	int rc;
	vmem_t *vm;
	vmem_addr_t p;
	struct reg {
		vmem_addr_t p;
		vmem_size_t sz;
		bool x;
	} *reg = NULL;
	int nreg = 0;
	int nalloc = 0;
	int nfree = 0;
	vmem_size_t total = 0;
#if 1
	vm_flag_t strat = VM_INSTANTFIT;
#else
	vm_flag_t strat = VM_BESTFIT;
#endif

	vm = vmem_create("test", 0, 0, 1, NULL, NULL, NULL, 0, VM_SLEEP,
#ifdef _KERNEL
	    IPL_NONE
#else
	    0
#endif
	    );
	if (vm == NULL) {
		printf("vmem_create\n");
		exit(EXIT_FAILURE);
	}
	vmem_dump(vm, vmem_printf);

	rc = vmem_add(vm, 0, 50, VM_SLEEP);
	assert(rc == 0);
	rc = vmem_add(vm, 100, 200, VM_SLEEP);
	assert(rc == 0);
	rc = vmem_add(vm, 2000, 1, VM_SLEEP);
	assert(rc == 0);
	rc = vmem_add(vm, 40000, 65536, VM_SLEEP);
	assert(rc == 0);
	rc = vmem_add(vm, 10000, 10000, VM_SLEEP);
	assert(rc == 0);
	rc = vmem_add(vm, 500, 1000, VM_SLEEP);
	assert(rc == 0);
	rc = vmem_add(vm, 0xffffff00, 0x100, VM_SLEEP);
	assert(rc == 0);
	rc = vmem_xalloc(vm, 0x101, 0, 0, 0,
	    0xffffff00, 0xffffffff, strat|VM_SLEEP, &p);
	assert(rc != 0);
	rc = vmem_xalloc(vm, 50, 0, 0, 0, 0, 49, strat|VM_SLEEP, &p);
	assert(rc == 0 && p == 0);
	vmem_xfree(vm, p, 50);
	rc = vmem_xalloc(vm, 25, 0, 0, 0, 0, 24, strat|VM_SLEEP, &p);
	assert(rc == 0 && p == 0);
	rc = vmem_xalloc(vm, 0x100, 0, 0, 0,
	    0xffffff01, 0xffffffff, strat|VM_SLEEP, &p);
	assert(rc != 0);
	rc = vmem_xalloc(vm, 0x100, 0, 0, 0,
	    0xffffff00, 0xfffffffe, strat|VM_SLEEP, &p);
	assert(rc != 0);
	rc = vmem_xalloc(vm, 0x100, 0, 0, 0,
	    0xffffff00, 0xffffffff, strat|VM_SLEEP, &p);
	assert(rc == 0);
	vmem_dump(vm, vmem_printf);
	for (;;) {
		struct reg *r;
		int t = rand() % 100;

		if (t > 45) {
			/* alloc */
			vmem_size_t sz = rand() % 500 + 1;
			bool x;
			vmem_size_t align, phase, nocross;
			vmem_addr_t minaddr, maxaddr;

			if (t > 70) {
				x = true;
				/* XXX */
				align = 1 << (rand() % 15);
				phase = rand() % 65536;
				nocross = 1 << (rand() % 15);
				if (align <= phase) {
					phase = 0;
				}
				if (VMEM_CROSS_P(phase, phase + sz - 1,
				    nocross)) {
					nocross = 0;
				}
				do {
					minaddr = rand() % 50000;
					maxaddr = rand() % 70000;
				} while (minaddr > maxaddr);
				printf("=== xalloc %" PRIu64
				    " align=%" PRIu64 ", phase=%" PRIu64
				    ", nocross=%" PRIu64 ", min=%" PRIu64
				    ", max=%" PRIu64 "\n",
				    (uint64_t)sz,
				    (uint64_t)align,
				    (uint64_t)phase,
				    (uint64_t)nocross,
				    (uint64_t)minaddr,
				    (uint64_t)maxaddr);
				rc = vmem_xalloc(vm, sz, align, phase, nocross,
				    minaddr, maxaddr, strat|VM_SLEEP, &p);
			} else {
				x = false;
				printf("=== alloc %" PRIu64 "\n", (uint64_t)sz);
				rc = vmem_alloc(vm, sz, strat|VM_SLEEP, &p);
			}
			printf("-> %" PRIu64 "\n", (uint64_t)p);
			vmem_dump(vm, vmem_printf);
			if (rc != 0) {
				if (x) {
					continue;
				}
				break;
			}
			nreg++;
			reg = realloc(reg, sizeof(*reg) * nreg);
			r = &reg[nreg - 1];
			r->p = p;
			r->sz = sz;
			r->x = x;
			total += sz;
			nalloc++;
		} else if (nreg != 0) {
			/* free */
			r = &reg[rand() % nreg];
			printf("=== free %" PRIu64 ", %" PRIu64 "\n",
			    (uint64_t)r->p, (uint64_t)r->sz);
			if (r->x) {
				vmem_xfree(vm, r->p, r->sz);
			} else {
				vmem_free(vm, r->p, r->sz);
			}
			total -= r->sz;
			vmem_dump(vm, vmem_printf);
			*r = reg[nreg - 1];
			nreg--;
			nfree++;
		}
		printf("total=%" PRIu64 "\n", (uint64_t)total);
	}
	fprintf(stderr, "total=%" PRIu64 ", nalloc=%d, nfree=%d\n",
	    (uint64_t)total, nalloc, nfree);
	exit(EXIT_SUCCESS);
}
#endif /* defined(UNITTEST) */