/*	$NetBSD: subr_vmem.c,v 1.65 2011/10/20 03:05:14 yamt Exp $	*/

/*-
 * Copyright (c)2006,2007,2008,2009 YAMAMOTO Takashi,
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * reference:
 * -	Magazines and Vmem: Extending the Slab Allocator
 *	to Many CPUs and Arbitrary Resources
 *	http://www.usenix.org/event/usenix01/bonwick.html
 *
 * todo:
 * -	decide how to import segments for vmem_xalloc.
 * -	don't rely on malloc(9).
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_vmem.c,v 1.65 2011/10/20 03:05:14 yamt Exp $");

#if defined(_KERNEL)
#include "opt_ddb.h"
#define	QCACHE
#endif /* defined(_KERNEL) */

#include <sys/param.h>
#include <sys/hash.h>
#include <sys/queue.h>
#include <sys/bitops.h>

#if defined(_KERNEL)
#include <sys/systm.h>
#include <sys/kernel.h>	/* hz */
#include <sys/callout.h>
#include <sys/malloc.h>
#include <sys/once.h>
#include <sys/pool.h>
#include <sys/vmem.h>
#include <sys/workqueue.h>
#else /* defined(_KERNEL) */
#include "../sys/vmem.h"
#endif /* defined(_KERNEL) */

#if defined(_KERNEL)
#define	LOCK_DECL(name)		\
    kmutex_t name; char lockpad[COHERENCY_UNIT - sizeof(kmutex_t)]
#else /* defined(_KERNEL) */
#include <errno.h>
#include <assert.h>
#include <stdlib.h>
#include <string.h>

#define	UNITTEST
#define	KASSERT(a)		assert(a)
#define	LOCK_DECL(name)		/* nothing */
#define	mutex_init(a, b, c)	/* nothing */
#define	mutex_destroy(a)	/* nothing */
#define	mutex_enter(a)		/* nothing */
#define	mutex_tryenter(a)	true
#define	mutex_exit(a)		/* nothing */
#define	mutex_owned(a)		/* nothing */
#define	ASSERT_SLEEPABLE()	/* nothing */
#define	panic(...)		printf(__VA_ARGS__); abort()
#endif /* defined(_KERNEL) */

struct vmem;
struct vmem_btag;

#if defined(VMEM_SANITY)
static void vmem_check(vmem_t *);
#else /* defined(VMEM_SANITY) */
#define	vmem_check(vm)	/* nothing */
#endif /* defined(VMEM_SANITY) */

#define	VMEM_MAXORDER		(sizeof(vmem_size_t) * CHAR_BIT)

#define	VMEM_HASHSIZE_MIN	1	/* XXX */
#define	VMEM_HASHSIZE_MAX	65536	/* XXX */
#define	VMEM_HASHSIZE_INIT	128

#define	VM_FITMASK	(VM_BESTFIT | VM_INSTANTFIT)

CIRCLEQ_HEAD(vmem_seglist, vmem_btag);
LIST_HEAD(vmem_freelist, vmem_btag);
LIST_HEAD(vmem_hashlist, vmem_btag);

#if defined(QCACHE)
#define	VMEM_QCACHE_IDX_MAX	32

#define	QC_NAME_MAX	16

struct qcache {
	pool_cache_t qc_cache;
	vmem_t *qc_vmem;
	char qc_name[QC_NAME_MAX];
};
typedef struct qcache qcache_t;
#define	QC_POOL_TO_QCACHE(pool)	((qcache_t *)(pool->pr_qcache))
#endif /* defined(QCACHE) */

#define	VMEM_NAME_MAX	16

/* vmem arena */
struct vmem {
	LOCK_DECL(vm_lock);
	int (*vm_importfn)(void *, vmem_size_t, vmem_size_t *,
	    vm_flag_t, vmem_addr_t *);
	void (*vm_releasefn)(void *, vmem_addr_t, vmem_size_t);
	vmem_t *vm_source;
	void *vm_arg;
	struct vmem_seglist vm_seglist;
	struct vmem_freelist vm_freelist[VMEM_MAXORDER];
	size_t vm_hashsize;
	size_t vm_nbusytag;
	struct vmem_hashlist *vm_hashlist;
	size_t vm_quantum_mask;
	int vm_quantum_shift;
	char vm_name[VMEM_NAME_MAX+1];
	LIST_ENTRY(vmem) vm_alllist;

#if defined(QCACHE)
	/* quantum cache */
	size_t vm_qcache_max;
	struct pool_allocator vm_qcache_allocator;
	qcache_t vm_qcache_store[VMEM_QCACHE_IDX_MAX];
	qcache_t *vm_qcache[VMEM_QCACHE_IDX_MAX];
#endif /* defined(QCACHE) */
};

#define	VMEM_LOCK(vm)		mutex_enter(&vm->vm_lock)
#define	VMEM_TRYLOCK(vm)	mutex_tryenter(&vm->vm_lock)
#define	VMEM_UNLOCK(vm)		mutex_exit(&vm->vm_lock)
#define	VMEM_LOCK_INIT(vm, ipl)	mutex_init(&vm->vm_lock, MUTEX_DEFAULT, ipl)
#define	VMEM_LOCK_DESTROY(vm)	mutex_destroy(&vm->vm_lock)
#define	VMEM_ASSERT_LOCKED(vm)	KASSERT(mutex_owned(&vm->vm_lock))

/* boundary tag */
struct vmem_btag {
	CIRCLEQ_ENTRY(vmem_btag) bt_seglist;
	union {
		LIST_ENTRY(vmem_btag) u_freelist; /* BT_TYPE_FREE */
		LIST_ENTRY(vmem_btag) u_hashlist; /* BT_TYPE_BUSY */
	} bt_u;
#define	bt_hashlist	bt_u.u_hashlist
#define	bt_freelist	bt_u.u_freelist
	vmem_addr_t bt_start;
	vmem_size_t bt_size;
	int bt_type;
};

#define	BT_TYPE_SPAN		1
#define	BT_TYPE_SPAN_STATIC	2
#define	BT_TYPE_FREE		3
#define	BT_TYPE_BUSY		4
#define	BT_ISSPAN_P(bt)	((bt)->bt_type <= BT_TYPE_SPAN_STATIC)

#define	BT_END(bt)	((bt)->bt_start + (bt)->bt_size - 1)

typedef struct vmem_btag bt_t;

/* ---- misc */

#define	VMEM_ALIGNUP(addr, align) \
	(-(-(addr) & -(align)))

#define	VMEM_CROSS_P(addr1, addr2, boundary) \
	((((addr1) ^ (addr2)) & -(boundary)) != 0)

#define	ORDER2SIZE(order)	((vmem_size_t)1 << (order))
#define	SIZE2ORDER(size)	((int)ilog2(size))

#if !defined(_KERNEL)
#define	xmalloc(sz, flags)	malloc(sz)
#define	xfree(p)		free(p)
#define	bt_alloc(vm, flags)	malloc(sizeof(bt_t))
#define	bt_free(vm, bt)		free(bt)
#else /* !defined(_KERNEL) */

static MALLOC_DEFINE(M_VMEM, "vmem", "vmem");

static inline void *
xmalloc(size_t sz, vm_flag_t flags)
{
	return malloc(sz, M_VMEM,
	    M_CANFAIL | ((flags & VM_SLEEP) ? M_WAITOK : M_NOWAIT));
}

static inline void
xfree(void *p)
{
	free(p, M_VMEM);
}

/* ---- boundary tag */

static struct pool_cache bt_cache;

static inline bt_t *
bt_alloc(vmem_t *vm, vm_flag_t flags)
{
	return pool_cache_get(&bt_cache,
	    (flags & VM_SLEEP) ? PR_WAITOK : PR_NOWAIT);
}

static inline void
bt_free(vmem_t *vm, bt_t *bt)
{
	pool_cache_put(&bt_cache, bt);
}

#endif /* !defined(_KERNEL) */

/*
 * freelist[0] ... [1, 1]
 * freelist[1] ... [2, 3]
 * freelist[2] ... [4, 7]
 * freelist[3] ... [8, 15]
 *  :
 * freelist[n] ... [(1 << n), (1 << (n + 1)) - 1]
 *  :
 */

static struct vmem_freelist *
bt_freehead_tofree(vmem_t *vm, vmem_size_t size)
{
	const vmem_size_t qsize = size >> vm->vm_quantum_shift;
	const int idx = SIZE2ORDER(qsize);

	KASSERT(size != 0 && qsize != 0);
	KASSERT((size & vm->vm_quantum_mask) == 0);
	KASSERT(idx >= 0);
	KASSERT(idx < VMEM_MAXORDER);

	return &vm->vm_freelist[idx];
}

/*
 * bt_freehead_toalloc: return the freelist for the given size and allocation
 * strategy.
 *
 * For VM_INSTANTFIT, return the list on which any block is large enough
 * for the requested size.  Otherwise, return the first list which may
 * contain blocks large enough for the requested size.
 */

static struct vmem_freelist *
bt_freehead_toalloc(vmem_t *vm, vmem_size_t size, vm_flag_t strat)
{
	const vmem_size_t qsize = size >> vm->vm_quantum_shift;
	int idx = SIZE2ORDER(qsize);

	KASSERT(size != 0 && qsize != 0);
	KASSERT((size & vm->vm_quantum_mask) == 0);

	if (strat == VM_INSTANTFIT && ORDER2SIZE(idx) != qsize) {
		idx++;
		/* check too large request? */
	}
	KASSERT(idx >= 0);
	KASSERT(idx < VMEM_MAXORDER);

	return &vm->vm_freelist[idx];
}
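
/*
 * Worked example (illustrative numbers): with a 4 KB quantum
 * (vm_quantum_shift == 12), a 24 KB request has qsize == 6, so
 * SIZE2ORDER(6) == 2 and a free block of that size is kept on
 * freelist[2] ([4, 7] quanta).  For VM_INSTANTFIT the same request is
 * taken from freelist[3] ([8, 15] quanta), because only there is every
 * block guaranteed to be big enough; ORDER2SIZE(2) != 6 is exactly the
 * "idx++" case above.
 */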

/* ---- boundary tag hash */

static struct vmem_hashlist *
bt_hashhead(vmem_t *vm, vmem_addr_t addr)
{
	struct vmem_hashlist *list;
	unsigned int hash;

	hash = hash32_buf(&addr, sizeof(addr), HASH32_BUF_INIT);
	list = &vm->vm_hashlist[hash % vm->vm_hashsize];

	return list;
}

static bt_t *
bt_lookupbusy(vmem_t *vm, vmem_addr_t addr)
{
	struct vmem_hashlist *list;
	bt_t *bt;

	list = bt_hashhead(vm, addr);
	LIST_FOREACH(bt, list, bt_hashlist) {
		if (bt->bt_start == addr) {
			break;
		}
	}

	return bt;
}

static void
bt_rembusy(vmem_t *vm, bt_t *bt)
{

	KASSERT(vm->vm_nbusytag > 0);
	vm->vm_nbusytag--;
	LIST_REMOVE(bt, bt_hashlist);
}

static void
bt_insbusy(vmem_t *vm, bt_t *bt)
{
	struct vmem_hashlist *list;

	KASSERT(bt->bt_type == BT_TYPE_BUSY);

	list = bt_hashhead(vm, bt->bt_start);
	LIST_INSERT_HEAD(list, bt, bt_hashlist);
	vm->vm_nbusytag++;
}

/* ---- boundary tag list */

static void
bt_remseg(vmem_t *vm, bt_t *bt)
{

	CIRCLEQ_REMOVE(&vm->vm_seglist, bt, bt_seglist);
}

static void
bt_insseg(vmem_t *vm, bt_t *bt, bt_t *prev)
{

	CIRCLEQ_INSERT_AFTER(&vm->vm_seglist, prev, bt, bt_seglist);
}

static void
bt_insseg_tail(vmem_t *vm, bt_t *bt)
{

	CIRCLEQ_INSERT_TAIL(&vm->vm_seglist, bt, bt_seglist);
}

static void
bt_remfree(vmem_t *vm, bt_t *bt)
{

	KASSERT(bt->bt_type == BT_TYPE_FREE);

	LIST_REMOVE(bt, bt_freelist);
}

static void
bt_insfree(vmem_t *vm, bt_t *bt)
{
	struct vmem_freelist *list;

	list = bt_freehead_tofree(vm, bt->bt_size);
	LIST_INSERT_HEAD(list, bt, bt_freelist);
}

/* ---- vmem internal functions */

#if defined(_KERNEL)
static kmutex_t vmem_list_lock;
static LIST_HEAD(, vmem) vmem_list = LIST_HEAD_INITIALIZER(vmem_list);
#endif /* defined(_KERNEL) */

#if defined(QCACHE)
static inline vm_flag_t
prf_to_vmf(int prflags)
{
	vm_flag_t vmflags;

	KASSERT((prflags & ~(PR_LIMITFAIL | PR_WAITOK | PR_NOWAIT)) == 0);
	if ((prflags & PR_WAITOK) != 0) {
		vmflags = VM_SLEEP;
	} else {
		vmflags = VM_NOSLEEP;
	}
	return vmflags;
}

static inline int
vmf_to_prf(vm_flag_t vmflags)
{
	int prflags;

	if ((vmflags & VM_SLEEP) != 0) {
		prflags = PR_WAITOK;
	} else {
		prflags = PR_NOWAIT;
	}
	return prflags;
}

static size_t
qc_poolpage_size(size_t qcache_max)
{
	int i;

	for (i = 0; ORDER2SIZE(i) <= qcache_max * 3; i++) {
		/* nothing */
	}
	return ORDER2SIZE(i);
}

static void *
qc_poolpage_alloc(struct pool *pool, int prflags)
{
	qcache_t *qc = QC_POOL_TO_QCACHE(pool);
	vmem_t *vm = qc->qc_vmem;
	vmem_addr_t addr;

	if (vmem_alloc(vm, pool->pr_alloc->pa_pagesz,
	    prf_to_vmf(prflags) | VM_INSTANTFIT, &addr) != 0)
		return NULL;
	return (void *)addr;
}

static void
qc_poolpage_free(struct pool *pool, void *addr)
{
	qcache_t *qc = QC_POOL_TO_QCACHE(pool);
	vmem_t *vm = qc->qc_vmem;

	vmem_free(vm, (vmem_addr_t)addr, pool->pr_alloc->pa_pagesz);
}
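
/*
 * Sizing example (illustrative numbers): qc_poolpage_size() returns the
 * smallest power of two strictly greater than qcache_max * 3, so for
 * qcache_max == 32 KB each quantum cache pool page is 128 KB, carved
 * out of the arena by qc_poolpage_alloc(), large enough to hold roughly
 * three of the largest cached objects.
 */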

static void
qc_init(vmem_t *vm, size_t qcache_max, int ipl)
{
	qcache_t *prevqc;
	struct pool_allocator *pa;
	int qcache_idx_max;
	int i;

	KASSERT((qcache_max & vm->vm_quantum_mask) == 0);
	if (qcache_max > (VMEM_QCACHE_IDX_MAX << vm->vm_quantum_shift)) {
		qcache_max = VMEM_QCACHE_IDX_MAX << vm->vm_quantum_shift;
	}
	vm->vm_qcache_max = qcache_max;
	pa = &vm->vm_qcache_allocator;
	memset(pa, 0, sizeof(*pa));
	pa->pa_alloc = qc_poolpage_alloc;
	pa->pa_free = qc_poolpage_free;
	pa->pa_pagesz = qc_poolpage_size(qcache_max);

	qcache_idx_max = qcache_max >> vm->vm_quantum_shift;
	prevqc = NULL;
	for (i = qcache_idx_max; i > 0; i--) {
		qcache_t *qc = &vm->vm_qcache_store[i - 1];
		size_t size = i << vm->vm_quantum_shift;

		qc->qc_vmem = vm;
		snprintf(qc->qc_name, sizeof(qc->qc_name), "%s-%zu",
		    vm->vm_name, size);
		qc->qc_cache = pool_cache_init(size,
		    ORDER2SIZE(vm->vm_quantum_shift), 0,
		    PR_NOALIGN | PR_NOTOUCH /* XXX */,
		    qc->qc_name, pa, ipl, NULL, NULL, NULL);
		KASSERT(qc->qc_cache != NULL);	/* XXX */
		if (prevqc != NULL &&
		    qc->qc_cache->pc_pool.pr_itemsperpage ==
		    prevqc->qc_cache->pc_pool.pr_itemsperpage) {
			pool_cache_destroy(qc->qc_cache);
			vm->vm_qcache[i - 1] = prevqc;
			continue;
		}
		qc->qc_cache->pc_pool.pr_qcache = qc;
		vm->vm_qcache[i - 1] = qc;
		prevqc = qc;
	}
}

static void
qc_destroy(vmem_t *vm)
{
	const qcache_t *prevqc;
	int i;
	int qcache_idx_max;

	qcache_idx_max = vm->vm_qcache_max >> vm->vm_quantum_shift;
	prevqc = NULL;
	for (i = 0; i < qcache_idx_max; i++) {
		qcache_t *qc = vm->vm_qcache[i];

		if (prevqc == qc) {
			continue;
		}
		pool_cache_destroy(qc->qc_cache);
		prevqc = qc;
	}
}

static bool
qc_reap(vmem_t *vm)
{
	const qcache_t *prevqc;
	int i;
	int qcache_idx_max;
	bool didsomething = false;

	qcache_idx_max = vm->vm_qcache_max >> vm->vm_quantum_shift;
	prevqc = NULL;
	for (i = 0; i < qcache_idx_max; i++) {
		qcache_t *qc = vm->vm_qcache[i];

		if (prevqc == qc) {
			continue;
		}
		if (pool_cache_reclaim(qc->qc_cache) != 0) {
			didsomething = true;
		}
		prevqc = qc;
	}

	return didsomething;
}
#endif /* defined(QCACHE) */

#if defined(_KERNEL)
static int
vmem_init(void)
{

	mutex_init(&vmem_list_lock, MUTEX_DEFAULT, IPL_NONE);
	pool_cache_bootstrap(&bt_cache, sizeof(bt_t), 0, 0, 0, "vmembt",
	    NULL, IPL_VM, NULL, NULL, NULL);
	return 0;
}
#endif /* defined(_KERNEL) */

static int
vmem_add1(vmem_t *vm, vmem_addr_t addr, vmem_size_t size, vm_flag_t flags,
    int spanbttype)
{
	bt_t *btspan;
	bt_t *btfree;

	KASSERT((flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
	KASSERT((~flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
	KASSERT(spanbttype == BT_TYPE_SPAN ||
	    spanbttype == BT_TYPE_SPAN_STATIC);

	btspan = bt_alloc(vm, flags);
	if (btspan == NULL) {
		return ENOMEM;
	}
	btfree = bt_alloc(vm, flags);
	if (btfree == NULL) {
		bt_free(vm, btspan);
		return ENOMEM;
	}

	btspan->bt_type = spanbttype;
	btspan->bt_start = addr;
	btspan->bt_size = size;

	btfree->bt_type = BT_TYPE_FREE;
	btfree->bt_start = addr;
	btfree->bt_size = size;

	VMEM_LOCK(vm);
	bt_insseg_tail(vm, btspan);
	bt_insseg(vm, btfree, btspan);
	bt_insfree(vm, btfree);
	VMEM_UNLOCK(vm);

	return 0;
}
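
/*
 * Tag layout example (illustrative addresses): vmem_add1(vm, 0x1000,
 * 0x3000, ...) appends two boundary tags describing the same range:
 *
 *	BT_TYPE_SPAN	[0x1000 .. 0x3fff]	span bookkeeping
 *	BT_TYPE_FREE	[0x1000 .. 0x3fff]	allocatable, on a freelist
 *
 * Allocations only ever split and retype FREE/BUSY tags; the SPAN tag
 * stays intact so that vmem_xfree() can notice when an imported span
 * has become entirely free again and return it via vm_releasefn.
 */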

static void
vmem_destroy1(vmem_t *vm)
{

#if defined(QCACHE)
	qc_destroy(vm);
#endif /* defined(QCACHE) */
	if (vm->vm_hashlist != NULL) {
		int i;

		for (i = 0; i < vm->vm_hashsize; i++) {
			bt_t *bt;

			while ((bt = LIST_FIRST(&vm->vm_hashlist[i])) != NULL) {
				KASSERT(bt->bt_type == BT_TYPE_SPAN_STATIC);
				bt_free(vm, bt);
			}
		}
		xfree(vm->vm_hashlist);
	}
	VMEM_LOCK_DESTROY(vm);
	xfree(vm);
}

static int
vmem_import(vmem_t *vm, vmem_size_t size, vm_flag_t flags)
{
	vmem_addr_t addr;
	int rc;

	if (vm->vm_importfn == NULL) {
		return EINVAL;
	}

	rc = (*vm->vm_importfn)(vm->vm_arg, size, &size, flags, &addr);
	if (rc != 0) {
		return ENOMEM;
	}

	if (vmem_add1(vm, addr, size, flags, BT_TYPE_SPAN) != 0) {
		(*vm->vm_releasefn)(vm->vm_arg, addr, size);
		return ENOMEM;
	}

	return 0;
}

static int
vmem_rehash(vmem_t *vm, size_t newhashsize, vm_flag_t flags)
{
	bt_t *bt;
	int i;
	struct vmem_hashlist *newhashlist;
	struct vmem_hashlist *oldhashlist;
	size_t oldhashsize;

	KASSERT(newhashsize > 0);

	newhashlist =
	    xmalloc(sizeof(struct vmem_hashlist) * newhashsize, flags);
	if (newhashlist == NULL) {
		return ENOMEM;
	}
	for (i = 0; i < newhashsize; i++) {
		LIST_INIT(&newhashlist[i]);
	}

	if (!VMEM_TRYLOCK(vm)) {
		xfree(newhashlist);
		return EBUSY;
	}
	oldhashlist = vm->vm_hashlist;
	oldhashsize = vm->vm_hashsize;
	vm->vm_hashlist = newhashlist;
	vm->vm_hashsize = newhashsize;
	if (oldhashlist == NULL) {
		VMEM_UNLOCK(vm);
		return 0;
	}
	for (i = 0; i < oldhashsize; i++) {
		while ((bt = LIST_FIRST(&oldhashlist[i])) != NULL) {
			bt_rembusy(vm, bt); /* XXX */
			bt_insbusy(vm, bt);
		}
	}
	VMEM_UNLOCK(vm);

	xfree(oldhashlist);

	return 0;
}

/*
 * vmem_fit: check if a bt can satisfy the given restrictions.
 *
 * it's the caller's responsibility to ensure the region is big enough
 * before calling us.
 */

static int
vmem_fit(const bt_t *bt, vmem_size_t size, vmem_size_t align,
    vmem_size_t phase, vmem_size_t nocross,
    vmem_addr_t minaddr, vmem_addr_t maxaddr, vmem_addr_t *addrp)
{
	vmem_addr_t start;
	vmem_addr_t end;

	KASSERT(size > 0);
	KASSERT(bt->bt_size >= size); /* caller's responsibility */

	/*
	 * XXX assumption: vmem_addr_t and vmem_size_t are
	 * unsigned integers of the same size.
	 */

	start = bt->bt_start;
	if (start < minaddr) {
		start = minaddr;
	}
	end = BT_END(bt);
	if (end > maxaddr) {
		end = maxaddr;
	}
	if (start > end) {
		return ENOMEM;
	}

	start = VMEM_ALIGNUP(start - phase, align) + phase;
	if (start < bt->bt_start) {
		start += align;
	}
	if (VMEM_CROSS_P(start, start + size - 1, nocross)) {
		KASSERT(align < nocross);
		start = VMEM_ALIGNUP(start - phase, nocross) + phase;
	}
	if (start <= end && end - start >= size - 1) {
		KASSERT((start & (align - 1)) == phase);
		KASSERT(!VMEM_CROSS_P(start, start + size - 1, nocross));
		KASSERT(minaddr <= start);
		KASSERT(maxaddr == 0 || start + size - 1 <= maxaddr);
		KASSERT(bt->bt_start <= start);
		KASSERT(BT_END(bt) - start >= size - 1);
		*addrp = start;
		return 0;
	}
	return ENOMEM;
}
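
/*
 * Fit arithmetic example (illustrative numbers): for a free tag
 * covering [0x10100, 0x10fff], a request with size 0x200, align 0x400,
 * phase 0x100, nocross 0x1000, minaddr 0 and maxaddr VMEM_ADDR_MAX
 * yields
 *
 *	start = VMEM_ALIGNUP(0x10100 - 0x100, 0x400) + 0x100 = 0x10100
 *
 * which satisfies (start & (align - 1)) == phase, and the candidate
 * range [0x10100, 0x102ff] does not cross a 0x1000 boundary, so
 * vmem_fit() succeeds with *addrp == 0x10100.
 */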

/* ---- vmem API */

/*
 * vmem_create: create an arena.
 *
 * => must not be called from interrupt context.
 */

vmem_t *
vmem_create(const char *name, vmem_addr_t base, vmem_size_t size,
    vmem_size_t quantum,
    int (*importfn)(void *, vmem_size_t, vmem_size_t *, vm_flag_t,
	vmem_addr_t *),
    void (*releasefn)(void *, vmem_addr_t, vmem_size_t),
    void *arg, vmem_size_t qcache_max, vm_flag_t flags, int ipl)
{
	vmem_t *vm;
	int i;
#if defined(_KERNEL)
	static ONCE_DECL(control);
#endif /* defined(_KERNEL) */

	KASSERT((flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
	KASSERT((~flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
	KASSERT(quantum > 0);

#if defined(_KERNEL)
	if (RUN_ONCE(&control, vmem_init)) {
		return NULL;
	}
#endif /* defined(_KERNEL) */
	vm = xmalloc(sizeof(*vm), flags);
	if (vm == NULL) {
		return NULL;
	}

	VMEM_LOCK_INIT(vm, ipl);
	strlcpy(vm->vm_name, name, sizeof(vm->vm_name));
	vm->vm_quantum_mask = quantum - 1;
	vm->vm_quantum_shift = SIZE2ORDER(quantum);
	KASSERT(ORDER2SIZE(vm->vm_quantum_shift) == quantum);
	vm->vm_importfn = importfn;
	vm->vm_releasefn = releasefn;
	vm->vm_arg = arg;
	vm->vm_nbusytag = 0;
#if defined(QCACHE)
	qc_init(vm, qcache_max, ipl);
#endif /* defined(QCACHE) */

	CIRCLEQ_INIT(&vm->vm_seglist);
	for (i = 0; i < VMEM_MAXORDER; i++) {
		LIST_INIT(&vm->vm_freelist[i]);
	}
	vm->vm_hashlist = NULL;
	if (vmem_rehash(vm, VMEM_HASHSIZE_INIT, flags)) {
		vmem_destroy1(vm);
		return NULL;
	}

	if (size != 0) {
		if (vmem_add(vm, base, size, flags) != 0) {
			vmem_destroy1(vm);
			return NULL;
		}
	}

#if defined(_KERNEL)
	mutex_enter(&vmem_list_lock);
	LIST_INSERT_HEAD(&vmem_list, vm, vm_alllist);
	mutex_exit(&vmem_list_lock);
#endif /* defined(_KERNEL) */

	return vm;
}

void
vmem_destroy(vmem_t *vm)
{

#if defined(_KERNEL)
	mutex_enter(&vmem_list_lock);
	LIST_REMOVE(vm, vm_alllist);
	mutex_exit(&vmem_list_lock);
#endif /* defined(_KERNEL) */

	vmem_destroy1(vm);
}

vmem_size_t
vmem_roundup_size(vmem_t *vm, vmem_size_t size)
{

	return (size + vm->vm_quantum_mask) & ~vm->vm_quantum_mask;
}
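
/*
 * Usage sketch (illustrative only; "example_arena" and the addresses
 * are hypothetical):
 *
 *	vmem_t *example_arena;
 *	vmem_addr_t va;
 *
 *	example_arena = vmem_create("example", 0x10000000, 0x00100000,
 *	    PAGE_SIZE, NULL, NULL, NULL, 0, VM_SLEEP, IPL_NONE);
 *	if (vmem_alloc(example_arena, 3 * PAGE_SIZE,
 *	    VM_SLEEP | VM_INSTANTFIT, &va) == 0) {
 *		...use [va, va + 3 * PAGE_SIZE)...
 *		vmem_free(example_arena, va, 3 * PAGE_SIZE);
 *	}
 *	vmem_destroy(example_arena);
 */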

/*
 * vmem_alloc:
 *
 * => caller must ensure appropriate spl,
 *    if the arena can be accessed from interrupt context.
 */

int
vmem_alloc(vmem_t *vm, vmem_size_t size, vm_flag_t flags, vmem_addr_t *addrp)
{
	const vm_flag_t strat __unused = flags & VM_FITMASK;

	KASSERT((flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
	KASSERT((~flags & (VM_SLEEP|VM_NOSLEEP)) != 0);

	KASSERT(size > 0);
	KASSERT(strat == VM_BESTFIT || strat == VM_INSTANTFIT);
	if ((flags & VM_SLEEP) != 0) {
		ASSERT_SLEEPABLE();
	}

#if defined(QCACHE)
	if (size <= vm->vm_qcache_max) {
		void *p;
		int qidx = (size + vm->vm_quantum_mask) >> vm->vm_quantum_shift;
		qcache_t *qc = vm->vm_qcache[qidx - 1];

		p = pool_cache_get(qc->qc_cache, vmf_to_prf(flags));
		if (addrp != NULL)
			*addrp = (vmem_addr_t)p;
		return (p == NULL) ? ENOMEM : 0;
	}
#endif /* defined(QCACHE) */

	return vmem_xalloc(vm, size, 0, 0, 0, VMEM_ADDR_MIN, VMEM_ADDR_MAX,
	    flags, addrp);
}
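
/*
 * Constrained allocation sketch (illustrative only; "example_arena"
 * and "va" as in the sketch above): vmem_xalloc() expresses
 * restrictions that vmem_alloc() cannot, e.g. 8 KB aligned to 64 KB
 * that must not cross a 1 MB boundary, anywhere in the arena:
 *
 *	error = vmem_xalloc(example_arena, 0x2000, 0x10000, 0, 0x100000,
 *	    VMEM_ADDR_MIN, VMEM_ADDR_MAX, VM_SLEEP | VM_BESTFIT, &va);
 */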

int
vmem_xalloc(vmem_t *vm, const vmem_size_t size0, vmem_size_t align,
    const vmem_size_t phase, const vmem_size_t nocross,
    const vmem_addr_t minaddr, const vmem_addr_t maxaddr, const vm_flag_t flags,
    vmem_addr_t *addrp)
{
	struct vmem_freelist *list;
	struct vmem_freelist *first;
	struct vmem_freelist *end;
	bt_t *bt;
	bt_t *btnew;
	bt_t *btnew2;
	const vmem_size_t size = vmem_roundup_size(vm, size0);
	vm_flag_t strat = flags & VM_FITMASK;
	vmem_addr_t start;
	int rc;

	KASSERT(size0 > 0);
	KASSERT(size > 0);
	KASSERT(strat == VM_BESTFIT || strat == VM_INSTANTFIT);
	if ((flags & VM_SLEEP) != 0) {
		ASSERT_SLEEPABLE();
	}
	KASSERT((align & vm->vm_quantum_mask) == 0);
	KASSERT((align & (align - 1)) == 0);
	KASSERT((phase & vm->vm_quantum_mask) == 0);
	KASSERT((nocross & vm->vm_quantum_mask) == 0);
	KASSERT((nocross & (nocross - 1)) == 0);
	KASSERT((align == 0 && phase == 0) || phase < align);
	KASSERT(nocross == 0 || nocross >= size);
	KASSERT(minaddr <= maxaddr);
	KASSERT(!VMEM_CROSS_P(phase, phase + size - 1, nocross));

	if (align == 0) {
		align = vm->vm_quantum_mask + 1;
	}

	/*
	 * allocate boundary tags before acquiring the vmem lock.
	 */
	btnew = bt_alloc(vm, flags);
	if (btnew == NULL) {
		return ENOMEM;
	}
	btnew2 = bt_alloc(vm, flags); /* XXX not necessary if no restrictions */
	if (btnew2 == NULL) {
		bt_free(vm, btnew);
		return ENOMEM;
	}

	/*
	 * choose a free block from which we allocate.
	 */
retry_strat:
	first = bt_freehead_toalloc(vm, size, strat);
	end = &vm->vm_freelist[VMEM_MAXORDER];
retry:
	bt = NULL;
	VMEM_LOCK(vm);
	vmem_check(vm);
	if (strat == VM_INSTANTFIT) {
		/*
		 * just choose the first block which satisfies our restrictions.
		 *
		 * note that we don't need to check the size of the blocks
		 * because any blocks found on these lists should be larger
		 * than the given size.
		 */
		for (list = first; list < end; list++) {
			bt = LIST_FIRST(list);
			if (bt != NULL) {
				rc = vmem_fit(bt, size, align, phase,
				    nocross, minaddr, maxaddr, &start);
				if (rc == 0) {
					goto gotit;
				}
				/*
				 * don't bother to follow the bt_freelist link
				 * here.  the list can be very long and we are
				 * told to run fast.  blocks from the later free
				 * lists are larger and have better chances to
				 * satisfy our restrictions.
				 */
			}
		}
	} else { /* VM_BESTFIT */
		/*
		 * we assume that, for space efficiency, it's better to
		 * allocate from a smaller block.  thus we will start searching
		 * from a lower-order list than VM_INSTANTFIT would.
		 * however, don't bother to find the smallest block in a free
		 * list because the list can be very long.  we can revisit it
		 * if/when it turns out to be a problem.
		 *
		 * note that the 'first' list can contain blocks smaller than
		 * the requested size.  thus we need to check bt_size.
		 */
		for (list = first; list < end; list++) {
			LIST_FOREACH(bt, list, bt_freelist) {
				if (bt->bt_size >= size) {
					rc = vmem_fit(bt, size, align, phase,
					    nocross, minaddr, maxaddr, &start);
					if (rc == 0) {
						goto gotit;
					}
				}
			}
		}
	}
	VMEM_UNLOCK(vm);
#if 1
	if (strat == VM_INSTANTFIT) {
		strat = VM_BESTFIT;
		goto retry_strat;
	}
#endif
	if (align != vm->vm_quantum_mask + 1 || phase != 0 ||
	    nocross != 0) {

		/*
		 * XXX should try to import a region large enough to
		 * satisfy restrictions?
		 */

		goto fail;
	}
	/* XXX eeek, minaddr & maxaddr not respected */
	if (vmem_import(vm, size, flags) == 0) {
		goto retry;
	}
	/* XXX */
fail:
	bt_free(vm, btnew);
	bt_free(vm, btnew2);
	return ENOMEM;

gotit:
	KASSERT(bt->bt_type == BT_TYPE_FREE);
	KASSERT(bt->bt_size >= size);
	bt_remfree(vm, bt);
	vmem_check(vm);
	if (bt->bt_start != start) {
		btnew2->bt_type = BT_TYPE_FREE;
		btnew2->bt_start = bt->bt_start;
		btnew2->bt_size = start - bt->bt_start;
		bt->bt_start = start;
		bt->bt_size -= btnew2->bt_size;
		bt_insfree(vm, btnew2);
		bt_insseg(vm, btnew2, CIRCLEQ_PREV(bt, bt_seglist));
		btnew2 = NULL;
		vmem_check(vm);
	}
	KASSERT(bt->bt_start == start);
	if (bt->bt_size != size && bt->bt_size - size > vm->vm_quantum_mask) {
		/* split */
		btnew->bt_type = BT_TYPE_BUSY;
		btnew->bt_start = bt->bt_start;
		btnew->bt_size = size;
		bt->bt_start = bt->bt_start + size;
		bt->bt_size -= size;
		bt_insfree(vm, bt);
		bt_insseg(vm, btnew, CIRCLEQ_PREV(bt, bt_seglist));
		bt_insbusy(vm, btnew);
		vmem_check(vm);
		VMEM_UNLOCK(vm);
	} else {
		bt->bt_type = BT_TYPE_BUSY;
		bt_insbusy(vm, bt);
		vmem_check(vm);
		VMEM_UNLOCK(vm);
		bt_free(vm, btnew);
		btnew = bt;
	}
	if (btnew2 != NULL) {
		bt_free(vm, btnew2);
	}
	KASSERT(btnew->bt_size >= size);
	btnew->bt_type = BT_TYPE_BUSY;

	if (addrp != NULL)
		*addrp = btnew->bt_start;
	return 0;
}
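
/*
 * Split example (illustrative addresses): when the chosen free tag
 * starts before the fitted address, the allocation carves it into up
 * to three tags.  For a free tag [0x10000, 0x17fff] and a 0x2000-byte
 * request fitted at 0x12000:
 *
 *	before:	FREE [0x10000 .. 0x17fff]
 *	after:	FREE [0x10000 .. 0x11fff]	btnew2, leading remainder
 *		BUSY [0x12000 .. 0x13fff]	the allocation
 *		FREE [0x14000 .. 0x17fff]	trailing remainder
 *
 * btnew2 is needed only when such a leading remainder exists, which is
 * why it is freed unused on the common unconstrained path.
 */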

/*
 * vmem_free:
 *
 * => caller must ensure appropriate spl,
 *    if the arena can be accessed from interrupt context.
 */

void
vmem_free(vmem_t *vm, vmem_addr_t addr, vmem_size_t size)
{

	KASSERT(size > 0);

#if defined(QCACHE)
	if (size <= vm->vm_qcache_max) {
		int qidx = (size + vm->vm_quantum_mask) >> vm->vm_quantum_shift;
		qcache_t *qc = vm->vm_qcache[qidx - 1];

		pool_cache_put(qc->qc_cache, (void *)addr);
		return;
	}
#endif /* defined(QCACHE) */

	vmem_xfree(vm, addr, size);
}

void
vmem_xfree(vmem_t *vm, vmem_addr_t addr, vmem_size_t size)
{
	bt_t *bt;
	bt_t *t;

	KASSERT(size > 0);

	VMEM_LOCK(vm);

	bt = bt_lookupbusy(vm, addr);
	KASSERT(bt != NULL);
	KASSERT(bt->bt_start == addr);
	KASSERT(bt->bt_size == vmem_roundup_size(vm, size) ||
	    bt->bt_size - vmem_roundup_size(vm, size) <= vm->vm_quantum_mask);
	KASSERT(bt->bt_type == BT_TYPE_BUSY);
	bt_rembusy(vm, bt);
	bt->bt_type = BT_TYPE_FREE;

	/* coalesce */
	t = CIRCLEQ_NEXT(bt, bt_seglist);
	if (t != NULL && t->bt_type == BT_TYPE_FREE) {
		KASSERT(BT_END(bt) < t->bt_start);	/* YYY */
		bt_remfree(vm, t);
		bt_remseg(vm, t);
		bt->bt_size += t->bt_size;
		bt_free(vm, t);
	}
	t = CIRCLEQ_PREV(bt, bt_seglist);
	if (t != NULL && t->bt_type == BT_TYPE_FREE) {
		KASSERT(BT_END(t) < bt->bt_start);	/* YYY */
		bt_remfree(vm, t);
		bt_remseg(vm, t);
		bt->bt_size += t->bt_size;
		bt->bt_start = t->bt_start;
		bt_free(vm, t);
	}

	t = CIRCLEQ_PREV(bt, bt_seglist);
	KASSERT(t != NULL);
	KASSERT(BT_ISSPAN_P(t) || t->bt_type == BT_TYPE_BUSY);
	if (vm->vm_releasefn != NULL && t->bt_type == BT_TYPE_SPAN &&
	    t->bt_size == bt->bt_size) {
		vmem_addr_t spanaddr;
		vmem_size_t spansize;

		KASSERT(t->bt_start == bt->bt_start);
		spanaddr = bt->bt_start;
		spansize = bt->bt_size;
		bt_remseg(vm, bt);
		bt_free(vm, bt);
		bt_remseg(vm, t);
		bt_free(vm, t);
		VMEM_UNLOCK(vm);
		(*vm->vm_releasefn)(vm->vm_arg, spanaddr, spansize);
	} else {
		bt_insfree(vm, bt);
		VMEM_UNLOCK(vm);
	}
}

/*
 * vmem_add:
 *
 * => caller must ensure appropriate spl,
 *    if the arena can be accessed from interrupt context.
 */

int
vmem_add(vmem_t *vm, vmem_addr_t addr, vmem_size_t size, vm_flag_t flags)
{

	return vmem_add1(vm, addr, size, flags, BT_TYPE_SPAN_STATIC);
}
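
/*
 * Span bootstrap sketch (illustrative only; "example_arena" as above,
 * addresses hypothetical): an arena created with size 0 owns no space
 * until spans are added, either statically or via vm_importfn:
 *
 *	error = vmem_add(example_arena, 0x20000000, 0x00200000, VM_SLEEP);
 *
 * Statically added spans (BT_TYPE_SPAN_STATIC) are never handed back
 * to vm_releasefn, unlike spans brought in by vmem_import().
 */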

/*
 * vmem_reap: reap unused resources.
 *
 * => return true if we successfully reaped something.
 */

bool
vmem_reap(vmem_t *vm)
{
	bool didsomething = false;

#if defined(QCACHE)
	didsomething = qc_reap(vm);
#endif /* defined(QCACHE) */
	return didsomething;
}

/* ---- rehash */

#if defined(_KERNEL)
static struct callout vmem_rehash_ch;
static int vmem_rehash_interval;
static struct workqueue *vmem_rehash_wq;
static struct work vmem_rehash_wk;

static void
vmem_rehash_all(struct work *wk, void *dummy)
{
	vmem_t *vm;

	KASSERT(wk == &vmem_rehash_wk);
	mutex_enter(&vmem_list_lock);
	LIST_FOREACH(vm, &vmem_list, vm_alllist) {
		size_t desired;
		size_t current;

		if (!VMEM_TRYLOCK(vm)) {
			continue;
		}
		desired = vm->vm_nbusytag;
		current = vm->vm_hashsize;
		VMEM_UNLOCK(vm);

		if (desired > VMEM_HASHSIZE_MAX) {
			desired = VMEM_HASHSIZE_MAX;
		} else if (desired < VMEM_HASHSIZE_MIN) {
			desired = VMEM_HASHSIZE_MIN;
		}
		if (desired > current * 2 || desired * 2 < current) {
			vmem_rehash(vm, desired, VM_NOSLEEP);
		}
	}
	mutex_exit(&vmem_list_lock);

	callout_schedule(&vmem_rehash_ch, vmem_rehash_interval);
}

static void
vmem_rehash_all_kick(void *dummy)
{

	workqueue_enqueue(vmem_rehash_wq, &vmem_rehash_wk, NULL);
}

void
vmem_rehash_start(void)
{
	int error;

	error = workqueue_create(&vmem_rehash_wq, "vmem_rehash",
	    vmem_rehash_all, NULL, PRI_VM, IPL_SOFTCLOCK, WQ_MPSAFE);
	if (error) {
		panic("%s: workqueue_create %d\n", __func__, error);
	}
	callout_init(&vmem_rehash_ch, CALLOUT_MPSAFE);
	callout_setfunc(&vmem_rehash_ch, vmem_rehash_all_kick, NULL);

	vmem_rehash_interval = hz * 10;
	callout_schedule(&vmem_rehash_ch, vmem_rehash_interval);
}
#endif /* defined(_KERNEL) */
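
/*
 * Resize policy example (illustrative numbers): an arena holding 300
 * busy tags in a 128-bucket hash is rehashed because 300 > 128 * 2;
 * one holding 50 busy tags in a 256-bucket hash is rehashed because
 * 50 * 2 < 256.  Anything in between is left alone until the next
 * vmem_rehash_interval tick.
 */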

/* ---- debug */

#if defined(DDB) || defined(UNITTEST) || defined(VMEM_SANITY)

static void bt_dump(const bt_t *, void (*)(const char *, ...));

static const char *
bt_type_string(int type)
{
	static const char * const table[] = {
		[BT_TYPE_BUSY] = "busy",
		[BT_TYPE_FREE] = "free",
		[BT_TYPE_SPAN] = "span",
		[BT_TYPE_SPAN_STATIC] = "static span",
	};

	if (type >= __arraycount(table)) {
		return "BOGUS";
	}
	return table[type];
}

static void
bt_dump(const bt_t *bt, void (*pr)(const char *, ...))
{

	(*pr)("\t%p: %" PRIu64 ", %" PRIu64 ", %d(%s)\n",
	    bt, (uint64_t)bt->bt_start, (uint64_t)bt->bt_size,
	    bt->bt_type, bt_type_string(bt->bt_type));
}

static void
vmem_dump(const vmem_t *vm, void (*pr)(const char *, ...))
{
	const bt_t *bt;
	int i;

	(*pr)("vmem %p '%s'\n", vm, vm->vm_name);
	CIRCLEQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
		bt_dump(bt, pr);
	}

	for (i = 0; i < VMEM_MAXORDER; i++) {
		const struct vmem_freelist *fl = &vm->vm_freelist[i];

		if (LIST_EMPTY(fl)) {
			continue;
		}

		(*pr)("freelist[%d]\n", i);
		LIST_FOREACH(bt, fl, bt_freelist) {
			bt_dump(bt, pr);
		}
	}
}

#endif /* defined(DDB) || defined(UNITTEST) || defined(VMEM_SANITY) */

#if defined(DDB)
static bt_t *
vmem_whatis_lookup(vmem_t *vm, uintptr_t addr)
{
	bt_t *bt;

	CIRCLEQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
		if (BT_ISSPAN_P(bt)) {
			continue;
		}
		if (bt->bt_start <= addr && addr <= BT_END(bt)) {
			return bt;
		}
	}

	return NULL;
}

void
vmem_whatis(uintptr_t addr, void (*pr)(const char *, ...))
{
	vmem_t *vm;

	LIST_FOREACH(vm, &vmem_list, vm_alllist) {
		bt_t *bt;

		bt = vmem_whatis_lookup(vm, addr);
		if (bt == NULL) {
			continue;
		}
		(*pr)("%p is %p+%zu in VMEM '%s' (%s)\n",
		    (void *)addr, (void *)bt->bt_start,
		    (size_t)(addr - bt->bt_start), vm->vm_name,
		    (bt->bt_type == BT_TYPE_BUSY) ? "allocated" : "free");
	}
}

void
vmem_printall(const char *modif, void (*pr)(const char *, ...))
{
	const vmem_t *vm;

	LIST_FOREACH(vm, &vmem_list, vm_alllist) {
		vmem_dump(vm, pr);
	}
}

void
vmem_print(uintptr_t addr, const char *modif, void (*pr)(const char *, ...))
{
	const vmem_t *vm = (const void *)addr;

	vmem_dump(vm, pr);
}
#endif /* defined(DDB) */

#if defined(_KERNEL)
#define vmem_printf printf
#else
#include <stdio.h>
#include <stdarg.h>

static void
vmem_printf(const char *fmt, ...)
{
	va_list ap;
	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
}
#endif

#if defined(VMEM_SANITY)

static bool
vmem_check_sanity(vmem_t *vm)
{
	const bt_t *bt, *bt2;

	KASSERT(vm != NULL);

	CIRCLEQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
		if (bt->bt_start > BT_END(bt)) {
			printf("corrupted tag\n");
			bt_dump(bt, vmem_printf);
			return false;
		}
	}
	CIRCLEQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
		CIRCLEQ_FOREACH(bt2, &vm->vm_seglist, bt_seglist) {
			if (bt == bt2) {
				continue;
			}
			if (BT_ISSPAN_P(bt) != BT_ISSPAN_P(bt2)) {
				continue;
			}
			if (bt->bt_start <= BT_END(bt2) &&
			    bt2->bt_start <= BT_END(bt)) {
				printf("overlapping tags\n");
				bt_dump(bt, vmem_printf);
				bt_dump(bt2, vmem_printf);
				return false;
			}
		}
	}

	return true;
}

static void
vmem_check(vmem_t *vm)
{

	if (!vmem_check_sanity(vm)) {
		panic("insanity vmem %p", vm);
	}
}

#endif /* defined(VMEM_SANITY) */
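
/*
 * Overlap check example (illustrative addresses): two non-span tags
 * overlap iff bt->bt_start <= BT_END(bt2) && bt2->bt_start <= BT_END(bt).
 * E.g. [0x1000, 0x1fff] and [0x1800, 0x27ff] trip the check above,
 * while [0x1000, 0x1fff] and [0x2000, 0x2fff] do not.
 */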

#if defined(UNITTEST)
int
main(void)
{
	int rc;
	vmem_t *vm;
	vmem_addr_t p;
	struct reg {
		vmem_addr_t p;
		vmem_size_t sz;
		bool x;
	} *reg = NULL;
	int nreg = 0;
	int nalloc = 0;
	int nfree = 0;
	vmem_size_t total = 0;
#if 1
	vm_flag_t strat = VM_INSTANTFIT;
#else
	vm_flag_t strat = VM_BESTFIT;
#endif

	vm = vmem_create("test", 0, 0, 1, NULL, NULL, NULL, 0, VM_SLEEP,
#ifdef _KERNEL
	    IPL_NONE
#else
	    0
#endif
	    );
	if (vm == NULL) {
		printf("vmem_create\n");
		exit(EXIT_FAILURE);
	}
	vmem_dump(vm, vmem_printf);

	rc = vmem_add(vm, 0, 50, VM_SLEEP);
	assert(rc == 0);
	rc = vmem_add(vm, 100, 200, VM_SLEEP);
	assert(rc == 0);
	rc = vmem_add(vm, 2000, 1, VM_SLEEP);
	assert(rc == 0);
	rc = vmem_add(vm, 40000, 65536, VM_SLEEP);
	assert(rc == 0);
	rc = vmem_add(vm, 10000, 10000, VM_SLEEP);
	assert(rc == 0);
	rc = vmem_add(vm, 500, 1000, VM_SLEEP);
	assert(rc == 0);
	rc = vmem_add(vm, 0xffffff00, 0x100, VM_SLEEP);
	assert(rc == 0);
	rc = vmem_xalloc(vm, 0x101, 0, 0, 0,
	    0xffffff00, 0xffffffff, strat|VM_SLEEP, &p);
	assert(rc != 0);
	rc = vmem_xalloc(vm, 50, 0, 0, 0, 0, 49, strat|VM_SLEEP, &p);
	assert(rc == 0 && p == 0);
	vmem_xfree(vm, p, 50);
	rc = vmem_xalloc(vm, 25, 0, 0, 0, 0, 24, strat|VM_SLEEP, &p);
	assert(rc == 0 && p == 0);
	rc = vmem_xalloc(vm, 0x100, 0, 0, 0,
	    0xffffff01, 0xffffffff, strat|VM_SLEEP, &p);
	assert(rc != 0);
	rc = vmem_xalloc(vm, 0x100, 0, 0, 0,
	    0xffffff00, 0xfffffffe, strat|VM_SLEEP, &p);
	assert(rc != 0);
	rc = vmem_xalloc(vm, 0x100, 0, 0, 0,
	    0xffffff00, 0xffffffff, strat|VM_SLEEP, &p);
	assert(rc == 0);
	vmem_dump(vm, vmem_printf);
	for (;;) {
		struct reg *r;
		int t = rand() % 100;

		if (t > 45) {
			/* alloc */
			vmem_size_t sz = rand() % 500 + 1;
			bool x;
			vmem_size_t align, phase, nocross;
			vmem_addr_t minaddr, maxaddr;

			if (t > 70) {
				x = true;
				/* XXX */
				align = 1 << (rand() % 15);
				phase = rand() % 65536;
				nocross = 1 << (rand() % 15);
				if (align <= phase) {
					phase = 0;
				}
				if (VMEM_CROSS_P(phase, phase + sz - 1,
				    nocross)) {
					nocross = 0;
				}
				do {
					minaddr = rand() % 50000;
					maxaddr = rand() % 70000;
				} while (minaddr > maxaddr);
				printf("=== xalloc %" PRIu64
				    " align=%" PRIu64 ", phase=%" PRIu64
				    ", nocross=%" PRIu64 ", min=%" PRIu64
				    ", max=%" PRIu64 "\n",
				    (uint64_t)sz,
				    (uint64_t)align,
				    (uint64_t)phase,
				    (uint64_t)nocross,
				    (uint64_t)minaddr,
				    (uint64_t)maxaddr);
				rc = vmem_xalloc(vm, sz, align, phase, nocross,
				    minaddr, maxaddr, strat|VM_SLEEP, &p);
			} else {
				x = false;
				printf("=== alloc %" PRIu64 "\n", (uint64_t)sz);
				rc = vmem_alloc(vm, sz, strat|VM_SLEEP, &p);
			}
			printf("-> %" PRIu64 "\n", (uint64_t)p);
			vmem_dump(vm, vmem_printf);
			if (rc != 0) {
				if (x) {
					continue;
				}
				break;
			}
			nreg++;
			reg = realloc(reg, sizeof(*reg) * nreg);
			r = &reg[nreg - 1];
			r->p = p;
			r->sz = sz;
			r->x = x;
			total += sz;
			nalloc++;
		} else if (nreg != 0) {
			/* free */
			r = &reg[rand() % nreg];
			printf("=== free %" PRIu64 ", %" PRIu64 "\n",
			    (uint64_t)r->p, (uint64_t)r->sz);
			if (r->x) {
				vmem_xfree(vm, r->p, r->sz);
			} else {
				vmem_free(vm, r->p, r->sz);
			}
			total -= r->sz;
			vmem_dump(vm, vmem_printf);
			*r = reg[nreg - 1];
			nreg--;
			nfree++;
		}
		printf("total=%" PRIu64 "\n", (uint64_t)total);
	}
	fprintf(stderr, "total=%" PRIu64 ", nalloc=%d, nfree=%d\n",
	    (uint64_t)total, nalloc, nfree);
	exit(EXIT_SUCCESS);
}
#endif /* defined(UNITTEST) */