/*	$NetBSD: subr_vmem.c,v 1.62 2011/10/02 21:32:48 rmind Exp $	*/

/*-
 * Copyright (c)2006,2007,2008,2009 YAMAMOTO Takashi,
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * reference:
 * -	Magazines and Vmem: Extending the Slab Allocator
 *	to Many CPUs and Arbitrary Resources
 *	http://www.usenix.org/event/usenix01/bonwick.html
 *
 * todo:
 * -	decide how to import segments for vmem_xalloc.
 * -	don't rely on malloc(9).
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_vmem.c,v 1.62 2011/10/02 21:32:48 rmind Exp $");

#if defined(_KERNEL)
#include "opt_ddb.h"
#define	QCACHE
#endif /* defined(_KERNEL) */

#include <sys/param.h>
#include <sys/hash.h>
#include <sys/queue.h>
#include <sys/bitops.h>

#if defined(_KERNEL)
#include <sys/systm.h>
#include <sys/kernel.h>	/* hz */
#include <sys/callout.h>
#include <sys/malloc.h>
#include <sys/once.h>
#include <sys/pool.h>
#include <sys/vmem.h>
#include <sys/workqueue.h>
#else /* defined(_KERNEL) */
#include "../sys/vmem.h"
#endif /* defined(_KERNEL) */

#if defined(_KERNEL)
#define	LOCK_DECL(name)	\
    kmutex_t name; char lockpad[COHERENCY_UNIT - sizeof(kmutex_t)]
#else /* defined(_KERNEL) */
#include <errno.h>
#include <assert.h>
#include <stdlib.h>

#define	UNITTEST
#define	KASSERT(a)		assert(a)
#define	LOCK_DECL(name)		/* nothing */
#define	mutex_init(a, b, c)	/* nothing */
#define	mutex_destroy(a)	/* nothing */
#define	mutex_enter(a)		/* nothing */
#define	mutex_tryenter(a)	true
#define	mutex_exit(a)		/* nothing */
#define	mutex_owned(a)		/* nothing */
#define	ASSERT_SLEEPABLE()	/* nothing */
#define	panic(...)		printf(__VA_ARGS__); abort()
#endif /* defined(_KERNEL) */

struct vmem;
struct vmem_btag;

#if defined(VMEM_SANITY)
static void vmem_check(vmem_t *);
#else /* defined(VMEM_SANITY) */
#define	vmem_check(vm)	/* nothing */
#endif /* defined(VMEM_SANITY) */

#define	VMEM_MAXORDER		(sizeof(vmem_size_t) * CHAR_BIT)

#define	VMEM_HASHSIZE_MIN	1	/* XXX */
#define	VMEM_HASHSIZE_MAX	65536	/* XXX */
#define	VMEM_HASHSIZE_INIT	128

#define	VM_FITMASK	(VM_BESTFIT | VM_INSTANTFIT)

CIRCLEQ_HEAD(vmem_seglist, vmem_btag);
LIST_HEAD(vmem_freelist, vmem_btag);
LIST_HEAD(vmem_hashlist, vmem_btag);

#if defined(QCACHE)
#define	VMEM_QCACHE_IDX_MAX	32

#define	QC_NAME_MAX	16

struct qcache {
	pool_cache_t qc_cache;
	vmem_t *qc_vmem;
	char qc_name[QC_NAME_MAX];
};
typedef struct qcache qcache_t;
#define	QC_POOL_TO_QCACHE(pool)	((qcache_t *)(pool->pr_qcache))
#endif /* defined(QCACHE) */

/* vmem arena */
struct vmem {
	LOCK_DECL(vm_lock);
	int (*vm_importfn)(void *, vmem_size_t, vmem_size_t *,
	    vm_flag_t, vmem_addr_t *);
	void (*vm_releasefn)(void *, vmem_addr_t, vmem_size_t);
	vmem_t *vm_source;
	void *vm_arg;
	struct vmem_seglist vm_seglist;
	struct vmem_freelist vm_freelist[VMEM_MAXORDER];
	size_t vm_hashsize;
	size_t vm_nbusytag;
	struct vmem_hashlist *vm_hashlist;
	size_t vm_quantum_mask;
	int vm_quantum_shift;
	const char *vm_name;
	LIST_ENTRY(vmem) vm_alllist;

#if defined(QCACHE)
	/* quantum cache */
	size_t vm_qcache_max;
	struct pool_allocator vm_qcache_allocator;
	qcache_t vm_qcache_store[VMEM_QCACHE_IDX_MAX];
	qcache_t *vm_qcache[VMEM_QCACHE_IDX_MAX];
#endif /* defined(QCACHE) */
};

#define	VMEM_LOCK(vm)		mutex_enter(&vm->vm_lock)
#define	VMEM_TRYLOCK(vm)	mutex_tryenter(&vm->vm_lock)
#define	VMEM_UNLOCK(vm)		mutex_exit(&vm->vm_lock)
#define	VMEM_LOCK_INIT(vm, ipl)	mutex_init(&vm->vm_lock, MUTEX_DEFAULT, ipl)
#define	VMEM_LOCK_DESTROY(vm)	mutex_destroy(&vm->vm_lock)
#define	VMEM_ASSERT_LOCKED(vm)	KASSERT(mutex_owned(&vm->vm_lock))

/* boundary tag */
struct vmem_btag {
	CIRCLEQ_ENTRY(vmem_btag) bt_seglist;
	union {
		LIST_ENTRY(vmem_btag) u_freelist; /* BT_TYPE_FREE */
		LIST_ENTRY(vmem_btag) u_hashlist; /* BT_TYPE_BUSY */
	} bt_u;
#define	bt_hashlist	bt_u.u_hashlist
#define	bt_freelist	bt_u.u_freelist
	vmem_addr_t bt_start;
	vmem_size_t bt_size;
	int bt_type;
};

#define	BT_TYPE_SPAN		1
#define	BT_TYPE_SPAN_STATIC	2
#define	BT_TYPE_FREE		3
#define	BT_TYPE_BUSY		4
#define	BT_ISSPAN_P(bt)	((bt)->bt_type <= BT_TYPE_SPAN_STATIC)

#define	BT_END(bt)	((bt)->bt_start + (bt)->bt_size - 1)

typedef struct vmem_btag bt_t;
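
/*
 * an illustrative sketch of the tag layout (assuming a quantum of 1 and
 * made-up addresses): after vmem_add(vm, 0x1000, 0x100, VM_SLEEP) and one
 * 0x20-byte allocation, the segment list holds, in address order,
 *
 *	SPAN_STATIC	[0x1000 .. 0x10ff]	the whole added span
 *	BUSY		[0x1000 .. 0x101f]	the allocation, hashed by start
 *	FREE		[0x1020 .. 0x10ff]	the remainder, on a freelist
 *
 * note that BT_END() is the inclusive last address, so BT_END of the
 * span above is 0x10ff, not 0x1100.
 */
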
/* ---- misc */

#define	VMEM_ALIGNUP(addr, align) \
	(-(-(addr) & -(align)))

#define	VMEM_CROSS_P(addr1, addr2, boundary) \
	((((addr1) ^ (addr2)) & -(boundary)) != 0)

#define	ORDER2SIZE(order)	((vmem_size_t)1 << (order))
#define	SIZE2ORDER(size)	((int)ilog2(size))
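
/*
 * a few worked examples of the macros above:
 * VMEM_ALIGNUP(0x1224, 0x100) is 0x1300 and VMEM_ALIGNUP(0x1300, 0x100)
 * stays 0x1300.  VMEM_CROSS_P(0x0ff0, 0x100f, 0x1000) is true because the
 * two addresses lie in different 0x1000-sized blocks, while
 * VMEM_CROSS_P(0x1000, 0x100f, 0x1000) is false.  SIZE2ORDER() is the
 * index of the highest set bit, e.g. SIZE2ORDER(7) == 2, and
 * ORDER2SIZE(2) == 4.
 */
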
#if !defined(_KERNEL)
#define	xmalloc(sz, flags)	malloc(sz)
#define	xfree(p)		free(p)
#define	bt_alloc(vm, flags)	malloc(sizeof(bt_t))
#define	bt_free(vm, bt)		free(bt)
#else /* !defined(_KERNEL) */

static MALLOC_DEFINE(M_VMEM, "vmem", "vmem");

static inline void *
xmalloc(size_t sz, vm_flag_t flags)
{
	return malloc(sz, M_VMEM,
	    M_CANFAIL | ((flags & VM_SLEEP) ? M_WAITOK : M_NOWAIT));
}

static inline void
xfree(void *p)
{
	return free(p, M_VMEM);
}

/* ---- boundary tag */

static struct pool_cache bt_cache;

static inline bt_t *
bt_alloc(vmem_t *vm, vm_flag_t flags)
{
	return pool_cache_get(&bt_cache,
	    (flags & VM_SLEEP) ? PR_WAITOK : PR_NOWAIT);
}

static inline void
bt_free(vmem_t *vm, bt_t *bt)
{
	pool_cache_put(&bt_cache, bt);
}

#endif /* !defined(_KERNEL) */

/*
 * freelist[0] ... [1, 1]
 * freelist[1] ... [2, 3]
 * freelist[2] ... [4, 7]
 * freelist[3] ... [8, 15]
 *  :
 * freelist[n] ... [(1 << n), (1 << (n + 1)) - 1]
 *  :
 */

static struct vmem_freelist *
bt_freehead_tofree(vmem_t *vm, vmem_size_t size)
{
	const vmem_size_t qsize = size >> vm->vm_quantum_shift;
	const int idx = SIZE2ORDER(qsize);

	KASSERT(size != 0 && qsize != 0);
	KASSERT((size & vm->vm_quantum_mask) == 0);
	KASSERT(idx >= 0);
	KASSERT(idx < VMEM_MAXORDER);

	return &vm->vm_freelist[idx];
}

/*
 * bt_freehead_toalloc: return the freelist for the given size and allocation
 * strategy.
 *
 * for VM_INSTANTFIT, return the list in which any blocks are large enough
 * for the requested size.  otherwise, return the list which can have blocks
 * large enough for the requested size.
 */

static struct vmem_freelist *
bt_freehead_toalloc(vmem_t *vm, vmem_size_t size, vm_flag_t strat)
{
	const vmem_size_t qsize = size >> vm->vm_quantum_shift;
	int idx = SIZE2ORDER(qsize);

	KASSERT(size != 0 && qsize != 0);
	KASSERT((size & vm->vm_quantum_mask) == 0);

	if (strat == VM_INSTANTFIT && ORDER2SIZE(idx) != qsize) {
		idx++;
		/* check too large request? */
	}
	KASSERT(idx >= 0);
	KASSERT(idx < VMEM_MAXORDER);

	return &vm->vm_freelist[idx];
}
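
/*
 * a short example of the index selection above, assuming a quantum of 16
 * (vm_quantum_shift == 4): a request of size 112 has qsize 7, so
 * bt_freehead_tofree() files a 112-byte free tag on freelist[2]
 * (SIZE2ORDER(7) == 2).  for allocation, VM_BESTFIT starts scanning at
 * freelist[2], which may also hold blocks as small as 64 bytes, while
 * VM_INSTANTFIT rounds up to freelist[3], whose blocks are all at least
 * 128 bytes and therefore always big enough.
 */
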
/* ---- boundary tag hash */

static struct vmem_hashlist *
bt_hashhead(vmem_t *vm, vmem_addr_t addr)
{
	struct vmem_hashlist *list;
	unsigned int hash;

	hash = hash32_buf(&addr, sizeof(addr), HASH32_BUF_INIT);
	list = &vm->vm_hashlist[hash % vm->vm_hashsize];

	return list;
}

static bt_t *
bt_lookupbusy(vmem_t *vm, vmem_addr_t addr)
{
	struct vmem_hashlist *list;
	bt_t *bt;

	list = bt_hashhead(vm, addr);
	LIST_FOREACH(bt, list, bt_hashlist) {
		if (bt->bt_start == addr) {
			break;
		}
	}

	return bt;
}

static void
bt_rembusy(vmem_t *vm, bt_t *bt)
{

	KASSERT(vm->vm_nbusytag > 0);
	vm->vm_nbusytag--;
	LIST_REMOVE(bt, bt_hashlist);
}

static void
bt_insbusy(vmem_t *vm, bt_t *bt)
{
	struct vmem_hashlist *list;

	KASSERT(bt->bt_type == BT_TYPE_BUSY);

	list = bt_hashhead(vm, bt->bt_start);
	LIST_INSERT_HEAD(list, bt, bt_hashlist);
	vm->vm_nbusytag++;
}

/* ---- boundary tag list */

static void
bt_remseg(vmem_t *vm, bt_t *bt)
{

	CIRCLEQ_REMOVE(&vm->vm_seglist, bt, bt_seglist);
}

static void
bt_insseg(vmem_t *vm, bt_t *bt, bt_t *prev)
{

	CIRCLEQ_INSERT_AFTER(&vm->vm_seglist, prev, bt, bt_seglist);
}

static void
bt_insseg_tail(vmem_t *vm, bt_t *bt)
{

	CIRCLEQ_INSERT_TAIL(&vm->vm_seglist, bt, bt_seglist);
}

static void
bt_remfree(vmem_t *vm, bt_t *bt)
{

	KASSERT(bt->bt_type == BT_TYPE_FREE);

	LIST_REMOVE(bt, bt_freelist);
}

static void
bt_insfree(vmem_t *vm, bt_t *bt)
{
	struct vmem_freelist *list;

	list = bt_freehead_tofree(vm, bt->bt_size);
	LIST_INSERT_HEAD(list, bt, bt_freelist);
}

/* ---- vmem internal functions */

#if defined(_KERNEL)
static kmutex_t vmem_list_lock;
static LIST_HEAD(, vmem) vmem_list = LIST_HEAD_INITIALIZER(vmem_list);
#endif /* defined(_KERNEL) */

#if defined(QCACHE)
static inline vm_flag_t
prf_to_vmf(int prflags)
{
	vm_flag_t vmflags;

	KASSERT((prflags & ~(PR_LIMITFAIL | PR_WAITOK | PR_NOWAIT)) == 0);
	if ((prflags & PR_WAITOK) != 0) {
		vmflags = VM_SLEEP;
	} else {
		vmflags = VM_NOSLEEP;
	}
	return vmflags;
}

static inline int
vmf_to_prf(vm_flag_t vmflags)
{
	int prflags;

	if ((vmflags & VM_SLEEP) != 0) {
		prflags = PR_WAITOK;
	} else {
		prflags = PR_NOWAIT;
	}
	return prflags;
}

static size_t
qc_poolpage_size(size_t qcache_max)
{
	int i;

	for (i = 0; ORDER2SIZE(i) <= qcache_max * 3; i++) {
		/* nothing */
	}
	return ORDER2SIZE(i);
}
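
/*
 * the pool page size chosen above is the smallest power of two larger
 * than three times qcache_max, so that about three of the largest cached
 * items fit on one pool page; e.g. for qcache_max == 1024 the loop stops
 * at the first power of two greater than 3072 and returns 4096.
 */
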
static void *
qc_poolpage_alloc(struct pool *pool, int prflags)
{
	qcache_t *qc = QC_POOL_TO_QCACHE(pool);
	vmem_t *vm = qc->qc_vmem;
	vmem_addr_t addr;

	if (vmem_alloc(vm, pool->pr_alloc->pa_pagesz,
	    prf_to_vmf(prflags) | VM_INSTANTFIT, &addr) != 0)
		return NULL;
	return (void *)addr;
}

static void
qc_poolpage_free(struct pool *pool, void *addr)
{
	qcache_t *qc = QC_POOL_TO_QCACHE(pool);
	vmem_t *vm = qc->qc_vmem;

	vmem_free(vm, (vmem_addr_t)addr, pool->pr_alloc->pa_pagesz);
}

static void
qc_init(vmem_t *vm, size_t qcache_max, int ipl)
{
	qcache_t *prevqc;
	struct pool_allocator *pa;
	int qcache_idx_max;
	int i;

	KASSERT((qcache_max & vm->vm_quantum_mask) == 0);
	if (qcache_max > (VMEM_QCACHE_IDX_MAX << vm->vm_quantum_shift)) {
		qcache_max = VMEM_QCACHE_IDX_MAX << vm->vm_quantum_shift;
	}
	vm->vm_qcache_max = qcache_max;
	pa = &vm->vm_qcache_allocator;
	memset(pa, 0, sizeof(*pa));
	pa->pa_alloc = qc_poolpage_alloc;
	pa->pa_free = qc_poolpage_free;
	pa->pa_pagesz = qc_poolpage_size(qcache_max);

	qcache_idx_max = qcache_max >> vm->vm_quantum_shift;
	prevqc = NULL;
	for (i = qcache_idx_max; i > 0; i--) {
		qcache_t *qc = &vm->vm_qcache_store[i - 1];
		size_t size = i << vm->vm_quantum_shift;

		qc->qc_vmem = vm;
		snprintf(qc->qc_name, sizeof(qc->qc_name), "%s-%zu",
		    vm->vm_name, size);
		qc->qc_cache = pool_cache_init(size,
		    ORDER2SIZE(vm->vm_quantum_shift), 0,
		    PR_NOALIGN | PR_NOTOUCH /* XXX */,
		    qc->qc_name, pa, ipl, NULL, NULL, NULL);
		KASSERT(qc->qc_cache != NULL);	/* XXX */
		if (prevqc != NULL &&
		    qc->qc_cache->pc_pool.pr_itemsperpage ==
		    prevqc->qc_cache->pc_pool.pr_itemsperpage) {
			pool_cache_destroy(qc->qc_cache);
			vm->vm_qcache[i - 1] = prevqc;
			continue;
		}
		qc->qc_cache->pc_pool.pr_qcache = qc;
		vm->vm_qcache[i - 1] = qc;
		prevqc = qc;
	}
}
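
/*
 * for instance (a sketch, assuming a quantum of 16 and qcache_max of 64),
 * qc_init() creates pool caches serving sizes 16, 32, 48 and 64, named
 * "<arena name>-16" .. "<arena name>-64", each backed by pages carved out
 * of the arena via qc_poolpage_alloc().  when a smaller size would end up
 * with the same pr_itemsperpage as the next larger one, its cache is
 * dropped and its vm_qcache[] slot points at the larger size's cache.
 */
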
static void
qc_destroy(vmem_t *vm)
{
	const qcache_t *prevqc;
	int i;
	int qcache_idx_max;

	qcache_idx_max = vm->vm_qcache_max >> vm->vm_quantum_shift;
	prevqc = NULL;
	for (i = 0; i < qcache_idx_max; i++) {
		qcache_t *qc = vm->vm_qcache[i];

		if (prevqc == qc) {
			continue;
		}
		pool_cache_destroy(qc->qc_cache);
		prevqc = qc;
	}
}

static bool
qc_reap(vmem_t *vm)
{
	const qcache_t *prevqc;
	int i;
	int qcache_idx_max;
	bool didsomething = false;

	qcache_idx_max = vm->vm_qcache_max >> vm->vm_quantum_shift;
	prevqc = NULL;
	for (i = 0; i < qcache_idx_max; i++) {
		qcache_t *qc = vm->vm_qcache[i];

		if (prevqc == qc) {
			continue;
		}
		if (pool_cache_reclaim(qc->qc_cache) != 0) {
			didsomething = true;
		}
		prevqc = qc;
	}

	return didsomething;
}
#endif /* defined(QCACHE) */

#if defined(_KERNEL)
static int
vmem_init(void)
{

	mutex_init(&vmem_list_lock, MUTEX_DEFAULT, IPL_NONE);
	pool_cache_bootstrap(&bt_cache, sizeof(bt_t), 0, 0, 0, "vmembt",
	    NULL, IPL_VM, NULL, NULL, NULL);
	return 0;
}
#endif /* defined(_KERNEL) */

static int
vmem_add1(vmem_t *vm, vmem_addr_t addr, vmem_size_t size, vm_flag_t flags,
    int spanbttype)
{
	bt_t *btspan;
	bt_t *btfree;

	KASSERT((flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
	KASSERT((~flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
	KASSERT(spanbttype == BT_TYPE_SPAN ||
	    spanbttype == BT_TYPE_SPAN_STATIC);

	btspan = bt_alloc(vm, flags);
	if (btspan == NULL) {
		return ENOMEM;
	}
	btfree = bt_alloc(vm, flags);
	if (btfree == NULL) {
		bt_free(vm, btspan);
		return ENOMEM;
	}

	btspan->bt_type = spanbttype;
	btspan->bt_start = addr;
	btspan->bt_size = size;

	btfree->bt_type = BT_TYPE_FREE;
	btfree->bt_start = addr;
	btfree->bt_size = size;

	VMEM_LOCK(vm);
	bt_insseg_tail(vm, btspan);
	bt_insseg(vm, btfree, btspan);
	bt_insfree(vm, btfree);
	VMEM_UNLOCK(vm);

	return 0;
}
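
/*
 * e.g. vmem_add1(vm, 0x1000, 0x1000, VM_SLEEP, BT_TYPE_SPAN_STATIC)
 * appends a SPAN_STATIC tag covering [0x1000 .. 0x1fff] followed by a
 * FREE tag for the same range; only the FREE tag is put on a freelist,
 * so the span tag itself is never handed out by an allocation.
 */
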
static void
vmem_destroy1(vmem_t *vm)
{

#if defined(QCACHE)
	qc_destroy(vm);
#endif /* defined(QCACHE) */
	if (vm->vm_hashlist != NULL) {
		int i;

		for (i = 0; i < vm->vm_hashsize; i++) {
			bt_t *bt;

			while ((bt = LIST_FIRST(&vm->vm_hashlist[i])) != NULL) {
				KASSERT(bt->bt_type == BT_TYPE_SPAN_STATIC);
				bt_free(vm, bt);
			}
		}
		xfree(vm->vm_hashlist);
	}
	VMEM_LOCK_DESTROY(vm);
	xfree(vm);
}

static int
vmem_import(vmem_t *vm, vmem_size_t size, vm_flag_t flags)
{
	vmem_addr_t addr;
	int rc;

	if (vm->vm_importfn == NULL) {
		return EINVAL;
	}

	rc = (*vm->vm_importfn)(vm->vm_arg, size, &size, flags, &addr);
	if (rc != 0) {
		return ENOMEM;
	}

	if (vmem_add1(vm, addr, size, flags, BT_TYPE_SPAN) != 0) {
		(*vm->vm_releasefn)(vm->vm_arg, addr, size);
		return ENOMEM;
	}

	return 0;
}

static int
vmem_rehash(vmem_t *vm, size_t newhashsize, vm_flag_t flags)
{
	bt_t *bt;
	int i;
	struct vmem_hashlist *newhashlist;
	struct vmem_hashlist *oldhashlist;
	size_t oldhashsize;

	KASSERT(newhashsize > 0);

	newhashlist =
	    xmalloc(sizeof(struct vmem_hashlist *) * newhashsize, flags);
	if (newhashlist == NULL) {
		return ENOMEM;
	}
	for (i = 0; i < newhashsize; i++) {
		LIST_INIT(&newhashlist[i]);
	}

	if (!VMEM_TRYLOCK(vm)) {
		xfree(newhashlist);
		return EBUSY;
	}
	oldhashlist = vm->vm_hashlist;
	oldhashsize = vm->vm_hashsize;
	vm->vm_hashlist = newhashlist;
	vm->vm_hashsize = newhashsize;
	if (oldhashlist == NULL) {
		VMEM_UNLOCK(vm);
		return 0;
	}
	for (i = 0; i < oldhashsize; i++) {
		while ((bt = LIST_FIRST(&oldhashlist[i])) != NULL) {
			bt_rembusy(vm, bt); /* XXX */
			bt_insbusy(vm, bt);
		}
	}
	VMEM_UNLOCK(vm);

	xfree(oldhashlist);

	return 0;
}

/*
 * vmem_fit: check if a bt can satisfy the given restrictions.
 *
 * it is the caller's responsibility to ensure the region is big enough
 * before calling us.
 */

static int
vmem_fit(const bt_t *bt, vmem_size_t size, vmem_size_t align,
    vmem_size_t phase, vmem_size_t nocross,
    vmem_addr_t minaddr, vmem_addr_t maxaddr, vmem_addr_t *addrp)
{
	vmem_addr_t start;
	vmem_addr_t end;

	KASSERT(size > 0);
	KASSERT(bt->bt_size >= size); /* caller's responsibility */

	/*
	 * XXX assumption: vmem_addr_t and vmem_size_t are
	 * unsigned integers of the same size.
	 */

	start = bt->bt_start;
	if (start < minaddr) {
		start = minaddr;
	}
	end = BT_END(bt);
	if (end > maxaddr) {
		end = maxaddr;
	}
	if (start > end) {
		return ENOMEM;
	}

	start = VMEM_ALIGNUP(start - phase, align) + phase;
	if (start < bt->bt_start) {
		start += align;
	}
	if (VMEM_CROSS_P(start, start + size - 1, nocross)) {
		KASSERT(align < nocross);
		start = VMEM_ALIGNUP(start - phase, nocross) + phase;
	}
	if (start <= end && end - start >= size - 1) {
		KASSERT((start & (align - 1)) == phase);
		KASSERT(!VMEM_CROSS_P(start, start + size - 1, nocross));
		KASSERT(minaddr <= start);
		KASSERT(maxaddr == 0 || start + size - 1 <= maxaddr);
		KASSERT(bt->bt_start <= start);
		KASSERT(BT_END(bt) - start >= size - 1);
		*addrp = start;
		return 0;
	}
	return ENOMEM;
}
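
/*
 * a worked example of the arithmetic above (numbers illustrative): for a
 * free tag covering [0x1234 .. 0x2233] and a request with size 0x100,
 * align 0x100, phase 0x10, nocross 0, minaddr VMEM_ADDR_MIN and
 * maxaddr VMEM_ADDR_MAX:
 *
 *	start = VMEM_ALIGNUP(0x1234 - 0x10, 0x100) + 0x10 = 0x1310
 *
 * 0x1310 is not below bt_start and 0x1310 + 0x100 - 1 = 0x140f is still
 * within the tag, so vmem_fit() succeeds with *addrp = 0x1310, which
 * indeed satisfies (start & (align - 1)) == phase.
 */
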
/* ---- vmem API */

/*
 * vmem_create: create an arena.
 *
 * => must not be called from interrupt context.
 */

vmem_t *
vmem_create(const char *name, vmem_addr_t base, vmem_size_t size,
    vmem_size_t quantum,
    int (*importfn)(void *, vmem_size_t, vmem_size_t *, vm_flag_t,
	vmem_addr_t *),
    void (*releasefn)(void *, vmem_addr_t, vmem_size_t),
    void *arg, vmem_size_t qcache_max, vm_flag_t flags, int ipl)
{
	vmem_t *vm;
	int i;
#if defined(_KERNEL)
	static ONCE_DECL(control);
#endif /* defined(_KERNEL) */

	KASSERT((flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
	KASSERT((~flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
	KASSERT(quantum > 0);

#if defined(_KERNEL)
	if (RUN_ONCE(&control, vmem_init)) {
		return NULL;
	}
#endif /* defined(_KERNEL) */
	vm = xmalloc(sizeof(*vm), flags);
	if (vm == NULL) {
		return NULL;
	}

	VMEM_LOCK_INIT(vm, ipl);
	vm->vm_name = name;
	vm->vm_quantum_mask = quantum - 1;
	vm->vm_quantum_shift = SIZE2ORDER(quantum);
	KASSERT(ORDER2SIZE(vm->vm_quantum_shift) == quantum);
	vm->vm_importfn = importfn;
	vm->vm_releasefn = releasefn;
	vm->vm_arg = arg;
	vm->vm_nbusytag = 0;
#if defined(QCACHE)
	qc_init(vm, qcache_max, ipl);
#endif /* defined(QCACHE) */

	CIRCLEQ_INIT(&vm->vm_seglist);
	for (i = 0; i < VMEM_MAXORDER; i++) {
		LIST_INIT(&vm->vm_freelist[i]);
	}
	vm->vm_hashlist = NULL;
	if (vmem_rehash(vm, VMEM_HASHSIZE_INIT, flags)) {
		vmem_destroy1(vm);
		return NULL;
	}

	if (size != 0) {
		if (vmem_add(vm, base, size, flags) != 0) {
			vmem_destroy1(vm);
			return NULL;
		}
	}

#if defined(_KERNEL)
	mutex_enter(&vmem_list_lock);
	LIST_INSERT_HEAD(&vmem_list, vm, vm_alllist);
	mutex_exit(&vmem_list_lock);
#endif /* defined(_KERNEL) */

	return vm;
}

void
vmem_destroy(vmem_t *vm)
{

#if defined(_KERNEL)
	mutex_enter(&vmem_list_lock);
	LIST_REMOVE(vm, vm_alllist);
	mutex_exit(&vmem_list_lock);
#endif /* defined(_KERNEL) */

	vmem_destroy1(vm);
}

vmem_size_t
vmem_roundup_size(vmem_t *vm, vmem_size_t size)
{

	return (size + vm->vm_quantum_mask) & ~vm->vm_quantum_mask;
}

/*
 * vmem_alloc:
 *
 * => caller must ensure appropriate spl,
 *    if the arena can be accessed from interrupt context.
 */

int
vmem_alloc(vmem_t *vm, vmem_size_t size, vm_flag_t flags, vmem_addr_t *addrp)
{
	const vm_flag_t strat __unused = flags & VM_FITMASK;

	KASSERT((flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
	KASSERT((~flags & (VM_SLEEP|VM_NOSLEEP)) != 0);

	KASSERT(size > 0);
	KASSERT(strat == VM_BESTFIT || strat == VM_INSTANTFIT);
	if ((flags & VM_SLEEP) != 0) {
		ASSERT_SLEEPABLE();
	}

#if defined(QCACHE)
	if (size <= vm->vm_qcache_max) {
		void *p;
		int qidx = (size + vm->vm_quantum_mask) >> vm->vm_quantum_shift;
		qcache_t *qc = vm->vm_qcache[qidx - 1];

		p = pool_cache_get(qc->qc_cache, vmf_to_prf(flags));
		if (addrp != NULL)
			*addrp = (vmem_addr_t)p;
		return (p == NULL) ? ENOMEM : 0;
	}
#endif /* defined(QCACHE) */

	return vmem_xalloc(vm, size, 0, 0, 0, VMEM_ADDR_MIN, VMEM_ADDR_MAX,
	    flags, addrp);
}
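
/*
 * a minimal usage sketch (the arena name and address range below are
 * illustrative, not taken from this file): create an arena with a quantum
 * of PAGE_SIZE, no backing source and no quantum cache, then allocate and
 * free a range:
 *
 *	vmem_t *arena;
 *	vmem_addr_t va;
 *
 *	arena = vmem_create("example", 0x10000000, 0x01000000, PAGE_SIZE,
 *	    NULL, NULL, NULL, 0, VM_SLEEP, IPL_NONE);
 *	if (arena == NULL)
 *		...handle the failure...
 *	if (vmem_alloc(arena, 3 * PAGE_SIZE, VM_SLEEP | VM_INSTANTFIT,
 *	    &va) == 0) {
 *		...use [va, va + 3 * PAGE_SIZE)...
 *		vmem_free(arena, va, 3 * PAGE_SIZE);
 *	}
 *	vmem_destroy(arena);
 */
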
int
vmem_xalloc(vmem_t *vm, const vmem_size_t size0, vmem_size_t align,
    const vmem_size_t phase, const vmem_size_t nocross,
    const vmem_addr_t minaddr, const vmem_addr_t maxaddr, const vm_flag_t flags,
    vmem_addr_t *addrp)
{
	struct vmem_freelist *list;
	struct vmem_freelist *first;
	struct vmem_freelist *end;
	bt_t *bt;
	bt_t *btnew;
	bt_t *btnew2;
	const vmem_size_t size = vmem_roundup_size(vm, size0);
	vm_flag_t strat = flags & VM_FITMASK;
	vmem_addr_t start;
	int rc;

	KASSERT(size0 > 0);
	KASSERT(size > 0);
	KASSERT(strat == VM_BESTFIT || strat == VM_INSTANTFIT);
	if ((flags & VM_SLEEP) != 0) {
		ASSERT_SLEEPABLE();
	}
	KASSERT((align & vm->vm_quantum_mask) == 0);
	KASSERT((align & (align - 1)) == 0);
	KASSERT((phase & vm->vm_quantum_mask) == 0);
	KASSERT((nocross & vm->vm_quantum_mask) == 0);
	KASSERT((nocross & (nocross - 1)) == 0);
	KASSERT((align == 0 && phase == 0) || phase < align);
	KASSERT(nocross == 0 || nocross >= size);
	KASSERT(minaddr <= maxaddr);
	KASSERT(!VMEM_CROSS_P(phase, phase + size - 1, nocross));

	if (align == 0) {
		align = vm->vm_quantum_mask + 1;
	}

	/*
	 * allocate boundary tags before acquiring the vmem lock.
	 */
	btnew = bt_alloc(vm, flags);
	if (btnew == NULL) {
		return ENOMEM;
	}
	btnew2 = bt_alloc(vm, flags); /* XXX not necessary if no restrictions */
	if (btnew2 == NULL) {
		bt_free(vm, btnew);
		return ENOMEM;
	}

	/*
	 * choose a free block from which we allocate.
	 */
retry_strat:
	first = bt_freehead_toalloc(vm, size, strat);
	end = &vm->vm_freelist[VMEM_MAXORDER];
retry:
	bt = NULL;
	VMEM_LOCK(vm);
	vmem_check(vm);
	if (strat == VM_INSTANTFIT) {
		/*
		 * just choose the first block which satisfies our restrictions.
		 *
		 * note that we don't need to check the size of the blocks
		 * because any blocks found on these lists should be larger
		 * than the given size.
		 */
		for (list = first; list < end; list++) {
			bt = LIST_FIRST(list);
			if (bt != NULL) {
				rc = vmem_fit(bt, size, align, phase,
				    nocross, minaddr, maxaddr, &start);
				if (rc == 0) {
					goto gotit;
				}
				/*
				 * don't bother to follow the bt_freelist link
				 * here.  the list can be very long and we are
				 * told to run fast.  blocks from the later free
				 * lists are larger and have better chances to
				 * satisfy our restrictions.
				 */
			}
		}
	} else { /* VM_BESTFIT */
		/*
		 * we assume that, for space efficiency, it's better to
		 * allocate from a smaller block.  thus we will start searching
		 * from a lower-order list than VM_INSTANTFIT does.
		 * however, don't bother to find the smallest block in a free
		 * list because the list can be very long.  we can revisit it
		 * if/when it turns out to be a problem.
		 *
		 * note that the 'first' list can contain blocks smaller than
		 * the requested size.  thus we need to check bt_size.
		 */
		for (list = first; list < end; list++) {
			LIST_FOREACH(bt, list, bt_freelist) {
				if (bt->bt_size >= size) {
					rc = vmem_fit(bt, size, align, phase,
					    nocross, minaddr, maxaddr, &start);
					if (rc == 0) {
						goto gotit;
					}
				}
			}
		}
	}
	VMEM_UNLOCK(vm);
#if 1
	if (strat == VM_INSTANTFIT) {
		strat = VM_BESTFIT;
		goto retry_strat;
	}
#endif
	if (align != vm->vm_quantum_mask + 1 || phase != 0 ||
	    nocross != 0) {

		/*
		 * XXX should try to import a region large enough to
		 * satisfy restrictions?
		 */

		goto fail;
	}
	/* XXX eeek, minaddr & maxaddr not respected */
	if (vmem_import(vm, size, flags) == 0) {
		goto retry;
	}
	/* XXX */
fail:
	bt_free(vm, btnew);
	bt_free(vm, btnew2);
	return ENOMEM;

gotit:
	KASSERT(bt->bt_type == BT_TYPE_FREE);
	KASSERT(bt->bt_size >= size);
	bt_remfree(vm, bt);
	vmem_check(vm);
	if (bt->bt_start != start) {
		btnew2->bt_type = BT_TYPE_FREE;
		btnew2->bt_start = bt->bt_start;
		btnew2->bt_size = start - bt->bt_start;
		bt->bt_start = start;
		bt->bt_size -= btnew2->bt_size;
		bt_insfree(vm, btnew2);
		bt_insseg(vm, btnew2, CIRCLEQ_PREV(bt, bt_seglist));
		btnew2 = NULL;
		vmem_check(vm);
	}
	KASSERT(bt->bt_start == start);
	if (bt->bt_size != size && bt->bt_size - size > vm->vm_quantum_mask) {
		/* split */
		btnew->bt_type = BT_TYPE_BUSY;
		btnew->bt_start = bt->bt_start;
		btnew->bt_size = size;
		bt->bt_start = bt->bt_start + size;
		bt->bt_size -= size;
		bt_insfree(vm, bt);
		bt_insseg(vm, btnew, CIRCLEQ_PREV(bt, bt_seglist));
		bt_insbusy(vm, btnew);
		vmem_check(vm);
		VMEM_UNLOCK(vm);
	} else {
		bt->bt_type = BT_TYPE_BUSY;
		bt_insbusy(vm, bt);
		vmem_check(vm);
		VMEM_UNLOCK(vm);
		bt_free(vm, btnew);
		btnew = bt;
	}
	if (btnew2 != NULL) {
		bt_free(vm, btnew2);
	}
	KASSERT(btnew->bt_size >= size);
	btnew->bt_type = BT_TYPE_BUSY;

	if (addrp != NULL)
		*addrp = btnew->bt_start;
	return 0;
}
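
/*
 * a sketch of a constrained allocation through the code above (all
 * numbers illustrative, assuming the arena's quantum divides them):
 * request 0x2000 bytes aligned to 0x1000, offset 0x100 into the
 * alignment, not crossing a 0x10000 boundary and lying within
 * [0x100000, 0x1fffff]:
 *
 *	vmem_addr_t va;
 *	int error;
 *
 *	error = vmem_xalloc(arena, 0x2000, 0x1000, 0x100, 0x10000,
 *	    0x100000, 0x1fffff, VM_SLEEP | VM_BESTFIT, &va);
 *
 * on success (error == 0), (va & 0xfff) == 0x100, va >= 0x100000,
 * va + 0x2000 - 1 <= 0x1fffff, and the range does not straddle a
 * 0x10000 boundary.
 */
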
/*
 * vmem_free:
 *
 * => caller must ensure appropriate spl,
 *    if the arena can be accessed from interrupt context.
 */

void
vmem_free(vmem_t *vm, vmem_addr_t addr, vmem_size_t size)
{

	KASSERT(size > 0);

#if defined(QCACHE)
	if (size <= vm->vm_qcache_max) {
		int qidx = (size + vm->vm_quantum_mask) >> vm->vm_quantum_shift;
		qcache_t *qc = vm->vm_qcache[qidx - 1];

		return pool_cache_put(qc->qc_cache, (void *)addr);
	}
#endif /* defined(QCACHE) */

	vmem_xfree(vm, addr, size);
}

void
vmem_xfree(vmem_t *vm, vmem_addr_t addr, vmem_size_t size)
{
	bt_t *bt;
	bt_t *t;

	KASSERT(size > 0);

	VMEM_LOCK(vm);

	bt = bt_lookupbusy(vm, addr);
	KASSERT(bt != NULL);
	KASSERT(bt->bt_start == addr);
	KASSERT(bt->bt_size == vmem_roundup_size(vm, size) ||
	    bt->bt_size - vmem_roundup_size(vm, size) <= vm->vm_quantum_mask);
	KASSERT(bt->bt_type == BT_TYPE_BUSY);
	bt_rembusy(vm, bt);
	bt->bt_type = BT_TYPE_FREE;

	/* coalesce */
	t = CIRCLEQ_NEXT(bt, bt_seglist);
	if (t != NULL && t->bt_type == BT_TYPE_FREE) {
		KASSERT(BT_END(bt) < t->bt_start);	/* YYY */
		bt_remfree(vm, t);
		bt_remseg(vm, t);
		bt->bt_size += t->bt_size;
		bt_free(vm, t);
	}
	t = CIRCLEQ_PREV(bt, bt_seglist);
	if (t != NULL && t->bt_type == BT_TYPE_FREE) {
		KASSERT(BT_END(t) < bt->bt_start);	/* YYY */
		bt_remfree(vm, t);
		bt_remseg(vm, t);
		bt->bt_size += t->bt_size;
		bt->bt_start = t->bt_start;
		bt_free(vm, t);
	}

	t = CIRCLEQ_PREV(bt, bt_seglist);
	KASSERT(t != NULL);
	KASSERT(BT_ISSPAN_P(t) || t->bt_type == BT_TYPE_BUSY);
	if (vm->vm_releasefn != NULL && t->bt_type == BT_TYPE_SPAN &&
	    t->bt_size == bt->bt_size) {
		vmem_addr_t spanaddr;
		vmem_size_t spansize;

		KASSERT(t->bt_start == bt->bt_start);
		spanaddr = bt->bt_start;
		spansize = bt->bt_size;
		bt_remseg(vm, bt);
		bt_free(vm, bt);
		bt_remseg(vm, t);
		bt_free(vm, t);
		VMEM_UNLOCK(vm);
		(*vm->vm_releasefn)(vm->vm_arg, spanaddr, spansize);
	} else {
		bt_insfree(vm, bt);
		VMEM_UNLOCK(vm);
	}
}
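
/*
 * note (not stated explicitly above, but implied by the qcache path):
 * addresses obtained with vmem_alloc() should be returned with
 * vmem_free() and addresses obtained with vmem_xalloc() with
 * vmem_xfree(), using the original size in both cases.  a small
 * vmem_alloc() allocation may have come from a pool cache rather than
 * the arena's busy hash, in which case vmem_xfree() would not find its
 * boundary tag.
 */
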
/*
 * vmem_add:
 *
 * => caller must ensure appropriate spl,
 *    if the arena can be accessed from interrupt context.
 */

int
vmem_add(vmem_t *vm, vmem_addr_t addr, vmem_size_t size, vm_flag_t flags)
{

	return vmem_add1(vm, addr, size, flags, BT_TYPE_SPAN_STATIC);
}

/*
 * vmem_reap: reap unused resources.
 *
 * => return true if we successfully reaped something.
 */

bool
vmem_reap(vmem_t *vm)
{
	bool didsomething = false;

#if defined(QCACHE)
	didsomething = qc_reap(vm);
#endif /* defined(QCACHE) */
	return didsomething;
}

/* ---- rehash */

#if defined(_KERNEL)
static struct callout vmem_rehash_ch;
static int vmem_rehash_interval;
static struct workqueue *vmem_rehash_wq;
static struct work vmem_rehash_wk;

static void
vmem_rehash_all(struct work *wk, void *dummy)
{
	vmem_t *vm;

	KASSERT(wk == &vmem_rehash_wk);
	mutex_enter(&vmem_list_lock);
	LIST_FOREACH(vm, &vmem_list, vm_alllist) {
		size_t desired;
		size_t current;

		if (!VMEM_TRYLOCK(vm)) {
			continue;
		}
		desired = vm->vm_nbusytag;
		current = vm->vm_hashsize;
		VMEM_UNLOCK(vm);

		if (desired > VMEM_HASHSIZE_MAX) {
			desired = VMEM_HASHSIZE_MAX;
		} else if (desired < VMEM_HASHSIZE_MIN) {
			desired = VMEM_HASHSIZE_MIN;
		}
		if (desired > current * 2 || desired * 2 < current) {
			vmem_rehash(vm, desired, VM_NOSLEEP);
		}
	}
	mutex_exit(&vmem_list_lock);

	callout_schedule(&vmem_rehash_ch, vmem_rehash_interval);
}

static void
vmem_rehash_all_kick(void *dummy)
{

	workqueue_enqueue(vmem_rehash_wq, &vmem_rehash_wk, NULL);
}

void
vmem_rehash_start(void)
{
	int error;

	error = workqueue_create(&vmem_rehash_wq, "vmem_rehash",
	    vmem_rehash_all, NULL, PRI_VM, IPL_SOFTCLOCK, WQ_MPSAFE);
	if (error) {
		panic("%s: workqueue_create %d\n", __func__, error);
	}
	callout_init(&vmem_rehash_ch, CALLOUT_MPSAFE);
	callout_setfunc(&vmem_rehash_ch, vmem_rehash_all_kick, NULL);

	vmem_rehash_interval = hz * 10;
	callout_schedule(&vmem_rehash_ch, vmem_rehash_interval);
}
#endif /* defined(_KERNEL) */
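
/*
 * a concrete reading of the policy above: with vmem_rehash_interval set
 * to 10 * hz, each arena is examined roughly every ten seconds.  an arena
 * with, say, 1000 busy tags but only 128 hash buckets has desired == 1000
 * (already within [VMEM_HASHSIZE_MIN, VMEM_HASHSIZE_MAX]), and since
 * 1000 > 128 * 2 its hash table is grown to 1000 buckets with VM_NOSLEEP.
 */
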
/* ---- debug */

#if defined(DDB) || defined(UNITTEST) || defined(VMEM_SANITY)

static void bt_dump(const bt_t *, void (*)(const char *, ...));

static const char *
bt_type_string(int type)
{
	static const char * const table[] = {
		[BT_TYPE_BUSY] = "busy",
		[BT_TYPE_FREE] = "free",
		[BT_TYPE_SPAN] = "span",
		[BT_TYPE_SPAN_STATIC] = "static span",
	};

	if (type >= __arraycount(table)) {
		return "BOGUS";
	}
	return table[type];
}

static void
bt_dump(const bt_t *bt, void (*pr)(const char *, ...))
{

	(*pr)("\t%p: %" PRIu64 ", %" PRIu64 ", %d(%s)\n",
	    bt, (uint64_t)bt->bt_start, (uint64_t)bt->bt_size,
	    bt->bt_type, bt_type_string(bt->bt_type));
}

static void
vmem_dump(const vmem_t *vm, void (*pr)(const char *, ...))
{
	const bt_t *bt;
	int i;

	(*pr)("vmem %p '%s'\n", vm, vm->vm_name);
	CIRCLEQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
		bt_dump(bt, pr);
	}

	for (i = 0; i < VMEM_MAXORDER; i++) {
		const struct vmem_freelist *fl = &vm->vm_freelist[i];

		if (LIST_EMPTY(fl)) {
			continue;
		}

		(*pr)("freelist[%d]\n", i);
		LIST_FOREACH(bt, fl, bt_freelist) {
			bt_dump(bt, pr);
		}
	}
}

#endif /* defined(DDB) || defined(UNITTEST) || defined(VMEM_SANITY) */

#if defined(DDB)
static bt_t *
vmem_whatis_lookup(vmem_t *vm, uintptr_t addr)
{
	bt_t *bt;

	CIRCLEQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
		if (BT_ISSPAN_P(bt)) {
			continue;
		}
		if (bt->bt_start <= addr && addr <= BT_END(bt)) {
			return bt;
		}
	}

	return NULL;
}

void
vmem_whatis(uintptr_t addr, void (*pr)(const char *, ...))
{
	vmem_t *vm;

	LIST_FOREACH(vm, &vmem_list, vm_alllist) {
		bt_t *bt;

		bt = vmem_whatis_lookup(vm, addr);
		if (bt == NULL) {
			continue;
		}
		(*pr)("%p is %p+%zu in VMEM '%s' (%s)\n",
		    (void *)addr, (void *)bt->bt_start,
		    (size_t)(addr - bt->bt_start), vm->vm_name,
		    (bt->bt_type == BT_TYPE_BUSY) ? "allocated" : "free");
	}
}

void
vmem_printall(const char *modif, void (*pr)(const char *, ...))
{
	const vmem_t *vm;

	LIST_FOREACH(vm, &vmem_list, vm_alllist) {
		vmem_dump(vm, pr);
	}
}

void
vmem_print(uintptr_t addr, const char *modif, void (*pr)(const char *, ...))
{
	const vmem_t *vm = (const void *)addr;

	vmem_dump(vm, pr);
}
#endif /* defined(DDB) */

#if defined(_KERNEL)
#define	vmem_printf printf
#else
#include <stdio.h>
#include <stdarg.h>

static void
vmem_printf(const char *fmt, ...)
{
	va_list ap;
	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
}
#endif

#if defined(VMEM_SANITY)

static bool
vmem_check_sanity(vmem_t *vm)
{
	const bt_t *bt, *bt2;

	KASSERT(vm != NULL);

	CIRCLEQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
		if (bt->bt_start > BT_END(bt)) {
			printf("corrupted tag\n");
			bt_dump(bt, vmem_printf);
			return false;
		}
	}
	CIRCLEQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
		CIRCLEQ_FOREACH(bt2, &vm->vm_seglist, bt_seglist) {
			if (bt == bt2) {
				continue;
			}
			if (BT_ISSPAN_P(bt) != BT_ISSPAN_P(bt2)) {
				continue;
			}
			if (bt->bt_start <= BT_END(bt2) &&
			    bt2->bt_start <= BT_END(bt)) {
				printf("overlapping tags\n");
				bt_dump(bt, vmem_printf);
				bt_dump(bt2, vmem_printf);
				return false;
			}
		}
	}

	return true;
}

static void
vmem_check(vmem_t *vm)
{

	if (!vmem_check_sanity(vm)) {
		panic("insanity vmem %p", vm);
	}
}

#endif /* defined(VMEM_SANITY) */
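
/*
 * the unit test below is meant to be compiled outside the kernel, where
 * UNITTEST gets defined above; something along the lines of
 *
 *	cc -g -o vmemtest subr_vmem.c
 *
 * from this directory (an assumption, not part of the original file),
 * provided "../sys/vmem.h" resolves.  adding -DVMEM_SANITY also enables
 * the consistency checks above.
 */
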
#if defined(UNITTEST)
int
main(void)
{
	int rc;
	vmem_t *vm;
	vmem_addr_t p;
	struct reg {
		vmem_addr_t p;
		vmem_size_t sz;
		bool x;
	} *reg = NULL;
	int nreg = 0;
	int nalloc = 0;
	int nfree = 0;
	vmem_size_t total = 0;
#if 1
	vm_flag_t strat = VM_INSTANTFIT;
#else
	vm_flag_t strat = VM_BESTFIT;
#endif

	vm = vmem_create("test", 0, 0, 1, NULL, NULL, NULL, 0, VM_SLEEP,
#ifdef _KERNEL
	    IPL_NONE
#else
	    0
#endif
	    );
	if (vm == NULL) {
		printf("vmem_create\n");
		exit(EXIT_FAILURE);
	}
	vmem_dump(vm, vmem_printf);

	rc = vmem_add(vm, 0, 50, VM_SLEEP);
	assert(rc == 0);
	rc = vmem_add(vm, 100, 200, VM_SLEEP);
	assert(rc == 0);
	rc = vmem_add(vm, 2000, 1, VM_SLEEP);
	assert(rc == 0);
	rc = vmem_add(vm, 40000, 65536, VM_SLEEP);
	assert(rc == 0);
	rc = vmem_add(vm, 10000, 10000, VM_SLEEP);
	assert(rc == 0);
	rc = vmem_add(vm, 500, 1000, VM_SLEEP);
	assert(rc == 0);
	rc = vmem_add(vm, 0xffffff00, 0x100, VM_SLEEP);
	assert(rc == 0);
	rc = vmem_xalloc(vm, 0x101, 0, 0, 0,
	    0xffffff00, 0xffffffff, strat|VM_SLEEP, &p);
	assert(rc != 0);
	rc = vmem_xalloc(vm, 50, 0, 0, 0, 0, 49, strat|VM_SLEEP, &p);
	assert(rc == 0 && p == 0);
	vmem_xfree(vm, p, 50);
	rc = vmem_xalloc(vm, 25, 0, 0, 0, 0, 24, strat|VM_SLEEP, &p);
	assert(rc == 0 && p == 0);
	rc = vmem_xalloc(vm, 0x100, 0, 0, 0,
	    0xffffff01, 0xffffffff, strat|VM_SLEEP, &p);
	assert(rc != 0);
	rc = vmem_xalloc(vm, 0x100, 0, 0, 0,
	    0xffffff00, 0xfffffffe, strat|VM_SLEEP, &p);
	assert(rc != 0);
	rc = vmem_xalloc(vm, 0x100, 0, 0, 0,
	    0xffffff00, 0xffffffff, strat|VM_SLEEP, &p);
	assert(rc == 0);
	vmem_dump(vm, vmem_printf);
	for (;;) {
		struct reg *r;
		int t = rand() % 100;

		if (t > 45) {
			/* alloc */
			vmem_size_t sz = rand() % 500 + 1;
			bool x;
			vmem_size_t align, phase, nocross;
			vmem_addr_t minaddr, maxaddr;

			if (t > 70) {
				x = true;
				/* XXX */
				align = 1 << (rand() % 15);
				phase = rand() % 65536;
				nocross = 1 << (rand() % 15);
				if (align <= phase) {
					phase = 0;
				}
				if (VMEM_CROSS_P(phase, phase + sz - 1,
				    nocross)) {
					nocross = 0;
				}
				do {
					minaddr = rand() % 50000;
					maxaddr = rand() % 70000;
				} while (minaddr > maxaddr);
				printf("=== xalloc %" PRIu64
				    " align=%" PRIu64 ", phase=%" PRIu64
				    ", nocross=%" PRIu64 ", min=%" PRIu64
				    ", max=%" PRIu64 "\n",
				    (uint64_t)sz,
				    (uint64_t)align,
				    (uint64_t)phase,
				    (uint64_t)nocross,
				    (uint64_t)minaddr,
				    (uint64_t)maxaddr);
				rc = vmem_xalloc(vm, sz, align, phase, nocross,
				    minaddr, maxaddr, strat|VM_SLEEP, &p);
			} else {
				x = false;
				printf("=== alloc %" PRIu64 "\n", (uint64_t)sz);
				rc = vmem_alloc(vm, sz, strat|VM_SLEEP, &p);
			}
			printf("-> %" PRIu64 "\n", (uint64_t)p);
			vmem_dump(vm, vmem_printf);
			if (rc != 0) {
				if (x) {
					continue;
				}
				break;
			}
			nreg++;
			reg = realloc(reg, sizeof(*reg) * nreg);
			r = &reg[nreg - 1];
			r->p = p;
			r->sz = sz;
			r->x = x;
			total += sz;
			nalloc++;
		} else if (nreg != 0) {
			/* free */
			r = &reg[rand() % nreg];
			printf("=== free %" PRIu64 ", %" PRIu64 "\n",
			    (uint64_t)r->p, (uint64_t)r->sz);
			if (r->x) {
				vmem_xfree(vm, r->p, r->sz);
			} else {
				vmem_free(vm, r->p, r->sz);
			}
			total -= r->sz;
			vmem_dump(vm, vmem_printf);
			*r = reg[nreg - 1];
			nreg--;
			nfree++;
		}
		printf("total=%" PRIu64 "\n", (uint64_t)total);
	}
	fprintf(stderr, "total=%" PRIu64 ", nalloc=%d, nfree=%d\n",
	    (uint64_t)total, nalloc, nfree);
	exit(EXIT_SUCCESS);
}
#endif /* defined(UNITTEST) */