1 /* $NetBSD: subr_vmem.c,v 1.57 2009/03/18 10:22:42 cegger Exp $ */ 2 3 /*- 4 * Copyright (c)2006,2007,2008,2009 YAMAMOTO Takashi, 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 26 * SUCH DAMAGE. 27 */ 28 29 /* 30 * reference: 31 * - Magazines and Vmem: Extending the Slab Allocator 32 * to Many CPUs and Arbitrary Resources 33 * http://www.usenix.org/event/usenix01/bonwick.html 34 * 35 * todo: 36 * - decide how to import segments for vmem_xalloc. 37 * - don't rely on malloc(9). 38 */ 39 40 #include <sys/cdefs.h> 41 __KERNEL_RCSID(0, "$NetBSD: subr_vmem.c,v 1.57 2009/03/18 10:22:42 cegger Exp $"); 42 43 #if defined(_KERNEL) 44 #include "opt_ddb.h" 45 #define QCACHE 46 #endif /* defined(_KERNEL) */ 47 48 #include <sys/param.h> 49 #include <sys/hash.h> 50 #include <sys/queue.h> 51 52 #if defined(_KERNEL) 53 #include <sys/systm.h> 54 #include <sys/kernel.h> /* hz */ 55 #include <sys/callout.h> 56 #include <sys/malloc.h> 57 #include <sys/once.h> 58 #include <sys/pool.h> 59 #include <sys/vmem.h> 60 #include <sys/workqueue.h> 61 #else /* defined(_KERNEL) */ 62 #include "../sys/vmem.h" 63 #endif /* defined(_KERNEL) */ 64 65 #if defined(_KERNEL) 66 #define LOCK_DECL(name) \ 67 kmutex_t name; char lockpad[COHERENCY_UNIT - sizeof(kmutex_t)] 68 #else /* defined(_KERNEL) */ 69 #include <errno.h> 70 #include <assert.h> 71 #include <stdlib.h> 72 73 #define UNITTEST 74 #define KASSERT(a) assert(a) 75 #define LOCK_DECL(name) /* nothing */ 76 #define mutex_init(a, b, c) /* nothing */ 77 #define mutex_destroy(a) /* nothing */ 78 #define mutex_enter(a) /* nothing */ 79 #define mutex_tryenter(a) true 80 #define mutex_exit(a) /* nothing */ 81 #define mutex_owned(a) /* nothing */ 82 #define ASSERT_SLEEPABLE() /* nothing */ 83 #define panic(...) 
printf(__VA_ARGS__); abort() 84 #endif /* defined(_KERNEL) */ 85 86 struct vmem; 87 struct vmem_btag; 88 89 #if defined(VMEM_SANITY) 90 static void vmem_check(vmem_t *); 91 #else /* defined(VMEM_SANITY) */ 92 #define vmem_check(vm) /* nothing */ 93 #endif /* defined(VMEM_SANITY) */ 94 95 #define VMEM_MAXORDER (sizeof(vmem_size_t) * CHAR_BIT) 96 97 #define VMEM_HASHSIZE_MIN 1 /* XXX */ 98 #define VMEM_HASHSIZE_MAX 65536 /* XXX */ 99 #define VMEM_HASHSIZE_INIT 128 100 101 #define VM_FITMASK (VM_BESTFIT | VM_INSTANTFIT) 102 103 CIRCLEQ_HEAD(vmem_seglist, vmem_btag); 104 LIST_HEAD(vmem_freelist, vmem_btag); 105 LIST_HEAD(vmem_hashlist, vmem_btag); 106 107 #if defined(QCACHE) 108 #define VMEM_QCACHE_IDX_MAX 32 109 110 #define QC_NAME_MAX 16 111 112 struct qcache { 113 pool_cache_t qc_cache; 114 vmem_t *qc_vmem; 115 char qc_name[QC_NAME_MAX]; 116 }; 117 typedef struct qcache qcache_t; 118 #define QC_POOL_TO_QCACHE(pool) ((qcache_t *)(pool->pr_qcache)) 119 #endif /* defined(QCACHE) */ 120 121 /* vmem arena */ 122 struct vmem { 123 LOCK_DECL(vm_lock); 124 vmem_addr_t (*vm_allocfn)(vmem_t *, vmem_size_t, vmem_size_t *, 125 vm_flag_t); 126 void (*vm_freefn)(vmem_t *, vmem_addr_t, vmem_size_t); 127 vmem_t *vm_source; 128 struct vmem_seglist vm_seglist; 129 struct vmem_freelist vm_freelist[VMEM_MAXORDER]; 130 size_t vm_hashsize; 131 size_t vm_nbusytag; 132 struct vmem_hashlist *vm_hashlist; 133 size_t vm_quantum_mask; 134 int vm_quantum_shift; 135 const char *vm_name; 136 LIST_ENTRY(vmem) vm_alllist; 137 138 #if defined(QCACHE) 139 /* quantum cache */ 140 size_t vm_qcache_max; 141 struct pool_allocator vm_qcache_allocator; 142 qcache_t vm_qcache_store[VMEM_QCACHE_IDX_MAX]; 143 qcache_t *vm_qcache[VMEM_QCACHE_IDX_MAX]; 144 #endif /* defined(QCACHE) */ 145 }; 146 147 #define VMEM_LOCK(vm) mutex_enter(&vm->vm_lock) 148 #define VMEM_TRYLOCK(vm) mutex_tryenter(&vm->vm_lock) 149 #define VMEM_UNLOCK(vm) mutex_exit(&vm->vm_lock) 150 #define VMEM_LOCK_INIT(vm, ipl) mutex_init(&vm->vm_lock, MUTEX_DEFAULT, ipl) 151 #define VMEM_LOCK_DESTROY(vm) mutex_destroy(&vm->vm_lock) 152 #define VMEM_ASSERT_LOCKED(vm) KASSERT(mutex_owned(&vm->vm_lock)) 153 154 /* boundary tag */ 155 struct vmem_btag { 156 CIRCLEQ_ENTRY(vmem_btag) bt_seglist; 157 union { 158 LIST_ENTRY(vmem_btag) u_freelist; /* BT_TYPE_FREE */ 159 LIST_ENTRY(vmem_btag) u_hashlist; /* BT_TYPE_BUSY */ 160 } bt_u; 161 #define bt_hashlist bt_u.u_hashlist 162 #define bt_freelist bt_u.u_freelist 163 vmem_addr_t bt_start; 164 vmem_size_t bt_size; 165 int bt_type; 166 }; 167 168 #define BT_TYPE_SPAN 1 169 #define BT_TYPE_SPAN_STATIC 2 170 #define BT_TYPE_FREE 3 171 #define BT_TYPE_BUSY 4 172 #define BT_ISSPAN_P(bt) ((bt)->bt_type <= BT_TYPE_SPAN_STATIC) 173 174 #define BT_END(bt) ((bt)->bt_start + (bt)->bt_size) 175 176 typedef struct vmem_btag bt_t; 177 178 /* ---- misc */ 179 180 #define VMEM_ALIGNUP(addr, align) \ 181 (-(-(addr) & -(align))) 182 #define VMEM_CROSS_P(addr1, addr2, boundary) \ 183 ((((addr1) ^ (addr2)) & -(boundary)) != 0) 184 185 #define ORDER2SIZE(order) ((vmem_size_t)1 << (order)) 186 187 static int 188 calc_order(vmem_size_t size) 189 { 190 vmem_size_t target; 191 int i; 192 193 KASSERT(size != 0); 194 195 i = 0; 196 target = size >> 1; 197 while (ORDER2SIZE(i) <= target) { 198 i++; 199 } 200 201 KASSERT(ORDER2SIZE(i) <= size); 202 KASSERT(size < ORDER2SIZE(i + 1) || ORDER2SIZE(i + 1) < ORDER2SIZE(i)); 203 204 return i; 205 } 206 207 #if defined(_KERNEL) 208 static MALLOC_DEFINE(M_VMEM, "vmem", "vmem"); 209 #endif /* defined(_KERNEL) */ 210 211 
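/*
 * Note on calc_order() above: it computes floor(log2(size)); for
 * example, calc_order(1) == 0, calc_order(8) == 3 and calc_order(9) == 3.
 * ORDER2SIZE(calc_order(size)) is therefore the largest power of 2 that
 * does not exceed size, which is what the freelist indexing below
 * relies on.
 */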
static void * 212 xmalloc(size_t sz, vm_flag_t flags) 213 { 214 215 #if defined(_KERNEL) 216 return malloc(sz, M_VMEM, 217 M_CANFAIL | ((flags & VM_SLEEP) ? M_WAITOK : M_NOWAIT)); 218 #else /* defined(_KERNEL) */ 219 return malloc(sz); 220 #endif /* defined(_KERNEL) */ 221 } 222 223 static void 224 xfree(void *p) 225 { 226 227 #if defined(_KERNEL) 228 return free(p, M_VMEM); 229 #else /* defined(_KERNEL) */ 230 return free(p); 231 #endif /* defined(_KERNEL) */ 232 } 233 234 /* ---- boundary tag */ 235 236 #if defined(_KERNEL) 237 static struct pool_cache bt_cache; 238 #endif /* defined(_KERNEL) */ 239 240 static bt_t * 241 bt_alloc(vmem_t *vm, vm_flag_t flags) 242 { 243 bt_t *bt; 244 245 #if defined(_KERNEL) 246 bt = pool_cache_get(&bt_cache, 247 (flags & VM_SLEEP) != 0 ? PR_WAITOK : PR_NOWAIT); 248 #else /* defined(_KERNEL) */ 249 bt = malloc(sizeof *bt); 250 #endif /* defined(_KERNEL) */ 251 252 return bt; 253 } 254 255 static void 256 bt_free(vmem_t *vm, bt_t *bt) 257 { 258 259 #if defined(_KERNEL) 260 pool_cache_put(&bt_cache, bt); 261 #else /* defined(_KERNEL) */ 262 free(bt); 263 #endif /* defined(_KERNEL) */ 264 } 265 266 /* 267 * freelist[0] ... [1, 1] 268 * freelist[1] ... [2, 3] 269 * freelist[2] ... [4, 7] 270 * freelist[3] ... [8, 15] 271 * : 272 * freelist[n] ... [(1 << n), (1 << (n + 1)) - 1] 273 * : 274 */ 275 276 static struct vmem_freelist * 277 bt_freehead_tofree(vmem_t *vm, vmem_size_t size) 278 { 279 const vmem_size_t qsize = size >> vm->vm_quantum_shift; 280 int idx; 281 282 KASSERT((size & vm->vm_quantum_mask) == 0); 283 KASSERT(size != 0); 284 285 idx = calc_order(qsize); 286 KASSERT(idx >= 0); 287 KASSERT(idx < VMEM_MAXORDER); 288 289 return &vm->vm_freelist[idx]; 290 } 291 292 static struct vmem_freelist * 293 bt_freehead_toalloc(vmem_t *vm, vmem_size_t size, vm_flag_t strat) 294 { 295 const vmem_size_t qsize = size >> vm->vm_quantum_shift; 296 int idx; 297 298 KASSERT((size & vm->vm_quantum_mask) == 0); 299 KASSERT(size != 0); 300 301 idx = calc_order(qsize); 302 if (strat == VM_INSTANTFIT && ORDER2SIZE(idx) != qsize) { 303 idx++; 304 /* check too large request? 
*/ 305 } 306 KASSERT(idx >= 0); 307 KASSERT(idx < VMEM_MAXORDER); 308 309 return &vm->vm_freelist[idx]; 310 } 311 312 /* ---- boundary tag hash */ 313 314 static struct vmem_hashlist * 315 bt_hashhead(vmem_t *vm, vmem_addr_t addr) 316 { 317 struct vmem_hashlist *list; 318 unsigned int hash; 319 320 hash = hash32_buf(&addr, sizeof(addr), HASH32_BUF_INIT); 321 list = &vm->vm_hashlist[hash % vm->vm_hashsize]; 322 323 return list; 324 } 325 326 static bt_t * 327 bt_lookupbusy(vmem_t *vm, vmem_addr_t addr) 328 { 329 struct vmem_hashlist *list; 330 bt_t *bt; 331 332 list = bt_hashhead(vm, addr); 333 LIST_FOREACH(bt, list, bt_hashlist) { 334 if (bt->bt_start == addr) { 335 break; 336 } 337 } 338 339 return bt; 340 } 341 342 static void 343 bt_rembusy(vmem_t *vm, bt_t *bt) 344 { 345 346 KASSERT(vm->vm_nbusytag > 0); 347 vm->vm_nbusytag--; 348 LIST_REMOVE(bt, bt_hashlist); 349 } 350 351 static void 352 bt_insbusy(vmem_t *vm, bt_t *bt) 353 { 354 struct vmem_hashlist *list; 355 356 KASSERT(bt->bt_type == BT_TYPE_BUSY); 357 358 list = bt_hashhead(vm, bt->bt_start); 359 LIST_INSERT_HEAD(list, bt, bt_hashlist); 360 vm->vm_nbusytag++; 361 } 362 363 /* ---- boundary tag list */ 364 365 static void 366 bt_remseg(vmem_t *vm, bt_t *bt) 367 { 368 369 CIRCLEQ_REMOVE(&vm->vm_seglist, bt, bt_seglist); 370 } 371 372 static void 373 bt_insseg(vmem_t *vm, bt_t *bt, bt_t *prev) 374 { 375 376 CIRCLEQ_INSERT_AFTER(&vm->vm_seglist, prev, bt, bt_seglist); 377 } 378 379 static void 380 bt_insseg_tail(vmem_t *vm, bt_t *bt) 381 { 382 383 CIRCLEQ_INSERT_TAIL(&vm->vm_seglist, bt, bt_seglist); 384 } 385 386 static void 387 bt_remfree(vmem_t *vm, bt_t *bt) 388 { 389 390 KASSERT(bt->bt_type == BT_TYPE_FREE); 391 392 LIST_REMOVE(bt, bt_freelist); 393 } 394 395 static void 396 bt_insfree(vmem_t *vm, bt_t *bt) 397 { 398 struct vmem_freelist *list; 399 400 list = bt_freehead_tofree(vm, bt->bt_size); 401 LIST_INSERT_HEAD(list, bt, bt_freelist); 402 } 403 404 /* ---- vmem internal functions */ 405 406 #if defined(_KERNEL) 407 static kmutex_t vmem_list_lock; 408 static LIST_HEAD(, vmem) vmem_list = LIST_HEAD_INITIALIZER(vmem_list); 409 #endif /* defined(_KERNEL) */ 410 411 #if defined(QCACHE) 412 static inline vm_flag_t 413 prf_to_vmf(int prflags) 414 { 415 vm_flag_t vmflags; 416 417 KASSERT((prflags & ~(PR_LIMITFAIL | PR_WAITOK | PR_NOWAIT)) == 0); 418 if ((prflags & PR_WAITOK) != 0) { 419 vmflags = VM_SLEEP; 420 } else { 421 vmflags = VM_NOSLEEP; 422 } 423 return vmflags; 424 } 425 426 static inline int 427 vmf_to_prf(vm_flag_t vmflags) 428 { 429 int prflags; 430 431 if ((vmflags & VM_SLEEP) != 0) { 432 prflags = PR_WAITOK; 433 } else { 434 prflags = PR_NOWAIT; 435 } 436 return prflags; 437 } 438 439 static size_t 440 qc_poolpage_size(size_t qcache_max) 441 { 442 int i; 443 444 for (i = 0; ORDER2SIZE(i) <= qcache_max * 3; i++) { 445 /* nothing */ 446 } 447 return ORDER2SIZE(i); 448 } 449 450 static void * 451 qc_poolpage_alloc(struct pool *pool, int prflags) 452 { 453 qcache_t *qc = QC_POOL_TO_QCACHE(pool); 454 vmem_t *vm = qc->qc_vmem; 455 456 return (void *)vmem_alloc(vm, pool->pr_alloc->pa_pagesz, 457 prf_to_vmf(prflags) | VM_INSTANTFIT); 458 } 459 460 static void 461 qc_poolpage_free(struct pool *pool, void *addr) 462 { 463 qcache_t *qc = QC_POOL_TO_QCACHE(pool); 464 vmem_t *vm = qc->qc_vmem; 465 466 vmem_free(vm, (vmem_addr_t)addr, pool->pr_alloc->pa_pagesz); 467 } 468 469 static void 470 qc_init(vmem_t *vm, size_t qcache_max, int ipl) 471 { 472 qcache_t *prevqc; 473 struct pool_allocator *pa; 474 int qcache_idx_max; 475 int i; 
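	/*
	 * Quantum cache layout: vm_qcache[i] serves allocations of
	 * (i + 1) quanta, up to qcache_max bytes.  For example, with a
	 * 4096-byte quantum and a qcache_max of 16384, four pool caches
	 * cover the sizes 4096, 8192, 12288 and 16384.  Neighbouring
	 * caches that would end up with the same number of items per
	 * pool page are shared (see the pr_itemsperpage comparison
	 * below).
	 */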
476 477 KASSERT((qcache_max & vm->vm_quantum_mask) == 0); 478 if (qcache_max > (VMEM_QCACHE_IDX_MAX << vm->vm_quantum_shift)) { 479 qcache_max = VMEM_QCACHE_IDX_MAX << vm->vm_quantum_shift; 480 } 481 vm->vm_qcache_max = qcache_max; 482 pa = &vm->vm_qcache_allocator; 483 memset(pa, 0, sizeof(*pa)); 484 pa->pa_alloc = qc_poolpage_alloc; 485 pa->pa_free = qc_poolpage_free; 486 pa->pa_pagesz = qc_poolpage_size(qcache_max); 487 488 qcache_idx_max = qcache_max >> vm->vm_quantum_shift; 489 prevqc = NULL; 490 for (i = qcache_idx_max; i > 0; i--) { 491 qcache_t *qc = &vm->vm_qcache_store[i - 1]; 492 size_t size = i << vm->vm_quantum_shift; 493 494 qc->qc_vmem = vm; 495 snprintf(qc->qc_name, sizeof(qc->qc_name), "%s-%zu", 496 vm->vm_name, size); 497 qc->qc_cache = pool_cache_init(size, 498 ORDER2SIZE(vm->vm_quantum_shift), 0, 499 PR_NOALIGN | PR_NOTOUCH /* XXX */, 500 qc->qc_name, pa, ipl, NULL, NULL, NULL); 501 KASSERT(qc->qc_cache != NULL); /* XXX */ 502 if (prevqc != NULL && 503 qc->qc_cache->pc_pool.pr_itemsperpage == 504 prevqc->qc_cache->pc_pool.pr_itemsperpage) { 505 pool_cache_destroy(qc->qc_cache); 506 vm->vm_qcache[i - 1] = prevqc; 507 continue; 508 } 509 qc->qc_cache->pc_pool.pr_qcache = qc; 510 vm->vm_qcache[i - 1] = qc; 511 prevqc = qc; 512 } 513 } 514 515 static void 516 qc_destroy(vmem_t *vm) 517 { 518 const qcache_t *prevqc; 519 int i; 520 int qcache_idx_max; 521 522 qcache_idx_max = vm->vm_qcache_max >> vm->vm_quantum_shift; 523 prevqc = NULL; 524 for (i = 0; i < qcache_idx_max; i++) { 525 qcache_t *qc = vm->vm_qcache[i]; 526 527 if (prevqc == qc) { 528 continue; 529 } 530 pool_cache_destroy(qc->qc_cache); 531 prevqc = qc; 532 } 533 } 534 535 static bool 536 qc_reap(vmem_t *vm) 537 { 538 const qcache_t *prevqc; 539 int i; 540 int qcache_idx_max; 541 bool didsomething = false; 542 543 qcache_idx_max = vm->vm_qcache_max >> vm->vm_quantum_shift; 544 prevqc = NULL; 545 for (i = 0; i < qcache_idx_max; i++) { 546 qcache_t *qc = vm->vm_qcache[i]; 547 548 if (prevqc == qc) { 549 continue; 550 } 551 if (pool_cache_reclaim(qc->qc_cache) != 0) { 552 didsomething = true; 553 } 554 prevqc = qc; 555 } 556 557 return didsomething; 558 } 559 #endif /* defined(QCACHE) */ 560 561 #if defined(_KERNEL) 562 static int 563 vmem_init(void) 564 { 565 566 mutex_init(&vmem_list_lock, MUTEX_DEFAULT, IPL_NONE); 567 pool_cache_bootstrap(&bt_cache, sizeof(bt_t), 0, 0, 0, "vmembt", 568 NULL, IPL_VM, NULL, NULL, NULL); 569 return 0; 570 } 571 #endif /* defined(_KERNEL) */ 572 573 static vmem_addr_t 574 vmem_add1(vmem_t *vm, vmem_addr_t addr, vmem_size_t size, vm_flag_t flags, 575 int spanbttype) 576 { 577 bt_t *btspan; 578 bt_t *btfree; 579 580 KASSERT((flags & (VM_SLEEP|VM_NOSLEEP)) != 0); 581 KASSERT((~flags & (VM_SLEEP|VM_NOSLEEP)) != 0); 582 KASSERT(spanbttype == BT_TYPE_SPAN || spanbttype == BT_TYPE_SPAN_STATIC); 583 584 btspan = bt_alloc(vm, flags); 585 if (btspan == NULL) { 586 return VMEM_ADDR_NULL; 587 } 588 btfree = bt_alloc(vm, flags); 589 if (btfree == NULL) { 590 bt_free(vm, btspan); 591 return VMEM_ADDR_NULL; 592 } 593 594 btspan->bt_type = spanbttype; 595 btspan->bt_start = addr; 596 btspan->bt_size = size; 597 598 btfree->bt_type = BT_TYPE_FREE; 599 btfree->bt_start = addr; 600 btfree->bt_size = size; 601 602 VMEM_LOCK(vm); 603 bt_insseg_tail(vm, btspan); 604 bt_insseg(vm, btfree, btspan); 605 bt_insfree(vm, btfree); 606 VMEM_UNLOCK(vm); 607 608 return addr; 609 } 610 611 static void 612 vmem_destroy1(vmem_t *vm) 613 { 614 615 #if defined(QCACHE) 616 qc_destroy(vm); 617 #endif /* defined(QCACHE) */ 
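	/*
	 * Release any boundary tags still hanging off the hash buckets
	 * before freeing the hash table and the arena itself.
	 */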
618 if (vm->vm_hashlist != NULL) { 619 int i; 620 621 for (i = 0; i < vm->vm_hashsize; i++) { 622 bt_t *bt; 623 624 while ((bt = LIST_FIRST(&vm->vm_hashlist[i])) != NULL) { 625 KASSERT(bt->bt_type == BT_TYPE_SPAN_STATIC); 626 bt_free(vm, bt); 627 } 628 } 629 xfree(vm->vm_hashlist); 630 } 631 VMEM_LOCK_DESTROY(vm); 632 xfree(vm); 633 } 634 635 static int 636 vmem_import(vmem_t *vm, vmem_size_t size, vm_flag_t flags) 637 { 638 vmem_addr_t addr; 639 640 if (vm->vm_allocfn == NULL) { 641 return EINVAL; 642 } 643 644 addr = (*vm->vm_allocfn)(vm->vm_source, size, &size, flags); 645 if (addr == VMEM_ADDR_NULL) { 646 return ENOMEM; 647 } 648 649 if (vmem_add1(vm, addr, size, flags, BT_TYPE_SPAN) == VMEM_ADDR_NULL) { 650 (*vm->vm_freefn)(vm->vm_source, addr, size); 651 return ENOMEM; 652 } 653 654 return 0; 655 } 656 657 static int 658 vmem_rehash(vmem_t *vm, size_t newhashsize, vm_flag_t flags) 659 { 660 bt_t *bt; 661 int i; 662 struct vmem_hashlist *newhashlist; 663 struct vmem_hashlist *oldhashlist; 664 size_t oldhashsize; 665 666 KASSERT(newhashsize > 0); 667 668 newhashlist = 669 xmalloc(sizeof(struct vmem_hashlist *) * newhashsize, flags); 670 if (newhashlist == NULL) { 671 return ENOMEM; 672 } 673 for (i = 0; i < newhashsize; i++) { 674 LIST_INIT(&newhashlist[i]); 675 } 676 677 if (!VMEM_TRYLOCK(vm)) { 678 xfree(newhashlist); 679 return EBUSY; 680 } 681 oldhashlist = vm->vm_hashlist; 682 oldhashsize = vm->vm_hashsize; 683 vm->vm_hashlist = newhashlist; 684 vm->vm_hashsize = newhashsize; 685 if (oldhashlist == NULL) { 686 VMEM_UNLOCK(vm); 687 return 0; 688 } 689 for (i = 0; i < oldhashsize; i++) { 690 while ((bt = LIST_FIRST(&oldhashlist[i])) != NULL) { 691 bt_rembusy(vm, bt); /* XXX */ 692 bt_insbusy(vm, bt); 693 } 694 } 695 VMEM_UNLOCK(vm); 696 697 xfree(oldhashlist); 698 699 return 0; 700 } 701 702 /* 703 * vmem_fit: check if a bt can satisfy the given restrictions. 704 */ 705 706 static vmem_addr_t 707 vmem_fit(const bt_t *bt, vmem_size_t size, vmem_size_t align, vmem_size_t phase, 708 vmem_size_t nocross, vmem_addr_t minaddr, vmem_addr_t maxaddr) 709 { 710 vmem_addr_t start; 711 vmem_addr_t end; 712 713 KASSERT(bt->bt_size >= size); 714 715 /* 716 * XXX assumption: vmem_addr_t and vmem_size_t are 717 * unsigned integer of the same size. 718 */ 719 720 start = bt->bt_start; 721 if (start < minaddr) { 722 start = minaddr; 723 } 724 end = BT_END(bt); 725 if (end > maxaddr - 1) { 726 end = maxaddr - 1; 727 } 728 if (start >= end) { 729 return VMEM_ADDR_NULL; 730 } 731 732 start = VMEM_ALIGNUP(start - phase, align) + phase; 733 if (start < bt->bt_start) { 734 start += align; 735 } 736 if (VMEM_CROSS_P(start, start + size - 1, nocross)) { 737 KASSERT(align < nocross); 738 start = VMEM_ALIGNUP(start - phase, nocross) + phase; 739 } 740 if (start < end && end - start >= size) { 741 KASSERT((start & (align - 1)) == phase); 742 KASSERT(!VMEM_CROSS_P(start, start + size - 1, nocross)); 743 KASSERT(minaddr <= start); 744 KASSERT(maxaddr == 0 || start + size <= maxaddr); 745 KASSERT(bt->bt_start <= start); 746 KASSERT(start + size <= BT_END(bt)); 747 return start; 748 } 749 return VMEM_ADDR_NULL; 750 } 751 752 /* ---- vmem API */ 753 754 /* 755 * vmem_create: create an arena. 756 * 757 * => must not be called from interrupt context. 
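 * => "quantum" is the arena's unit of allocation and must be a power of 2.
 * => if "allocfn", "freefn" and "source" are given, the arena imports
 *    spans from "source" on demand and returns entirely free spans to it.
 * => "qcache_max" bounds the allocation sizes served by the per-arena
 *    quantum caches; 0 disables them.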
758 */ 759 760 vmem_t * 761 vmem_create(const char *name, vmem_addr_t base, vmem_size_t size, 762 vmem_size_t quantum, 763 vmem_addr_t (*allocfn)(vmem_t *, vmem_size_t, vmem_size_t *, vm_flag_t), 764 void (*freefn)(vmem_t *, vmem_addr_t, vmem_size_t), 765 vmem_t *source, vmem_size_t qcache_max, vm_flag_t flags, 766 int ipl) 767 { 768 vmem_t *vm; 769 int i; 770 #if defined(_KERNEL) 771 static ONCE_DECL(control); 772 #endif /* defined(_KERNEL) */ 773 774 KASSERT((flags & (VM_SLEEP|VM_NOSLEEP)) != 0); 775 KASSERT((~flags & (VM_SLEEP|VM_NOSLEEP)) != 0); 776 777 #if defined(_KERNEL) 778 if (RUN_ONCE(&control, vmem_init)) { 779 return NULL; 780 } 781 #endif /* defined(_KERNEL) */ 782 vm = xmalloc(sizeof(*vm), flags); 783 if (vm == NULL) { 784 return NULL; 785 } 786 787 VMEM_LOCK_INIT(vm, ipl); 788 vm->vm_name = name; 789 vm->vm_quantum_mask = quantum - 1; 790 vm->vm_quantum_shift = calc_order(quantum); 791 KASSERT(ORDER2SIZE(vm->vm_quantum_shift) == quantum); 792 vm->vm_allocfn = allocfn; 793 vm->vm_freefn = freefn; 794 vm->vm_source = source; 795 vm->vm_nbusytag = 0; 796 #if defined(QCACHE) 797 qc_init(vm, qcache_max, ipl); 798 #endif /* defined(QCACHE) */ 799 800 CIRCLEQ_INIT(&vm->vm_seglist); 801 for (i = 0; i < VMEM_MAXORDER; i++) { 802 LIST_INIT(&vm->vm_freelist[i]); 803 } 804 vm->vm_hashlist = NULL; 805 if (vmem_rehash(vm, VMEM_HASHSIZE_INIT, flags)) { 806 vmem_destroy1(vm); 807 return NULL; 808 } 809 810 if (size != 0) { 811 if (vmem_add(vm, base, size, flags) == 0) { 812 vmem_destroy1(vm); 813 return NULL; 814 } 815 } 816 817 #if defined(_KERNEL) 818 mutex_enter(&vmem_list_lock); 819 LIST_INSERT_HEAD(&vmem_list, vm, vm_alllist); 820 mutex_exit(&vmem_list_lock); 821 #endif /* defined(_KERNEL) */ 822 823 return vm; 824 } 825 826 void 827 vmem_destroy(vmem_t *vm) 828 { 829 830 #if defined(_KERNEL) 831 mutex_enter(&vmem_list_lock); 832 LIST_REMOVE(vm, vm_alllist); 833 mutex_exit(&vmem_list_lock); 834 #endif /* defined(_KERNEL) */ 835 836 vmem_destroy1(vm); 837 } 838 839 vmem_size_t 840 vmem_roundup_size(vmem_t *vm, vmem_size_t size) 841 { 842 843 return (size + vm->vm_quantum_mask) & ~vm->vm_quantum_mask; 844 } 845 846 /* 847 * vmem_alloc: 848 * 849 * => caller must ensure appropriate spl, 850 * if the arena can be accessed from interrupt context. 
851 */ 852 853 vmem_addr_t 854 vmem_alloc(vmem_t *vm, vmem_size_t size, vm_flag_t flags) 855 { 856 const vm_flag_t strat __unused = flags & VM_FITMASK; 857 858 KASSERT((flags & (VM_SLEEP|VM_NOSLEEP)) != 0); 859 KASSERT((~flags & (VM_SLEEP|VM_NOSLEEP)) != 0); 860 861 KASSERT(size > 0); 862 KASSERT(strat == VM_BESTFIT || strat == VM_INSTANTFIT); 863 if ((flags & VM_SLEEP) != 0) { 864 ASSERT_SLEEPABLE(); 865 } 866 867 #if defined(QCACHE) 868 if (size <= vm->vm_qcache_max) { 869 int qidx = (size + vm->vm_quantum_mask) >> vm->vm_quantum_shift; 870 qcache_t *qc = vm->vm_qcache[qidx - 1]; 871 872 return (vmem_addr_t)pool_cache_get(qc->qc_cache, 873 vmf_to_prf(flags)); 874 } 875 #endif /* defined(QCACHE) */ 876 877 return vmem_xalloc(vm, size, 0, 0, 0, 0, 0, flags); 878 } 879 880 vmem_addr_t 881 vmem_xalloc(vmem_t *vm, vmem_size_t size0, vmem_size_t align, vmem_size_t phase, 882 vmem_size_t nocross, vmem_addr_t minaddr, vmem_addr_t maxaddr, 883 vm_flag_t flags) 884 { 885 struct vmem_freelist *list; 886 struct vmem_freelist *first; 887 struct vmem_freelist *end; 888 bt_t *bt; 889 bt_t *btnew; 890 bt_t *btnew2; 891 const vmem_size_t size = vmem_roundup_size(vm, size0); 892 vm_flag_t strat = flags & VM_FITMASK; 893 vmem_addr_t start; 894 895 KASSERT(size0 > 0); 896 KASSERT(size > 0); 897 KASSERT(strat == VM_BESTFIT || strat == VM_INSTANTFIT); 898 if ((flags & VM_SLEEP) != 0) { 899 ASSERT_SLEEPABLE(); 900 } 901 KASSERT((align & vm->vm_quantum_mask) == 0); 902 KASSERT((align & (align - 1)) == 0); 903 KASSERT((phase & vm->vm_quantum_mask) == 0); 904 KASSERT((nocross & vm->vm_quantum_mask) == 0); 905 KASSERT((nocross & (nocross - 1)) == 0); 906 KASSERT((align == 0 && phase == 0) || phase < align); 907 KASSERT(nocross == 0 || nocross >= size); 908 KASSERT(maxaddr == 0 || minaddr < maxaddr); 909 KASSERT(!VMEM_CROSS_P(phase, phase + size - 1, nocross)); 910 911 if (align == 0) { 912 align = vm->vm_quantum_mask + 1; 913 } 914 btnew = bt_alloc(vm, flags); 915 if (btnew == NULL) { 916 return VMEM_ADDR_NULL; 917 } 918 btnew2 = bt_alloc(vm, flags); /* XXX not necessary if no restrictions */ 919 if (btnew2 == NULL) { 920 bt_free(vm, btnew); 921 return VMEM_ADDR_NULL; 922 } 923 924 retry_strat: 925 first = bt_freehead_toalloc(vm, size, strat); 926 end = &vm->vm_freelist[VMEM_MAXORDER]; 927 retry: 928 bt = NULL; 929 VMEM_LOCK(vm); 930 vmem_check(vm); 931 if (strat == VM_INSTANTFIT) { 932 for (list = first; list < end; list++) { 933 bt = LIST_FIRST(list); 934 if (bt != NULL) { 935 start = vmem_fit(bt, size, align, phase, 936 nocross, minaddr, maxaddr); 937 if (start != VMEM_ADDR_NULL) { 938 goto gotit; 939 } 940 } 941 } 942 } else { /* VM_BESTFIT */ 943 for (list = first; list < end; list++) { 944 LIST_FOREACH(bt, list, bt_freelist) { 945 if (bt->bt_size >= size) { 946 start = vmem_fit(bt, size, align, phase, 947 nocross, minaddr, maxaddr); 948 if (start != VMEM_ADDR_NULL) { 949 goto gotit; 950 } 951 } 952 } 953 } 954 } 955 VMEM_UNLOCK(vm); 956 #if 1 957 if (strat == VM_INSTANTFIT) { 958 strat = VM_BESTFIT; 959 goto retry_strat; 960 } 961 #endif 962 if (align != vm->vm_quantum_mask + 1 || phase != 0 || 963 nocross != 0 || minaddr != 0 || maxaddr != 0) { 964 965 /* 966 * XXX should try to import a region large enough to 967 * satisfy restrictions? 
968 */ 969 970 goto fail; 971 } 972 if (vmem_import(vm, size, flags) == 0) { 973 goto retry; 974 } 975 /* XXX */ 976 fail: 977 bt_free(vm, btnew); 978 bt_free(vm, btnew2); 979 return VMEM_ADDR_NULL; 980 981 gotit: 982 KASSERT(bt->bt_type == BT_TYPE_FREE); 983 KASSERT(bt->bt_size >= size); 984 bt_remfree(vm, bt); 985 vmem_check(vm); 986 if (bt->bt_start != start) { 987 btnew2->bt_type = BT_TYPE_FREE; 988 btnew2->bt_start = bt->bt_start; 989 btnew2->bt_size = start - bt->bt_start; 990 bt->bt_start = start; 991 bt->bt_size -= btnew2->bt_size; 992 bt_insfree(vm, btnew2); 993 bt_insseg(vm, btnew2, CIRCLEQ_PREV(bt, bt_seglist)); 994 btnew2 = NULL; 995 vmem_check(vm); 996 } 997 KASSERT(bt->bt_start == start); 998 if (bt->bt_size != size && bt->bt_size - size > vm->vm_quantum_mask) { 999 /* split */ 1000 btnew->bt_type = BT_TYPE_BUSY; 1001 btnew->bt_start = bt->bt_start; 1002 btnew->bt_size = size; 1003 bt->bt_start = bt->bt_start + size; 1004 bt->bt_size -= size; 1005 bt_insfree(vm, bt); 1006 bt_insseg(vm, btnew, CIRCLEQ_PREV(bt, bt_seglist)); 1007 bt_insbusy(vm, btnew); 1008 vmem_check(vm); 1009 VMEM_UNLOCK(vm); 1010 } else { 1011 bt->bt_type = BT_TYPE_BUSY; 1012 bt_insbusy(vm, bt); 1013 vmem_check(vm); 1014 VMEM_UNLOCK(vm); 1015 bt_free(vm, btnew); 1016 btnew = bt; 1017 } 1018 if (btnew2 != NULL) { 1019 bt_free(vm, btnew2); 1020 } 1021 KASSERT(btnew->bt_size >= size); 1022 btnew->bt_type = BT_TYPE_BUSY; 1023 1024 return btnew->bt_start; 1025 } 1026 1027 /* 1028 * vmem_free: 1029 * 1030 * => caller must ensure appropriate spl, 1031 * if the arena can be accessed from interrupt context. 1032 */ 1033 1034 void 1035 vmem_free(vmem_t *vm, vmem_addr_t addr, vmem_size_t size) 1036 { 1037 1038 KASSERT(addr != VMEM_ADDR_NULL); 1039 KASSERT(size > 0); 1040 1041 #if defined(QCACHE) 1042 if (size <= vm->vm_qcache_max) { 1043 int qidx = (size + vm->vm_quantum_mask) >> vm->vm_quantum_shift; 1044 qcache_t *qc = vm->vm_qcache[qidx - 1]; 1045 1046 return pool_cache_put(qc->qc_cache, (void *)addr); 1047 } 1048 #endif /* defined(QCACHE) */ 1049 1050 vmem_xfree(vm, addr, size); 1051 } 1052 1053 void 1054 vmem_xfree(vmem_t *vm, vmem_addr_t addr, vmem_size_t size) 1055 { 1056 bt_t *bt; 1057 bt_t *t; 1058 1059 KASSERT(addr != VMEM_ADDR_NULL); 1060 KASSERT(size > 0); 1061 1062 VMEM_LOCK(vm); 1063 1064 bt = bt_lookupbusy(vm, addr); 1065 KASSERT(bt != NULL); 1066 KASSERT(bt->bt_start == addr); 1067 KASSERT(bt->bt_size == vmem_roundup_size(vm, size) || 1068 bt->bt_size - vmem_roundup_size(vm, size) <= vm->vm_quantum_mask); 1069 KASSERT(bt->bt_type == BT_TYPE_BUSY); 1070 bt_rembusy(vm, bt); 1071 bt->bt_type = BT_TYPE_FREE; 1072 1073 /* coalesce */ 1074 t = CIRCLEQ_NEXT(bt, bt_seglist); 1075 if (t != NULL && t->bt_type == BT_TYPE_FREE) { 1076 KASSERT(BT_END(bt) == t->bt_start); 1077 bt_remfree(vm, t); 1078 bt_remseg(vm, t); 1079 bt->bt_size += t->bt_size; 1080 bt_free(vm, t); 1081 } 1082 t = CIRCLEQ_PREV(bt, bt_seglist); 1083 if (t != NULL && t->bt_type == BT_TYPE_FREE) { 1084 KASSERT(BT_END(t) == bt->bt_start); 1085 bt_remfree(vm, t); 1086 bt_remseg(vm, t); 1087 bt->bt_size += t->bt_size; 1088 bt->bt_start = t->bt_start; 1089 bt_free(vm, t); 1090 } 1091 1092 t = CIRCLEQ_PREV(bt, bt_seglist); 1093 KASSERT(t != NULL); 1094 KASSERT(BT_ISSPAN_P(t) || t->bt_type == BT_TYPE_BUSY); 1095 if (vm->vm_freefn != NULL && t->bt_type == BT_TYPE_SPAN && 1096 t->bt_size == bt->bt_size) { 1097 vmem_addr_t spanaddr; 1098 vmem_size_t spansize; 1099 1100 KASSERT(t->bt_start == bt->bt_start); 1101 spanaddr = bt->bt_start; 1102 spansize = 
bt->bt_size; 1103 bt_remseg(vm, bt); 1104 bt_free(vm, bt); 1105 bt_remseg(vm, t); 1106 bt_free(vm, t); 1107 VMEM_UNLOCK(vm); 1108 (*vm->vm_freefn)(vm->vm_source, spanaddr, spansize); 1109 } else { 1110 bt_insfree(vm, bt); 1111 VMEM_UNLOCK(vm); 1112 } 1113 } 1114 1115 /* 1116 * vmem_add: 1117 * 1118 * => caller must ensure appropriate spl, 1119 * if the arena can be accessed from interrupt context. 1120 */ 1121 1122 vmem_addr_t 1123 vmem_add(vmem_t *vm, vmem_addr_t addr, vmem_size_t size, vm_flag_t flags) 1124 { 1125 1126 return vmem_add1(vm, addr, size, flags, BT_TYPE_SPAN_STATIC); 1127 } 1128 1129 /* 1130 * vmem_reap: reap unused resources. 1131 * 1132 * => return true if we successfully reaped something. 1133 */ 1134 1135 bool 1136 vmem_reap(vmem_t *vm) 1137 { 1138 bool didsomething = false; 1139 1140 #if defined(QCACHE) 1141 didsomething = qc_reap(vm); 1142 #endif /* defined(QCACHE) */ 1143 return didsomething; 1144 } 1145 1146 /* ---- rehash */ 1147 1148 #if defined(_KERNEL) 1149 static struct callout vmem_rehash_ch; 1150 static int vmem_rehash_interval; 1151 static struct workqueue *vmem_rehash_wq; 1152 static struct work vmem_rehash_wk; 1153 1154 static void 1155 vmem_rehash_all(struct work *wk, void *dummy) 1156 { 1157 vmem_t *vm; 1158 1159 KASSERT(wk == &vmem_rehash_wk); 1160 mutex_enter(&vmem_list_lock); 1161 LIST_FOREACH(vm, &vmem_list, vm_alllist) { 1162 size_t desired; 1163 size_t current; 1164 1165 if (!VMEM_TRYLOCK(vm)) { 1166 continue; 1167 } 1168 desired = vm->vm_nbusytag; 1169 current = vm->vm_hashsize; 1170 VMEM_UNLOCK(vm); 1171 1172 if (desired > VMEM_HASHSIZE_MAX) { 1173 desired = VMEM_HASHSIZE_MAX; 1174 } else if (desired < VMEM_HASHSIZE_MIN) { 1175 desired = VMEM_HASHSIZE_MIN; 1176 } 1177 if (desired > current * 2 || desired * 2 < current) { 1178 vmem_rehash(vm, desired, VM_NOSLEEP); 1179 } 1180 } 1181 mutex_exit(&vmem_list_lock); 1182 1183 callout_schedule(&vmem_rehash_ch, vmem_rehash_interval); 1184 } 1185 1186 static void 1187 vmem_rehash_all_kick(void *dummy) 1188 { 1189 1190 workqueue_enqueue(vmem_rehash_wq, &vmem_rehash_wk, NULL); 1191 } 1192 1193 void 1194 vmem_rehash_start(void) 1195 { 1196 int error; 1197 1198 error = workqueue_create(&vmem_rehash_wq, "vmem_rehash", 1199 vmem_rehash_all, NULL, PRI_VM, IPL_SOFTCLOCK, WQ_MPSAFE); 1200 if (error) { 1201 panic("%s: workqueue_create %d\n", __func__, error); 1202 } 1203 callout_init(&vmem_rehash_ch, CALLOUT_MPSAFE); 1204 callout_setfunc(&vmem_rehash_ch, vmem_rehash_all_kick, NULL); 1205 1206 vmem_rehash_interval = hz * 10; 1207 callout_schedule(&vmem_rehash_ch, vmem_rehash_interval); 1208 } 1209 #endif /* defined(_KERNEL) */ 1210 1211 /* ---- debug */ 1212 1213 #if defined(DDB) || defined(UNITTEST) || defined(VMEM_SANITY) 1214 1215 static void bt_dump(const bt_t *, void (*)(const char *, ...)); 1216 1217 static const char * 1218 bt_type_string(int type) 1219 { 1220 static const char * const table[] = { 1221 [BT_TYPE_BUSY] = "busy", 1222 [BT_TYPE_FREE] = "free", 1223 [BT_TYPE_SPAN] = "span", 1224 [BT_TYPE_SPAN_STATIC] = "static span", 1225 }; 1226 1227 if (type >= __arraycount(table)) { 1228 return "BOGUS"; 1229 } 1230 return table[type]; 1231 } 1232 1233 static void 1234 bt_dump(const bt_t *bt, void (*pr)(const char *, ...)) 1235 { 1236 1237 (*pr)("\t%p: %" PRIu64 ", %" PRIu64 ", %d(%s)\n", 1238 bt, (uint64_t)bt->bt_start, (uint64_t)bt->bt_size, 1239 bt->bt_type, bt_type_string(bt->bt_type)); 1240 } 1241 1242 static void 1243 vmem_dump(const vmem_t *vm , void (*pr)(const char *, ...)) 1244 { 1245 const bt_t *bt; 
1246 int i; 1247 1248 (*pr)("vmem %p '%s'\n", vm, vm->vm_name); 1249 CIRCLEQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) { 1250 bt_dump(bt, pr); 1251 } 1252 1253 for (i = 0; i < VMEM_MAXORDER; i++) { 1254 const struct vmem_freelist *fl = &vm->vm_freelist[i]; 1255 1256 if (LIST_EMPTY(fl)) { 1257 continue; 1258 } 1259 1260 (*pr)("freelist[%d]\n", i); 1261 LIST_FOREACH(bt, fl, bt_freelist) { 1262 bt_dump(bt, pr); 1263 } 1264 } 1265 } 1266 1267 #endif /* defined(DDB) || defined(UNITTEST) || defined(VMEM_SANITY) */ 1268 1269 #if defined(DDB) 1270 static bt_t * 1271 vmem_whatis_lookup(vmem_t *vm, uintptr_t addr) 1272 { 1273 bt_t *bt; 1274 1275 CIRCLEQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) { 1276 if (BT_ISSPAN_P(bt)) { 1277 continue; 1278 } 1279 if (bt->bt_start <= addr && addr < BT_END(bt)) { 1280 return bt; 1281 } 1282 } 1283 1284 return NULL; 1285 } 1286 1287 void 1288 vmem_whatis(uintptr_t addr, void (*pr)(const char *, ...)) 1289 { 1290 vmem_t *vm; 1291 1292 LIST_FOREACH(vm, &vmem_list, vm_alllist) { 1293 bt_t *bt; 1294 1295 bt = vmem_whatis_lookup(vm, addr); 1296 if (bt == NULL) { 1297 continue; 1298 } 1299 (*pr)("%p is %p+%zu in VMEM '%s' (%s)\n", 1300 (void *)addr, (void *)bt->bt_start, 1301 (size_t)(addr - bt->bt_start), vm->vm_name, 1302 (bt->bt_type == BT_TYPE_BUSY) ? "allocated" : "free"); 1303 } 1304 } 1305 1306 void 1307 vmem_printall(const char *modif, void (*pr)(const char *, ...)) 1308 { 1309 const vmem_t *vm; 1310 1311 LIST_FOREACH(vm, &vmem_list, vm_alllist) { 1312 vmem_dump(vm, pr); 1313 } 1314 } 1315 1316 void 1317 vmem_print(uintptr_t addr, const char *modif, void (*pr)(const char *, ...)) 1318 { 1319 const vmem_t *vm = (const void *)addr; 1320 1321 vmem_dump(vm, pr); 1322 } 1323 #endif /* defined(DDB) */ 1324 1325 #if !defined(_KERNEL) 1326 #include <stdio.h> 1327 #endif /* !defined(_KERNEL) */ 1328 1329 #if defined(VMEM_SANITY) 1330 1331 static bool 1332 vmem_check_sanity(vmem_t *vm) 1333 { 1334 const bt_t *bt, *bt2; 1335 1336 KASSERT(vm != NULL); 1337 1338 CIRCLEQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) { 1339 if (bt->bt_start >= BT_END(bt)) { 1340 printf("corrupted tag\n"); 1341 bt_dump(bt, (void *)printf); 1342 return false; 1343 } 1344 } 1345 CIRCLEQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) { 1346 CIRCLEQ_FOREACH(bt2, &vm->vm_seglist, bt_seglist) { 1347 if (bt == bt2) { 1348 continue; 1349 } 1350 if (BT_ISSPAN_P(bt) != BT_ISSPAN_P(bt2)) { 1351 continue; 1352 } 1353 if (bt->bt_start < BT_END(bt2) && 1354 bt2->bt_start < BT_END(bt)) { 1355 printf("overwrapped tags\n"); 1356 bt_dump(bt, (void *)printf); 1357 bt_dump(bt2, (void *)printf); 1358 return false; 1359 } 1360 } 1361 } 1362 1363 return true; 1364 } 1365 1366 static void 1367 vmem_check(vmem_t *vm) 1368 { 1369 1370 if (!vmem_check_sanity(vm)) { 1371 panic("insanity vmem %p", vm); 1372 } 1373 } 1374 1375 #endif /* defined(VMEM_SANITY) */ 1376 1377 #if defined(UNITTEST) 1378 int 1379 main(void) 1380 { 1381 vmem_t *vm; 1382 vmem_addr_t p; 1383 struct reg { 1384 vmem_addr_t p; 1385 vmem_size_t sz; 1386 bool x; 1387 } *reg = NULL; 1388 int nreg = 0; 1389 int nalloc = 0; 1390 int nfree = 0; 1391 vmem_size_t total = 0; 1392 #if 1 1393 vm_flag_t strat = VM_INSTANTFIT; 1394 #else 1395 vm_flag_t strat = VM_BESTFIT; 1396 #endif 1397 1398 vm = vmem_create("test", VMEM_ADDR_NULL, 0, 1, 1399 NULL, NULL, NULL, 0, VM_SLEEP, 0/*XXX*/); 1400 if (vm == NULL) { 1401 printf("vmem_create\n"); 1402 exit(EXIT_FAILURE); 1403 } 1404 vmem_dump(vm, (void *)printf); 1405 1406 p = vmem_add(vm, 100, 200, VM_SLEEP); 1407 p = vmem_add(vm, 2000, 
1, VM_SLEEP); 1408 p = vmem_add(vm, 40000, 0x10000000>>12, VM_SLEEP); 1409 p = vmem_add(vm, 10000, 10000, VM_SLEEP); 1410 p = vmem_add(vm, 500, 1000, VM_SLEEP); 1411 vmem_dump(vm, (void *)printf); 1412 for (;;) { 1413 struct reg *r; 1414 int t = rand() % 100; 1415 1416 if (t > 45) { 1417 /* alloc */ 1418 vmem_size_t sz = rand() % 500 + 1; 1419 bool x; 1420 vmem_size_t align, phase, nocross; 1421 vmem_addr_t minaddr, maxaddr; 1422 1423 if (t > 70) { 1424 x = true; 1425 /* XXX */ 1426 align = 1 << (rand() % 15); 1427 phase = rand() % 65536; 1428 nocross = 1 << (rand() % 15); 1429 if (align <= phase) { 1430 phase = 0; 1431 } 1432 if (VMEM_CROSS_P(phase, phase + sz - 1, 1433 nocross)) { 1434 nocross = 0; 1435 } 1436 minaddr = rand() % 50000; 1437 maxaddr = rand() % 70000; 1438 if (minaddr > maxaddr) { 1439 minaddr = 0; 1440 maxaddr = 0; 1441 } 1442 printf("=== xalloc %" PRIu64 1443 " align=%" PRIu64 ", phase=%" PRIu64 1444 ", nocross=%" PRIu64 ", min=%" PRIu64 1445 ", max=%" PRIu64 "\n", 1446 (uint64_t)sz, 1447 (uint64_t)align, 1448 (uint64_t)phase, 1449 (uint64_t)nocross, 1450 (uint64_t)minaddr, 1451 (uint64_t)maxaddr); 1452 p = vmem_xalloc(vm, sz, align, phase, nocross, 1453 minaddr, maxaddr, strat|VM_SLEEP); 1454 } else { 1455 x = false; 1456 printf("=== alloc %" PRIu64 "\n", (uint64_t)sz); 1457 p = vmem_alloc(vm, sz, strat|VM_SLEEP); 1458 } 1459 printf("-> %" PRIu64 "\n", (uint64_t)p); 1460 vmem_dump(vm, (void *)printf); 1461 if (p == VMEM_ADDR_NULL) { 1462 if (x) { 1463 continue; 1464 } 1465 break; 1466 } 1467 nreg++; 1468 reg = realloc(reg, sizeof(*reg) * nreg); 1469 r = &reg[nreg - 1]; 1470 r->p = p; 1471 r->sz = sz; 1472 r->x = x; 1473 total += sz; 1474 nalloc++; 1475 } else if (nreg != 0) { 1476 /* free */ 1477 r = &reg[rand() % nreg]; 1478 printf("=== free %" PRIu64 ", %" PRIu64 "\n", 1479 (uint64_t)r->p, (uint64_t)r->sz); 1480 if (r->x) { 1481 vmem_xfree(vm, r->p, r->sz); 1482 } else { 1483 vmem_free(vm, r->p, r->sz); 1484 } 1485 total -= r->sz; 1486 vmem_dump(vm, (void *)printf); 1487 *r = reg[nreg - 1]; 1488 nreg--; 1489 nfree++; 1490 } 1491 printf("total=%" PRIu64 "\n", (uint64_t)total); 1492 } 1493 fprintf(stderr, "total=%" PRIu64 ", nalloc=%d, nfree=%d\n", 1494 (uint64_t)total, nalloc, nfree); 1495 exit(EXIT_SUCCESS); 1496 } 1497 #endif /* defined(UNITTEST) */ 1498
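/*
 * Illustrative example (not part of the original file): a minimal sketch
 * of how a client might use this API, assuming a hypothetical
 * vmem_example() caller, an arena with a 4 KiB quantum, no backing
 * source and no quantum caches.  Kept under #if 0 so it is never built.
 */
#if 0
static void
vmem_example(void)
{
	vmem_t *vm;
	vmem_addr_t va;

	/* Arena managing [0x1000, 0x1000 + 1 MiB), quantum 4 KiB. */
	vm = vmem_create("example", 0x1000, 1024 * 1024, 4096,
	    NULL, NULL, NULL, 0, VM_SLEEP, IPL_NONE);
	if (vm == NULL)
		return;

	/* Request three pages; sizes are rounded up to the quantum. */
	va = vmem_alloc(vm, 3 * 4096, VM_SLEEP | VM_INSTANTFIT);
	if (va != VMEM_ADDR_NULL)
		vmem_free(vm, va, 3 * 4096);

	vmem_destroy(vm);
}
#endif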