/*	$NetBSD: subr_vmem.c,v 1.58 2010/12/17 22:24:11 yamt Exp $	*/

/*-
 * Copyright (c)2006,2007,2008,2009 YAMAMOTO Takashi,
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * reference:
 * -	Magazines and Vmem: Extending the Slab Allocator
 *	to Many CPUs and Arbitrary Resources
 *	http://www.usenix.org/event/usenix01/bonwick.html
 *
 * todo:
 * -	decide how to import segments for vmem_xalloc.
 * -	don't rely on malloc(9).
 */
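
/*
 * Example usage (illustrative sketch only, not part of the original code;
 * the arena name and parameters below are hypothetical):
 *
 *	vmem_t *arena;
 *	vmem_addr_t va;
 *
 *	arena = vmem_create("example", 0x1000, 0x10000, 1,
 *	    NULL, NULL, NULL, 0, VM_SLEEP, IPL_NONE);
 *	va = vmem_alloc(arena, 64, VM_SLEEP | VM_INSTANTFIT);
 *	if (va != VMEM_ADDR_NULL) {
 *		...
 *		vmem_free(arena, va, 64);
 *	}
 *	vmem_destroy(arena);
 */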

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_vmem.c,v 1.58 2010/12/17 22:24:11 yamt Exp $");

#if defined(_KERNEL)
#include "opt_ddb.h"
#define	QCACHE
#endif /* defined(_KERNEL) */

#include <sys/param.h>
#include <sys/hash.h>
#include <sys/queue.h>

#if defined(_KERNEL)
#include <sys/systm.h>
#include <sys/kernel.h>	/* hz */
#include <sys/callout.h>
#include <sys/malloc.h>
#include <sys/once.h>
#include <sys/pool.h>
#include <sys/vmem.h>
#include <sys/workqueue.h>
#else /* defined(_KERNEL) */
#include "../sys/vmem.h"
#endif /* defined(_KERNEL) */

#if defined(_KERNEL)
#define	LOCK_DECL(name)		\
    kmutex_t name; char lockpad[COHERENCY_UNIT - sizeof(kmutex_t)]
#else /* defined(_KERNEL) */
#include <errno.h>
#include <assert.h>
#include <stdlib.h>

#define	UNITTEST
#define	KASSERT(a)		assert(a)
#define	LOCK_DECL(name)		/* nothing */
#define	mutex_init(a, b, c)	/* nothing */
#define	mutex_destroy(a)	/* nothing */
#define	mutex_enter(a)		/* nothing */
#define	mutex_tryenter(a)	true
#define	mutex_exit(a)		/* nothing */
#define	mutex_owned(a)		/* nothing */
#define	ASSERT_SLEEPABLE()	/* nothing */
#define	panic(...)		printf(__VA_ARGS__); abort()
#endif /* defined(_KERNEL) */

struct vmem;
struct vmem_btag;

#if defined(VMEM_SANITY)
static void vmem_check(vmem_t *);
#else /* defined(VMEM_SANITY) */
#define	vmem_check(vm)	/* nothing */
#endif /* defined(VMEM_SANITY) */

#define	VMEM_MAXORDER		(sizeof(vmem_size_t) * CHAR_BIT)

#define	VMEM_HASHSIZE_MIN	1	/* XXX */
#define	VMEM_HASHSIZE_MAX	65536	/* XXX */
#define	VMEM_HASHSIZE_INIT	128

#define	VM_FITMASK	(VM_BESTFIT | VM_INSTANTFIT)

CIRCLEQ_HEAD(vmem_seglist, vmem_btag);
LIST_HEAD(vmem_freelist, vmem_btag);
LIST_HEAD(vmem_hashlist, vmem_btag);

#if defined(QCACHE)
#define	VMEM_QCACHE_IDX_MAX	32

#define	QC_NAME_MAX	16

struct qcache {
	pool_cache_t qc_cache;
	vmem_t *qc_vmem;
	char qc_name[QC_NAME_MAX];
};
typedef struct qcache qcache_t;
#define	QC_POOL_TO_QCACHE(pool)	((qcache_t *)(pool->pr_qcache))
#endif /* defined(QCACHE) */

/* vmem arena */
struct vmem {
	LOCK_DECL(vm_lock);
	vmem_addr_t (*vm_allocfn)(vmem_t *, vmem_size_t, vmem_size_t *,
	    vm_flag_t);
	void (*vm_freefn)(vmem_t *, vmem_addr_t, vmem_size_t);
	vmem_t *vm_source;
	struct vmem_seglist vm_seglist;
	struct vmem_freelist vm_freelist[VMEM_MAXORDER];
	size_t vm_hashsize;
	size_t vm_nbusytag;
	struct vmem_hashlist *vm_hashlist;
	size_t vm_quantum_mask;
	int vm_quantum_shift;
	const char *vm_name;
	LIST_ENTRY(vmem) vm_alllist;

#if defined(QCACHE)
	/* quantum cache */
	size_t vm_qcache_max;
	struct pool_allocator vm_qcache_allocator;
	qcache_t vm_qcache_store[VMEM_QCACHE_IDX_MAX];
	qcache_t *vm_qcache[VMEM_QCACHE_IDX_MAX];
#endif /* defined(QCACHE) */
};

#define	VMEM_LOCK(vm)		mutex_enter(&vm->vm_lock)
#define	VMEM_TRYLOCK(vm)	mutex_tryenter(&vm->vm_lock)
#define	VMEM_UNLOCK(vm)		mutex_exit(&vm->vm_lock)
#define	VMEM_LOCK_INIT(vm, ipl)	mutex_init(&vm->vm_lock, MUTEX_DEFAULT, ipl)
#define	VMEM_LOCK_DESTROY(vm)	mutex_destroy(&vm->vm_lock)
#define	VMEM_ASSERT_LOCKED(vm)	KASSERT(mutex_owned(&vm->vm_lock))

/* boundary tag */
struct vmem_btag {
	CIRCLEQ_ENTRY(vmem_btag) bt_seglist;
	union {
		LIST_ENTRY(vmem_btag) u_freelist; /* BT_TYPE_FREE */
		LIST_ENTRY(vmem_btag) u_hashlist; /* BT_TYPE_BUSY */
	} bt_u;
#define	bt_hashlist	bt_u.u_hashlist
#define	bt_freelist	bt_u.u_freelist
	vmem_addr_t bt_start;
	vmem_size_t bt_size;
	int bt_type;
};

#define	BT_TYPE_SPAN		1
#define	BT_TYPE_SPAN_STATIC	2
#define	BT_TYPE_FREE		3
#define	BT_TYPE_BUSY		4
#define	BT_ISSPAN_P(bt)	((bt)->bt_type <= BT_TYPE_SPAN_STATIC)

#define	BT_END(bt)	((bt)->bt_start + (bt)->bt_size)

typedef struct vmem_btag bt_t;

/* ---- misc */

#define	VMEM_ALIGNUP(addr, align) \
	(-(-(addr) & -(align)))
#define	VMEM_CROSS_P(addr1, addr2, boundary) \
	((((addr1) ^ (addr2)) & -(boundary)) != 0)

#define	ORDER2SIZE(order)	((vmem_size_t)1 << (order))

static int
calc_order(vmem_size_t size)
{
	vmem_size_t target;
	int i;

	KASSERT(size != 0);

	i = 0;
	target = size >> 1;
	while (ORDER2SIZE(i) <= target) {
		i++;
	}

	KASSERT(ORDER2SIZE(i) <= size);
	KASSERT(size < ORDER2SIZE(i + 1) || ORDER2SIZE(i + 1) < ORDER2SIZE(i));

	return i;
}
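
/*
 * For example (values implied by the loop above): calc_order(1) == 0,
 * calc_order(2) == calc_order(3) == 1, calc_order(4) == 2 and
 * calc_order(15) == 3; i.e. calc_order(size) is floor(log2(size)).
 */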

#if defined(_KERNEL)
static MALLOC_DEFINE(M_VMEM, "vmem", "vmem");
#endif /* defined(_KERNEL) */

static void *
xmalloc(size_t sz, vm_flag_t flags)
{

#if defined(_KERNEL)
	return malloc(sz, M_VMEM,
	    M_CANFAIL | ((flags & VM_SLEEP) ? M_WAITOK : M_NOWAIT));
#else /* defined(_KERNEL) */
	return malloc(sz);
#endif /* defined(_KERNEL) */
}

static void
xfree(void *p)
{

#if defined(_KERNEL)
	return free(p, M_VMEM);
#else /* defined(_KERNEL) */
	return free(p);
#endif /* defined(_KERNEL) */
}

/* ---- boundary tag */

#if defined(_KERNEL)
static struct pool_cache bt_cache;
#endif /* defined(_KERNEL) */

static bt_t *
bt_alloc(vmem_t *vm, vm_flag_t flags)
{
	bt_t *bt;

#if defined(_KERNEL)
	bt = pool_cache_get(&bt_cache,
	    (flags & VM_SLEEP) != 0 ? PR_WAITOK : PR_NOWAIT);
#else /* defined(_KERNEL) */
	bt = malloc(sizeof *bt);
#endif /* defined(_KERNEL) */

	return bt;
}

static void
bt_free(vmem_t *vm, bt_t *bt)
{

#if defined(_KERNEL)
	pool_cache_put(&bt_cache, bt);
#else /* defined(_KERNEL) */
	free(bt);
#endif /* defined(_KERNEL) */
}

/*
 * freelist[0] ... [1, 1]
 * freelist[1] ... [2, 3]
 * freelist[2] ... [4, 7]
 * freelist[3] ... [8, 15]
 *  :
 * freelist[n] ... [(1 << n), (1 << (n + 1)) - 1]
 *  :
 */

static struct vmem_freelist *
bt_freehead_tofree(vmem_t *vm, vmem_size_t size)
{
	const vmem_size_t qsize = size >> vm->vm_quantum_shift;
	int idx;

	KASSERT((size & vm->vm_quantum_mask) == 0);
	KASSERT(size != 0);

	idx = calc_order(qsize);
	KASSERT(idx >= 0);
	KASSERT(idx < VMEM_MAXORDER);

	return &vm->vm_freelist[idx];
}

static struct vmem_freelist *
bt_freehead_toalloc(vmem_t *vm, vmem_size_t size, vm_flag_t strat)
{
	const vmem_size_t qsize = size >> vm->vm_quantum_shift;
	int idx;

	KASSERT((size & vm->vm_quantum_mask) == 0);
	KASSERT(size != 0);

	idx = calc_order(qsize);
	if (strat == VM_INSTANTFIT && ORDER2SIZE(idx) != qsize) {
		idx++;
		/* check too large request? */
	}
	KASSERT(idx >= 0);
	KASSERT(idx < VMEM_MAXORDER);

	return &vm->vm_freelist[idx];
}
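
/*
 * Worked example (derived from the two functions above): a free segment of
 * 24 quantums goes onto freelist[4] (sizes 16-31).  An allocation of
 * 24 quantums with VM_BESTFIT starts scanning at freelist[4], where
 * individual entries may still be too small; with VM_INSTANTFIT the index
 * is rounded up to freelist[5] (sizes 32-63), where every entry is
 * guaranteed to be large enough, so the first one can be taken.
 */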

/* ---- boundary tag hash */

static struct vmem_hashlist *
bt_hashhead(vmem_t *vm, vmem_addr_t addr)
{
	struct vmem_hashlist *list;
	unsigned int hash;

	hash = hash32_buf(&addr, sizeof(addr), HASH32_BUF_INIT);
	list = &vm->vm_hashlist[hash % vm->vm_hashsize];

	return list;
}

static bt_t *
bt_lookupbusy(vmem_t *vm, vmem_addr_t addr)
{
	struct vmem_hashlist *list;
	bt_t *bt;

	list = bt_hashhead(vm, addr);
	LIST_FOREACH(bt, list, bt_hashlist) {
		if (bt->bt_start == addr) {
			break;
		}
	}

	return bt;
}

static void
bt_rembusy(vmem_t *vm, bt_t *bt)
{

	KASSERT(vm->vm_nbusytag > 0);
	vm->vm_nbusytag--;
	LIST_REMOVE(bt, bt_hashlist);
}

static void
bt_insbusy(vmem_t *vm, bt_t *bt)
{
	struct vmem_hashlist *list;

	KASSERT(bt->bt_type == BT_TYPE_BUSY);

	list = bt_hashhead(vm, bt->bt_start);
	LIST_INSERT_HEAD(list, bt, bt_hashlist);
	vm->vm_nbusytag++;
}

/* ---- boundary tag list */

static void
bt_remseg(vmem_t *vm, bt_t *bt)
{

	CIRCLEQ_REMOVE(&vm->vm_seglist, bt, bt_seglist);
}

static void
bt_insseg(vmem_t *vm, bt_t *bt, bt_t *prev)
{

	CIRCLEQ_INSERT_AFTER(&vm->vm_seglist, prev, bt, bt_seglist);
}

static void
bt_insseg_tail(vmem_t *vm, bt_t *bt)
{

	CIRCLEQ_INSERT_TAIL(&vm->vm_seglist, bt, bt_seglist);
}

static void
bt_remfree(vmem_t *vm, bt_t *bt)
{

	KASSERT(bt->bt_type == BT_TYPE_FREE);

	LIST_REMOVE(bt, bt_freelist);
}

static void
bt_insfree(vmem_t *vm, bt_t *bt)
{
	struct vmem_freelist *list;

	list = bt_freehead_tofree(vm, bt->bt_size);
	LIST_INSERT_HEAD(list, bt, bt_freelist);
}

/* ---- vmem internal functions */

#if defined(_KERNEL)
static kmutex_t vmem_list_lock;
static LIST_HEAD(, vmem) vmem_list = LIST_HEAD_INITIALIZER(vmem_list);
#endif /* defined(_KERNEL) */

#if defined(QCACHE)
static inline vm_flag_t
prf_to_vmf(int prflags)
{
	vm_flag_t vmflags;

	KASSERT((prflags & ~(PR_LIMITFAIL | PR_WAITOK | PR_NOWAIT)) == 0);
	if ((prflags & PR_WAITOK) != 0) {
		vmflags = VM_SLEEP;
	} else {
		vmflags = VM_NOSLEEP;
	}
	return vmflags;
}

static inline int
vmf_to_prf(vm_flag_t vmflags)
{
	int prflags;

	if ((vmflags & VM_SLEEP) != 0) {
		prflags = PR_WAITOK;
	} else {
		prflags = PR_NOWAIT;
	}
	return prflags;
}

static size_t
qc_poolpage_size(size_t qcache_max)
{
	int i;

	for (i = 0; ORDER2SIZE(i) <= qcache_max * 3; i++) {
		/* nothing */
	}
	return ORDER2SIZE(i);
}
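
/*
 * qc_poolpage_size() returns the smallest power of two strictly greater
 * than qcache_max * 3, so each backing pool page holds at least three
 * objects of the largest cached size; e.g. qcache_max == 8192 gives a
 * 32768-byte pool page.
 */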

static void *
qc_poolpage_alloc(struct pool *pool, int prflags)
{
	qcache_t *qc = QC_POOL_TO_QCACHE(pool);
	vmem_t *vm = qc->qc_vmem;

	return (void *)vmem_alloc(vm, pool->pr_alloc->pa_pagesz,
	    prf_to_vmf(prflags) | VM_INSTANTFIT);
}

static void
qc_poolpage_free(struct pool *pool, void *addr)
{
	qcache_t *qc = QC_POOL_TO_QCACHE(pool);
	vmem_t *vm = qc->qc_vmem;

	vmem_free(vm, (vmem_addr_t)addr, pool->pr_alloc->pa_pagesz);
}

static void
qc_init(vmem_t *vm, size_t qcache_max, int ipl)
{
	qcache_t *prevqc;
	struct pool_allocator *pa;
	int qcache_idx_max;
	int i;

	KASSERT((qcache_max & vm->vm_quantum_mask) == 0);
	if (qcache_max > (VMEM_QCACHE_IDX_MAX << vm->vm_quantum_shift)) {
		qcache_max = VMEM_QCACHE_IDX_MAX << vm->vm_quantum_shift;
	}
	vm->vm_qcache_max = qcache_max;
	pa = &vm->vm_qcache_allocator;
	memset(pa, 0, sizeof(*pa));
	pa->pa_alloc = qc_poolpage_alloc;
	pa->pa_free = qc_poolpage_free;
	pa->pa_pagesz = qc_poolpage_size(qcache_max);

	qcache_idx_max = qcache_max >> vm->vm_quantum_shift;
	prevqc = NULL;
	for (i = qcache_idx_max; i > 0; i--) {
		qcache_t *qc = &vm->vm_qcache_store[i - 1];
		size_t size = i << vm->vm_quantum_shift;

		qc->qc_vmem = vm;
		snprintf(qc->qc_name, sizeof(qc->qc_name), "%s-%zu",
		    vm->vm_name, size);
		qc->qc_cache = pool_cache_init(size,
		    ORDER2SIZE(vm->vm_quantum_shift), 0,
		    PR_NOALIGN | PR_NOTOUCH /* XXX */,
		    qc->qc_name, pa, ipl, NULL, NULL, NULL);
		KASSERT(qc->qc_cache != NULL);	/* XXX */
		if (prevqc != NULL &&
		    qc->qc_cache->pc_pool.pr_itemsperpage ==
		    prevqc->qc_cache->pc_pool.pr_itemsperpage) {
			pool_cache_destroy(qc->qc_cache);
			vm->vm_qcache[i - 1] = prevqc;
			continue;
		}
		qc->qc_cache->pc_pool.pr_qcache = qc;
		vm->vm_qcache[i - 1] = qc;
		prevqc = qc;
	}
}

static void
qc_destroy(vmem_t *vm)
{
	const qcache_t *prevqc;
	int i;
	int qcache_idx_max;

	qcache_idx_max = vm->vm_qcache_max >> vm->vm_quantum_shift;
	prevqc = NULL;
	for (i = 0; i < qcache_idx_max; i++) {
		qcache_t *qc = vm->vm_qcache[i];

		if (prevqc == qc) {
			continue;
		}
		pool_cache_destroy(qc->qc_cache);
		prevqc = qc;
	}
}

static bool
qc_reap(vmem_t *vm)
{
	const qcache_t *prevqc;
	int i;
	int qcache_idx_max;
	bool didsomething = false;

	qcache_idx_max = vm->vm_qcache_max >> vm->vm_quantum_shift;
	prevqc = NULL;
	for (i = 0; i < qcache_idx_max; i++) {
		qcache_t *qc = vm->vm_qcache[i];

		if (prevqc == qc) {
			continue;
		}
		if (pool_cache_reclaim(qc->qc_cache) != 0) {
			didsomething = true;
		}
		prevqc = qc;
	}

	return didsomething;
}
#endif /* defined(QCACHE) */

#if defined(_KERNEL)
static int
vmem_init(void)
{

	mutex_init(&vmem_list_lock, MUTEX_DEFAULT, IPL_NONE);
	pool_cache_bootstrap(&bt_cache, sizeof(bt_t), 0, 0, 0, "vmembt",
	    NULL, IPL_VM, NULL, NULL, NULL);
	return 0;
}
#endif /* defined(_KERNEL) */

static vmem_addr_t
vmem_add1(vmem_t *vm, vmem_addr_t addr, vmem_size_t size, vm_flag_t flags,
    int spanbttype)
{
	bt_t *btspan;
	bt_t *btfree;

	KASSERT((flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
	KASSERT((~flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
	KASSERT(spanbttype == BT_TYPE_SPAN ||
	    spanbttype == BT_TYPE_SPAN_STATIC);

	btspan = bt_alloc(vm, flags);
	if (btspan == NULL) {
		return VMEM_ADDR_NULL;
	}
	btfree = bt_alloc(vm, flags);
	if (btfree == NULL) {
		bt_free(vm, btspan);
		return VMEM_ADDR_NULL;
	}

	btspan->bt_type = spanbttype;
	btspan->bt_start = addr;
	btspan->bt_size = size;

	btfree->bt_type = BT_TYPE_FREE;
	btfree->bt_start = addr;
	btfree->bt_size = size;

	VMEM_LOCK(vm);
	bt_insseg_tail(vm, btspan);
	bt_insseg(vm, btfree, btspan);
	bt_insfree(vm, btfree);
	VMEM_UNLOCK(vm);

	return addr;
}
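
/*
 * Illustration of the tag layout built by vmem_add1(): adding the span
 * [0x1000, 0x5000) creates two boundary tags covering the same range,
 * a span tag (BT_TYPE_SPAN or BT_TYPE_SPAN_STATIC) recording the span
 * itself, immediately followed on the segment list by a BT_TYPE_FREE
 * tag which is also hooked onto the appropriate freelist:
 *
 *	seglist:  [SPAN 0x1000 +0x4000] -> [FREE 0x1000 +0x4000]
 */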

static void
vmem_destroy1(vmem_t *vm)
{

#if defined(QCACHE)
	qc_destroy(vm);
#endif /* defined(QCACHE) */
	if (vm->vm_hashlist != NULL) {
		int i;

		for (i = 0; i < vm->vm_hashsize; i++) {
			bt_t *bt;

			while ((bt = LIST_FIRST(&vm->vm_hashlist[i])) != NULL) {
				KASSERT(bt->bt_type == BT_TYPE_SPAN_STATIC);
				bt_free(vm, bt);
			}
		}
		xfree(vm->vm_hashlist);
	}
	VMEM_LOCK_DESTROY(vm);
	xfree(vm);
}

static int
vmem_import(vmem_t *vm, vmem_size_t size, vm_flag_t flags)
{
	vmem_addr_t addr;

	if (vm->vm_allocfn == NULL) {
		return EINVAL;
	}

	addr = (*vm->vm_allocfn)(vm->vm_source, size, &size, flags);
	if (addr == VMEM_ADDR_NULL) {
		return ENOMEM;
	}

	if (vmem_add1(vm, addr, size, flags, BT_TYPE_SPAN) == VMEM_ADDR_NULL) {
		(*vm->vm_freefn)(vm->vm_source, addr, size);
		return ENOMEM;
	}

	return 0;
}

static int
vmem_rehash(vmem_t *vm, size_t newhashsize, vm_flag_t flags)
{
	bt_t *bt;
	int i;
	struct vmem_hashlist *newhashlist;
	struct vmem_hashlist *oldhashlist;
	size_t oldhashsize;

	KASSERT(newhashsize > 0);

	newhashlist =
	    xmalloc(sizeof(struct vmem_hashlist *) * newhashsize, flags);
	if (newhashlist == NULL) {
		return ENOMEM;
	}
	for (i = 0; i < newhashsize; i++) {
		LIST_INIT(&newhashlist[i]);
	}

	if (!VMEM_TRYLOCK(vm)) {
		xfree(newhashlist);
		return EBUSY;
	}
	oldhashlist = vm->vm_hashlist;
	oldhashsize = vm->vm_hashsize;
	vm->vm_hashlist = newhashlist;
	vm->vm_hashsize = newhashsize;
	if (oldhashlist == NULL) {
		VMEM_UNLOCK(vm);
		return 0;
	}
	for (i = 0; i < oldhashsize; i++) {
		while ((bt = LIST_FIRST(&oldhashlist[i])) != NULL) {
			bt_rembusy(vm, bt); /* XXX */
			bt_insbusy(vm, bt);
		}
	}
	VMEM_UNLOCK(vm);

	xfree(oldhashlist);

	return 0;
}

/*
 * vmem_fit: check if a bt can satisfy the given restrictions.
 */

static vmem_addr_t
vmem_fit(const bt_t *bt, vmem_size_t size, vmem_size_t align, vmem_size_t phase,
    vmem_size_t nocross, vmem_addr_t minaddr, vmem_addr_t maxaddr)
{
	vmem_addr_t start;
	vmem_addr_t end;

	KASSERT(bt->bt_size >= size);

	/*
	 * XXX assumption: vmem_addr_t and vmem_size_t are
	 * unsigned integers of the same size.
	 */

	start = bt->bt_start;
	if (start < minaddr) {
		start = minaddr;
	}
	end = BT_END(bt);
	if (end > maxaddr - 1) {
		end = maxaddr - 1;
	}
	if (start >= end) {
		return VMEM_ADDR_NULL;
	}

	start = VMEM_ALIGNUP(start - phase, align) + phase;
	if (start < bt->bt_start) {
		start += align;
	}
	if (VMEM_CROSS_P(start, start + size - 1, nocross)) {
		KASSERT(align < nocross);
		start = VMEM_ALIGNUP(start - phase, nocross) + phase;
	}
	if (start < end && end - start >= size) {
		KASSERT((start & (align - 1)) == phase);
		KASSERT(!VMEM_CROSS_P(start, start + size - 1, nocross));
		KASSERT(minaddr <= start);
		KASSERT(maxaddr == 0 || start + size <= maxaddr);
		KASSERT(bt->bt_start <= start);
		KASSERT(start + size <= BT_END(bt));
		return start;
	}
	return VMEM_ADDR_NULL;
}
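
/*
 * Worked example for the arithmetic above (illustrative numbers): with
 * align = 0x10 and phase = 0x4, a candidate start of 0x103 becomes
 * VMEM_ALIGNUP(0x103 - 0x4, 0x10) + 0x4 == 0x100 + 0x4 == 0x104, the first
 * address >= 0x103 that is congruent to phase modulo align.  With
 * nocross = 0x1000, a candidate range [0xff0, 0x100f] has VMEM_CROSS_P()
 * true (it straddles a 0x1000 boundary), so the start is bumped to the
 * next nocross boundary plus phase.
 */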

/* ---- vmem API */

/*
 * vmem_create: create an arena.
 *
 * => must not be called from interrupt context.
 */

vmem_t *
vmem_create(const char *name, vmem_addr_t base, vmem_size_t size,
    vmem_size_t quantum,
    vmem_addr_t (*allocfn)(vmem_t *, vmem_size_t, vmem_size_t *, vm_flag_t),
    void (*freefn)(vmem_t *, vmem_addr_t, vmem_size_t),
    vmem_t *source, vmem_size_t qcache_max, vm_flag_t flags,
    int ipl)
{
	vmem_t *vm;
	int i;
#if defined(_KERNEL)
	static ONCE_DECL(control);
#endif /* defined(_KERNEL) */

	KASSERT((flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
	KASSERT((~flags & (VM_SLEEP|VM_NOSLEEP)) != 0);

#if defined(_KERNEL)
	if (RUN_ONCE(&control, vmem_init)) {
		return NULL;
	}
#endif /* defined(_KERNEL) */
	vm = xmalloc(sizeof(*vm), flags);
	if (vm == NULL) {
		return NULL;
	}

	VMEM_LOCK_INIT(vm, ipl);
	vm->vm_name = name;
	vm->vm_quantum_mask = quantum - 1;
	vm->vm_quantum_shift = calc_order(quantum);
	KASSERT(ORDER2SIZE(vm->vm_quantum_shift) == quantum);
	vm->vm_allocfn = allocfn;
	vm->vm_freefn = freefn;
	vm->vm_source = source;
	vm->vm_nbusytag = 0;
#if defined(QCACHE)
	qc_init(vm, qcache_max, ipl);
#endif /* defined(QCACHE) */

	CIRCLEQ_INIT(&vm->vm_seglist);
	for (i = 0; i < VMEM_MAXORDER; i++) {
		LIST_INIT(&vm->vm_freelist[i]);
	}
	vm->vm_hashlist = NULL;
	if (vmem_rehash(vm, VMEM_HASHSIZE_INIT, flags)) {
		vmem_destroy1(vm);
		return NULL;
	}

	if (size != 0) {
		if (vmem_add(vm, base, size, flags) == 0) {
			vmem_destroy1(vm);
			return NULL;
		}
	}

#if defined(_KERNEL)
	mutex_enter(&vmem_list_lock);
	LIST_INSERT_HEAD(&vmem_list, vm, vm_alllist);
	mutex_exit(&vmem_list_lock);
#endif /* defined(_KERNEL) */

	return vm;
}

void
vmem_destroy(vmem_t *vm)
{

#if defined(_KERNEL)
	mutex_enter(&vmem_list_lock);
	LIST_REMOVE(vm, vm_alllist);
	mutex_exit(&vmem_list_lock);
#endif /* defined(_KERNEL) */

	vmem_destroy1(vm);
}
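
/*
 * Illustrative sketch (not part of the original code): an arena can be
 * stacked on top of another one by passing allocfn/freefn hooks which
 * import spans from the source arena on demand, as vmem_import() above
 * expects.  The names below ("example_import", "example_release",
 * "parent_arena") are hypothetical.
 *
 *	subarena = vmem_create("example-sub", 0, 0, PAGE_SIZE,
 *	    example_import, example_release, parent_arena, 0,
 *	    VM_SLEEP, IPL_NONE);
 */
#if 0	/* example only */
static vmem_addr_t
example_import(vmem_t *parent, vmem_size_t size, vmem_size_t *resultsize,
    vm_flag_t flags)
{

	/* Import whole quantums from the parent; report the actual size. */
	*resultsize = vmem_roundup_size(parent, size);
	return vmem_alloc(parent, *resultsize,
	    (flags & (VM_SLEEP | VM_NOSLEEP)) | VM_INSTANTFIT);
}

static void
example_release(vmem_t *parent, vmem_addr_t addr, vmem_size_t size)
{

	vmem_free(parent, addr, size);
}
#endif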

vmem_size_t
vmem_roundup_size(vmem_t *vm, vmem_size_t size)
{

	return (size + vm->vm_quantum_mask) & ~vm->vm_quantum_mask;
}

/*
 * vmem_alloc:
 *
 * => caller must ensure appropriate spl,
 *    if the arena can be accessed from interrupt context.
 */

vmem_addr_t
vmem_alloc(vmem_t *vm, vmem_size_t size, vm_flag_t flags)
{
	const vm_flag_t strat __unused = flags & VM_FITMASK;

	KASSERT((flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
	KASSERT((~flags & (VM_SLEEP|VM_NOSLEEP)) != 0);

	KASSERT(size > 0);
	KASSERT(strat == VM_BESTFIT || strat == VM_INSTANTFIT);
	if ((flags & VM_SLEEP) != 0) {
		ASSERT_SLEEPABLE();
	}

#if defined(QCACHE)
	if (size <= vm->vm_qcache_max) {
		int qidx = (size + vm->vm_quantum_mask) >> vm->vm_quantum_shift;
		qcache_t *qc = vm->vm_qcache[qidx - 1];

		return (vmem_addr_t)pool_cache_get(qc->qc_cache,
		    vmf_to_prf(flags));
	}
#endif /* defined(QCACHE) */

	return vmem_xalloc(vm, size, 0, 0, 0, 0, 0, flags);
}
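
/*
 * Example of a constrained allocation with vmem_xalloc() below
 * (illustrative only; assumes the arena quantum divides the values used):
 * to obtain a 64KB region that is 64KB-aligned and does not cross a 1MB
 * boundary:
 *
 *	addr = vmem_xalloc(vm, 64 * 1024, 64 * 1024, 0, 1024 * 1024,
 *	    0, 0, VM_SLEEP | VM_INSTANTFIT);
 *
 * Note that vmem_alloc() may serve small requests from the quantum cache,
 * while vmem_xalloc() always works on the arena itself; addresses obtained
 * from vmem_xalloc() should therefore be released with vmem_xfree().
 */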

vmem_addr_t
vmem_xalloc(vmem_t *vm, vmem_size_t size0, vmem_size_t align, vmem_size_t phase,
    vmem_size_t nocross, vmem_addr_t minaddr, vmem_addr_t maxaddr,
    vm_flag_t flags)
{
	struct vmem_freelist *list;
	struct vmem_freelist *first;
	struct vmem_freelist *end;
	bt_t *bt;
	bt_t *btnew;
	bt_t *btnew2;
	const vmem_size_t size = vmem_roundup_size(vm, size0);
	vm_flag_t strat = flags & VM_FITMASK;
	vmem_addr_t start;

	KASSERT(size0 > 0);
	KASSERT(size > 0);
	KASSERT(strat == VM_BESTFIT || strat == VM_INSTANTFIT);
	if ((flags & VM_SLEEP) != 0) {
		ASSERT_SLEEPABLE();
	}
	KASSERT((align & vm->vm_quantum_mask) == 0);
	KASSERT((align & (align - 1)) == 0);
	KASSERT((phase & vm->vm_quantum_mask) == 0);
	KASSERT((nocross & vm->vm_quantum_mask) == 0);
	KASSERT((nocross & (nocross - 1)) == 0);
	KASSERT((align == 0 && phase == 0) || phase < align);
	KASSERT(nocross == 0 || nocross >= size);
	KASSERT(maxaddr == 0 || minaddr < maxaddr);
	KASSERT(!VMEM_CROSS_P(phase, phase + size - 1, nocross));

	if (align == 0) {
		align = vm->vm_quantum_mask + 1;
	}
	btnew = bt_alloc(vm, flags);
	if (btnew == NULL) {
		return VMEM_ADDR_NULL;
	}
	btnew2 = bt_alloc(vm, flags); /* XXX not necessary if no restrictions */
	if (btnew2 == NULL) {
		bt_free(vm, btnew);
		return VMEM_ADDR_NULL;
	}

retry_strat:
	first = bt_freehead_toalloc(vm, size, strat);
	end = &vm->vm_freelist[VMEM_MAXORDER];
retry:
	bt = NULL;
	VMEM_LOCK(vm);
	vmem_check(vm);
	if (strat == VM_INSTANTFIT) {
		for (list = first; list < end; list++) {
			bt = LIST_FIRST(list);
			if (bt != NULL) {
				start = vmem_fit(bt, size, align, phase,
				    nocross, minaddr, maxaddr);
				if (start != VMEM_ADDR_NULL) {
					goto gotit;
				}
			}
		}
	} else { /* VM_BESTFIT */
		for (list = first; list < end; list++) {
			LIST_FOREACH(bt, list, bt_freelist) {
				if (bt->bt_size >= size) {
					start = vmem_fit(bt, size, align, phase,
					    nocross, minaddr, maxaddr);
					if (start != VMEM_ADDR_NULL) {
						goto gotit;
					}
				}
			}
		}
	}
	VMEM_UNLOCK(vm);
#if 1
	if (strat == VM_INSTANTFIT) {
		strat = VM_BESTFIT;
		goto retry_strat;
	}
#endif
	if (align != vm->vm_quantum_mask + 1 || phase != 0 ||
	    nocross != 0 || minaddr != 0 || maxaddr != 0) {

		/*
		 * XXX should try to import a region large enough to
		 * satisfy restrictions?
		 */

		goto fail;
	}
	if (vmem_import(vm, size, flags) == 0) {
		goto retry;
	}
	/* XXX */
fail:
	bt_free(vm, btnew);
	bt_free(vm, btnew2);
	return VMEM_ADDR_NULL;

gotit:
	KASSERT(bt->bt_type == BT_TYPE_FREE);
	KASSERT(bt->bt_size >= size);
	bt_remfree(vm, bt);
	vmem_check(vm);
	if (bt->bt_start != start) {
		btnew2->bt_type = BT_TYPE_FREE;
		btnew2->bt_start = bt->bt_start;
		btnew2->bt_size = start - bt->bt_start;
		bt->bt_start = start;
		bt->bt_size -= btnew2->bt_size;
		bt_insfree(vm, btnew2);
		bt_insseg(vm, btnew2, CIRCLEQ_PREV(bt, bt_seglist));
		btnew2 = NULL;
		vmem_check(vm);
	}
	KASSERT(bt->bt_start == start);
	if (bt->bt_size != size && bt->bt_size - size > vm->vm_quantum_mask) {
		/* split */
		btnew->bt_type = BT_TYPE_BUSY;
		btnew->bt_start = bt->bt_start;
		btnew->bt_size = size;
		bt->bt_start = bt->bt_start + size;
		bt->bt_size -= size;
		bt_insfree(vm, bt);
		bt_insseg(vm, btnew, CIRCLEQ_PREV(bt, bt_seglist));
		bt_insbusy(vm, btnew);
		vmem_check(vm);
		VMEM_UNLOCK(vm);
	} else {
		bt->bt_type = BT_TYPE_BUSY;
		bt_insbusy(vm, bt);
		vmem_check(vm);
		VMEM_UNLOCK(vm);
		bt_free(vm, btnew);
		btnew = bt;
	}
	if (btnew2 != NULL) {
		bt_free(vm, btnew2);
	}
	KASSERT(btnew->bt_size >= size);
	btnew->bt_type = BT_TYPE_BUSY;

	return btnew->bt_start;
}

/*
 * vmem_free:
 *
 * => caller must ensure appropriate spl,
 *    if the arena can be accessed from interrupt context.
 */

void
vmem_free(vmem_t *vm, vmem_addr_t addr, vmem_size_t size)
{

	KASSERT(addr != VMEM_ADDR_NULL);
	KASSERT(size > 0);

#if defined(QCACHE)
	if (size <= vm->vm_qcache_max) {
		int qidx = (size + vm->vm_quantum_mask) >> vm->vm_quantum_shift;
		qcache_t *qc = vm->vm_qcache[qidx - 1];

		return pool_cache_put(qc->qc_cache, (void *)addr);
	}
#endif /* defined(QCACHE) */

	vmem_xfree(vm, addr, size);
}
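
/*
 * On free, vmem_xfree() below coalesces the released segment with free
 * neighbours on the segment list; for example, releasing [0x2000, 0x3000)
 * while [0x1000, 0x2000) and [0x3000, 0x4000) are already free leaves a
 * single BT_TYPE_FREE tag covering [0x1000, 0x4000).  If the merged
 * segment ends up covering an entire imported (BT_TYPE_SPAN) span, the
 * span is handed back to the source arena via vm_freefn.
 */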

void
vmem_xfree(vmem_t *vm, vmem_addr_t addr, vmem_size_t size)
{
	bt_t *bt;
	bt_t *t;

	KASSERT(addr != VMEM_ADDR_NULL);
	KASSERT(size > 0);

	VMEM_LOCK(vm);

	bt = bt_lookupbusy(vm, addr);
	KASSERT(bt != NULL);
	KASSERT(bt->bt_start == addr);
	KASSERT(bt->bt_size == vmem_roundup_size(vm, size) ||
	    bt->bt_size - vmem_roundup_size(vm, size) <= vm->vm_quantum_mask);
	KASSERT(bt->bt_type == BT_TYPE_BUSY);
	bt_rembusy(vm, bt);
	bt->bt_type = BT_TYPE_FREE;

	/* coalesce */
	t = CIRCLEQ_NEXT(bt, bt_seglist);
	if (t != NULL && t->bt_type == BT_TYPE_FREE) {
		KASSERT(BT_END(bt) == t->bt_start);
		bt_remfree(vm, t);
		bt_remseg(vm, t);
		bt->bt_size += t->bt_size;
		bt_free(vm, t);
	}
	t = CIRCLEQ_PREV(bt, bt_seglist);
	if (t != NULL && t->bt_type == BT_TYPE_FREE) {
		KASSERT(BT_END(t) == bt->bt_start);
		bt_remfree(vm, t);
		bt_remseg(vm, t);
		bt->bt_size += t->bt_size;
		bt->bt_start = t->bt_start;
		bt_free(vm, t);
	}

	t = CIRCLEQ_PREV(bt, bt_seglist);
	KASSERT(t != NULL);
	KASSERT(BT_ISSPAN_P(t) || t->bt_type == BT_TYPE_BUSY);
	if (vm->vm_freefn != NULL && t->bt_type == BT_TYPE_SPAN &&
	    t->bt_size == bt->bt_size) {
		vmem_addr_t spanaddr;
		vmem_size_t spansize;

		KASSERT(t->bt_start == bt->bt_start);
		spanaddr = bt->bt_start;
		spansize = bt->bt_size;
		bt_remseg(vm, bt);
		bt_free(vm, bt);
		bt_remseg(vm, t);
		bt_free(vm, t);
		VMEM_UNLOCK(vm);
		(*vm->vm_freefn)(vm->vm_source, spanaddr, spansize);
	} else {
		bt_insfree(vm, bt);
		VMEM_UNLOCK(vm);
	}
}

/*
 * vmem_add:
 *
 * => caller must ensure appropriate spl,
 *    if the arena can be accessed from interrupt context.
 */

vmem_addr_t
vmem_add(vmem_t *vm, vmem_addr_t addr, vmem_size_t size, vm_flag_t flags)
{

	return vmem_add1(vm, addr, size, flags, BT_TYPE_SPAN_STATIC);
}

/*
 * vmem_reap: reap unused resources.
 *
 * => return true if we successfully reaped something.
 */

bool
vmem_reap(vmem_t *vm)
{
	bool didsomething = false;

#if defined(QCACHE)
	didsomething = qc_reap(vm);
#endif /* defined(QCACHE) */
	return didsomething;
}

/* ---- rehash */

#if defined(_KERNEL)
static struct callout vmem_rehash_ch;
static int vmem_rehash_interval;
static struct workqueue *vmem_rehash_wq;
static struct work vmem_rehash_wk;

static void
vmem_rehash_all(struct work *wk, void *dummy)
{
	vmem_t *vm;

	KASSERT(wk == &vmem_rehash_wk);
	mutex_enter(&vmem_list_lock);
	LIST_FOREACH(vm, &vmem_list, vm_alllist) {
		size_t desired;
		size_t current;

		if (!VMEM_TRYLOCK(vm)) {
			continue;
		}
		desired = vm->vm_nbusytag;
		current = vm->vm_hashsize;
		VMEM_UNLOCK(vm);

		if (desired > VMEM_HASHSIZE_MAX) {
			desired = VMEM_HASHSIZE_MAX;
		} else if (desired < VMEM_HASHSIZE_MIN) {
			desired = VMEM_HASHSIZE_MIN;
		}
		if (desired > current * 2 || desired * 2 < current) {
			vmem_rehash(vm, desired, VM_NOSLEEP);
		}
	}
	mutex_exit(&vmem_list_lock);

	callout_schedule(&vmem_rehash_ch, vmem_rehash_interval);
}

static void
vmem_rehash_all_kick(void *dummy)
{

	workqueue_enqueue(vmem_rehash_wq, &vmem_rehash_wk, NULL);
}

void
vmem_rehash_start(void)
{
	int error;

	error = workqueue_create(&vmem_rehash_wq, "vmem_rehash",
	    vmem_rehash_all, NULL, PRI_VM, IPL_SOFTCLOCK, WQ_MPSAFE);
	if (error) {
		panic("%s: workqueue_create %d\n", __func__, error);
	}
	callout_init(&vmem_rehash_ch, CALLOUT_MPSAFE);
	callout_setfunc(&vmem_rehash_ch, vmem_rehash_all_kick, NULL);

	vmem_rehash_interval = hz * 10;
	callout_schedule(&vmem_rehash_ch, vmem_rehash_interval);
}
#endif /* defined(_KERNEL) */
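
/*
 * Example of the resizing policy above: an arena that has grown to 10000
 * busy tags while still using the initial 128-bucket hash satisfies
 * "desired > current * 2", so the next pass rehashes it to 10000 buckets
 * (clamped to the VMEM_HASHSIZE_MIN/VMEM_HASHSIZE_MAX range).
 */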

/* ---- debug */

#if defined(DDB) || defined(UNITTEST) || defined(VMEM_SANITY)

static void bt_dump(const bt_t *, void (*)(const char *, ...));

static const char *
bt_type_string(int type)
{
	static const char * const table[] = {
		[BT_TYPE_BUSY] = "busy",
		[BT_TYPE_FREE] = "free",
		[BT_TYPE_SPAN] = "span",
		[BT_TYPE_SPAN_STATIC] = "static span",
	};

	if (type >= __arraycount(table)) {
		return "BOGUS";
	}
	return table[type];
}

static void
bt_dump(const bt_t *bt, void (*pr)(const char *, ...))
{

	(*pr)("\t%p: %" PRIu64 ", %" PRIu64 ", %d(%s)\n",
	    bt, (uint64_t)bt->bt_start, (uint64_t)bt->bt_size,
	    bt->bt_type, bt_type_string(bt->bt_type));
}

static void
vmem_dump(const vmem_t *vm, void (*pr)(const char *, ...))
{
	const bt_t *bt;
	int i;

	(*pr)("vmem %p '%s'\n", vm, vm->vm_name);
	CIRCLEQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
		bt_dump(bt, pr);
	}

	for (i = 0; i < VMEM_MAXORDER; i++) {
		const struct vmem_freelist *fl = &vm->vm_freelist[i];

		if (LIST_EMPTY(fl)) {
			continue;
		}

		(*pr)("freelist[%d]\n", i);
		LIST_FOREACH(bt, fl, bt_freelist) {
			bt_dump(bt, pr);
		}
	}
}

#endif /* defined(DDB) || defined(UNITTEST) || defined(VMEM_SANITY) */

#if defined(DDB)
static bt_t *
vmem_whatis_lookup(vmem_t *vm, uintptr_t addr)
{
	bt_t *bt;

	CIRCLEQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
		if (BT_ISSPAN_P(bt)) {
			continue;
		}
		if (bt->bt_start <= addr && addr < BT_END(bt)) {
			return bt;
		}
	}

	return NULL;
}

void
vmem_whatis(uintptr_t addr, void (*pr)(const char *, ...))
{
	vmem_t *vm;

	LIST_FOREACH(vm, &vmem_list, vm_alllist) {
		bt_t *bt;

		bt = vmem_whatis_lookup(vm, addr);
		if (bt == NULL) {
			continue;
		}
		(*pr)("%p is %p+%zu in VMEM '%s' (%s)\n",
		    (void *)addr, (void *)bt->bt_start,
		    (size_t)(addr - bt->bt_start), vm->vm_name,
		    (bt->bt_type == BT_TYPE_BUSY) ? "allocated" : "free");
	}
}

void
vmem_printall(const char *modif, void (*pr)(const char *, ...))
{
	const vmem_t *vm;

	LIST_FOREACH(vm, &vmem_list, vm_alllist) {
		vmem_dump(vm, pr);
	}
}

void
vmem_print(uintptr_t addr, const char *modif, void (*pr)(const char *, ...))
{
	const vmem_t *vm = (const void *)addr;

	vmem_dump(vm, pr);
}
#endif /* defined(DDB) */

#if !defined(_KERNEL)
#include <stdio.h>
#endif /* !defined(_KERNEL) */

#if defined(VMEM_SANITY)

static bool
vmem_check_sanity(vmem_t *vm)
{
	const bt_t *bt, *bt2;

	KASSERT(vm != NULL);

	CIRCLEQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
		if (bt->bt_start >= BT_END(bt)) {
			printf("corrupted tag\n");
			bt_dump(bt, (void *)printf);
			return false;
		}
	}
	CIRCLEQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
		CIRCLEQ_FOREACH(bt2, &vm->vm_seglist, bt_seglist) {
			if (bt == bt2) {
				continue;
			}
			if (BT_ISSPAN_P(bt) != BT_ISSPAN_P(bt2)) {
				continue;
			}
			if (bt->bt_start < BT_END(bt2) &&
			    bt2->bt_start < BT_END(bt)) {
				printf("overlapping tags\n");
				bt_dump(bt, (void *)printf);
				bt_dump(bt2, (void *)printf);
				return false;
			}
		}
	}

	return true;
}

static void
vmem_check(vmem_t *vm)
{

	if (!vmem_check_sanity(vm)) {
		panic("insanity vmem %p", vm);
	}
}

#endif /* defined(VMEM_SANITY) */
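
/*
 * The UNITTEST main() below exercises the allocator in userland; UNITTEST
 * is defined automatically when this file is built outside the kernel,
 * which also requires a vmem.h visible as "../sys/vmem.h".  A build might
 * look something like this (illustrative only, not an official target),
 * optionally with -DVMEM_SANITY to enable the checks above:
 *
 *	cc -g -o vmemtest subr_vmem.c
 */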

#if defined(UNITTEST)
int
main(void)
{
	vmem_t *vm;
	vmem_addr_t p;
	struct reg {
		vmem_addr_t p;
		vmem_size_t sz;
		bool x;
	} *reg = NULL;
	int nreg = 0;
	int nalloc = 0;
	int nfree = 0;
	vmem_size_t total = 0;
#if 1
	vm_flag_t strat = VM_INSTANTFIT;
#else
	vm_flag_t strat = VM_BESTFIT;
#endif

	vm = vmem_create("test", VMEM_ADDR_NULL, 0, 1,
	    NULL, NULL, NULL, 0, VM_SLEEP, 0/*XXX*/);
	if (vm == NULL) {
		printf("vmem_create\n");
		exit(EXIT_FAILURE);
	}
	vmem_dump(vm, (void *)printf);

	p = vmem_add(vm, 100, 200, VM_SLEEP);
	p = vmem_add(vm, 2000, 1, VM_SLEEP);
	p = vmem_add(vm, 40000, 0x10000000>>12, VM_SLEEP);
	p = vmem_add(vm, 10000, 10000, VM_SLEEP);
	p = vmem_add(vm, 500, 1000, VM_SLEEP);
	vmem_dump(vm, (void *)printf);
	for (;;) {
		struct reg *r;
		int t = rand() % 100;

		if (t > 45) {
			/* alloc */
			vmem_size_t sz = rand() % 500 + 1;
			bool x;
			vmem_size_t align, phase, nocross;
			vmem_addr_t minaddr, maxaddr;

			if (t > 70) {
				x = true;
				/* XXX */
				align = 1 << (rand() % 15);
				phase = rand() % 65536;
				nocross = 1 << (rand() % 15);
				if (align <= phase) {
					phase = 0;
				}
				if (VMEM_CROSS_P(phase, phase + sz - 1,
				    nocross)) {
					nocross = 0;
				}
				minaddr = rand() % 50000;
				maxaddr = rand() % 70000;
				if (minaddr > maxaddr) {
					minaddr = 0;
					maxaddr = 0;
				}
				printf("=== xalloc %" PRIu64
				    " align=%" PRIu64 ", phase=%" PRIu64
				    ", nocross=%" PRIu64 ", min=%" PRIu64
				    ", max=%" PRIu64 "\n",
				    (uint64_t)sz,
				    (uint64_t)align,
				    (uint64_t)phase,
				    (uint64_t)nocross,
				    (uint64_t)minaddr,
				    (uint64_t)maxaddr);
				p = vmem_xalloc(vm, sz, align, phase, nocross,
				    minaddr, maxaddr, strat|VM_SLEEP);
			} else {
				x = false;
				printf("=== alloc %" PRIu64 "\n", (uint64_t)sz);
				p = vmem_alloc(vm, sz, strat|VM_SLEEP);
			}
			printf("-> %" PRIu64 "\n", (uint64_t)p);
			vmem_dump(vm, (void *)printf);
			if (p == VMEM_ADDR_NULL) {
				if (x) {
					continue;
				}
				break;
			}
			nreg++;
			reg = realloc(reg, sizeof(*reg) * nreg);
			r = &reg[nreg - 1];
			r->p = p;
			r->sz = sz;
			r->x = x;
			total += sz;
			nalloc++;
		} else if (nreg != 0) {
			/* free */
			r = &reg[rand() % nreg];
			printf("=== free %" PRIu64 ", %" PRIu64 "\n",
			    (uint64_t)r->p, (uint64_t)r->sz);
			if (r->x) {
				vmem_xfree(vm, r->p, r->sz);
			} else {
				vmem_free(vm, r->p, r->sz);
			}
			total -= r->sz;
			vmem_dump(vm, (void *)printf);
			*r = reg[nreg - 1];
			nreg--;
			nfree++;
		}
		printf("total=%" PRIu64 "\n", (uint64_t)total);
	}
	fprintf(stderr, "total=%" PRIu64 ", nalloc=%d, nfree=%d\n",
	    (uint64_t)total, nalloc, nfree);
	exit(EXIT_SUCCESS);
}
#endif /* defined(UNITTEST) */