/*	$NetBSD: subr_vmem.c,v 1.37 2007/12/13 02:45:10 yamt Exp $	*/

/*-
 * Copyright (c)2006 YAMAMOTO Takashi,
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * reference:
 * -	Magazines and Vmem: Extending the Slab Allocator
 *	to Many CPUs and Arbitrary Resources
 *	http://www.usenix.org/event/usenix01/bonwick.html
 *
 * todo:
 * -	decide how to import segments for vmem_xalloc.
 * -	don't rely on malloc(9).
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_vmem.c,v 1.37 2007/12/13 02:45:10 yamt Exp $");

#define	VMEM_DEBUG
#if defined(_KERNEL)
#include "opt_ddb.h"
#define	QCACHE
#endif /* defined(_KERNEL) */

#include <sys/param.h>
#include <sys/hash.h>
#include <sys/queue.h>

#if defined(_KERNEL)
#include <sys/systm.h>
#include <sys/kernel.h>	/* hz */
#include <sys/callout.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/once.h>
#include <sys/pool.h>
#include <sys/proc.h>
#include <sys/vmem.h>
#include <sys/workqueue.h>
#else /* defined(_KERNEL) */
#include "../sys/vmem.h"
#endif /* defined(_KERNEL) */

#if defined(_KERNEL)
#define	LOCK_DECL(name)		kmutex_t name
#else /* defined(_KERNEL) */
#include <errno.h>
#include <assert.h>
#include <stdlib.h>
#include <stdbool.h>

#define	KASSERT(a)		assert(a)
#define	LOCK_DECL(name)		/* nothing */
#define	mutex_init(a, b, c)	/* nothing */
#define	mutex_destroy(a)	/* nothing */
#define	mutex_enter(a)		/* nothing */
#define	mutex_tryenter(a)	true	/* XXX the userland test build is single-threaded */
#define	mutex_exit(a)		/* nothing */
#define	mutex_owned(a)		/* nothing */
#define	ASSERT_SLEEPABLE(lk, msg)	/* nothing */
#define	IPL_VM			0
#endif /* defined(_KERNEL) */

struct vmem;
struct vmem_btag;

#if defined(VMEM_DEBUG)
void vmem_dump(const vmem_t *);
#endif /* defined(VMEM_DEBUG) */

#define	VMEM_MAXORDER		(sizeof(vmem_size_t) * CHAR_BIT)

#define	VMEM_HASHSIZE_MIN	1	/* XXX */
#define	VMEM_HASHSIZE_MAX	8192	/* XXX */
#define	VMEM_HASHSIZE_INIT	VMEM_HASHSIZE_MIN

#define	VM_FITMASK	(VM_BESTFIT | VM_INSTANTFIT)

CIRCLEQ_HEAD(vmem_seglist, vmem_btag);
LIST_HEAD(vmem_freelist, vmem_btag);
LIST_HEAD(vmem_hashlist, vmem_btag);
#if defined(QCACHE)
#define	VMEM_QCACHE_IDX_MAX	32

#define	QC_NAME_MAX	16

struct qcache {
	pool_cache_t qc_cache;
	vmem_t *qc_vmem;
	char qc_name[QC_NAME_MAX];
};
typedef struct qcache qcache_t;
#define	QC_POOL_TO_QCACHE(pool)	((qcache_t *)(pool->pr_qcache))
#endif /* defined(QCACHE) */

/* vmem arena */
struct vmem {
	LOCK_DECL(vm_lock);
	vmem_addr_t (*vm_allocfn)(vmem_t *, vmem_size_t, vmem_size_t *,
	    vm_flag_t);
	void (*vm_freefn)(vmem_t *, vmem_addr_t, vmem_size_t);
	vmem_t *vm_source;
	struct vmem_seglist vm_seglist;
	struct vmem_freelist vm_freelist[VMEM_MAXORDER];
	size_t vm_hashsize;
	size_t vm_nbusytag;
	struct vmem_hashlist *vm_hashlist;
	size_t vm_quantum_mask;
	int vm_quantum_shift;
	const char *vm_name;
	LIST_ENTRY(vmem) vm_alllist;

#if defined(QCACHE)
	/* quantum cache */
	size_t vm_qcache_max;
	struct pool_allocator vm_qcache_allocator;
	qcache_t vm_qcache_store[VMEM_QCACHE_IDX_MAX];
	qcache_t *vm_qcache[VMEM_QCACHE_IDX_MAX];
#endif /* defined(QCACHE) */
};

#define	VMEM_LOCK(vm)		mutex_enter(&vm->vm_lock)
#define	VMEM_TRYLOCK(vm)	mutex_tryenter(&vm->vm_lock)
#define	VMEM_UNLOCK(vm)		mutex_exit(&vm->vm_lock)
#define	VMEM_LOCK_INIT(vm, ipl)	mutex_init(&vm->vm_lock, MUTEX_DEFAULT, ipl)
#define	VMEM_LOCK_DESTROY(vm)	mutex_destroy(&vm->vm_lock)
#define	VMEM_ASSERT_LOCKED(vm)	KASSERT(mutex_owned(&vm->vm_lock))

/* boundary tag */
struct vmem_btag {
	CIRCLEQ_ENTRY(vmem_btag) bt_seglist;
	union {
		LIST_ENTRY(vmem_btag) u_freelist; /* BT_TYPE_FREE */
		LIST_ENTRY(vmem_btag) u_hashlist; /* BT_TYPE_BUSY */
	} bt_u;
#define	bt_hashlist	bt_u.u_hashlist
#define	bt_freelist	bt_u.u_freelist
	vmem_addr_t bt_start;
	vmem_size_t bt_size;
	int bt_type;
};

#define	BT_TYPE_SPAN		1
#define	BT_TYPE_SPAN_STATIC	2
#define	BT_TYPE_FREE		3
#define	BT_TYPE_BUSY		4
#define	BT_ISSPAN_P(bt)	((bt)->bt_type <= BT_TYPE_SPAN_STATIC)

#define	BT_END(bt)	((bt)->bt_start + (bt)->bt_size)

typedef struct vmem_btag bt_t;

/* ---- misc */

#define	VMEM_ALIGNUP(addr, align) \
	(-(-(addr) & -(align)))
#define	VMEM_CROSS_P(addr1, addr2, boundary) \
	((((addr1) ^ (addr2)) & -(boundary)) != 0)

#define	ORDER2SIZE(order)	((vmem_size_t)1 << (order))

static int
calc_order(vmem_size_t size)
{
	vmem_size_t target;
	int i;

	KASSERT(size != 0);

	i = 0;
	target = size >> 1;
	while (ORDER2SIZE(i) <= target) {
		i++;
	}

	KASSERT(ORDER2SIZE(i) <= size);
	KASSERT(size < ORDER2SIZE(i + 1) || ORDER2SIZE(i + 1) < ORDER2SIZE(i));

	return i;
}
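
/*
 * Worked example (added for illustration; the values are hypothetical):
 * calc_order() returns the freelist order of a size, i.e. floor(log2(size)):
 *
 *	calc_order(1) == 0	ORDER2SIZE(0) == 1
 *	calc_order(7) == 2	ORDER2SIZE(2) == 4
 *	calc_order(8) == 3	ORDER2SIZE(3) == 8
 *
 * VMEM_ALIGNUP() rounds an address up using two's complement arithmetic,
 * -(-addr & -align), where "align" must be a power of 2:
 *
 *	VMEM_ALIGNUP(0x123, 0x100) == 0x200
 *	VMEM_ALIGNUP(0x200, 0x100) == 0x200
 */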
#if defined(_KERNEL)
static MALLOC_DEFINE(M_VMEM, "vmem", "vmem");
#endif /* defined(_KERNEL) */

static void *
xmalloc(size_t sz, vm_flag_t flags)
{

#if defined(_KERNEL)
	return malloc(sz, M_VMEM,
	    M_CANFAIL | ((flags & VM_SLEEP) ? M_WAITOK : M_NOWAIT));
#else /* defined(_KERNEL) */
	return malloc(sz);
#endif /* defined(_KERNEL) */
}

static void
xfree(void *p)
{

#if defined(_KERNEL)
	return free(p, M_VMEM);
#else /* defined(_KERNEL) */
	return free(p);
#endif /* defined(_KERNEL) */
}

/* ---- boundary tag */

#if defined(_KERNEL)
static struct pool_cache bt_cache;
#endif /* defined(_KERNEL) */

static bt_t *
bt_alloc(vmem_t *vm, vm_flag_t flags)
{
	bt_t *bt;

#if defined(_KERNEL)
	bt = pool_cache_get(&bt_cache,
	    (flags & VM_SLEEP) != 0 ? PR_WAITOK : PR_NOWAIT);
#else /* defined(_KERNEL) */
	bt = malloc(sizeof *bt);
#endif /* defined(_KERNEL) */

	return bt;
}

static void
bt_free(vmem_t *vm, bt_t *bt)
{

#if defined(_KERNEL)
	pool_cache_put(&bt_cache, bt);
#else /* defined(_KERNEL) */
	free(bt);
#endif /* defined(_KERNEL) */
}

/*
 * freelist[0] ... [1, 1]
 * freelist[1] ... [2, 3]
 * freelist[2] ... [4, 7]
 * freelist[3] ... [8, 15]
 *  :
 * freelist[n] ... [(1 << n), (1 << (n + 1)) - 1]
 *  :
 */

static struct vmem_freelist *
bt_freehead_tofree(vmem_t *vm, vmem_size_t size)
{
	const vmem_size_t qsize = size >> vm->vm_quantum_shift;
	int idx;

	KASSERT((size & vm->vm_quantum_mask) == 0);
	KASSERT(size != 0);

	idx = calc_order(qsize);
	KASSERT(idx >= 0);
	KASSERT(idx < VMEM_MAXORDER);

	return &vm->vm_freelist[idx];
}

static struct vmem_freelist *
bt_freehead_toalloc(vmem_t *vm, vmem_size_t size, vm_flag_t strat)
{
	const vmem_size_t qsize = size >> vm->vm_quantum_shift;
	int idx;

	KASSERT((size & vm->vm_quantum_mask) == 0);
	KASSERT(size != 0);

	idx = calc_order(qsize);
	if (strat == VM_INSTANTFIT && ORDER2SIZE(idx) != qsize) {
		idx++;
		/* check too large request? */
	}
	KASSERT(idx >= 0);
	KASSERT(idx < VMEM_MAXORDER);

	return &vm->vm_freelist[idx];
}
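
/*
 * Illustration (not part of the allocator): with a quantum of 1, a
 * request for 6 quanta has calc_order(6) == 2, so freelist[2]
 * (sizes 4..7) may or may not contain a large enough segment.
 * VM_BESTFIT starts searching there and inspects every tag;
 * VM_INSTANTFIT rounds up to freelist[3] (sizes 8..15), where any tag
 * is guaranteed to fit, at the cost of possibly skipping a usable 6- or
 * 7-quantum segment in freelist[2].
 */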
/* ---- boundary tag hash */

static struct vmem_hashlist *
bt_hashhead(vmem_t *vm, vmem_addr_t addr)
{
	struct vmem_hashlist *list;
	unsigned int hash;

	hash = hash32_buf(&addr, sizeof(addr), HASH32_BUF_INIT);
	list = &vm->vm_hashlist[hash % vm->vm_hashsize];

	return list;
}

static bt_t *
bt_lookupbusy(vmem_t *vm, vmem_addr_t addr)
{
	struct vmem_hashlist *list;
	bt_t *bt;

	list = bt_hashhead(vm, addr);
	LIST_FOREACH(bt, list, bt_hashlist) {
		if (bt->bt_start == addr) {
			break;
		}
	}

	return bt;
}

static void
bt_rembusy(vmem_t *vm, bt_t *bt)
{

	KASSERT(vm->vm_nbusytag > 0);
	vm->vm_nbusytag--;
	LIST_REMOVE(bt, bt_hashlist);
}

static void
bt_insbusy(vmem_t *vm, bt_t *bt)
{
	struct vmem_hashlist *list;

	KASSERT(bt->bt_type == BT_TYPE_BUSY);

	list = bt_hashhead(vm, bt->bt_start);
	LIST_INSERT_HEAD(list, bt, bt_hashlist);
	vm->vm_nbusytag++;
}

/* ---- boundary tag list */

static void
bt_remseg(vmem_t *vm, bt_t *bt)
{

	CIRCLEQ_REMOVE(&vm->vm_seglist, bt, bt_seglist);
}

static void
bt_insseg(vmem_t *vm, bt_t *bt, bt_t *prev)
{

	CIRCLEQ_INSERT_AFTER(&vm->vm_seglist, prev, bt, bt_seglist);
}

static void
bt_insseg_tail(vmem_t *vm, bt_t *bt)
{

	CIRCLEQ_INSERT_TAIL(&vm->vm_seglist, bt, bt_seglist);
}

static void
bt_remfree(vmem_t *vm, bt_t *bt)
{

	KASSERT(bt->bt_type == BT_TYPE_FREE);

	LIST_REMOVE(bt, bt_freelist);
}

static void
bt_insfree(vmem_t *vm, bt_t *bt)
{
	struct vmem_freelist *list;

	list = bt_freehead_tofree(vm, bt->bt_size);
	LIST_INSERT_HEAD(list, bt, bt_freelist);
}

/* ---- vmem internal functions */

#if defined(_KERNEL)
static kmutex_t vmem_list_lock;
static LIST_HEAD(, vmem) vmem_list = LIST_HEAD_INITIALIZER(vmem_list);
#endif /* defined(_KERNEL) */

#if defined(QCACHE)
static inline vm_flag_t
prf_to_vmf(int prflags)
{
	vm_flag_t vmflags;

	KASSERT((prflags & ~(PR_LIMITFAIL | PR_WAITOK | PR_NOWAIT)) == 0);
	if ((prflags & PR_WAITOK) != 0) {
		vmflags = VM_SLEEP;
	} else {
		vmflags = VM_NOSLEEP;
	}
	return vmflags;
}

static inline int
vmf_to_prf(vm_flag_t vmflags)
{
	int prflags;

	if ((vmflags & VM_SLEEP) != 0) {
		prflags = PR_WAITOK;
	} else {
		prflags = PR_NOWAIT;
	}
	return prflags;
}

static size_t
qc_poolpage_size(size_t qcache_max)
{
	int i;

	for (i = 0; ORDER2SIZE(i) <= qcache_max * 3; i++) {
		/* nothing */
	}
	return ORDER2SIZE(i);
}

static void *
qc_poolpage_alloc(struct pool *pool, int prflags)
{
	qcache_t *qc = QC_POOL_TO_QCACHE(pool);
	vmem_t *vm = qc->qc_vmem;

	return (void *)vmem_alloc(vm, pool->pr_alloc->pa_pagesz,
	    prf_to_vmf(prflags) | VM_INSTANTFIT);
}

static void
qc_poolpage_free(struct pool *pool, void *addr)
{
	qcache_t *qc = QC_POOL_TO_QCACHE(pool);
	vmem_t *vm = qc->qc_vmem;

	vmem_free(vm, (vmem_addr_t)addr, pool->pr_alloc->pa_pagesz);
}

static void
qc_init(vmem_t *vm, size_t qcache_max, int ipl)
{
	qcache_t *prevqc;
	struct pool_allocator *pa;
	int qcache_idx_max;
	int i;

	KASSERT((qcache_max & vm->vm_quantum_mask) == 0);
	if (qcache_max > (VMEM_QCACHE_IDX_MAX << vm->vm_quantum_shift)) {
		qcache_max = VMEM_QCACHE_IDX_MAX << vm->vm_quantum_shift;
	}
	vm->vm_qcache_max = qcache_max;
	pa = &vm->vm_qcache_allocator;
	memset(pa, 0, sizeof(*pa));
	pa->pa_alloc = qc_poolpage_alloc;
	pa->pa_free = qc_poolpage_free;
	pa->pa_pagesz = qc_poolpage_size(qcache_max);

	qcache_idx_max = qcache_max >> vm->vm_quantum_shift;
	prevqc = NULL;
	for (i = qcache_idx_max; i > 0; i--) {
		qcache_t *qc = &vm->vm_qcache_store[i - 1];
		size_t size = i << vm->vm_quantum_shift;

		qc->qc_vmem = vm;
		snprintf(qc->qc_name, sizeof(qc->qc_name), "%s-%zu",
		    vm->vm_name, size);
		qc->qc_cache = pool_cache_init(size,
		    ORDER2SIZE(vm->vm_quantum_shift), 0,
		    PR_NOALIGN | PR_NOTOUCH /* XXX */,
		    qc->qc_name, pa, ipl, NULL, NULL, NULL);
		KASSERT(qc->qc_cache != NULL);	/* XXX */
		if (prevqc != NULL &&
		    qc->qc_cache->pc_pool.pr_itemsperpage ==
		    prevqc->qc_cache->pc_pool.pr_itemsperpage) {
			pool_cache_destroy(qc->qc_cache);
			vm->vm_qcache[i - 1] = prevqc;
			continue;
		}
		qc->qc_cache->pc_pool.pr_qcache = qc;
		vm->vm_qcache[i - 1] = qc;
		prevqc = qc;
	}
}

static void
qc_destroy(vmem_t *vm)
{
	const qcache_t *prevqc;
	int i;
	int qcache_idx_max;

	qcache_idx_max = vm->vm_qcache_max >> vm->vm_quantum_shift;
	prevqc = NULL;
	for (i = 0; i < qcache_idx_max; i++) {
		qcache_t *qc = vm->vm_qcache[i];

		if (prevqc == qc) {
			continue;
		}
		pool_cache_destroy(qc->qc_cache);
		prevqc = qc;
	}
}

static bool
qc_reap(vmem_t *vm)
{
	const qcache_t *prevqc;
	int i;
	int qcache_idx_max;
	bool didsomething = false;

	qcache_idx_max = vm->vm_qcache_max >> vm->vm_quantum_shift;
	prevqc = NULL;
	for (i = 0; i < qcache_idx_max; i++) {
		qcache_t *qc = vm->vm_qcache[i];

		if (prevqc == qc) {
			continue;
		}
		if (pool_cache_reclaim(qc->qc_cache) != 0) {
			didsomething = true;
		}
		prevqc = qc;
	}

	return didsomething;
}
#endif /* defined(QCACHE) */
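
/*
 * Illustration (hypothetical numbers, not part of the code): for an
 * arena with quantum 4096 and qcache_max 32768, qc_init() creates up
 * to eight pool caches, "name-4096" .. "name-32768", one per multiple
 * of the quantum.  qc_poolpage_size(32768) picks the smallest power of
 * 2 greater than 3 * qcache_max, i.e. 131072, as the page size those
 * pools use when they refill themselves from the arena through
 * qc_poolpage_alloc().  Caches whose pools would pack the same number
 * of items per page are shared rather than duplicated (the prevqc
 * check above).
 */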
#if defined(_KERNEL)
static int
vmem_init(void)
{

	mutex_init(&vmem_list_lock, MUTEX_DEFAULT, IPL_NONE);
	pool_cache_bootstrap(&bt_cache, sizeof(bt_t), 0, 0, 0, "vmembt",
	    NULL, IPL_VM, NULL, NULL, NULL);
	return 0;
}
#endif /* defined(_KERNEL) */

static vmem_addr_t
vmem_add1(vmem_t *vm, vmem_addr_t addr, vmem_size_t size, vm_flag_t flags,
    int spanbttype)
{
	bt_t *btspan;
	bt_t *btfree;

	KASSERT((flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
	KASSERT((~flags & (VM_SLEEP|VM_NOSLEEP)) != 0);

	btspan = bt_alloc(vm, flags);
	if (btspan == NULL) {
		return VMEM_ADDR_NULL;
	}
	btfree = bt_alloc(vm, flags);
	if (btfree == NULL) {
		bt_free(vm, btspan);
		return VMEM_ADDR_NULL;
	}

	btspan->bt_type = spanbttype;
	btspan->bt_start = addr;
	btspan->bt_size = size;

	btfree->bt_type = BT_TYPE_FREE;
	btfree->bt_start = addr;
	btfree->bt_size = size;

	VMEM_LOCK(vm);
	bt_insseg_tail(vm, btspan);
	bt_insseg(vm, btfree, btspan);
	bt_insfree(vm, btfree);
	VMEM_UNLOCK(vm);

	return addr;
}

static void
vmem_destroy1(vmem_t *vm)
{

#if defined(QCACHE)
	qc_destroy(vm);
#endif /* defined(QCACHE) */
	if (vm->vm_hashlist != NULL) {
		int i;

		for (i = 0; i < vm->vm_hashsize; i++) {
			bt_t *bt;

			while ((bt = LIST_FIRST(&vm->vm_hashlist[i])) != NULL) {
				KASSERT(bt->bt_type == BT_TYPE_SPAN_STATIC);
				bt_free(vm, bt);
			}
		}
		xfree(vm->vm_hashlist);
	}
	VMEM_LOCK_DESTROY(vm);
	xfree(vm);
}

static int
vmem_import(vmem_t *vm, vmem_size_t size, vm_flag_t flags)
{
	vmem_addr_t addr;

	if (vm->vm_allocfn == NULL) {
		return EINVAL;
	}

	addr = (*vm->vm_allocfn)(vm->vm_source, size, &size, flags);
	if (addr == VMEM_ADDR_NULL) {
		return ENOMEM;
	}

	if (vmem_add1(vm, addr, size, flags, BT_TYPE_SPAN) == VMEM_ADDR_NULL) {
		(*vm->vm_freefn)(vm->vm_source, addr, size);
		return ENOMEM;
	}

	return 0;
}

static int
vmem_rehash(vmem_t *vm, size_t newhashsize, vm_flag_t flags)
{
	bt_t *bt;
	int i;
	struct vmem_hashlist *newhashlist;
	struct vmem_hashlist *oldhashlist;
	size_t oldhashsize;

	KASSERT(newhashsize > 0);

	newhashlist =
	    xmalloc(sizeof(struct vmem_hashlist) * newhashsize, flags);
	if (newhashlist == NULL) {
		return ENOMEM;
	}
	for (i = 0; i < newhashsize; i++) {
		LIST_INIT(&newhashlist[i]);
	}

	if (!VMEM_TRYLOCK(vm)) {
		xfree(newhashlist);
		return EBUSY;
	}
	oldhashlist = vm->vm_hashlist;
	oldhashsize = vm->vm_hashsize;
	vm->vm_hashlist = newhashlist;
	vm->vm_hashsize = newhashsize;
	if (oldhashlist == NULL) {
		VMEM_UNLOCK(vm);
		return 0;
	}
	for (i = 0; i < oldhashsize; i++) {
		while ((bt = LIST_FIRST(&oldhashlist[i])) != NULL) {
			bt_rembusy(vm, bt); /* XXX */
			bt_insbusy(vm, bt);
		}
	}
	VMEM_UNLOCK(vm);

	xfree(oldhashlist);

	return 0;
}

/*
 * vmem_fit: check if a bt can satisfy the given restrictions.
 */

static vmem_addr_t
vmem_fit(const bt_t *bt, vmem_size_t size, vmem_size_t align, vmem_size_t phase,
    vmem_size_t nocross, vmem_addr_t minaddr, vmem_addr_t maxaddr)
{
	vmem_addr_t start;
	vmem_addr_t end;

	KASSERT(bt->bt_size >= size);

	/*
	 * XXX assumption: vmem_addr_t and vmem_size_t are
	 * unsigned integer of the same size.
	 */

	start = bt->bt_start;
	if (start < minaddr) {
		start = minaddr;
	}
	end = BT_END(bt);
	if (end > maxaddr - 1) {
		end = maxaddr - 1;
	}
	if (start >= end) {
		return VMEM_ADDR_NULL;
	}

	start = VMEM_ALIGNUP(start - phase, align) + phase;
	if (start < bt->bt_start) {
		start += align;
	}
	if (VMEM_CROSS_P(start, start + size - 1, nocross)) {
		KASSERT(align < nocross);
		start = VMEM_ALIGNUP(start - phase, nocross) + phase;
	}
	if (start < end && end - start >= size) {
		KASSERT((start & (align - 1)) == phase);
		KASSERT(!VMEM_CROSS_P(start, start + size - 1, nocross));
		KASSERT(minaddr <= start);
		KASSERT(maxaddr == 0 || start + size <= maxaddr);
		KASSERT(bt->bt_start <= start);
		KASSERT(start + size <= BT_END(bt));
		return start;
	}
	return VMEM_ADDR_NULL;
}
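
/*
 * Worked example (illustrative values): a caller asking for size=0x40,
 * align=0x100, phase=0x10, nocross=0, minaddr=0, maxaddr=0 against a
 * free tag covering [0x1234, 0x1434) proceeds as follows:
 *
 *	start = VMEM_ALIGNUP(0x1234 - 0x10, 0x100) + 0x10 = 0x1310
 *
 * 0x1310 lies inside the tag and 0x1310 + 0x40 <= 0x1434, so 0x1310 is
 * returned; it satisfies (start & (align - 1)) == phase.
 */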
/* ---- vmem API */

/*
 * vmem_create: create an arena.
 *
 * => must not be called from interrupt context.
 */

vmem_t *
vmem_create(const char *name, vmem_addr_t base, vmem_size_t size,
    vmem_size_t quantum,
    vmem_addr_t (*allocfn)(vmem_t *, vmem_size_t, vmem_size_t *, vm_flag_t),
    void (*freefn)(vmem_t *, vmem_addr_t, vmem_size_t),
    vmem_t *source, vmem_size_t qcache_max, vm_flag_t flags,
    int ipl)
{
	vmem_t *vm;
	int i;
#if defined(_KERNEL)
	static ONCE_DECL(control);
#endif /* defined(_KERNEL) */

	KASSERT((flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
	KASSERT((~flags & (VM_SLEEP|VM_NOSLEEP)) != 0);

#if defined(_KERNEL)
	if (RUN_ONCE(&control, vmem_init)) {
		return NULL;
	}
#endif /* defined(_KERNEL) */
	vm = xmalloc(sizeof(*vm), flags);
	if (vm == NULL) {
		return NULL;
	}

	VMEM_LOCK_INIT(vm, ipl);
	vm->vm_name = name;
	vm->vm_quantum_mask = quantum - 1;
	vm->vm_quantum_shift = calc_order(quantum);
	KASSERT(ORDER2SIZE(vm->vm_quantum_shift) == quantum);
	vm->vm_allocfn = allocfn;
	vm->vm_freefn = freefn;
	vm->vm_source = source;
	vm->vm_nbusytag = 0;
#if defined(QCACHE)
	qc_init(vm, qcache_max, ipl);
#endif /* defined(QCACHE) */

	CIRCLEQ_INIT(&vm->vm_seglist);
	for (i = 0; i < VMEM_MAXORDER; i++) {
		LIST_INIT(&vm->vm_freelist[i]);
	}
	vm->vm_hashlist = NULL;
	if (vmem_rehash(vm, VMEM_HASHSIZE_INIT, flags)) {
		vmem_destroy1(vm);
		return NULL;
	}

	if (size != 0) {
		if (vmem_add(vm, base, size, flags) == 0) {
			vmem_destroy1(vm);
			return NULL;
		}
	}

#if defined(_KERNEL)
	mutex_enter(&vmem_list_lock);
	LIST_INSERT_HEAD(&vmem_list, vm, vm_alllist);
	mutex_exit(&vmem_list_lock);
#endif /* defined(_KERNEL) */

	return vm;
}

void
vmem_destroy(vmem_t *vm)
{

#if defined(_KERNEL)
	mutex_enter(&vmem_list_lock);
	LIST_REMOVE(vm, vm_alllist);
	mutex_exit(&vmem_list_lock);
#endif /* defined(_KERNEL) */

	vmem_destroy1(vm);
}

vmem_size_t
vmem_roundup_size(vmem_t *vm, vmem_size_t size)
{

	return (size + vm->vm_quantum_mask) & ~vm->vm_quantum_mask;
}
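
/*
 * Usage sketch (hypothetical caller, not part of this file): creating a
 * self-contained arena over a fixed range and allocating from it might
 * look like the following; "example_arena" and the constants are made
 * up for illustration.
 *
 *	vmem_t *example_arena;
 *	vmem_addr_t va;
 *
 *	example_arena = vmem_create("example", 0x1000, 0x10000, PAGE_SIZE,
 *	    NULL, NULL, NULL, 0, VM_SLEEP, IPL_NONE);
 *	if (example_arena == NULL)
 *		panic("vmem_create failed");
 *	va = vmem_alloc(example_arena, 3 * PAGE_SIZE,
 *	    VM_INSTANTFIT | VM_SLEEP);
 *	if (va == VMEM_ADDR_NULL)
 *		...handle the failure...
 *	...use [va, va + 3 * PAGE_SIZE)...
 *	vmem_free(example_arena, va, 3 * PAGE_SIZE);
 *	vmem_destroy(example_arena);
 */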
/*
 * vmem_alloc:
 *
 * => caller must ensure appropriate spl,
 *    if the arena can be accessed from interrupt context.
 */

vmem_addr_t
vmem_alloc(vmem_t *vm, vmem_size_t size0, vm_flag_t flags)
{
	const vmem_size_t size __unused = vmem_roundup_size(vm, size0);
	const vm_flag_t strat __unused = flags & VM_FITMASK;

	KASSERT((flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
	KASSERT((~flags & (VM_SLEEP|VM_NOSLEEP)) != 0);

	KASSERT(size0 > 0);
	KASSERT(size > 0);
	KASSERT(strat == VM_BESTFIT || strat == VM_INSTANTFIT);
	if ((flags & VM_SLEEP) != 0) {
		ASSERT_SLEEPABLE(NULL, __func__);
	}

#if defined(QCACHE)
	if (size <= vm->vm_qcache_max) {
		int qidx = size >> vm->vm_quantum_shift;
		qcache_t *qc = vm->vm_qcache[qidx - 1];

		return (vmem_addr_t)pool_cache_get(qc->qc_cache,
		    vmf_to_prf(flags));
	}
#endif /* defined(QCACHE) */

	return vmem_xalloc(vm, size0, 0, 0, 0, 0, 0, flags);
}
vmem_addr_t
vmem_xalloc(vmem_t *vm, vmem_size_t size0, vmem_size_t align, vmem_size_t phase,
    vmem_size_t nocross, vmem_addr_t minaddr, vmem_addr_t maxaddr,
    vm_flag_t flags)
{
	struct vmem_freelist *list;
	struct vmem_freelist *first;
	struct vmem_freelist *end;
	bt_t *bt;
	bt_t *btnew;
	bt_t *btnew2;
	const vmem_size_t size = vmem_roundup_size(vm, size0);
	vm_flag_t strat = flags & VM_FITMASK;
	vmem_addr_t start;

	KASSERT(size0 > 0);
	KASSERT(size > 0);
	KASSERT(strat == VM_BESTFIT || strat == VM_INSTANTFIT);
	if ((flags & VM_SLEEP) != 0) {
		ASSERT_SLEEPABLE(NULL, __func__);
	}
	KASSERT((align & vm->vm_quantum_mask) == 0);
	KASSERT((align & (align - 1)) == 0);
	KASSERT((phase & vm->vm_quantum_mask) == 0);
	KASSERT((nocross & vm->vm_quantum_mask) == 0);
	KASSERT((nocross & (nocross - 1)) == 0);
	KASSERT((align == 0 && phase == 0) || phase < align);
	KASSERT(nocross == 0 || nocross >= size);
	KASSERT(maxaddr == 0 || minaddr < maxaddr);
	KASSERT(!VMEM_CROSS_P(phase, phase + size - 1, nocross));

	if (align == 0) {
		align = vm->vm_quantum_mask + 1;
	}
	btnew = bt_alloc(vm, flags);
	if (btnew == NULL) {
		return VMEM_ADDR_NULL;
	}
	btnew2 = bt_alloc(vm, flags); /* XXX not necessary if no restrictions */
	if (btnew2 == NULL) {
		bt_free(vm, btnew);
		return VMEM_ADDR_NULL;
	}

retry_strat:
	first = bt_freehead_toalloc(vm, size, strat);
	end = &vm->vm_freelist[VMEM_MAXORDER];
retry:
	bt = NULL;
	VMEM_LOCK(vm);
	if (strat == VM_INSTANTFIT) {
		for (list = first; list < end; list++) {
			bt = LIST_FIRST(list);
			if (bt != NULL) {
				start = vmem_fit(bt, size, align, phase,
				    nocross, minaddr, maxaddr);
				if (start != VMEM_ADDR_NULL) {
					goto gotit;
				}
			}
		}
	} else { /* VM_BESTFIT */
		for (list = first; list < end; list++) {
			LIST_FOREACH(bt, list, bt_freelist) {
				if (bt->bt_size >= size) {
					start = vmem_fit(bt, size, align, phase,
					    nocross, minaddr, maxaddr);
					if (start != VMEM_ADDR_NULL) {
						goto gotit;
					}
				}
			}
		}
	}
	VMEM_UNLOCK(vm);
#if 1
	if (strat == VM_INSTANTFIT) {
		strat = VM_BESTFIT;
		goto retry_strat;
	}
#endif
	if (align != vm->vm_quantum_mask + 1 || phase != 0 ||
	    nocross != 0 || minaddr != 0 || maxaddr != 0) {

		/*
		 * XXX should try to import a region large enough to
		 * satisfy restrictions?
		 */

		goto fail;
	}
	if (vmem_import(vm, size, flags) == 0) {
		goto retry;
	}
	/* XXX */
fail:
	bt_free(vm, btnew);
	bt_free(vm, btnew2);
	return VMEM_ADDR_NULL;

gotit:
	KASSERT(bt->bt_type == BT_TYPE_FREE);
	KASSERT(bt->bt_size >= size);
	bt_remfree(vm, bt);
	if (bt->bt_start != start) {
		btnew2->bt_type = BT_TYPE_FREE;
		btnew2->bt_start = bt->bt_start;
		btnew2->bt_size = start - bt->bt_start;
		bt->bt_start = start;
		bt->bt_size -= btnew2->bt_size;
		bt_insfree(vm, btnew2);
		bt_insseg(vm, btnew2, CIRCLEQ_PREV(bt, bt_seglist));
		btnew2 = NULL;
	}
	KASSERT(bt->bt_start == start);
	if (bt->bt_size != size && bt->bt_size - size > vm->vm_quantum_mask) {
		/* split */
		btnew->bt_type = BT_TYPE_BUSY;
		btnew->bt_start = bt->bt_start;
		btnew->bt_size = size;
		bt->bt_start = bt->bt_start + size;
		bt->bt_size -= size;
		bt_insfree(vm, bt);
		bt_insseg(vm, btnew, CIRCLEQ_PREV(bt, bt_seglist));
		bt_insbusy(vm, btnew);
		VMEM_UNLOCK(vm);
	} else {
		bt->bt_type = BT_TYPE_BUSY;
		bt_insbusy(vm, bt);
		VMEM_UNLOCK(vm);
		bt_free(vm, btnew);
		btnew = bt;
	}
	if (btnew2 != NULL) {
		bt_free(vm, btnew2);
	}
	KASSERT(btnew->bt_size >= size);
	btnew->bt_type = BT_TYPE_BUSY;

	return btnew->bt_start;
}
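
/*
 * Usage sketch (hypothetical caller, made-up constants): vmem_xalloc()
 * is the constrained variant of vmem_alloc().  For instance, a driver
 * needing a 24KB window that is 64KB-aligned, does not cross a 1MB
 * boundary and lies below 16MB might ask:
 *
 *	va = vmem_xalloc(example_arena, 24 * 1024,
 *	    64 * 1024,			(align)
 *	    0,				(phase)
 *	    1024 * 1024,		(nocross)
 *	    0,				(minaddr)
 *	    16 * 1024 * 1024,		(maxaddr)
 *	    VM_BESTFIT | VM_SLEEP);
 *
 * All of align, phase and nocross must be multiples of the arena's
 * quantum, phase must be smaller than align, and nocross must be at
 * least the (rounded-up) size.
 */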
/*
 * vmem_free:
 *
 * => caller must ensure appropriate spl,
 *    if the arena can be accessed from interrupt context.
 */

void
vmem_free(vmem_t *vm, vmem_addr_t addr, vmem_size_t size)
{

	KASSERT(addr != VMEM_ADDR_NULL);
	KASSERT(size > 0);

#if defined(QCACHE)
	if (size <= vm->vm_qcache_max) {
		int qidx = (size + vm->vm_quantum_mask) >> vm->vm_quantum_shift;
		qcache_t *qc = vm->vm_qcache[qidx - 1];

		return pool_cache_put(qc->qc_cache, (void *)addr);
	}
#endif /* defined(QCACHE) */

	vmem_xfree(vm, addr, size);
}

void
vmem_xfree(vmem_t *vm, vmem_addr_t addr, vmem_size_t size)
{
	bt_t *bt;
	bt_t *t;

	KASSERT(addr != VMEM_ADDR_NULL);
	KASSERT(size > 0);

	VMEM_LOCK(vm);

	bt = bt_lookupbusy(vm, addr);
	KASSERT(bt != NULL);
	KASSERT(bt->bt_start == addr);
	KASSERT(bt->bt_size == vmem_roundup_size(vm, size) ||
	    bt->bt_size - vmem_roundup_size(vm, size) <= vm->vm_quantum_mask);
	KASSERT(bt->bt_type == BT_TYPE_BUSY);
	bt_rembusy(vm, bt);
	bt->bt_type = BT_TYPE_FREE;

	/* coalesce */
	t = CIRCLEQ_NEXT(bt, bt_seglist);
	if (t != NULL && t->bt_type == BT_TYPE_FREE) {
		KASSERT(BT_END(bt) == t->bt_start);
		bt_remfree(vm, t);
		bt_remseg(vm, t);
		bt->bt_size += t->bt_size;
		bt_free(vm, t);
	}
	t = CIRCLEQ_PREV(bt, bt_seglist);
	if (t != NULL && t->bt_type == BT_TYPE_FREE) {
		KASSERT(BT_END(t) == bt->bt_start);
		bt_remfree(vm, t);
		bt_remseg(vm, t);
		bt->bt_size += t->bt_size;
		bt->bt_start = t->bt_start;
		bt_free(vm, t);
	}

	t = CIRCLEQ_PREV(bt, bt_seglist);
	KASSERT(t != NULL);
	KASSERT(BT_ISSPAN_P(t) || t->bt_type == BT_TYPE_BUSY);
	if (vm->vm_freefn != NULL && t->bt_type == BT_TYPE_SPAN &&
	    t->bt_size == bt->bt_size) {
		vmem_addr_t spanaddr;
		vmem_size_t spansize;

		KASSERT(t->bt_start == bt->bt_start);
		spanaddr = bt->bt_start;
		spansize = bt->bt_size;
		bt_remseg(vm, bt);
		bt_free(vm, bt);
		bt_remseg(vm, t);
		bt_free(vm, t);
		VMEM_UNLOCK(vm);
		(*vm->vm_freefn)(vm->vm_source, spanaddr, spansize);
	} else {
		bt_insfree(vm, bt);
		VMEM_UNLOCK(vm);
	}
}
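
/*
 * Coalescing illustration (hypothetical segment list): freeing the
 * middle BUSY tag below merges it with both FREE neighbours, leaving a
 * single FREE tag covering [0x1000, 0x4000):
 *
 *	before:	SPAN [0x1000, 0x4000)
 *		FREE [0x1000, 0x2000) BUSY [0x2000, 0x3000) FREE [0x3000, 0x4000)
 *	after:	SPAN [0x1000, 0x4000)
 *		FREE [0x1000, 0x4000)
 *
 * If the span was imported from a source arena (BT_TYPE_SPAN rather
 * than BT_TYPE_SPAN_STATIC) and is now entirely free, it is returned to
 * the source via vm_freefn instead of being kept on a freelist.
 */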
/*
 * vmem_add:
 *
 * => caller must ensure appropriate spl,
 *    if the arena can be accessed from interrupt context.
 */

vmem_addr_t
vmem_add(vmem_t *vm, vmem_addr_t addr, vmem_size_t size, vm_flag_t flags)
{

	return vmem_add1(vm, addr, size, flags, BT_TYPE_SPAN_STATIC);
}

/*
 * vmem_reap: reap unused resources.
 *
 * => return true if we successfully reaped something.
 */

bool
vmem_reap(vmem_t *vm)
{
	bool didsomething = false;

#if defined(QCACHE)
	didsomething = qc_reap(vm);
#endif /* defined(QCACHE) */
	return didsomething;
}

/* ---- rehash */

#if defined(_KERNEL)
static struct callout vmem_rehash_ch;
static int vmem_rehash_interval;
static struct workqueue *vmem_rehash_wq;
static struct work vmem_rehash_wk;

static void
vmem_rehash_all(struct work *wk, void *dummy)
{
	vmem_t *vm;

	KASSERT(wk == &vmem_rehash_wk);
	mutex_enter(&vmem_list_lock);
	LIST_FOREACH(vm, &vmem_list, vm_alllist) {
		size_t desired;
		size_t current;

		if (!VMEM_TRYLOCK(vm)) {
			continue;
		}
		desired = vm->vm_nbusytag;
		current = vm->vm_hashsize;
		VMEM_UNLOCK(vm);

		if (desired > VMEM_HASHSIZE_MAX) {
			desired = VMEM_HASHSIZE_MAX;
		} else if (desired < VMEM_HASHSIZE_MIN) {
			desired = VMEM_HASHSIZE_MIN;
		}
		if (desired > current * 2 || desired * 2 < current) {
			vmem_rehash(vm, desired, VM_NOSLEEP);
		}
	}
	mutex_exit(&vmem_list_lock);

	callout_schedule(&vmem_rehash_ch, vmem_rehash_interval);
}

static void
vmem_rehash_all_kick(void *dummy)
{

	workqueue_enqueue(vmem_rehash_wq, &vmem_rehash_wk, NULL);
}

void
vmem_rehash_start(void)
{
	int error;

	error = workqueue_create(&vmem_rehash_wq, "vmem_rehash",
	    vmem_rehash_all, NULL, PRI_VM, IPL_SOFTCLOCK, 0);
	if (error) {
		panic("%s: workqueue_create %d\n", __func__, error);
	}
	callout_init(&vmem_rehash_ch, 0);
	callout_setfunc(&vmem_rehash_ch, vmem_rehash_all_kick, NULL);

	vmem_rehash_interval = hz * 10;
	callout_schedule(&vmem_rehash_ch, vmem_rehash_interval);
}
#endif /* defined(_KERNEL) */
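
/*
 * Illustration (hypothetical numbers): every 10 seconds the callout
 * queues vmem_rehash_all(), which resizes each arena's busy-tag hash
 * toward one bucket per busy tag.  An arena that has grown to 1000
 * busy tags while still using the initial 1-bucket table satisfies
 * "desired > current * 2" and is rehashed to 1000 buckets; if most of
 * those allocations are later freed, "desired * 2 < current" triggers
 * a shrink on a later pass.
 */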
/* ---- debug */

#if defined(DDB)
static bt_t *
vmem_whatis_lookup(vmem_t *vm, uintptr_t addr)
{
	int i;

	for (i = 0; i < vm->vm_hashsize; i++) {
		bt_t *bt;

		LIST_FOREACH(bt, &vm->vm_hashlist[i], bt_hashlist) {
			if (bt->bt_start <= addr && addr < BT_END(bt)) {
				return bt;
			}
		}
	}

	return NULL;
}

void
vmem_whatis(uintptr_t addr, void (*pr)(const char *, ...))
{
	vmem_t *vm;

	LIST_FOREACH(vm, &vmem_list, vm_alllist) {
		bt_t *bt;

		bt = vmem_whatis_lookup(vm, addr);
		if (bt == NULL) {
			continue;
		}
		(*pr)("%p is %p+%zu from VMEM '%s'\n",
		    (void *)addr, (void *)bt->bt_start,
		    (size_t)(addr - bt->bt_start), vm->vm_name);
	}
}
#endif /* defined(DDB) */

#if defined(VMEM_DEBUG)

#if !defined(_KERNEL)
#include <stdio.h>
#include <inttypes.h>
#endif /* !defined(_KERNEL) */

void bt_dump(const bt_t *);

void
bt_dump(const bt_t *bt)
{

	printf("\t%p: %" PRIu64 ", %" PRIu64 ", %d\n",
	    bt, (uint64_t)bt->bt_start, (uint64_t)bt->bt_size,
	    bt->bt_type);
}

void
vmem_dump(const vmem_t *vm)
{
	const bt_t *bt;
	int i;

	printf("vmem %p '%s'\n", vm, vm->vm_name);
	CIRCLEQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
		bt_dump(bt);
	}

	for (i = 0; i < VMEM_MAXORDER; i++) {
		const struct vmem_freelist *fl = &vm->vm_freelist[i];

		if (LIST_EMPTY(fl)) {
			continue;
		}

		printf("freelist[%d]\n", i);
		LIST_FOREACH(bt, fl, bt_freelist) {
			bt_dump(bt);
		}
	}
}

#if !defined(_KERNEL)

int
main(void)
{
	vmem_t *vm;
	vmem_addr_t p;
	struct reg {
		vmem_addr_t p;
		vmem_size_t sz;
		bool x;
	} *reg = NULL;
	int nreg = 0;
	int nalloc = 0;
	int nfree = 0;
	vmem_size_t total = 0;
#if 1
	vm_flag_t strat = VM_INSTANTFIT;
#else
	vm_flag_t strat = VM_BESTFIT;
#endif

	vm = vmem_create("test", VMEM_ADDR_NULL, 0, 1,
	    NULL, NULL, NULL, 0, VM_SLEEP, 0 /* XXX ipl */);
	if (vm == NULL) {
		printf("vmem_create\n");
		exit(EXIT_FAILURE);
	}
	vmem_dump(vm);

	p = vmem_add(vm, 100, 200, VM_SLEEP);
	p = vmem_add(vm, 2000, 1, VM_SLEEP);
	p = vmem_add(vm, 40000, 0x10000000>>12, VM_SLEEP);
	p = vmem_add(vm, 10000, 10000, VM_SLEEP);
	p = vmem_add(vm, 500, 1000, VM_SLEEP);
	vmem_dump(vm);
	for (;;) {
		struct reg *r;
		int t = rand() % 100;

		if (t > 45) {
			/* alloc */
			vmem_size_t sz = rand() % 500 + 1;
			bool x;
			vmem_size_t align, phase, nocross;
			vmem_addr_t minaddr, maxaddr;

			if (t > 70) {
				x = true;
				/* XXX */
				align = 1 << (rand() % 15);
				phase = rand() % 65536;
				nocross = 1 << (rand() % 15);
				if (align <= phase) {
					phase = 0;
				}
				if (VMEM_CROSS_P(phase, phase + sz - 1,
				    nocross)) {
					nocross = 0;
				}
				minaddr = rand() % 50000;
				maxaddr = rand() % 70000;
				if (minaddr > maxaddr) {
					minaddr = 0;
					maxaddr = 0;
				}
				printf("=== xalloc %" PRIu64
				    " align=%" PRIu64 ", phase=%" PRIu64
				    ", nocross=%" PRIu64 ", min=%" PRIu64
				    ", max=%" PRIu64 "\n",
				    (uint64_t)sz,
				    (uint64_t)align,
				    (uint64_t)phase,
				    (uint64_t)nocross,
				    (uint64_t)minaddr,
				    (uint64_t)maxaddr);
				p = vmem_xalloc(vm, sz, align, phase, nocross,
				    minaddr, maxaddr, strat|VM_SLEEP);
			} else {
				x = false;
				printf("=== alloc %" PRIu64 "\n", (uint64_t)sz);
				p = vmem_alloc(vm, sz, strat|VM_SLEEP);
			}
			printf("-> %" PRIu64 "\n", (uint64_t)p);
			vmem_dump(vm);
			if (p == VMEM_ADDR_NULL) {
				if (x) {
					continue;
				}
				break;
			}
			nreg++;
			reg = realloc(reg, sizeof(*reg) * nreg);
			r = &reg[nreg - 1];
			r->p = p;
			r->sz = sz;
			r->x = x;
			total += sz;
			nalloc++;
		} else if (nreg != 0) {
			/* free */
			r = &reg[rand() % nreg];
			printf("=== free %" PRIu64 ", %" PRIu64 "\n",
			    (uint64_t)r->p, (uint64_t)r->sz);
			if (r->x) {
				vmem_xfree(vm, r->p, r->sz);
			} else {
				vmem_free(vm, r->p, r->sz);
			}
			total -= r->sz;
			vmem_dump(vm);
			*r = reg[nreg - 1];
			nreg--;
			nfree++;
		}
		printf("total=%" PRIu64 "\n", (uint64_t)total);
	}
	fprintf(stderr, "total=%" PRIu64 ", nalloc=%d, nfree=%d\n",
	    (uint64_t)total, nalloc, nfree);
	exit(EXIT_SUCCESS);
}
#endif /* !defined(_KERNEL) */
#endif /* defined(VMEM_DEBUG) */