/*	$NetBSD: subr_vmem.c,v 1.42 2008/03/17 08:27:50 yamt Exp $	*/

/*-
 * Copyright (c)2006 YAMAMOTO Takashi,
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * reference:
 * -	Magazines and Vmem: Extending the Slab Allocator
 *	to Many CPUs and Arbitrary Resources
 *	http://www.usenix.org/event/usenix01/bonwick.html
 *
 * todo:
 * -	decide how to import segments for vmem_xalloc.
 * -	don't rely on malloc(9).
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_vmem.c,v 1.42 2008/03/17 08:27:50 yamt Exp $");

#define	VMEM_DEBUG
#if defined(_KERNEL)
#include "opt_ddb.h"
#define	QCACHE
#endif /* defined(_KERNEL) */

#include <sys/param.h>
#include <sys/hash.h>
#include <sys/queue.h>

#if defined(_KERNEL)
#include <sys/systm.h>
#include <sys/kernel.h>	/* hz */
#include <sys/callout.h>
#include <sys/malloc.h>
#include <sys/once.h>
#include <sys/pool.h>
#include <sys/vmem.h>
#include <sys/workqueue.h>
#else /* defined(_KERNEL) */
#include "../sys/vmem.h"
#endif /* defined(_KERNEL) */

#if defined(_KERNEL)
#define	LOCK_DECL(name)		kmutex_t name
#else /* defined(_KERNEL) */
#include <errno.h>
#include <assert.h>
#include <stdlib.h>

#define	KASSERT(a)		assert(a)
#define	LOCK_DECL(name)		/* nothing */
#define	mutex_init(a, b, c)	/* nothing */
#define	mutex_destroy(a)	/* nothing */
#define	mutex_enter(a)		/* nothing */
#define	mutex_exit(a)		/* nothing */
#define	mutex_owned(a)		/* nothing */
#define	ASSERT_SLEEPABLE()	/* nothing */
#define	IPL_VM			0
#endif /* defined(_KERNEL) */

struct vmem;
struct vmem_btag;

#if defined(VMEM_DEBUG)
void vmem_dump(const vmem_t *);
#endif /* defined(VMEM_DEBUG) */

#define	VMEM_MAXORDER		(sizeof(vmem_size_t) * CHAR_BIT)

#define	VMEM_HASHSIZE_MIN	1	/* XXX */
#define	VMEM_HASHSIZE_MAX	8192	/* XXX */
#define	VMEM_HASHSIZE_INIT	VMEM_HASHSIZE_MIN

#define	VM_FITMASK	(VM_BESTFIT | VM_INSTANTFIT)

CIRCLEQ_HEAD(vmem_seglist, vmem_btag);
LIST_HEAD(vmem_freelist, vmem_btag);
LIST_HEAD(vmem_hashlist, vmem_btag);

#if defined(QCACHE)
#define	VMEM_QCACHE_IDX_MAX	32
#define	QC_NAME_MAX	16

struct qcache {
	pool_cache_t qc_cache;
	vmem_t *qc_vmem;
	char qc_name[QC_NAME_MAX];
};
typedef struct qcache qcache_t;
#define	QC_POOL_TO_QCACHE(pool)	((qcache_t *)(pool->pr_qcache))
#endif /* defined(QCACHE) */

/* vmem arena */
struct vmem {
	LOCK_DECL(vm_lock);
	vmem_addr_t (*vm_allocfn)(vmem_t *, vmem_size_t, vmem_size_t *,
	    vm_flag_t);
	void (*vm_freefn)(vmem_t *, vmem_addr_t, vmem_size_t);
	vmem_t *vm_source;
	struct vmem_seglist vm_seglist;
	struct vmem_freelist vm_freelist[VMEM_MAXORDER];
	size_t vm_hashsize;
	size_t vm_nbusytag;
	struct vmem_hashlist *vm_hashlist;
	size_t vm_quantum_mask;
	int vm_quantum_shift;
	const char *vm_name;
	LIST_ENTRY(vmem) vm_alllist;

#if defined(QCACHE)
	/* quantum cache */
	size_t vm_qcache_max;
	struct pool_allocator vm_qcache_allocator;
	qcache_t vm_qcache_store[VMEM_QCACHE_IDX_MAX];
	qcache_t *vm_qcache[VMEM_QCACHE_IDX_MAX];
#endif /* defined(QCACHE) */
};

#define	VMEM_LOCK(vm)		mutex_enter(&vm->vm_lock)
#define	VMEM_TRYLOCK(vm)	mutex_tryenter(&vm->vm_lock)
#define	VMEM_UNLOCK(vm)		mutex_exit(&vm->vm_lock)
#define	VMEM_LOCK_INIT(vm, ipl)	mutex_init(&vm->vm_lock, MUTEX_DEFAULT, ipl)
#define	VMEM_LOCK_DESTROY(vm)	mutex_destroy(&vm->vm_lock)
#define	VMEM_ASSERT_LOCKED(vm)	KASSERT(mutex_owned(&vm->vm_lock))

/* boundary tag */
struct vmem_btag {
	CIRCLEQ_ENTRY(vmem_btag) bt_seglist;
	union {
		LIST_ENTRY(vmem_btag) u_freelist; /* BT_TYPE_FREE */
		LIST_ENTRY(vmem_btag) u_hashlist; /* BT_TYPE_BUSY */
	} bt_u;
#define	bt_hashlist	bt_u.u_hashlist
#define	bt_freelist	bt_u.u_freelist
	vmem_addr_t bt_start;
	vmem_size_t bt_size;
	int bt_type;
};

#define	BT_TYPE_SPAN		1
#define	BT_TYPE_SPAN_STATIC	2
#define	BT_TYPE_FREE		3
#define	BT_TYPE_BUSY		4
#define	BT_ISSPAN_P(bt)	((bt)->bt_type <= BT_TYPE_SPAN_STATIC)

#define	BT_END(bt)	((bt)->bt_start + (bt)->bt_size)

typedef struct vmem_btag bt_t;

/* ---- misc */

#define	VMEM_ALIGNUP(addr, align) \
	(-(-(addr) & -(align)))
#define	VMEM_CROSS_P(addr1, addr2, boundary) \
	((((addr1) ^ (addr2)) & -(boundary)) != 0)

#define	ORDER2SIZE(order)	((vmem_size_t)1 << (order))

static int
calc_order(vmem_size_t size)
{
	vmem_size_t target;
	int i;

	KASSERT(size != 0);

	i = 0;
	target = size >> 1;
	while (ORDER2SIZE(i) <= target) {
		i++;
	}

	KASSERT(ORDER2SIZE(i) <= size);
	KASSERT(size < ORDER2SIZE(i + 1) || ORDER2SIZE(i + 1) < ORDER2SIZE(i));

	return i;
}

#if defined(_KERNEL)
static MALLOC_DEFINE(M_VMEM, "vmem", "vmem");
#endif /* defined(_KERNEL) */
static void *
xmalloc(size_t sz, vm_flag_t flags)
{

#if defined(_KERNEL)
	return malloc(sz, M_VMEM,
	    M_CANFAIL | ((flags & VM_SLEEP) ? M_WAITOK : M_NOWAIT));
#else /* defined(_KERNEL) */
	return malloc(sz);
#endif /* defined(_KERNEL) */
}

static void
xfree(void *p)
{

#if defined(_KERNEL)
	return free(p, M_VMEM);
#else /* defined(_KERNEL) */
	return free(p);
#endif /* defined(_KERNEL) */
}

/* ---- boundary tag */

#if defined(_KERNEL)
static struct pool_cache bt_cache;
#endif /* defined(_KERNEL) */

static bt_t *
bt_alloc(vmem_t *vm, vm_flag_t flags)
{
	bt_t *bt;

#if defined(_KERNEL)
	bt = pool_cache_get(&bt_cache,
	    (flags & VM_SLEEP) != 0 ? PR_WAITOK : PR_NOWAIT);
#else /* defined(_KERNEL) */
	bt = malloc(sizeof *bt);
#endif /* defined(_KERNEL) */

	return bt;
}

static void
bt_free(vmem_t *vm, bt_t *bt)
{

#if defined(_KERNEL)
	pool_cache_put(&bt_cache, bt);
#else /* defined(_KERNEL) */
	free(bt);
#endif /* defined(_KERNEL) */
}

/*
 * freelist[0] ... [1, 1]
 * freelist[1] ... [2, 3]
 * freelist[2] ... [4, 7]
 * freelist[3] ... [8, 15]
 *  :
 * freelist[n] ... [(1 << n), (1 << (n + 1)) - 1]
 *  :
 */

static struct vmem_freelist *
bt_freehead_tofree(vmem_t *vm, vmem_size_t size)
{
	const vmem_size_t qsize = size >> vm->vm_quantum_shift;
	int idx;

	KASSERT((size & vm->vm_quantum_mask) == 0);
	KASSERT(size != 0);

	idx = calc_order(qsize);
	KASSERT(idx >= 0);
	KASSERT(idx < VMEM_MAXORDER);

	return &vm->vm_freelist[idx];
}
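/*
 * Worked example (illustrative only; the 4 KB quantum is an assumed value,
 * not something this file mandates):  with vm_quantum_shift == 12, a free
 * 24 KB segment has qsize == 6 and calc_order(6) == 2, so
 * bt_freehead_tofree() links it on freelist[2], which holds segments of
 * [4, 7] quanta.  When allocating the same size, VM_INSTANTFIT starts the
 * search one list higher (freelist[3]) so the first segment found is
 * guaranteed to be large enough, while VM_BESTFIT starts at freelist[2]
 * and walks the entries looking for one that fits.
 */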
static struct vmem_freelist *
bt_freehead_toalloc(vmem_t *vm, vmem_size_t size, vm_flag_t strat)
{
	const vmem_size_t qsize = size >> vm->vm_quantum_shift;
	int idx;

	KASSERT((size & vm->vm_quantum_mask) == 0);
	KASSERT(size != 0);

	idx = calc_order(qsize);
	if (strat == VM_INSTANTFIT && ORDER2SIZE(idx) != qsize) {
		idx++;
		/* check too large request? */
	}
	KASSERT(idx >= 0);
	KASSERT(idx < VMEM_MAXORDER);

	return &vm->vm_freelist[idx];
}

/* ---- boundary tag hash */

static struct vmem_hashlist *
bt_hashhead(vmem_t *vm, vmem_addr_t addr)
{
	struct vmem_hashlist *list;
	unsigned int hash;

	hash = hash32_buf(&addr, sizeof(addr), HASH32_BUF_INIT);
	list = &vm->vm_hashlist[hash % vm->vm_hashsize];

	return list;
}

static bt_t *
bt_lookupbusy(vmem_t *vm, vmem_addr_t addr)
{
	struct vmem_hashlist *list;
	bt_t *bt;

	list = bt_hashhead(vm, addr);
	LIST_FOREACH(bt, list, bt_hashlist) {
		if (bt->bt_start == addr) {
			break;
		}
	}

	return bt;
}

static void
bt_rembusy(vmem_t *vm, bt_t *bt)
{

	KASSERT(vm->vm_nbusytag > 0);
	vm->vm_nbusytag--;
	LIST_REMOVE(bt, bt_hashlist);
}

static void
bt_insbusy(vmem_t *vm, bt_t *bt)
{
	struct vmem_hashlist *list;

	KASSERT(bt->bt_type == BT_TYPE_BUSY);

	list = bt_hashhead(vm, bt->bt_start);
	LIST_INSERT_HEAD(list, bt, bt_hashlist);
	vm->vm_nbusytag++;
}

/* ---- boundary tag list */

static void
bt_remseg(vmem_t *vm, bt_t *bt)
{

	CIRCLEQ_REMOVE(&vm->vm_seglist, bt, bt_seglist);
}

static void
bt_insseg(vmem_t *vm, bt_t *bt, bt_t *prev)
{

	CIRCLEQ_INSERT_AFTER(&vm->vm_seglist, prev, bt, bt_seglist);
}

static void
bt_insseg_tail(vmem_t *vm, bt_t *bt)
{

	CIRCLEQ_INSERT_TAIL(&vm->vm_seglist, bt, bt_seglist);
}

static void
bt_remfree(vmem_t *vm, bt_t *bt)
{

	KASSERT(bt->bt_type == BT_TYPE_FREE);

	LIST_REMOVE(bt, bt_freelist);
}

static void
bt_insfree(vmem_t *vm, bt_t *bt)
{
	struct vmem_freelist *list;

	list = bt_freehead_tofree(vm, bt->bt_size);
	LIST_INSERT_HEAD(list, bt, bt_freelist);
}

/* ---- vmem internal functions */

#if defined(_KERNEL)
static kmutex_t vmem_list_lock;
static LIST_HEAD(, vmem) vmem_list = LIST_HEAD_INITIALIZER(vmem_list);
#endif /* defined(_KERNEL) */

#if defined(QCACHE)
static inline vm_flag_t
prf_to_vmf(int prflags)
{
	vm_flag_t vmflags;

	KASSERT((prflags & ~(PR_LIMITFAIL | PR_WAITOK | PR_NOWAIT)) == 0);
	if ((prflags & PR_WAITOK) != 0) {
		vmflags = VM_SLEEP;
	} else {
		vmflags = VM_NOSLEEP;
	}
	return vmflags;
}

static inline int
vmf_to_prf(vm_flag_t vmflags)
{
	int prflags;

	if ((vmflags & VM_SLEEP) != 0) {
		prflags = PR_WAITOK;
	} else {
		prflags = PR_NOWAIT;
	}
	return prflags;
}

static size_t
qc_poolpage_size(size_t qcache_max)
{
	int i;

	for (i = 0; ORDER2SIZE(i) <= qcache_max * 3; i++) {
		/* nothing */
	}
	return ORDER2SIZE(i);
}

static void *
qc_poolpage_alloc(struct pool *pool, int prflags)
{
	qcache_t *qc = QC_POOL_TO_QCACHE(pool);
	vmem_t *vm = qc->qc_vmem;

	return (void *)vmem_alloc(vm, pool->pr_alloc->pa_pagesz,
	    prf_to_vmf(prflags) | VM_INSTANTFIT);
}

static void
qc_poolpage_free(struct pool *pool, void *addr)
{
	qcache_t *qc = QC_POOL_TO_QCACHE(pool);
	vmem_t *vm = qc->qc_vmem;

	vmem_free(vm, (vmem_addr_t)addr, pool->pr_alloc->pa_pagesz);
}
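/*
 * Sizing example (illustrative; the 4 KB quantum and 32 KB qcache_max
 * are assumed values, not defaults):  with vm_quantum_shift == 12 and
 * qcache_max == 32 KB, qc_init() below creates qcache_idx_max == 8 pool
 * caches serving requests of 1..8 quanta (4 KB, 8 KB, ... 32 KB), all
 * backed by pool pages of qc_poolpage_size(32 KB) == 128 KB, the
 * smallest power of two strictly larger than 3 * qcache_max.
 * Neighbouring caches that would end up with the same number of items
 * per pool page are merged and share a single pool_cache.
 */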
static void
qc_init(vmem_t *vm, size_t qcache_max, int ipl)
{
	qcache_t *prevqc;
	struct pool_allocator *pa;
	int qcache_idx_max;
	int i;

	KASSERT((qcache_max & vm->vm_quantum_mask) == 0);
	if (qcache_max > (VMEM_QCACHE_IDX_MAX << vm->vm_quantum_shift)) {
		qcache_max = VMEM_QCACHE_IDX_MAX << vm->vm_quantum_shift;
	}
	vm->vm_qcache_max = qcache_max;
	pa = &vm->vm_qcache_allocator;
	memset(pa, 0, sizeof(*pa));
	pa->pa_alloc = qc_poolpage_alloc;
	pa->pa_free = qc_poolpage_free;
	pa->pa_pagesz = qc_poolpage_size(qcache_max);

	qcache_idx_max = qcache_max >> vm->vm_quantum_shift;
	prevqc = NULL;
	for (i = qcache_idx_max; i > 0; i--) {
		qcache_t *qc = &vm->vm_qcache_store[i - 1];
		size_t size = i << vm->vm_quantum_shift;

		qc->qc_vmem = vm;
		snprintf(qc->qc_name, sizeof(qc->qc_name), "%s-%zu",
		    vm->vm_name, size);
		qc->qc_cache = pool_cache_init(size,
		    ORDER2SIZE(vm->vm_quantum_shift), 0,
		    PR_NOALIGN | PR_NOTOUCH /* XXX */,
		    qc->qc_name, pa, ipl, NULL, NULL, NULL);
		KASSERT(qc->qc_cache != NULL);	/* XXX */
		if (prevqc != NULL &&
		    qc->qc_cache->pc_pool.pr_itemsperpage ==
		    prevqc->qc_cache->pc_pool.pr_itemsperpage) {
			pool_cache_destroy(qc->qc_cache);
			vm->vm_qcache[i - 1] = prevqc;
			continue;
		}
		qc->qc_cache->pc_pool.pr_qcache = qc;
		vm->vm_qcache[i - 1] = qc;
		prevqc = qc;
	}
}

static void
qc_destroy(vmem_t *vm)
{
	const qcache_t *prevqc;
	int i;
	int qcache_idx_max;

	qcache_idx_max = vm->vm_qcache_max >> vm->vm_quantum_shift;
	prevqc = NULL;
	for (i = 0; i < qcache_idx_max; i++) {
		qcache_t *qc = vm->vm_qcache[i];

		if (prevqc == qc) {
			continue;
		}
		pool_cache_destroy(qc->qc_cache);
		prevqc = qc;
	}
}

static bool
qc_reap(vmem_t *vm)
{
	const qcache_t *prevqc;
	int i;
	int qcache_idx_max;
	bool didsomething = false;

	qcache_idx_max = vm->vm_qcache_max >> vm->vm_quantum_shift;
	prevqc = NULL;
	for (i = 0; i < qcache_idx_max; i++) {
		qcache_t *qc = vm->vm_qcache[i];

		if (prevqc == qc) {
			continue;
		}
		if (pool_cache_reclaim(qc->qc_cache) != 0) {
			didsomething = true;
		}
		prevqc = qc;
	}

	return didsomething;
}
#endif /* defined(QCACHE) */

#if defined(_KERNEL)
static int
vmem_init(void)
{

	mutex_init(&vmem_list_lock, MUTEX_DEFAULT, IPL_NONE);
	pool_cache_bootstrap(&bt_cache, sizeof(bt_t), 0, 0, 0, "vmembt",
	    NULL, IPL_VM, NULL, NULL, NULL);
	return 0;
}
#endif /* defined(_KERNEL) */

static vmem_addr_t
vmem_add1(vmem_t *vm, vmem_addr_t addr, vmem_size_t size, vm_flag_t flags,
    int spanbttype)
{
	bt_t *btspan;
	bt_t *btfree;

	KASSERT((flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
	KASSERT((~flags & (VM_SLEEP|VM_NOSLEEP)) != 0);

	btspan = bt_alloc(vm, flags);
	if (btspan == NULL) {
		return VMEM_ADDR_NULL;
	}
	btfree = bt_alloc(vm, flags);
	if (btfree == NULL) {
		bt_free(vm, btspan);
		return VMEM_ADDR_NULL;
	}

	btspan->bt_type = spanbttype;
	btspan->bt_start = addr;
	btspan->bt_size = size;

	btfree->bt_type = BT_TYPE_FREE;
	btfree->bt_start = addr;
	btfree->bt_size = size;

	VMEM_LOCK(vm);
	bt_insseg_tail(vm, btspan);
	bt_insseg(vm, btfree, btspan);
	bt_insfree(vm, btfree);
	VMEM_UNLOCK(vm);

	return addr;
}
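/*
 * Illustration (the addresses are made up for the example):  after
 * vmem_add1(vm, 0x1000, 0x3000, ..., BT_TYPE_SPAN_STATIC) the segment
 * list holds two boundary tags describing the same range, a span tag
 * followed by a free tag:
 *
 *	[SPAN_STATIC 0x1000 + 0x3000] -> [FREE 0x1000 + 0x3000]
 *
 * Allocations never consume the span tag; they split or retype the FREE
 * tag, so once everything is freed again the range coalesces back into
 * a single FREE tag and, for imported (BT_TYPE_SPAN) spans, can be
 * returned to the source arena by vmem_xfree().
 */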
static void
vmem_destroy1(vmem_t *vm)
{

#if defined(QCACHE)
	qc_destroy(vm);
#endif /* defined(QCACHE) */
	if (vm->vm_hashlist != NULL) {
		int i;

		for (i = 0; i < vm->vm_hashsize; i++) {
			bt_t *bt;

			while ((bt = LIST_FIRST(&vm->vm_hashlist[i])) != NULL) {
				KASSERT(bt->bt_type == BT_TYPE_SPAN_STATIC);
				bt_free(vm, bt);
			}
		}
		xfree(vm->vm_hashlist);
	}
	VMEM_LOCK_DESTROY(vm);
	xfree(vm);
}

static int
vmem_import(vmem_t *vm, vmem_size_t size, vm_flag_t flags)
{
	vmem_addr_t addr;

	if (vm->vm_allocfn == NULL) {
		return EINVAL;
	}

	addr = (*vm->vm_allocfn)(vm->vm_source, size, &size, flags);
	if (addr == VMEM_ADDR_NULL) {
		return ENOMEM;
	}

	if (vmem_add1(vm, addr, size, flags, BT_TYPE_SPAN) == VMEM_ADDR_NULL) {
		(*vm->vm_freefn)(vm->vm_source, addr, size);
		return ENOMEM;
	}

	return 0;
}

static int
vmem_rehash(vmem_t *vm, size_t newhashsize, vm_flag_t flags)
{
	bt_t *bt;
	int i;
	struct vmem_hashlist *newhashlist;
	struct vmem_hashlist *oldhashlist;
	size_t oldhashsize;

	KASSERT(newhashsize > 0);

	newhashlist =
	    xmalloc(sizeof(struct vmem_hashlist *) * newhashsize, flags);
	if (newhashlist == NULL) {
		return ENOMEM;
	}
	for (i = 0; i < newhashsize; i++) {
		LIST_INIT(&newhashlist[i]);
	}

	if (!VMEM_TRYLOCK(vm)) {
		xfree(newhashlist);
		return EBUSY;
	}
	oldhashlist = vm->vm_hashlist;
	oldhashsize = vm->vm_hashsize;
	vm->vm_hashlist = newhashlist;
	vm->vm_hashsize = newhashsize;
	if (oldhashlist == NULL) {
		VMEM_UNLOCK(vm);
		return 0;
	}
	for (i = 0; i < oldhashsize; i++) {
		while ((bt = LIST_FIRST(&oldhashlist[i])) != NULL) {
			bt_rembusy(vm, bt); /* XXX */
			bt_insbusy(vm, bt);
		}
	}
	VMEM_UNLOCK(vm);

	xfree(oldhashlist);

	return 0;
}

/*
 * vmem_fit: check if a bt can satisfy the given restrictions.
 */

static vmem_addr_t
vmem_fit(const bt_t *bt, vmem_size_t size, vmem_size_t align, vmem_size_t phase,
    vmem_size_t nocross, vmem_addr_t minaddr, vmem_addr_t maxaddr)
{
	vmem_addr_t start;
	vmem_addr_t end;

	KASSERT(bt->bt_size >= size);

	/*
	 * XXX assumption: vmem_addr_t and vmem_size_t are
	 * unsigned integer of the same size.
	 */

	start = bt->bt_start;
	if (start < minaddr) {
		start = minaddr;
	}
	end = BT_END(bt);
	if (end > maxaddr - 1) {
		end = maxaddr - 1;
	}
	if (start >= end) {
		return VMEM_ADDR_NULL;
	}

	start = VMEM_ALIGNUP(start - phase, align) + phase;
	if (start < bt->bt_start) {
		start += align;
	}
	if (VMEM_CROSS_P(start, start + size - 1, nocross)) {
		KASSERT(align < nocross);
		start = VMEM_ALIGNUP(start - phase, nocross) + phase;
	}
	if (start < end && end - start >= size) {
		KASSERT((start & (align - 1)) == phase);
		KASSERT(!VMEM_CROSS_P(start, start + size - 1, nocross));
		KASSERT(minaddr <= start);
		KASSERT(maxaddr == 0 || start + size <= maxaddr);
		KASSERT(bt->bt_start <= start);
		KASSERT(start + size <= BT_END(bt));
		return start;
	}
	return VMEM_ADDR_NULL;
}

/* ---- vmem API */

/*
 * vmem_create: create an arena.
 *
 * => must not be called from interrupt context.
 */

vmem_t *
vmem_create(const char *name, vmem_addr_t base, vmem_size_t size,
    vmem_size_t quantum,
    vmem_addr_t (*allocfn)(vmem_t *, vmem_size_t, vmem_size_t *, vm_flag_t),
    void (*freefn)(vmem_t *, vmem_addr_t, vmem_size_t),
    vmem_t *source, vmem_size_t qcache_max, vm_flag_t flags,
    int ipl)
{
	vmem_t *vm;
	int i;
#if defined(_KERNEL)
	static ONCE_DECL(control);
#endif /* defined(_KERNEL) */

	KASSERT((flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
	KASSERT((~flags & (VM_SLEEP|VM_NOSLEEP)) != 0);

#if defined(_KERNEL)
	if (RUN_ONCE(&control, vmem_init)) {
		return NULL;
	}
#endif /* defined(_KERNEL) */
	vm = xmalloc(sizeof(*vm), flags);
	if (vm == NULL) {
		return NULL;
	}

	VMEM_LOCK_INIT(vm, ipl);
	vm->vm_name = name;
	vm->vm_quantum_mask = quantum - 1;
	vm->vm_quantum_shift = calc_order(quantum);
	KASSERT(ORDER2SIZE(vm->vm_quantum_shift) == quantum);
	vm->vm_allocfn = allocfn;
	vm->vm_freefn = freefn;
	vm->vm_source = source;
	vm->vm_nbusytag = 0;
#if defined(QCACHE)
	qc_init(vm, qcache_max, ipl);
#endif /* defined(QCACHE) */

	CIRCLEQ_INIT(&vm->vm_seglist);
	for (i = 0; i < VMEM_MAXORDER; i++) {
		LIST_INIT(&vm->vm_freelist[i]);
	}
	vm->vm_hashlist = NULL;
	if (vmem_rehash(vm, VMEM_HASHSIZE_INIT, flags)) {
		vmem_destroy1(vm);
		return NULL;
	}

	if (size != 0) {
		if (vmem_add(vm, base, size, flags) == 0) {
			vmem_destroy1(vm);
			return NULL;
		}
	}

#if defined(_KERNEL)
	mutex_enter(&vmem_list_lock);
	LIST_INSERT_HEAD(&vmem_list, vm, vm_alllist);
	mutex_exit(&vmem_list_lock);
#endif /* defined(_KERNEL) */

	return vm;
}

void
vmem_destroy(vmem_t *vm)
{

#if defined(_KERNEL)
	mutex_enter(&vmem_list_lock);
	LIST_REMOVE(vm, vm_alllist);
	mutex_exit(&vmem_list_lock);
#endif /* defined(_KERNEL) */

	vmem_destroy1(vm);
}

vmem_size_t
vmem_roundup_size(vmem_t *vm, vmem_size_t size)
{

	return (size + vm->vm_quantum_mask) & ~vm->vm_quantum_mask;
}

/*
 * vmem_alloc:
 *
 * => caller must ensure appropriate spl,
 *    if the arena can be accessed from interrupt context.
 */

vmem_addr_t
vmem_alloc(vmem_t *vm, vmem_size_t size, vm_flag_t flags)
{
	const vm_flag_t strat __unused = flags & VM_FITMASK;

	KASSERT((flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
	KASSERT((~flags & (VM_SLEEP|VM_NOSLEEP)) != 0);

	KASSERT(size > 0);
	KASSERT(strat == VM_BESTFIT || strat == VM_INSTANTFIT);
	if ((flags & VM_SLEEP) != 0) {
		ASSERT_SLEEPABLE();
	}

#if defined(QCACHE)
	if (size <= vm->vm_qcache_max) {
		int qidx = (size + vm->vm_quantum_mask) >> vm->vm_quantum_shift;
		qcache_t *qc = vm->vm_qcache[qidx - 1];

		return (vmem_addr_t)pool_cache_get(qc->qc_cache,
		    vmf_to_prf(flags));
	}
#endif /* defined(QCACHE) */

	return vmem_xalloc(vm, size, 0, 0, 0, 0, 0, flags);
}

vmem_addr_t
vmem_xalloc(vmem_t *vm, vmem_size_t size0, vmem_size_t align, vmem_size_t phase,
    vmem_size_t nocross, vmem_addr_t minaddr, vmem_addr_t maxaddr,
    vm_flag_t flags)
{
	struct vmem_freelist *list;
	struct vmem_freelist *first;
	struct vmem_freelist *end;
	bt_t *bt;
	bt_t *btnew;
	bt_t *btnew2;
	const vmem_size_t size = vmem_roundup_size(vm, size0);
	vm_flag_t strat = flags & VM_FITMASK;
	vmem_addr_t start;

	KASSERT(size0 > 0);
	KASSERT(size > 0);
	KASSERT(strat == VM_BESTFIT || strat == VM_INSTANTFIT);
	if ((flags & VM_SLEEP) != 0) {
		ASSERT_SLEEPABLE();
	}
	KASSERT((align & vm->vm_quantum_mask) == 0);
	KASSERT((align & (align - 1)) == 0);
	KASSERT((phase & vm->vm_quantum_mask) == 0);
	KASSERT((nocross & vm->vm_quantum_mask) == 0);
	KASSERT((nocross & (nocross - 1)) == 0);
	KASSERT((align == 0 && phase == 0) || phase < align);
	KASSERT(nocross == 0 || nocross >= size);
	KASSERT(maxaddr == 0 || minaddr < maxaddr);
	KASSERT(!VMEM_CROSS_P(phase, phase + size - 1, nocross));

	if (align == 0) {
		align = vm->vm_quantum_mask + 1;
	}
	btnew = bt_alloc(vm, flags);
	if (btnew == NULL) {
		return VMEM_ADDR_NULL;
	}
	btnew2 = bt_alloc(vm, flags); /* XXX not necessary if no restrictions */
	if (btnew2 == NULL) {
		bt_free(vm, btnew);
		return VMEM_ADDR_NULL;
	}

retry_strat:
	first = bt_freehead_toalloc(vm, size, strat);
	end = &vm->vm_freelist[VMEM_MAXORDER];
retry:
	bt = NULL;
	VMEM_LOCK(vm);
	if (strat == VM_INSTANTFIT) {
		for (list = first; list < end; list++) {
			bt = LIST_FIRST(list);
			if (bt != NULL) {
				start = vmem_fit(bt, size, align, phase,
				    nocross, minaddr, maxaddr);
				if (start != VMEM_ADDR_NULL) {
					goto gotit;
				}
			}
		}
	} else { /* VM_BESTFIT */
		for (list = first; list < end; list++) {
			LIST_FOREACH(bt, list, bt_freelist) {
				if (bt->bt_size >= size) {
					start = vmem_fit(bt, size, align, phase,
					    nocross, minaddr, maxaddr);
					if (start != VMEM_ADDR_NULL) {
						goto gotit;
					}
				}
			}
		}
	}
	VMEM_UNLOCK(vm);
#if 1
	if (strat == VM_INSTANTFIT) {
		strat = VM_BESTFIT;
		goto retry_strat;
	}
#endif
	if (align != vm->vm_quantum_mask + 1 || phase != 0 ||
	    nocross != 0 || minaddr != 0 || maxaddr != 0) {

		/*
		 * XXX should try to import a region large enough to
		 * satisfy restrictions?
		 */

		goto fail;
	}
	if (vmem_import(vm, size, flags) == 0) {
		goto retry;
	}
	/* XXX */
fail:
	bt_free(vm, btnew);
	bt_free(vm, btnew2);
	return VMEM_ADDR_NULL;

gotit:
	KASSERT(bt->bt_type == BT_TYPE_FREE);
	KASSERT(bt->bt_size >= size);
	bt_remfree(vm, bt);
	if (bt->bt_start != start) {
		btnew2->bt_type = BT_TYPE_FREE;
		btnew2->bt_start = bt->bt_start;
		btnew2->bt_size = start - bt->bt_start;
		bt->bt_start = start;
		bt->bt_size -= btnew2->bt_size;
		bt_insfree(vm, btnew2);
		bt_insseg(vm, btnew2, CIRCLEQ_PREV(bt, bt_seglist));
		btnew2 = NULL;
	}
	KASSERT(bt->bt_start == start);
	if (bt->bt_size != size && bt->bt_size - size > vm->vm_quantum_mask) {
		/* split */
		btnew->bt_type = BT_TYPE_BUSY;
		btnew->bt_start = bt->bt_start;
		btnew->bt_size = size;
		bt->bt_start = bt->bt_start + size;
		bt->bt_size -= size;
		bt_insfree(vm, bt);
		bt_insseg(vm, btnew, CIRCLEQ_PREV(bt, bt_seglist));
		bt_insbusy(vm, btnew);
		VMEM_UNLOCK(vm);
	} else {
		bt->bt_type = BT_TYPE_BUSY;
		bt_insbusy(vm, bt);
		VMEM_UNLOCK(vm);
		bt_free(vm, btnew);
		btnew = bt;
	}
	if (btnew2 != NULL) {
		bt_free(vm, btnew2);
	}
	KASSERT(btnew->bt_size >= size);
	btnew->bt_type = BT_TYPE_BUSY;

	return btnew->bt_start;
}

/*
 * vmem_free:
 *
 * => caller must ensure appropriate spl,
 *    if the arena can be accessed from interrupt context.
 */

void
vmem_free(vmem_t *vm, vmem_addr_t addr, vmem_size_t size)
{

	KASSERT(addr != VMEM_ADDR_NULL);
	KASSERT(size > 0);

#if defined(QCACHE)
	if (size <= vm->vm_qcache_max) {
		int qidx = (size + vm->vm_quantum_mask) >> vm->vm_quantum_shift;
		qcache_t *qc = vm->vm_qcache[qidx - 1];

		return pool_cache_put(qc->qc_cache, (void *)addr);
	}
#endif /* defined(QCACHE) */

	vmem_xfree(vm, addr, size);
}

void
vmem_xfree(vmem_t *vm, vmem_addr_t addr, vmem_size_t size)
{
	bt_t *bt;
	bt_t *t;

	KASSERT(addr != VMEM_ADDR_NULL);
	KASSERT(size > 0);

	VMEM_LOCK(vm);

	bt = bt_lookupbusy(vm, addr);
	KASSERT(bt != NULL);
	KASSERT(bt->bt_start == addr);
	KASSERT(bt->bt_size == vmem_roundup_size(vm, size) ||
	    bt->bt_size - vmem_roundup_size(vm, size) <= vm->vm_quantum_mask);
	KASSERT(bt->bt_type == BT_TYPE_BUSY);
	bt_rembusy(vm, bt);
	bt->bt_type = BT_TYPE_FREE;

	/* coalesce */
	t = CIRCLEQ_NEXT(bt, bt_seglist);
	if (t != NULL && t->bt_type == BT_TYPE_FREE) {
		KASSERT(BT_END(bt) == t->bt_start);
		bt_remfree(vm, t);
		bt_remseg(vm, t);
		bt->bt_size += t->bt_size;
		bt_free(vm, t);
	}
	t = CIRCLEQ_PREV(bt, bt_seglist);
	if (t != NULL && t->bt_type == BT_TYPE_FREE) {
		KASSERT(BT_END(t) == bt->bt_start);
		bt_remfree(vm, t);
		bt_remseg(vm, t);
		bt->bt_size += t->bt_size;
		bt->bt_start = t->bt_start;
		bt_free(vm, t);
	}

	t = CIRCLEQ_PREV(bt, bt_seglist);
	KASSERT(t != NULL);
	KASSERT(BT_ISSPAN_P(t) || t->bt_type == BT_TYPE_BUSY);
	if (vm->vm_freefn != NULL && t->bt_type == BT_TYPE_SPAN &&
	    t->bt_size == bt->bt_size) {
		vmem_addr_t spanaddr;
		vmem_size_t spansize;

		KASSERT(t->bt_start == bt->bt_start);
		spanaddr = bt->bt_start;
		spansize = bt->bt_size;
		bt_remseg(vm, bt);
		bt_free(vm, bt);
		bt_remseg(vm, t);
		bt_free(vm, t);
		VMEM_UNLOCK(vm);
		(*vm->vm_freefn)(vm->vm_source, spanaddr, spansize);
	} else {
		bt_insfree(vm, bt);
		VMEM_UNLOCK(vm);
	}
}

/*
 * vmem_add:
 *
 * => caller must ensure appropriate spl,
 *    if the arena can be accessed from interrupt context.
 */

vmem_addr_t
vmem_add(vmem_t *vm, vmem_addr_t addr, vmem_size_t size, vm_flag_t flags)
{

	return vmem_add1(vm, addr, size, flags, BT_TYPE_SPAN_STATIC);
}

/*
 * vmem_reap: reap unused resources.
 *
 * => return true if we successfully reaped something.
 */

bool
vmem_reap(vmem_t *vm)
{
	bool didsomething = false;

#if defined(QCACHE)
	didsomething = qc_reap(vm);
#endif /* defined(QCACHE) */
	return didsomething;
}

/* ---- rehash */

#if defined(_KERNEL)
static struct callout vmem_rehash_ch;
static int vmem_rehash_interval;
static struct workqueue *vmem_rehash_wq;
static struct work vmem_rehash_wk;

static void
vmem_rehash_all(struct work *wk, void *dummy)
{
	vmem_t *vm;

	KASSERT(wk == &vmem_rehash_wk);
	mutex_enter(&vmem_list_lock);
	LIST_FOREACH(vm, &vmem_list, vm_alllist) {
		size_t desired;
		size_t current;

		if (!VMEM_TRYLOCK(vm)) {
			continue;
		}
		desired = vm->vm_nbusytag;
		current = vm->vm_hashsize;
		VMEM_UNLOCK(vm);

		if (desired > VMEM_HASHSIZE_MAX) {
			desired = VMEM_HASHSIZE_MAX;
		} else if (desired < VMEM_HASHSIZE_MIN) {
			desired = VMEM_HASHSIZE_MIN;
		}
		if (desired > current * 2 || desired * 2 < current) {
			vmem_rehash(vm, desired, VM_NOSLEEP);
		}
	}
	mutex_exit(&vmem_list_lock);

	callout_schedule(&vmem_rehash_ch, vmem_rehash_interval);
}

static void
vmem_rehash_all_kick(void *dummy)
{

	workqueue_enqueue(vmem_rehash_wq, &vmem_rehash_wk, NULL);
}

void
vmem_rehash_start(void)
{
	int error;

	error = workqueue_create(&vmem_rehash_wq, "vmem_rehash",
	    vmem_rehash_all, NULL, PRI_VM, IPL_SOFTCLOCK, WQ_MPSAFE);
	if (error) {
		panic("%s: workqueue_create %d\n", __func__, error);
	}
	callout_init(&vmem_rehash_ch, CALLOUT_MPSAFE);
	callout_setfunc(&vmem_rehash_ch, vmem_rehash_all_kick, NULL);

	vmem_rehash_interval = hz * 10;
	callout_schedule(&vmem_rehash_ch, vmem_rehash_interval);
}
#endif /* defined(_KERNEL) */

/* ---- debug */

#if defined(DDB)
static bt_t *
vmem_whatis_lookup(vmem_t *vm, uintptr_t addr)
{
	bt_t *bt;

	CIRCLEQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
		if (BT_ISSPAN_P(bt)) {
			continue;
		}
		if (bt->bt_start <= addr && addr < BT_END(bt)) {
			return bt;
		}
	}

	return NULL;
}

void
vmem_whatis(uintptr_t addr, void (*pr)(const char *, ...))
{
	vmem_t *vm;

	LIST_FOREACH(vm, &vmem_list, vm_alllist) {
		bt_t *bt;

		bt = vmem_whatis_lookup(vm, addr);
		if (bt == NULL) {
			continue;
		}
		(*pr)("%p is %p+%zu in VMEM '%s' (%s)\n",
		    (void *)addr, (void *)bt->bt_start,
		    (size_t)(addr - bt->bt_start), vm->vm_name,
		    (bt->bt_type == BT_TYPE_BUSY) ? "allocated" : "free");
	}
}
#endif /* defined(DDB) */

#if defined(VMEM_DEBUG)

#if !defined(_KERNEL)
#include <stdio.h>
#endif /* !defined(_KERNEL) */

void bt_dump(const bt_t *);

void
bt_dump(const bt_t *bt)
{

	printf("\t%p: %" PRIu64 ", %" PRIu64 ", %d\n",
	    bt, (uint64_t)bt->bt_start, (uint64_t)bt->bt_size,
	    bt->bt_type);
}

void
vmem_dump(const vmem_t *vm)
{
	const bt_t *bt;
	int i;

	printf("vmem %p '%s'\n", vm, vm->vm_name);
	CIRCLEQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
		bt_dump(bt);
	}

	for (i = 0; i < VMEM_MAXORDER; i++) {
		const struct vmem_freelist *fl = &vm->vm_freelist[i];

		if (LIST_EMPTY(fl)) {
			continue;
		}

		printf("freelist[%d]\n", i);
		LIST_FOREACH(bt, fl, bt_freelist) {
			bt_dump(bt);
			if (bt->bt_size) {
			}
		}
	}
}

#if !defined(_KERNEL)

int
main()
{
	vmem_t *vm;
	vmem_addr_t p;
	struct reg {
		vmem_addr_t p;
		vmem_size_t sz;
		bool x;
	} *reg = NULL;
	int nreg = 0;
	int nalloc = 0;
	int nfree = 0;
	vmem_size_t total = 0;
#if 1
	vm_flag_t strat = VM_INSTANTFIT;
#else
	vm_flag_t strat = VM_BESTFIT;
#endif

	vm = vmem_create("test", VMEM_ADDR_NULL, 0, 1,
	    NULL, NULL, NULL, 0, VM_SLEEP, 0 /* ipl; unused in the userland build */);
	if (vm == NULL) {
		printf("vmem_create\n");
		exit(EXIT_FAILURE);
	}
	vmem_dump(vm);

	p = vmem_add(vm, 100, 200, VM_SLEEP);
	p = vmem_add(vm, 2000, 1, VM_SLEEP);
	p = vmem_add(vm, 40000, 0x10000000>>12, VM_SLEEP);
	p = vmem_add(vm, 10000, 10000, VM_SLEEP);
	p = vmem_add(vm, 500, 1000, VM_SLEEP);
	vmem_dump(vm);
	for (;;) {
		struct reg *r;
		int t = rand() % 100;

		if (t > 45) {
			/* alloc */
			vmem_size_t sz = rand() % 500 + 1;
			bool x;
			vmem_size_t align, phase, nocross;
			vmem_addr_t minaddr, maxaddr;

			if (t > 70) {
				x = true;
				/* XXX */
				align = 1 << (rand() % 15);
				phase = rand() % 65536;
				nocross = 1 << (rand() % 15);
				if (align <= phase) {
					phase = 0;
				}
				if (VMEM_CROSS_P(phase, phase + sz - 1,
				    nocross)) {
					nocross = 0;
				}
				minaddr = rand() % 50000;
				maxaddr = rand() % 70000;
				if (minaddr > maxaddr) {
					minaddr = 0;
					maxaddr = 0;
				}
				printf("=== xalloc %" PRIu64
				    " align=%" PRIu64 ", phase=%" PRIu64
				    ", nocross=%" PRIu64 ", min=%" PRIu64
				    ", max=%" PRIu64 "\n",
				    (uint64_t)sz,
				    (uint64_t)align,
				    (uint64_t)phase,
				    (uint64_t)nocross,
				    (uint64_t)minaddr,
				    (uint64_t)maxaddr);
				p = vmem_xalloc(vm, sz, align, phase, nocross,
				    minaddr, maxaddr, strat|VM_SLEEP);
			} else {
				x = false;
				printf("=== alloc %" PRIu64 "\n", (uint64_t)sz);
				p = vmem_alloc(vm, sz, strat|VM_SLEEP);
			}
			printf("-> %" PRIu64 "\n", (uint64_t)p);
			vmem_dump(vm);
			if (p == VMEM_ADDR_NULL) {
				if (x) {
					continue;
				}
				break;
			}
			nreg++;
			reg = realloc(reg, sizeof(*reg) * nreg);
			r = &reg[nreg - 1];
			r->p = p;
			r->sz = sz;
			r->x = x;
			total += sz;
			nalloc++;
		} else if (nreg != 0) {
			/* free */
			r = &reg[rand() % nreg];
			printf("=== free %" PRIu64 ", %" PRIu64 "\n",
			    (uint64_t)r->p, (uint64_t)r->sz);
			if (r->x) {
				vmem_xfree(vm, r->p, r->sz);
			} else {
				vmem_free(vm, r->p, r->sz);
			}
			total -= r->sz;
			vmem_dump(vm);
			*r = reg[nreg - 1];
			nreg--;
			nfree++;
		}
		printf("total=%" PRIu64 "\n", (uint64_t)total);
	}
	fprintf(stderr, "total=%" PRIu64 ", nalloc=%d, nfree=%d\n",
	    (uint64_t)total, nalloc, nfree);
	exit(EXIT_SUCCESS);
}
#endif /* !defined(_KERNEL) */
#endif /* defined(VMEM_DEBUG) */