1 /* 2 * Copyright (c) 2005 Jeffrey M. Hsu. All rights reserved. 3 * 4 * This code is derived from software contributed to The DragonFly Project 5 * by Jeffrey M. Hsu. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 3. Neither the name of The DragonFly Project nor the names of its 16 * contributors may be used to endorse or promote products derived 17 * from this software without specific, prior written permission. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 21 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 22 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 23 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 24 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, 25 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 26 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 27 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 28 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 29 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 30 * SUCH DAMAGE. 31 * 32 * $DragonFly: src/sys/kern/kern_objcache.c,v 1.11 2006/09/05 03:48:12 dillon Exp $ 33 */ 34 35 #include <sys/param.h> 36 #include <sys/kernel.h> 37 #include <sys/systm.h> 38 #include <sys/callout.h> 39 #include <sys/globaldata.h> 40 #include <sys/malloc.h> 41 #include <sys/queue.h> 42 #include <sys/objcache.h> 43 #include <sys/thread.h> 44 #include <sys/thread2.h> 45 46 static MALLOC_DEFINE(M_OBJCACHE, "objcache", "Object Cache"); 47 static MALLOC_DEFINE(M_OBJMAG, "objcache magazine", "Object Cache Magazine"); 48 49 #define INITIAL_MAG_CAPACITY 256 50 51 struct magazine { 52 int rounds; 53 int capacity; 54 SLIST_ENTRY(magazine) nextmagazine; 55 void *objects[]; 56 }; 57 58 SLIST_HEAD(magazinelist, magazine); 59 60 /* 61 * per-cluster cache of magazines 62 * All fields in this structure are protected by the token. 63 */ 64 struct magazinedepot { 65 /* 66 * The per-cpu object caches only exchanges completely full or 67 * completely empty magazines with the depot layer, so only have 68 * to cache these two types of magazines. 69 */ 70 struct magazinelist fullmagazines; 71 struct magazinelist emptymagazines; 72 int magcapacity; 73 74 /* protect this structure */ 75 struct lwkt_token token; 76 77 /* magazines not yet allocated towards limit */ 78 int unallocated_objects; 79 80 /* infrequently used fields */ 81 int waiting; /* waiting for another cpu to 82 * return a full magazine to 83 * the depot */ 84 int contested; /* depot contention count */ 85 }; 86 87 /* 88 * per-cpu object cache 89 * All fields in this structure are protected by crit_enter(). 90 */ 91 struct percpu_objcache { 92 struct magazine *loaded_magazine; /* active magazine */ 93 struct magazine *previous_magazine; /* backup magazine */ 94 95 /* statistics */ 96 int gets_cumulative; /* total calls to get */ 97 int gets_null; /* objcache_get returned NULL */ 98 int puts_cumulative; /* total calls to put */ 99 int puts_othercluster; /* returned to other cluster */ 100 101 /* infrequently used fields */ 102 int waiting; /* waiting for a thread on this cpu to 103 * return an obj to the per-cpu cache */ 104 }; 105 106 /* only until we have NUMA cluster topology information XXX */ 107 #define MAXCLUSTERS 1 108 #define myclusterid 0 109 #define CLUSTER_OF(obj) 0 110 111 /* 112 * Two-level object cache consisting of NUMA cluster-level depots of 113 * fully loaded or completely empty magazines and cpu-level caches of 114 * individual objects. 115 */ 116 struct objcache { 117 char *name; 118 119 /* object constructor and destructor from blank storage */ 120 objcache_ctor_fn *ctor; 121 objcache_dtor_fn *dtor; 122 void *private; 123 124 /* interface to underlying allocator */ 125 objcache_alloc_fn *alloc; 126 objcache_free_fn *free; 127 void *allocator_args; 128 129 SLIST_ENTRY(objcache) oc_next; 130 131 /* NUMA-cluster level caches */ 132 struct magazinedepot depot[MAXCLUSTERS]; 133 134 struct percpu_objcache cache_percpu[]; /* per-cpu caches */ 135 }; 136 137 static struct lwkt_token objcachelist_token; 138 static SLIST_HEAD(objcachelist, objcache) allobjcaches; 139 140 static struct magazine * 141 mag_alloc(int capacity) 142 { 143 struct magazine *mag; 144 145 mag = kmalloc(__offsetof(struct magazine, objects[capacity]), 146 M_OBJMAG, M_INTWAIT | M_ZERO); 147 mag->capacity = capacity; 148 mag->rounds = 0; 149 return (mag); 150 } 151 152 /* 153 * Create an object cache. 154 */ 155 struct objcache * 156 objcache_create(const char *name, int cluster_limit, int mag_capacity, 157 objcache_ctor_fn *ctor, objcache_dtor_fn *dtor, void *private, 158 objcache_alloc_fn *alloc, objcache_free_fn *free, 159 void *allocator_args) 160 { 161 struct objcache *oc; 162 struct magazinedepot *depot; 163 lwkt_tokref olock; 164 int cpuid; 165 166 /* allocate object cache structure */ 167 oc = kmalloc(__offsetof(struct objcache, cache_percpu[ncpus]), 168 M_OBJCACHE, M_WAITOK | M_ZERO); 169 oc->name = kstrdup(name, M_TEMP); 170 oc->ctor = ctor; 171 oc->dtor = dtor; 172 oc->private = private; 173 oc->free = free; 174 oc->allocator_args = allocator_args; 175 176 /* initialize depots */ 177 depot = &oc->depot[0]; 178 179 lwkt_token_init(&depot->token); 180 SLIST_INIT(&depot->fullmagazines); 181 SLIST_INIT(&depot->emptymagazines); 182 183 if (mag_capacity == 0) 184 mag_capacity = INITIAL_MAG_CAPACITY; 185 depot->magcapacity = mag_capacity; 186 187 /* 188 * The cluster_limit must be sufficient to have three magazines per 189 * cpu. 190 */ 191 if (cluster_limit == 0) { 192 depot->unallocated_objects = -1; 193 } else { 194 if (cluster_limit < mag_capacity * ncpus * 3) 195 cluster_limit = mag_capacity * ncpus * 3; 196 depot->unallocated_objects = cluster_limit; 197 } 198 oc->alloc = alloc; 199 200 /* initialize per-cpu caches */ 201 for (cpuid = 0; cpuid < ncpus; cpuid++) { 202 struct percpu_objcache *cache_percpu = &oc->cache_percpu[cpuid]; 203 204 cache_percpu->loaded_magazine = mag_alloc(mag_capacity); 205 cache_percpu->previous_magazine = mag_alloc(mag_capacity); 206 } 207 lwkt_gettoken(&olock, &objcachelist_token); 208 SLIST_INSERT_HEAD(&allobjcaches, oc, oc_next); 209 lwkt_reltoken(&olock); 210 211 return (oc); 212 } 213 214 struct objcache * 215 objcache_create_simple(malloc_type_t mtype, size_t objsize) 216 { 217 struct objcache_malloc_args *margs; 218 struct objcache *oc; 219 220 margs = kmalloc(sizeof(*margs), M_OBJCACHE, M_WAITOK|M_ZERO); 221 margs->objsize = objsize; 222 margs->mtype = mtype; 223 oc = objcache_create(mtype->ks_shortdesc, 0, 0, 224 null_ctor, null_dtor, NULL, 225 objcache_malloc_alloc, objcache_malloc_free, 226 margs); 227 return (oc); 228 } 229 230 #define MAGAZINE_EMPTY(mag) (mag->rounds == 0) 231 #define MAGAZINE_NOTEMPTY(mag) (mag->rounds != 0) 232 #define MAGAZINE_FULL(mag) (mag->rounds == mag->capacity) 233 234 #define swap(x, y) ({ struct magazine *t = x; x = y; y = t; }) 235 236 /* 237 * Get an object from the object cache. 238 * 239 * WARNING! ocflags are only used when we have to go to the underlying 240 * allocator, so we cannot depend on flags such as M_ZERO. 241 */ 242 void * 243 objcache_get(struct objcache *oc, int ocflags) 244 { 245 struct percpu_objcache *cpucache = &oc->cache_percpu[mycpuid]; 246 struct magazine *loadedmag; 247 struct magazine *emptymag; 248 void *obj; 249 struct magazinedepot *depot; 250 lwkt_tokref ilock; 251 252 crit_enter(); 253 ++cpucache->gets_cumulative; 254 255 retry: 256 /* 257 * Loaded magazine has an object. This is the hot path. 258 * It is lock-free and uses a critical section to block 259 * out interrupt handlers on the same processor. 260 */ 261 loadedmag = cpucache->loaded_magazine; 262 if (MAGAZINE_NOTEMPTY(loadedmag)) { 263 obj = loadedmag->objects[--loadedmag->rounds]; 264 crit_exit(); 265 return (obj); 266 } 267 268 /* Previous magazine has an object. */ 269 if (MAGAZINE_NOTEMPTY(cpucache->previous_magazine)) { 270 swap(cpucache->loaded_magazine, cpucache->previous_magazine); 271 loadedmag = cpucache->loaded_magazine; 272 obj = loadedmag->objects[--loadedmag->rounds]; 273 crit_exit(); 274 return (obj); 275 } 276 277 /* 278 * Both magazines empty. Get a full magazine from the depot and 279 * move one of the empty ones to the depot. 280 * 281 * Obtain the depot token. 282 */ 283 depot = &oc->depot[myclusterid]; 284 lwkt_gettoken(&ilock, &depot->token); 285 286 /* 287 * We might have blocked obtaining the token, we must recheck 288 * the cpucache before potentially falling through to the blocking 289 * code or we might deadlock the tsleep() on a low-memory machine. 290 */ 291 if (MAGAZINE_NOTEMPTY(cpucache->loaded_magazine) || 292 MAGAZINE_NOTEMPTY(cpucache->previous_magazine) 293 ) { 294 lwkt_reltoken(&ilock); 295 goto retry; 296 } 297 298 /* Check if depot has a full magazine. */ 299 if (!SLIST_EMPTY(&depot->fullmagazines)) { 300 emptymag = cpucache->previous_magazine; 301 cpucache->previous_magazine = cpucache->loaded_magazine; 302 cpucache->loaded_magazine = SLIST_FIRST(&depot->fullmagazines); 303 SLIST_REMOVE_HEAD(&depot->fullmagazines, nextmagazine); 304 305 /* 306 * Return emptymag to the depot. 307 */ 308 KKASSERT(MAGAZINE_EMPTY(emptymag)); 309 SLIST_INSERT_HEAD(&depot->emptymagazines, 310 emptymag, nextmagazine); 311 lwkt_reltoken(&ilock); 312 goto retry; 313 } 314 315 /* 316 * The depot does not have any non-empty magazines. If we have 317 * not hit our object limit we can allocate a new object using 318 * the back-end allocator. 319 * 320 * note: unallocated_objects can be initialized to -1, which has 321 * the effect of removing any allocation limits. 322 */ 323 if (depot->unallocated_objects) { 324 --depot->unallocated_objects; 325 lwkt_reltoken(&ilock); 326 crit_exit(); 327 328 obj = oc->alloc(oc->allocator_args, ocflags); 329 if (obj) { 330 if (oc->ctor(obj, oc->private, ocflags)) 331 return (obj); 332 oc->free(obj, oc->allocator_args); 333 lwkt_gettoken(&ilock, &depot->token); 334 ++depot->unallocated_objects; 335 if (depot->waiting) 336 wakeup(depot); 337 lwkt_reltoken(&ilock); 338 obj = NULL; 339 } 340 if (obj == NULL) { 341 crit_enter(); 342 /* 343 * makes debugging easier when gets_cumulative does 344 * not include gets_null. 345 */ 346 ++cpucache->gets_null; 347 --cpucache->gets_cumulative; 348 crit_exit(); 349 } 350 return(obj); 351 } 352 353 /* 354 * Otherwise block if allowed to. 355 */ 356 if ((ocflags & (M_WAITOK|M_NULLOK)) == M_WAITOK) { 357 ++cpucache->waiting; 358 ++depot->waiting; 359 tsleep(depot, 0, "objcache_get", 0); 360 --cpucache->waiting; 361 --depot->waiting; 362 lwkt_reltoken(&ilock); 363 goto retry; 364 } 365 366 /* 367 * Otherwise fail 368 */ 369 ++cpucache->gets_null; 370 --cpucache->gets_cumulative; 371 crit_exit(); 372 lwkt_reltoken(&ilock); 373 return (NULL); 374 } 375 376 /* 377 * Wrapper for malloc allocation routines. 378 */ 379 void * 380 objcache_malloc_alloc(void *allocator_args, int ocflags) 381 { 382 struct objcache_malloc_args *alloc_args = allocator_args; 383 384 return (kmalloc(alloc_args->objsize, alloc_args->mtype, 385 ocflags & OC_MFLAGS)); 386 } 387 388 void 389 objcache_malloc_free(void *obj, void *allocator_args) 390 { 391 struct objcache_malloc_args *alloc_args = allocator_args; 392 393 kfree(obj, alloc_args->mtype); 394 } 395 396 /* 397 * Wrapper for allocation policies that pre-allocate at initialization time 398 * and don't do run-time allocation. 399 */ 400 void * 401 objcache_nop_alloc(void *allocator_args, int ocflags) 402 { 403 return (NULL); 404 } 405 406 void 407 objcache_nop_free(void *obj, void *allocator_args) 408 { 409 } 410 411 /* 412 * Return an object to the object cache. 413 */ 414 void 415 objcache_put(struct objcache *oc, void *obj) 416 { 417 struct percpu_objcache *cpucache = &oc->cache_percpu[mycpuid]; 418 struct magazine *loadedmag; 419 struct magazinedepot *depot; 420 lwkt_tokref ilock; 421 422 crit_enter(); 423 ++cpucache->puts_cumulative; 424 425 if (CLUSTER_OF(obj) != myclusterid) { 426 #ifdef notyet 427 /* use lazy IPI to send object to owning cluster XXX todo */ 428 ++cpucache->puts_othercluster; 429 crit_exit(); 430 return; 431 #endif 432 } 433 434 retry: 435 /* 436 * Free slot available in loaded magazine. This is the hot path. 437 * It is lock-free and uses a critical section to block out interrupt 438 * handlers on the same processor. 439 */ 440 loadedmag = cpucache->loaded_magazine; 441 if (!MAGAZINE_FULL(loadedmag)) { 442 loadedmag->objects[loadedmag->rounds++] = obj; 443 if (cpucache->waiting) 444 wakeup_mycpu(&oc->depot[myclusterid]); 445 crit_exit(); 446 return; 447 } 448 449 /* 450 * Current magazine full, but previous magazine has room. XXX 451 */ 452 if (!MAGAZINE_FULL(cpucache->previous_magazine)) { 453 swap(cpucache->loaded_magazine, cpucache->previous_magazine); 454 loadedmag = cpucache->loaded_magazine; 455 loadedmag->objects[loadedmag->rounds++] = obj; 456 if (cpucache->waiting) 457 wakeup_mycpu(&oc->depot[myclusterid]); 458 crit_exit(); 459 return; 460 } 461 462 /* 463 * Both magazines full. Get an empty magazine from the depot and 464 * move a full loaded magazine to the depot. Even though the 465 * magazine may wind up with space available after we block on 466 * the token, we still cycle it through to avoid the non-optimal 467 * corner-case. 468 * 469 * Obtain the depot token. 470 */ 471 depot = &oc->depot[myclusterid]; 472 lwkt_gettoken(&ilock, &depot->token); 473 474 /* 475 * If an empty magazine is available in the depot, cycle it 476 * through and retry. 477 */ 478 if (!SLIST_EMPTY(&depot->emptymagazines)) { 479 loadedmag = cpucache->previous_magazine; 480 cpucache->previous_magazine = cpucache->loaded_magazine; 481 cpucache->loaded_magazine = SLIST_FIRST(&depot->emptymagazines); 482 SLIST_REMOVE_HEAD(&depot->emptymagazines, nextmagazine); 483 484 /* 485 * Return loadedmag to the depot. Due to blocking it may 486 * not be entirely full and could even be empty. 487 */ 488 if (MAGAZINE_EMPTY(loadedmag)) { 489 SLIST_INSERT_HEAD(&depot->emptymagazines, 490 loadedmag, nextmagazine); 491 } else { 492 SLIST_INSERT_HEAD(&depot->fullmagazines, 493 loadedmag, nextmagazine); 494 if (depot->waiting) 495 wakeup(depot); 496 } 497 lwkt_reltoken(&ilock); 498 goto retry; 499 } 500 501 /* 502 * An empty mag is not available. This is a corner case which can 503 * occur due to cpus holding partially full magazines. Do not try 504 * to allocate a mag, just free the object. 505 */ 506 ++depot->unallocated_objects; 507 if (depot->waiting) 508 wakeup(depot); 509 lwkt_reltoken(&ilock); 510 crit_exit(); 511 oc->dtor(obj, oc->private); 512 oc->free(obj, oc->allocator_args); 513 } 514 515 /* 516 * The object is being put back into the cache, but the caller has 517 * indicated that the object is not in any shape to be reused and should 518 * be dtor'd immediately. 519 */ 520 void 521 objcache_dtor(struct objcache *oc, void *obj) 522 { 523 struct magazinedepot *depot; 524 lwkt_tokref ilock; 525 526 depot = &oc->depot[myclusterid]; 527 lwkt_gettoken(&ilock, &depot->token); 528 ++depot->unallocated_objects; 529 if (depot->waiting) 530 wakeup(depot); 531 lwkt_reltoken(&ilock); 532 oc->dtor(obj, oc->private); 533 oc->free(obj, oc->allocator_args); 534 } 535 536 /* 537 * Utility routine for objects that don't require any de-construction. 538 */ 539 void 540 null_dtor(void *obj, void *private) 541 { 542 /* do nothing */ 543 } 544 545 boolean_t 546 null_ctor(void *obj, void *private, int ocflags) 547 { 548 return TRUE; 549 } 550 551 /* 552 * De-construct and de-allocate objects in a magazine. 553 * Returns the number of objects freed. 554 * Does not de-allocate the magazine itself. 555 */ 556 static int 557 mag_purge(struct objcache *oc, struct magazine *mag) 558 { 559 int ndeleted; 560 void *obj; 561 562 ndeleted = 0; 563 crit_enter(); 564 while (mag->rounds) { 565 obj = mag->objects[--mag->rounds]; 566 crit_exit(); 567 oc->dtor(obj, oc->private); 568 oc->free(obj, oc->allocator_args); 569 ++ndeleted; 570 crit_enter(); 571 } 572 crit_exit(); 573 return(ndeleted); 574 } 575 576 /* 577 * De-allocate all magazines in a magazine list. 578 * Returns number of objects de-allocated. 579 */ 580 static int 581 maglist_purge(struct objcache *oc, struct magazinelist *maglist, 582 boolean_t purgeall) 583 { 584 struct magazine *mag; 585 int ndeleted = 0; 586 587 /* can't use SLIST_FOREACH because blocking releases the depot token */ 588 while ((mag = SLIST_FIRST(maglist))) { 589 SLIST_REMOVE_HEAD(maglist, nextmagazine); 590 ndeleted += mag_purge(oc, mag); /* could block! */ 591 kfree(mag, M_OBJMAG); /* could block! */ 592 if (!purgeall && ndeleted > 0) 593 break; 594 } 595 return (ndeleted); 596 } 597 598 /* 599 * De-allocates all magazines on the full and empty magazine lists. 600 */ 601 static void 602 depot_purge(struct magazinedepot *depot, struct objcache *oc) 603 { 604 depot->unallocated_objects += 605 maglist_purge(oc, &depot->fullmagazines, TRUE); 606 depot->unallocated_objects += 607 maglist_purge(oc, &depot->emptymagazines, TRUE); 608 if (depot->unallocated_objects && depot->waiting) 609 wakeup(depot); 610 } 611 612 #ifdef notneeded 613 void 614 objcache_reclaim(struct objcache *oc) 615 { 616 struct percpu_objcache *cache_percpu = &oc->cache_percpu[myclusterid]; 617 struct magazinedepot *depot = &oc->depot[myclusterid]; 618 619 mag_purge(oc, cache_percpu->loaded_magazine); 620 mag_purge(oc, cache_percpu->previous_magazine); 621 622 /* XXX need depot token */ 623 depot_purge(depot, oc); 624 } 625 #endif 626 627 /* 628 * Try to free up some memory. Return as soon as some free memory found. 629 * For each object cache on the reclaim list, first try the current per-cpu 630 * cache, then the full magazine depot. 631 */ 632 boolean_t 633 objcache_reclaimlist(struct objcache *oclist[], int nlist, int ocflags) 634 { 635 struct objcache *oc; 636 struct percpu_objcache *cpucache; 637 struct magazinedepot *depot; 638 lwkt_tokref ilock; 639 int i, ndel; 640 641 for (i = 0; i < nlist; i++) { 642 oc = oclist[i]; 643 cpucache = &oc->cache_percpu[mycpuid]; 644 depot = &oc->depot[myclusterid]; 645 646 crit_enter(); 647 if ((ndel = mag_purge(oc, cpucache->loaded_magazine)) > 0 || 648 (ndel = mag_purge(oc, cpucache->previous_magazine)) > 0) { 649 crit_exit(); 650 lwkt_gettoken(&ilock, &depot->token); 651 depot->unallocated_objects += ndel; 652 if (depot->unallocated_objects && depot->waiting) 653 wakeup(depot); 654 lwkt_reltoken(&ilock); 655 return (TRUE); 656 } 657 crit_exit(); 658 lwkt_gettoken(&ilock, &depot->token); 659 if ((ndel = 660 maglist_purge(oc, &depot->fullmagazines, FALSE)) > 0) { 661 depot->unallocated_objects += ndel; 662 if (depot->unallocated_objects && depot->waiting) 663 wakeup(depot); 664 lwkt_reltoken(&ilock); 665 return (TRUE); 666 } 667 lwkt_reltoken(&ilock); 668 } 669 return (FALSE); 670 } 671 672 /* 673 * Destroy an object cache. Must have no existing references. 674 * XXX Not clear this is a useful API function. 675 */ 676 void 677 objcache_destroy(struct objcache *oc) 678 { 679 struct percpu_objcache *cache_percpu; 680 int clusterid, cpuid; 681 682 /* XXX need depot token? */ 683 for (clusterid = 0; clusterid < MAXCLUSTERS; clusterid++) 684 depot_purge(&oc->depot[clusterid], oc); 685 686 for (cpuid = 0; cpuid < ncpus; cpuid++) { 687 cache_percpu = &oc->cache_percpu[cpuid]; 688 689 mag_purge(oc, cache_percpu->loaded_magazine); 690 kfree(cache_percpu->loaded_magazine, M_OBJMAG); 691 692 mag_purge(oc, cache_percpu->previous_magazine); 693 kfree(cache_percpu->previous_magazine, M_OBJMAG); 694 } 695 696 kfree(oc->name, M_TEMP); 697 kfree(oc, M_OBJCACHE); 698 } 699 700 #if 0 701 /* 702 * Populate the per-cluster depot with elements from a linear block 703 * of memory. Must be called for individually for each cluster. 704 * Populated depots should not be destroyed. 705 */ 706 void 707 objcache_populate_linear(struct objcache *oc, void *base, int nelts, int size) 708 { 709 char *p = base; 710 char *end = (char *)base + (nelts * size); 711 struct magazinedepot *depot = &oc->depot[myclusterid]; 712 lwkt_tokref ilock; 713 struct magazine sentinelfullmag = { 0, 0 }; 714 struct magazine *emptymag = &sentinelfullmag; 715 716 lwkt_gettoken(&ilock, &depot->token); 717 while (p < end) { 718 if (MAGAZINE_FULL(emptymag)) { 719 emptymag = mag_alloc(depot->magcapacity); 720 SLIST_INSERT_HEAD(&depot->fullmagazines, emptymag, 721 nextmagazine); 722 } 723 emptymag->objects[emptymag->rounds++] = p; 724 p += size; 725 } 726 depot->unallocated_objects += nelts; 727 if (depot->unallocated_objects && depot->waiting) 728 wakeup(depot); 729 lwkt_reltoken(&ilock); 730 } 731 #endif 732 733 #if 0 734 /* 735 * Check depot contention once a minute. 736 * 2 contested locks per second allowed. 737 */ 738 static int objcache_rebalance_period; 739 static const int objcache_contention_rate = 120; 740 static struct callout objcache_callout; 741 742 #define MAXMAGSIZE 512 743 744 /* 745 * Check depot contention and increase magazine size if necessary. 746 */ 747 static void 748 objcache_timer(void *dummy) 749 { 750 struct objcache *oc; 751 struct magazinedepot *depot; 752 lwkt_tokref olock, dlock; 753 754 lwkt_gettoken(&olock, &objcachelist_token); 755 SLIST_FOREACH(oc, &allobjcaches, oc_next) { 756 depot = &oc->depot[myclusterid]; 757 if (depot->magcapacity < MAXMAGSIZE) { 758 if (depot->contested > objcache_contention_rate) { 759 lwkt_gettoken(&dlock, &depot->token); 760 depot_purge(depot, oc); 761 depot->magcapacity *= 2; 762 lwkt_reltoken(&dlock); 763 printf("objcache_timer: increasing cache %s" 764 " magsize to %d, contested %d times\n", 765 oc->name, depot->magcapacity, 766 depot->contested); 767 } 768 depot->contested = 0; 769 } 770 } 771 lwkt_reltoken(&olock); 772 773 callout_reset(&objcache_callout, objcache_rebalance_period, 774 objcache_timer, NULL); 775 } 776 777 #endif 778 779 static void 780 objcache_init(void) 781 { 782 lwkt_token_init(&objcachelist_token); 783 #if 0 784 callout_init(&objcache_callout); 785 objcache_rebalance_period = 60 * hz; 786 callout_reset(&objcache_callout, objcache_rebalance_period, 787 objcache_timer, NULL); 788 #endif 789 } 790 SYSINIT(objcache, SI_SUB_CPU, SI_ORDER_ANY, objcache_init, 0); 791