/*
 * Copyright (c) 2005 Jeffrey M. Hsu.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Jeffrey M. Hsu.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/kern/kern_objcache.c,v 1.3 2005/06/09 16:53:10 dillon Exp $
 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/globaldata.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/objcache.h>
#include <sys/thread.h>
#include <sys/thread2.h>

static MALLOC_DEFINE(M_OBJCACHE, "objcache", "Object Cache");
static MALLOC_DEFINE(M_OBJMAG, "objcache magazine", "Object Cache Magazine");

#define	INITIAL_MAG_CAPACITY	256

struct magazine {
	int			 rounds;
	int			 capacity;
	SLIST_ENTRY(magazine)	 nextmagazine;
	void			*objects[];
};

SLIST_HEAD(magazinelist, magazine);

/*
 * Per-cluster cache of magazines.
 * All fields in this structure are protected by the token.
 */
struct magazinedepot {
	/*
	 * The per-cpu object caches only exchange completely full or
	 * completely empty magazines with the depot layer, so we only
	 * need to cache these two types of magazines.
	 */
	struct magazinelist	fullmagazines;
	struct magazinelist	emptymagazines;
	int			magcapacity;

	/* protect this structure */
	struct lwkt_token	token;

	/* objects not yet allocated towards the limit */
	int			unallocated_objects;

	/* infrequently used fields */
	int			waiting;	/* waiting for another cpu to
						 * return a full magazine to
						 * the depot */
	int			contested;	/* depot contention count */
};

/*
 * per-cpu object cache
 * All fields in this structure are protected by crit_enter().
 */
struct percpu_objcache {
	struct magazine	*loaded_magazine;	/* active magazine */
	struct magazine	*previous_magazine;	/* backup magazine */

	/* statistics */
	int		gets_cumulative;	/* total calls to get */
	int		gets_null;		/* objcache_get returned NULL */
	int		puts_cumulative;	/* total calls to put */
	int		puts_othercluster;	/* returned to other cluster */

	/* infrequently used fields */
	int		waiting;	/* waiting for a thread on this cpu to
					 * return an obj to the per-cpu cache */
};

/* only until we have NUMA cluster topology information XXX */
#define MAXCLUSTERS 1
#define myclusterid 0
#define CLUSTER_OF(obj) 0

/*
 * Two-level object cache consisting of NUMA cluster-level depots of
 * fully loaded or completely empty magazines and cpu-level caches of
 * individual objects.
 */
struct objcache {
	char			*name;

	/* object constructor and destructor from blank storage */
	objcache_ctor_fn	*ctor;
	objcache_dtor_fn	*dtor;
	void			*private;

	/* interface to underlying allocator */
	objcache_alloc_fn	*alloc;
	objcache_free_fn	*free;
	void			*allocator_args;

	SLIST_ENTRY(objcache)	oc_next;

	/* NUMA-cluster level caches */
	struct magazinedepot	depot[MAXCLUSTERS];

	struct percpu_objcache	cache_percpu[];		/* per-cpu caches */
};

static struct lwkt_token objcachelist_token;
static SLIST_HEAD(objcachelist, objcache) allobjcaches;

static struct magazine *
mag_alloc(int capacity)
{
	struct magazine *mag;

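	/*
	 * Size the allocation so the trailing objects[] array can hold
	 * 'capacity' object pointers; __offsetof() yields the total
	 * structure size including the flexible array.
	 */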
	mag = malloc(__offsetof(struct magazine, objects[capacity]),
			M_OBJMAG, M_INTWAIT | M_ZERO);
	mag->capacity = capacity;
	mag->rounds = 0;
	return (mag);
}

/*
 * Create an object cache.
 */
struct objcache *
objcache_create(char *name, int cluster_limit, int mag_capacity,
		objcache_ctor_fn *ctor, objcache_dtor_fn *dtor, void *private,
		objcache_alloc_fn *alloc, objcache_free_fn *free,
		void *allocator_args)
{
	struct objcache *oc;
	struct magazinedepot *depot;
	lwkt_tokref olock;
	int cpuid;

	/* allocate object cache structure */
	oc = malloc(__offsetof(struct objcache, cache_percpu[ncpus]),
		    M_OBJCACHE, M_WAITOK | M_ZERO);
	oc->name = strdup(name, M_TEMP);
	oc->ctor = ctor;
	oc->dtor = dtor;
	oc->private = private;
	oc->alloc = alloc;
	oc->free = free;
	oc->allocator_args = allocator_args;

	/* initialize depots */
	depot = &oc->depot[0];

	lwkt_token_init(&depot->token);
	SLIST_INIT(&depot->fullmagazines);
	SLIST_INIT(&depot->emptymagazines);

	if (mag_capacity == 0)
		mag_capacity = INITIAL_MAG_CAPACITY;
	depot->magcapacity = mag_capacity;

	/*
	 * The cluster_limit must be large enough to hold at least three
	 * full magazines per cpu.
	 */
	if (cluster_limit == 0) {
		depot->unallocated_objects = -1;
	} else {
		if (cluster_limit < mag_capacity * ncpus * 3)
			cluster_limit = mag_capacity * ncpus * 3;
		depot->unallocated_objects = cluster_limit;
	}

	/* initialize per-cpu caches */
	for (cpuid = 0; cpuid < ncpus; cpuid++) {
		struct percpu_objcache *cache_percpu = &oc->cache_percpu[cpuid];

		cache_percpu->loaded_magazine = mag_alloc(mag_capacity);
		cache_percpu->previous_magazine = mag_alloc(mag_capacity);
	}
	lwkt_gettoken(&olock, &objcachelist_token);
	SLIST_INSERT_HEAD(&allobjcaches, oc, oc_next);
	lwkt_reltoken(&olock);

	return (oc);
}

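/*
 * Illustrative sketch (not part of the original file): how a client
 * subsystem might create and use a cache backed by kernel malloc.
 * The "foo" names and the M_FOO malloc type are hypothetical.
 *
 *	static struct objcache_malloc_args foo_margs = {
 *		sizeof(struct foo), M_FOO
 *	};
 *	struct objcache *foo_cache;
 *
 *	foo_cache = objcache_create("foo", 0, 0, foo_ctor, foo_dtor,
 *				    NULL, objcache_malloc_alloc,
 *				    objcache_malloc_free, &foo_margs);
 *
 *	obj = objcache_get(foo_cache, M_WAITOK);
 *	if (obj != NULL) {
 *		...
 *		objcache_put(foo_cache, obj);
 *	}
 */
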
#define MAGAZINE_EMPTY(mag)	((mag)->rounds == 0)
#define MAGAZINE_NOTEMPTY(mag)	((mag)->rounds != 0)
#define MAGAZINE_FULL(mag)	((mag)->rounds == (mag)->capacity)

/* exchange the contents of two magazine pointer variables */
#define	swap(x, y)	({ struct magazine *t = x; x = y; y = t; })

/*
 * Get an object from the object cache.
 */
void *
objcache_get(struct objcache *oc, int ocflags)
{
	struct percpu_objcache *cpucache = &oc->cache_percpu[mycpuid];
	struct magazine *loadedmag;
	struct magazine *emptymag;
	void *obj;
	struct magazinedepot *depot;
	lwkt_tokref ilock;

	crit_enter();
	++cpucache->gets_cumulative;

retry:
	/*
	 * Loaded magazine has an object.  This is the hot path.
	 * It is lock-free and uses a critical section to block
	 * out interrupt handlers on the same processor.
	 */
	loadedmag = cpucache->loaded_magazine;
	if (MAGAZINE_NOTEMPTY(loadedmag)) {
		obj = loadedmag->objects[--loadedmag->rounds];
		crit_exit();
		return (obj);
	}

	/* Previous magazine has an object. */
	if (MAGAZINE_NOTEMPTY(cpucache->previous_magazine)) {
		swap(cpucache->loaded_magazine, cpucache->previous_magazine);
		loadedmag = cpucache->loaded_magazine;
		obj = loadedmag->objects[--loadedmag->rounds];
		crit_exit();
		return (obj);
	}

	/*
	 * Both magazines empty.  Get a full magazine from the depot and
	 * move one of the empty ones to the depot.  Do this even if we
	 * block on the token to avoid a non-optimal corner case.
	 *
	 * Obtain the depot token.
	 */
	depot = &oc->depot[myclusterid];
	if (!lwkt_trytoken(&ilock, &depot->token)) {
		lwkt_gettoken(&ilock, &depot->token);
		++depot->contested;
	}

	/* Check if depot has a full magazine. */
	if (!SLIST_EMPTY(&depot->fullmagazines)) {
		emptymag = cpucache->previous_magazine;
		cpucache->previous_magazine = cpucache->loaded_magazine;
		cpucache->loaded_magazine = SLIST_FIRST(&depot->fullmagazines);
		SLIST_REMOVE_HEAD(&depot->fullmagazines, nextmagazine);

		/*
		 * Return emptymag to the depot.  Due to blocking it may
		 * not be entirely empty.
		 */
		if (MAGAZINE_EMPTY(emptymag)) {
			SLIST_INSERT_HEAD(&depot->emptymagazines,
					  emptymag, nextmagazine);
		} else {
			/*
			 * NOTE: magazine is not necessarily entirely full
			 */
			SLIST_INSERT_HEAD(&depot->fullmagazines,
					  emptymag, nextmagazine);
			if (depot->waiting)
				wakeup(depot);
		}
		lwkt_reltoken(&ilock);
		goto retry;
	}

	/*
	 * The depot does not have any non-empty magazines.  If we have
	 * not hit our object limit we can allocate a new object using
	 * the back-end allocator.
	 *
	 * note: unallocated_objects can be initialized to -1, which has
	 * the effect of removing any allocation limits.
	 */
	if (depot->unallocated_objects) {
		--depot->unallocated_objects;
		lwkt_reltoken(&ilock);
		crit_exit();

		obj = oc->alloc(oc->allocator_args, ocflags);
		if (obj) {
			if (oc->ctor(obj, oc->private, ocflags))
				return (obj);
			oc->free(obj, oc->allocator_args);
			obj = NULL;
		}
		if (obj == NULL) {
			/*
			 * The allocation or construction failed.  Return
			 * the count reserved above so the limit is not
			 * silently leaked.
			 */
			lwkt_gettoken(&ilock, &depot->token);
			++depot->unallocated_objects;
			if (depot->waiting)
				wakeup(depot);
			lwkt_reltoken(&ilock);

			crit_enter();
			/*
			 * Back out the cumulative count; keeping failed
			 * gets out of gets_cumulative makes debugging
			 * easier.
			 */
			++cpucache->gets_null;
			--cpucache->gets_cumulative;
			crit_exit();
		}
		return (obj);
	}

	/*
	 * Otherwise block if allowed to.
	 */
	if ((ocflags & (M_WAITOK|M_NULLOK)) == M_WAITOK) {
		++cpucache->waiting;
		++depot->waiting;
		tsleep(depot, PCATCH, "objcache_get", 0);
		--cpucache->waiting;
		--depot->waiting;
		lwkt_reltoken(&ilock);
		goto retry;
	}

	/*
	 * Otherwise fail.
	 */
	++cpucache->gets_null;
	--cpucache->gets_cumulative;
	crit_exit();
	lwkt_reltoken(&ilock);
	return (NULL);
}

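/*
 * Note: objcache_get() can return NULL even when M_WAITOK is specified,
 * since a back-end allocation or constructor failure is returned to the
 * caller rather than retried.
 */
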
/*
 * Wrapper for malloc allocation routines.
 */
void *
objcache_malloc_alloc(void *allocator_args, int ocflags)
{
	struct objcache_malloc_args *alloc_args = allocator_args;

	return (malloc(alloc_args->objsize, alloc_args->mtype,
		       ocflags & OC_MFLAGS));
}

void
objcache_malloc_free(void *obj, void *allocator_args)
{
	struct objcache_malloc_args *alloc_args = allocator_args;

	free(obj, alloc_args->mtype);
}

/*
 * Wrapper for allocation policies that pre-allocate at initialization time
 * and don't do run-time allocation.
 */
void *
objcache_nop_alloc(void *allocator_args, int ocflags)
{
	return (NULL);
}

void
objcache_nop_free(void *obj, void *allocator_args)
{
}

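/*
 * Caches using the nop wrappers must be populated up front (see
 * objcache_populate_linear() below); a get that cannot be satisfied
 * from the magazines simply fails with NULL.
 */
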
/*
 * Return an object to the object cache.
 */
void
objcache_put(struct objcache *oc, void *obj)
{
	struct percpu_objcache *cpucache = &oc->cache_percpu[mycpuid];
	struct magazine *loadedmag;
	struct magazinedepot *depot;
	lwkt_tokref ilock;

	crit_enter();
	++cpucache->puts_cumulative;

	if (CLUSTER_OF(obj) != myclusterid) {
#ifdef notyet
		/* use lazy IPI to send object to owning cluster XXX todo */
		++cpucache->puts_othercluster;
		crit_exit();
		return;
#endif
	}

retry:
	/*
	 * Free slot available in loaded magazine.  This is the hot path.
	 * It is lock-free and uses a critical section to block out interrupt
	 * handlers on the same processor.
	 */
	loadedmag = cpucache->loaded_magazine;
	if (!MAGAZINE_FULL(loadedmag)) {
		loadedmag->objects[loadedmag->rounds++] = obj;
		if (cpucache->waiting)
			wakeup(&oc->depot[myclusterid]);
		crit_exit();
		return;
	}

	/*
	 * Current magazine full, but previous magazine has room.  XXX
	 */
	if (!MAGAZINE_FULL(cpucache->previous_magazine)) {
		swap(cpucache->loaded_magazine, cpucache->previous_magazine);
		loadedmag = cpucache->loaded_magazine;
		loadedmag->objects[loadedmag->rounds++] = obj;
		if (cpucache->waiting)
			wakeup(&oc->depot[myclusterid]);
		crit_exit();
		return;
	}

	/*
	 * Both magazines full.  Get an empty magazine from the depot and
	 * move a full loaded magazine to the depot.  Even though the
	 * magazine may wind up with space available after we block on
	 * the token, we still cycle it through to avoid the non-optimal
	 * corner-case.
	 *
	 * Obtain the depot token.
	 */
	depot = &oc->depot[myclusterid];
	if (!lwkt_trytoken(&ilock, &depot->token)) {
		lwkt_gettoken(&ilock, &depot->token);
		++depot->contested;
	}

	/*
	 * If an empty magazine is available in the depot, cycle it
	 * through and retry.
	 */
	if (!SLIST_EMPTY(&depot->emptymagazines)) {
		loadedmag = cpucache->previous_magazine;
		cpucache->previous_magazine = cpucache->loaded_magazine;
		cpucache->loaded_magazine = SLIST_FIRST(&depot->emptymagazines);
		SLIST_REMOVE_HEAD(&depot->emptymagazines, nextmagazine);

		/*
		 * Return loadedmag to the depot.  Due to blocking it may
		 * not be entirely full and could even be empty.
		 */
		if (MAGAZINE_EMPTY(loadedmag)) {
			SLIST_INSERT_HEAD(&depot->emptymagazines,
					  loadedmag, nextmagazine);
		} else {
			SLIST_INSERT_HEAD(&depot->fullmagazines,
					  loadedmag, nextmagazine);
			if (depot->waiting)
				wakeup(depot);
		}
		lwkt_reltoken(&ilock);
		goto retry;
	}

	/*
	 * An empty mag is not available.  This is a corner case which can
	 * occur due to cpus holding partially full magazines.  Do not try
	 * to allocate a mag, just free the object.
	 */
	++depot->unallocated_objects;
	if (depot->waiting)
		wakeup(depot);
	lwkt_reltoken(&ilock);
	crit_exit();
	oc->dtor(obj, oc->private);
	oc->free(obj, oc->allocator_args);
}

/*
 * The object is being put back into the cache, but the caller has
 * indicated that the object is not in any shape to be reused and should
 * be dtor'd immediately.
 */
void
objcache_dtor(struct objcache *oc, void *obj)
{
	struct magazinedepot *depot;
	lwkt_tokref ilock;

	depot = &oc->depot[myclusterid];
	if (!lwkt_trytoken(&ilock, &depot->token)) {
		lwkt_gettoken(&ilock, &depot->token);
		++depot->contested;
	}
	++depot->unallocated_objects;
	if (depot->waiting)
		wakeup(depot);
	lwkt_reltoken(&ilock);
	oc->dtor(obj, oc->private);
	oc->free(obj, oc->allocator_args);
}

/*
 * Utility routine for objects that don't require any de-construction.
 */
void
null_dtor(void *obj, void *private)
{
	/* do nothing */
}

/*
 * De-construct and de-allocate objects in a magazine.
 * Returns the number of objects freed.
 * Does not de-allocate the magazine itself.
 */
static int
mag_purge(struct objcache *oc, struct magazine *mag)
{
	int ndeleted;
	void *obj;

	ndeleted = 0;
	crit_enter();
	while (mag->rounds) {
		obj = mag->objects[--mag->rounds];
		crit_exit();		/* the dtor and free may block */
		oc->dtor(obj, oc->private);
		oc->free(obj, oc->allocator_args);
		++ndeleted;
		crit_enter();
	}
	crit_exit();
	return (ndeleted);
}

/*
 * De-allocate magazines in a magazine list.  If purgeall is FALSE, stop
 * as soon as at least one object has been freed.
 * Returns the number of objects de-allocated.
 */
static int
maglist_purge(struct objcache *oc, struct magazinelist *maglist,
	      boolean_t purgeall)
{
	struct magazine *mag;
	int ndeleted = 0;

	/* can't use SLIST_FOREACH because blocking releases the depot token */
	while ((mag = SLIST_FIRST(maglist))) {
		SLIST_REMOVE_HEAD(maglist, nextmagazine);
		ndeleted += mag_purge(oc, mag);		/* could block! */
		free(mag, M_OBJMAG);			/* could block! */
		if (!purgeall && ndeleted > 0)
			break;
	}
	return (ndeleted);
}

/*
 * De-allocates all magazines on the full and empty magazine lists.
 */
static void
depot_purge(struct magazinedepot *depot, struct objcache *oc)
{
	depot->unallocated_objects +=
		maglist_purge(oc, &depot->fullmagazines, TRUE);
	depot->unallocated_objects +=
		maglist_purge(oc, &depot->emptymagazines, TRUE);
	if (depot->unallocated_objects && depot->waiting)
		wakeup(depot);
}

#ifdef notneeded
void
objcache_reclaim(struct objcache *oc)
{
	struct percpu_objcache *cache_percpu = &oc->cache_percpu[mycpuid];
	struct magazinedepot *depot = &oc->depot[myclusterid];

	mag_purge(oc, cache_percpu->loaded_magazine);
	mag_purge(oc, cache_percpu->previous_magazine);

	/* XXX need depot token */
	depot_purge(depot, oc);
}
#endif

/*
 * Try to free up some memory.  Return as soon as some free memory is found.
 * For each object cache on the reclaim list, first try the current per-cpu
 * cache, then the full magazine depot.
 */
boolean_t
objcache_reclaimlist(struct objcache *oclist[], int nlist, int ocflags)
{
	struct objcache *oc;
	struct percpu_objcache *cpucache;
	struct magazinedepot *depot;
	lwkt_tokref ilock;
	int i, ndel;

	for (i = 0; i < nlist; i++) {
		oc = oclist[i];
		cpucache = &oc->cache_percpu[mycpuid];
		depot = &oc->depot[myclusterid];

		crit_enter();
		if ((ndel = mag_purge(oc, cpucache->loaded_magazine)) > 0 ||
		    (ndel = mag_purge(oc, cpucache->previous_magazine)) > 0) {
			crit_exit();
			lwkt_gettoken(&ilock, &depot->token);
			depot->unallocated_objects += ndel;
			if (depot->unallocated_objects && depot->waiting)
				wakeup(depot);
			lwkt_reltoken(&ilock);
			return (TRUE);
		}
		crit_exit();
		lwkt_gettoken(&ilock, &depot->token);
		if ((ndel =
		     maglist_purge(oc, &depot->fullmagazines, FALSE)) > 0) {
			depot->unallocated_objects += ndel;
			if (depot->unallocated_objects && depot->waiting)
				wakeup(depot);
			lwkt_reltoken(&ilock);
			return (TRUE);
		}
		lwkt_reltoken(&ilock);
	}
	return (FALSE);
}

/*
 * Destroy an object cache.  Must have no existing references.
 * XXX Not clear this is a useful API function.
 */
void
objcache_destroy(struct objcache *oc)
{
	struct percpu_objcache *cache_percpu;
	int clusterid, cpuid;

	/* XXX need depot token? */
	for (clusterid = 0; clusterid < MAXCLUSTERS; clusterid++)
		depot_purge(&oc->depot[clusterid], oc);

	for (cpuid = 0; cpuid < ncpus; cpuid++) {
		cache_percpu = &oc->cache_percpu[cpuid];

		mag_purge(oc, cache_percpu->loaded_magazine);
		free(cache_percpu->loaded_magazine, M_OBJMAG);

		mag_purge(oc, cache_percpu->previous_magazine);
		free(cache_percpu->previous_magazine, M_OBJMAG);
	}

	free(oc->name, M_TEMP);
	free(oc, M_OBJCACHE);
}

#if 0
/*
 * Populate the per-cluster depot with elements from a linear block
 * of memory.  Must be called individually for each cluster.
 * Populated depots should not be destroyed.
 */
void
objcache_populate_linear(struct objcache *oc, void *base, int nelts, int size)
{
	char *p = base;
	char *end = (char *)base + (nelts * size);
	struct magazinedepot *depot = &oc->depot[myclusterid];
	lwkt_tokref ilock;
	struct magazine sentinelfullmag = { 0, 0 };
	struct magazine *emptymag = &sentinelfullmag;

	lwkt_gettoken(&ilock, &depot->token);
	while (p < end) {
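		/*
		 * The zero-capacity sentinel always appears full
		 * (rounds == capacity == 0), forcing a real magazine
		 * to be allocated on the first pass through the loop.
		 */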
		if (MAGAZINE_FULL(emptymag)) {
			emptymag = mag_alloc(depot->magcapacity);
			SLIST_INSERT_HEAD(&depot->fullmagazines, emptymag,
					  nextmagazine);
		}
		emptymag->objects[emptymag->rounds++] = p;
		p += size;
	}
	depot->unallocated_objects += nelts;
	if (depot->unallocated_objects && depot->waiting)
		wakeup(depot);
	lwkt_reltoken(&ilock);
}
#endif

#if 0
/*
 * Check depot contention once a minute.
 * Two contested token acquisitions per second are allowed.
 */
static int objcache_rebalance_period;
static const int objcache_contention_rate = 120;
static struct callout objcache_callout;

#define MAXMAGSIZE 512

/*
 * Check depot contention and increase magazine size if necessary.
 */
static void
objcache_timer(void *dummy)
{
	struct objcache *oc;
	struct magazinedepot *depot;
	lwkt_tokref olock, dlock;

	lwkt_gettoken(&olock, &objcachelist_token);
	SLIST_FOREACH(oc, &allobjcaches, oc_next) {
		depot = &oc->depot[myclusterid];
		if (depot->magcapacity < MAXMAGSIZE) {
			if (depot->contested > objcache_contention_rate) {
				lwkt_gettoken(&dlock, &depot->token);
				depot_purge(depot, oc);
				depot->magcapacity *= 2;
				lwkt_reltoken(&dlock);
				printf("objcache_timer: increasing cache %s"
				       " magsize to %d, contested %d times\n",
				    oc->name, depot->magcapacity,
				    depot->contested);
			}
			depot->contested = 0;
		}
	}
	lwkt_reltoken(&olock);

	callout_reset(&objcache_callout, objcache_rebalance_period,
		      objcache_timer, NULL);
}

#endif

static void
objcache_init(void)
{
	lwkt_token_init(&objcachelist_token);
#if 0
	callout_init(&objcache_callout);
	objcache_rebalance_period = 60 * hz;
	callout_reset(&objcache_callout, objcache_rebalance_period,
		      objcache_timer, NULL);
#endif
}
SYSINIT(objcache, SI_SUB_CPU, SI_ORDER_ANY, objcache_init, 0);