Lines Matching defs:skc

136 static void spl_cache_shrink(spl_kmem_cache_t *skc, void *obj);
139 kv_alloc(spl_kmem_cache_t *skc, int size, int flags)
144 if (skc->skc_flags & KMC_RECLAIMABLE)
155 kv_free(spl_kmem_cache_t *skc, void *ptr, int size)
179 spl_sks_size(spl_kmem_cache_t *skc)
182 skc->skc_obj_align, uint32_t));
189 spl_obj_size(spl_kmem_cache_t *skc)
191 uint32_t align = skc->skc_obj_align;
193 return (P2ROUNDUP_TYPED(skc->skc_obj_size, align, uint32_t) +
215 spl_sko_from_obj(spl_kmem_cache_t *skc, void *obj)
217 return (obj + P2ROUNDUP_TYPED(skc->skc_obj_size,
218 skc->skc_obj_align, uint32_t));
251 spl_slab_alloc(spl_kmem_cache_t *skc, int flags)
257 base = kv_alloc(skc, skc->skc_slab_size, flags);
263 sks->sks_objs = skc->skc_slab_objs;
265 sks->sks_cache = skc;
269 obj_size = spl_obj_size(skc);
272 void *obj = base + spl_sks_size(skc) + (i * obj_size);
274 ASSERT(IS_P2ALIGNED(obj, skc->skc_obj_align));
275 spl_kmem_obj_t *sko = spl_sko_from_obj(skc, obj);
288 * the 'skc->skc_lock' held but the actual free must be performed
295 spl_kmem_cache_t *skc;
300 skc = sks->sks_cache;
301 ASSERT(skc->skc_magic == SKC_MAGIC);
305 * slab from the skc->skc_partial_list. Finally add the slab
309 skc->skc_obj_total -= sks->sks_objs;
310 skc->skc_slab_total--;
320 spl_slab_reclaim(spl_kmem_cache_t *skc)
330 * at the end of skc->skc_partial_list, therefore once a non-empty
333 spin_lock(&skc->skc_lock);
335 &skc->skc_partial_list, sks_list) {
342 spin_unlock(&skc->skc_lock);
347 * skc->skc_lock since this allows the destructor to sleep, and
358 kv_free(skc, sks, skc->skc_slab_size);
412 spl_emergency_alloc(spl_kmem_cache_t *skc, int flags, void **obj)
416 int order = get_order(skc->skc_obj_size);
420 spin_lock(&skc->skc_lock);
421 empty = list_empty(&skc->skc_partial_list);
422 spin_unlock(&skc->skc_lock);
426 if (skc->skc_flags & KMC_RECLAIMABLE)
438 spin_lock(&skc->skc_lock);
439 empty = spl_emergency_insert(&skc->skc_emergency_tree, ske);
441 skc->skc_obj_total++;
442 skc->skc_obj_emergency++;
443 if (skc->skc_obj_emergency > skc->skc_obj_emergency_max)
444 skc->skc_obj_emergency_max = skc->skc_obj_emergency;
446 spin_unlock(&skc->skc_lock);
463 spl_emergency_free(spl_kmem_cache_t *skc, void *obj)
466 int order = get_order(skc->skc_obj_size);
468 spin_lock(&skc->skc_lock);
469 ske = spl_emergency_search(&skc->skc_emergency_tree, obj);
471 rb_erase(&ske->ske_node, &skc->skc_emergency_tree);
472 skc->skc_obj_emergency--;
473 skc->skc_obj_total--;
475 spin_unlock(&skc->skc_lock);
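spl_emergency_alloc() is the fallback taken when a slab cannot be grown: the object comes straight from the page allocator (get_order(skc_obj_size) pages) and a small tracking record is inserted into skc_emergency_tree, a red-black tree keyed by object address, so that spl_kmem_cache_free() can later recognize the pointer and hand it to spl_emergency_free(). The following user-space sketch shows only that bookkeeping idea; for brevity it swaps the kernel rb-tree for a fixed array (emergency_objs and EMERGENCY_MAX are hypothetical names), keeping the "look the pointer up on free" behaviour but not the O(log n) cost.

#include <stdio.h>
#include <stdlib.h>

/*
 * Toy registry of "emergency" objects. The SPL keys a red-black tree by
 * object address (skc_emergency_tree); a flat array stands in for it here.
 */
#define EMERGENCY_MAX	16
static void *emergency_objs[EMERGENCY_MAX];

/* Allocate outside the cache and remember the address (cf. spl_emergency_alloc). */
static void *
emergency_alloc(size_t size)
{
	for (int i = 0; i < EMERGENCY_MAX; i++) {
		if (emergency_objs[i] == NULL) {
			emergency_objs[i] = malloc(size);
			return (emergency_objs[i]);
		}
	}
	return (NULL);
}

/* On free, check whether the pointer was an emergency object (cf. spl_emergency_free). */
static int
emergency_free(void *obj)
{
	for (int i = 0; i < EMERGENCY_MAX; i++) {
		if (emergency_objs[i] == obj) {
			emergency_objs[i] = NULL;
			free(obj);
			return (0);	/* it was ours; done */
		}
	}
	return (-1);	/* not an emergency object; fall back to the cache */
}

int
main(void)
{
	void *obj = emergency_alloc(64);
	printf("emergency object %p, tracked free: %d\n", obj,
	    emergency_free(obj));
	return (0);
}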
491 spl_cache_flush(spl_kmem_cache_t *skc, spl_kmem_magazine_t *skm, int flush)
493 spin_lock(&skc->skc_lock);
495 ASSERT(skc->skc_magic == SKC_MAGIC);
500 spl_cache_shrink(skc, skm->skm_objs[i]);
506 spin_unlock(&skc->skc_lock);
516 spl_slab_size(spl_kmem_cache_t *skc, uint32_t *objs, uint32_t *size)
520 sks_size = spl_sks_size(skc);
521 obj_size = spl_obj_size(skc);
547 spl_magazine_size(spl_kmem_cache_t *skc)
549 uint32_t obj_size = spl_obj_size(skc);
574 spl_magazine_alloc(spl_kmem_cache_t *skc, int cpu)
578 sizeof (void *) * skc->skc_mag_size;
584 skm->skm_size = skc->skc_mag_size;
585 skm->skm_refill = skc->skc_mag_refill;
586 skm->skm_cache = skc;
608 spl_magazine_create(spl_kmem_cache_t *skc)
612 ASSERT((skc->skc_flags & KMC_SLAB) == 0);
614 skc->skc_mag = kzalloc(sizeof (spl_kmem_magazine_t *) *
616 skc->skc_mag_size = spl_magazine_size(skc);
617 skc->skc_mag_refill = (skc->skc_mag_size + 1) / 2;
620 skc->skc_mag[i] = spl_magazine_alloc(skc, i);
621 if (!skc->skc_mag[i]) {
623 spl_magazine_free(skc->skc_mag[i]);
625 kfree(skc->skc_mag);
637 spl_magazine_destroy(spl_kmem_cache_t *skc)
642 ASSERT((skc->skc_flags & KMC_SLAB) == 0);
645 skm = skc->skc_mag[i];
646 spl_cache_flush(skc, skm, skm->skm_avail);
650 kfree(skc->skc_mag);
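spl_magazine_create() builds one magazine per possible CPU; each magazine is a small per-CPU array of cached object pointers (skm_objs), sized by spl_magazine_size() and refilled in batches of skm_refill (half the magazine size, per the listing). The later spl_kmem_cache_alloc()/spl_kmem_cache_free() lines use it as a stack: allocation pops skm_objs[--skm_avail], free pushes back, and an empty or full magazine falls through to spl_cache_refill() or spl_cache_flush(). A minimal user-space sketch of that fast path; the struct layout and names here are illustrative, not the real spl_kmem_magazine_t.

#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-in for spl_kmem_magazine_t: a per-CPU object stack. */
typedef struct magazine {
	unsigned int	skm_avail;	/* objects currently cached */
	unsigned int	skm_size;	/* capacity of skm_objs[] */
	void		*skm_objs[8];
} magazine_t;

/* Fast-path allocation: pop a cached object if one is available. */
static void *
mag_alloc(magazine_t *skm)
{
	if (skm->skm_avail > 0)
		return (skm->skm_objs[--skm->skm_avail]);
	return (NULL);		/* slow path: spl_cache_refill() in the SPL */
}

/* Fast-path free: push the object back if the magazine has room. */
static int
mag_free(magazine_t *skm, void *obj)
{
	if (skm->skm_avail < skm->skm_size) {
		skm->skm_objs[skm->skm_avail++] = obj;
		return (0);
	}
	return (-1);		/* slow path: spl_cache_flush() in the SPL */
}

int
main(void)
{
	magazine_t skm = { .skm_avail = 0, .skm_size = 8 };
	void *obj = malloc(64);

	(void) mag_free(&skm, obj);		/* cache the object */
	printf("cached objects: %u\n", skm.skm_avail);
	printf("fast-path alloc returned %p\n", mag_alloc(&skm));
	free(obj);
	return (0);
}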
675 spl_kmem_cache_t *skc;
686 skc = kzalloc(sizeof (*skc), lflags);
687 if (skc == NULL)
690 skc->skc_magic = SKC_MAGIC;
691 skc->skc_name_size = strlen(name) + 1;
692 skc->skc_name = kmalloc(skc->skc_name_size, lflags);
693 if (skc->skc_name == NULL) {
694 kfree(skc);
697 strlcpy(skc->skc_name, name, skc->skc_name_size);
699 skc->skc_ctor = ctor;
700 skc->skc_dtor = dtor;
701 skc->skc_private = priv;
702 skc->skc_vmp = vmp;
703 skc->skc_linux_cache = NULL;
704 skc->skc_flags = flags;
705 skc->skc_obj_size = size;
706 skc->skc_obj_align = SPL_KMEM_CACHE_ALIGN;
707 atomic_set(&skc->skc_ref, 0);
709 INIT_LIST_HEAD(&skc->skc_list);
710 INIT_LIST_HEAD(&skc->skc_complete_list);
711 INIT_LIST_HEAD(&skc->skc_partial_list);
712 skc->skc_emergency_tree = RB_ROOT;
713 spin_lock_init(&skc->skc_lock);
714 init_waitqueue_head(&skc->skc_waitq);
715 skc->skc_slab_fail = 0;
716 skc->skc_slab_create = 0;
717 skc->skc_slab_destroy = 0;
718 skc->skc_slab_total = 0;
719 skc->skc_slab_alloc = 0;
720 skc->skc_slab_max = 0;
721 skc->skc_obj_total = 0;
722 skc->skc_obj_alloc = 0;
723 skc->skc_obj_max = 0;
724 skc->skc_obj_deadlock = 0;
725 skc->skc_obj_emergency = 0;
726 skc->skc_obj_emergency_max = 0;
728 rc = percpu_counter_init(&skc->skc_linux_alloc, 0, GFP_KERNEL);
730 kfree(skc);
741 skc->skc_obj_align = align;
749 if (!(skc->skc_flags & (KMC_SLAB | KMC_KVMEM))) {
756 skc->skc_flags |= KMC_SLAB;
762 skc->skc_flags |= KMC_KVMEM;
769 if (skc->skc_flags & KMC_KVMEM) {
770 rc = spl_slab_size(skc,
771 &skc->skc_slab_objs, &skc->skc_slab_size);
775 rc = spl_magazine_create(skc);
784 if (skc->skc_flags & KMC_RECLAIMABLE)
787 skc->skc_linux_cache = kmem_cache_create_usercopy(
788 skc->skc_name, size, align, slabflags, 0, size, NULL);
789 if (skc->skc_linux_cache == NULL)
794 list_add_tail(&skc->skc_list, &spl_kmem_cache_list);
797 return (skc);
799 kfree(skc->skc_name);
800 percpu_counter_destroy(&skc->skc_linux_alloc);
801 kfree(skc);
811 spl_kmem_cache_set_move(spl_kmem_cache_t *skc,
822 spl_kmem_cache_destroy(spl_kmem_cache_t *skc)
827 ASSERT(skc->skc_magic == SKC_MAGIC);
828 ASSERT(skc->skc_flags & (KMC_KVMEM | KMC_SLAB));
831 list_del_init(&skc->skc_list);
835 VERIFY(!test_and_set_bit(KMC_BIT_DESTROY, &skc->skc_flags));
837 spin_lock(&skc->skc_lock);
838 id = skc->skc_taskqid;
839 spin_unlock(&skc->skc_lock);
848 wait_event(wq, atomic_read(&skc->skc_ref) == 0);
850 if (skc->skc_flags & KMC_KVMEM) {
851 spl_magazine_destroy(skc);
852 spl_slab_reclaim(skc);
854 ASSERT(skc->skc_flags & KMC_SLAB);
855 kmem_cache_destroy(skc->skc_linux_cache);
858 spin_lock(&skc->skc_lock);
864 ASSERT3U(skc->skc_slab_alloc, ==, 0);
865 ASSERT3U(skc->skc_obj_alloc, ==, 0);
866 ASSERT3U(skc->skc_slab_total, ==, 0);
867 ASSERT3U(skc->skc_obj_total, ==, 0);
868 ASSERT3U(skc->skc_obj_emergency, ==, 0);
869 ASSERT(list_empty(&skc->skc_complete_list));
871 ASSERT3U(percpu_counter_sum(&skc->skc_linux_alloc), ==, 0);
872 percpu_counter_destroy(&skc->skc_linux_alloc);
874 spin_unlock(&skc->skc_lock);
876 kfree(skc->skc_name);
877 kfree(skc);
886 spl_cache_obj(spl_kmem_cache_t *skc, spl_kmem_slab_t *sks)
890 ASSERT(skc->skc_magic == SKC_MAGIC);
902 skc->skc_obj_alloc++;
905 if (skc->skc_obj_alloc > skc->skc_obj_max)
906 skc->skc_obj_max = skc->skc_obj_alloc;
910 skc->skc_slab_alloc++;
912 if (skc->skc_slab_alloc > skc->skc_slab_max)
913 skc->skc_slab_max = skc->skc_slab_alloc;
925 __spl_cache_grow(spl_kmem_cache_t *skc, int flags)
930 sks = spl_slab_alloc(skc, flags);
933 spin_lock(&skc->skc_lock);
935 skc->skc_slab_total++;
936 skc->skc_obj_total += sks->sks_objs;
937 list_add_tail(&sks->sks_list, &skc->skc_partial_list);
940 clear_bit(KMC_BIT_DEADLOCKED, &skc->skc_flags);
943 spin_unlock(&skc->skc_lock);
952 spl_kmem_cache_t *skc = ska->ska_cache;
954 int error = __spl_cache_grow(skc, ska->ska_flags);
956 atomic_dec(&skc->skc_ref);
958 clear_bit(KMC_BIT_GROWING, &skc->skc_flags);
961 wake_up_all(&skc->skc_waitq);
970 spl_cache_grow_wait(spl_kmem_cache_t *skc)
972 return (!test_bit(KMC_BIT_GROWING, &skc->skc_flags));
981 spl_cache_grow(spl_kmem_cache_t *skc, int flags, void **obj)
986 ASSERT(skc->skc_magic == SKC_MAGIC);
987 ASSERT((skc->skc_flags & KMC_SLAB) == 0);
997 return (spl_emergency_alloc(skc, flags, obj));
1005 if (test_bit(KMC_BIT_REAPING, &skc->skc_flags)) {
1006 rc = wait_on_bit(&skc->skc_flags, KMC_BIT_REAPING,
1026 if (test_and_set_bit(KMC_BIT_GROWING, &skc->skc_flags) == 0) {
1031 clear_bit_unlock(KMC_BIT_GROWING, &skc->skc_flags);
1033 wake_up_all(&skc->skc_waitq);
1037 atomic_inc(&skc->skc_ref);
1038 ska->ska_cache = skc;
1054 if (test_bit(KMC_BIT_DEADLOCKED, &skc->skc_flags)) {
1055 rc = spl_emergency_alloc(skc, flags, obj);
1057 remaining = wait_event_timeout(skc->skc_waitq,
1058 spl_cache_grow_wait(skc), HZ / 10);
1061 spin_lock(&skc->skc_lock);
1062 if (test_bit(KMC_BIT_GROWING, &skc->skc_flags)) {
1063 set_bit(KMC_BIT_DEADLOCKED, &skc->skc_flags);
1064 skc->skc_obj_deadlock++;
1066 spin_unlock(&skc->skc_lock);
1083 spl_cache_refill(spl_kmem_cache_t *skc, spl_kmem_magazine_t *skm, int flags)
1089 ASSERT(skc->skc_magic == SKC_MAGIC);
1093 spin_lock(&skc->skc_lock);
1097 if (list_empty(&skc->skc_partial_list)) {
1098 spin_unlock(&skc->skc_lock);
1101 rc = spl_cache_grow(skc, flags, &obj);
1112 if (skm != skc->skc_mag[smp_processor_id()])
1122 spin_lock(&skc->skc_lock);
1127 sks = list_entry((&skc->skc_partial_list)->next,
1142 spl_cache_obj(skc, sks);
1148 list_add(&sks->sks_list, &skc->skc_complete_list);
1152 spin_unlock(&skc->skc_lock);
1161 spl_cache_shrink(spl_kmem_cache_t *skc, void *obj)
1166 ASSERT(skc->skc_magic == SKC_MAGIC);
1168 sko = spl_sko_from_obj(skc, obj);
1172 ASSERT(sks->sks_cache == skc);
1177 skc->skc_obj_alloc--;
1186 list_add(&sks->sks_list, &skc->skc_partial_list);
1195 list_add_tail(&sks->sks_list, &skc->skc_partial_list);
1196 skc->skc_slab_alloc--;
1205 spl_kmem_cache_alloc(spl_kmem_cache_t *skc, int flags)
1211 ASSERT(skc->skc_magic == SKC_MAGIC);
1212 ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags));
1219 if (skc->skc_flags & KMC_SLAB) {
1220 struct kmem_cache *slc = skc->skc_linux_cache;
1232 percpu_counter_inc(&skc->skc_linux_alloc);
1246 skm = skc->skc_mag[smp_processor_id()];
1253 obj = spl_cache_refill(skc, skm, flags);
1263 ASSERT(IS_P2ALIGNED(obj, skc->skc_obj_align));
1268 if (obj && skc->skc_ctor)
1269 skc->skc_ctor(obj, skc->skc_private, flags);
1285 spl_kmem_cache_free(spl_kmem_cache_t *skc, void *obj)
1292 ASSERT(skc->skc_magic == SKC_MAGIC);
1293 ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags));
1298 if (skc->skc_dtor)
1299 skc->skc_dtor(obj, skc->skc_private);
1304 if (skc->skc_flags & KMC_SLAB) {
1305 kmem_cache_free(skc->skc_linux_cache, obj);
1306 percpu_counter_dec(&skc->skc_linux_alloc);
1317 spin_lock(&skc->skc_lock);
1318 do_emergency = (skc->skc_obj_emergency > 0);
1319 spin_unlock(&skc->skc_lock);
1321 if (do_emergency && (spl_emergency_free(skc, obj) == 0))
1333 skm = skc->skc_mag[smp_processor_id()];
1342 spl_cache_flush(skc, skm, skm->skm_refill);
1352 spl_slab_reclaim(skc);
1364 spl_kmem_cache_reap_now(spl_kmem_cache_t *skc)
1366 ASSERT(skc->skc_magic == SKC_MAGIC);
1367 ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags));
1369 if (skc->skc_flags & KMC_SLAB)
1372 atomic_inc(&skc->skc_ref);
1377 if (test_and_set_bit(KMC_BIT_REAPING, &skc->skc_flags))
1383 spl_kmem_magazine_t *skm = skc->skc_mag[smp_processor_id()];
1384 spl_cache_flush(skc, skm, skm->skm_avail);
1387 spl_slab_reclaim(skc);
1388 clear_bit_unlock(KMC_BIT_REAPING, &skc->skc_flags);
1390 wake_up_bit(&skc->skc_flags, KMC_BIT_REAPING);
1392 atomic_dec(&skc->skc_ref);
1414 spl_kmem_cache_t *skc = NULL;
1417 list_for_each_entry(skc, &spl_kmem_cache_list, skc_list) {
1418 spl_kmem_cache_reap_now(skc);
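Taken together, the matches trace the cache lifecycle: spl_kmem_cache_create() builds the cache and picks its backing (KMC_SLAB or KMC_KVMEM), spl_kmem_cache_alloc()/spl_kmem_cache_free() service objects through the per-CPU magazines, spl_kmem_cache_reap_now() flushes magazines and reclaims empty slabs, and spl_kmem_cache_destroy() tears everything down. The sketch below shows how a caller might drive that lifecycle. It is kernel-context code, not standalone; the my_node names are hypothetical, the header paths and the create() parameters beyond those visible in the listing (the reclaim/priv/vmp slots and the ctor's int return) are recalled from the SPL headers and should be verified, while the ctor/dtor argument lists follow the skc_ctor/skc_dtor call sites shown above.

#include <sys/types.h>
#include <sys/kmem.h>
#include <sys/kmem_cache.h>

/* Hypothetical client object; not part of the listing. */
typedef struct my_node {
	uint64_t	mn_key;
	uint64_t	mn_value;
} my_node_t;

/* Constructor: called as skc_ctor(obj, skc_private, flags). */
static int
my_node_ctor(void *obj, void *priv, int flags)
{
	my_node_t *mn = obj;

	(void) priv;
	(void) flags;
	mn->mn_key = 0;
	mn->mn_value = 0;
	return (0);
}

/* Destructor: called as skc_dtor(obj, skc_private). */
static void
my_node_dtor(void *obj, void *priv)
{
	(void) obj;
	(void) priv;
}

static spl_kmem_cache_t *my_node_cache;

static int
my_node_init(void)
{
	my_node_cache = spl_kmem_cache_create("my_node_cache",
	    sizeof (my_node_t), 0, my_node_ctor, my_node_dtor,
	    NULL, NULL, NULL, KMC_KVMEM);
	if (my_node_cache == NULL)
		return (ENOMEM);

	my_node_t *mn = spl_kmem_cache_alloc(my_node_cache, KM_SLEEP);
	if (mn != NULL)
		spl_kmem_cache_free(my_node_cache, mn);

	spl_kmem_cache_reap_now(my_node_cache);	/* shed empty slabs */
	return (0);
}

static void
my_node_fini(void)
{
	spl_kmem_cache_destroy(my_node_cache);
}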