Lines Matching defs:bucket
247 * Compute the actual number of bucket entries to pack them in power
256 /* Literal bucket sizes. */
304 static void zone_free_bucket(uma_zone_t zone, uma_bucket_t bucket, void *udata,
412 * For each zone, calculate the memory required for each bucket, consisting
432 * Given a desired number of entries for a bucket, return the zone from which
433 * to allocate the bucket.
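
The lookup described above maps a requested entry count onto a small, fixed table of bucket zones sized so that the bucket header plus its pointer array packs into power-of-two allocations. A minimal userland sketch of that idea; struct bucket_zone, bucket_zones[] and my_bucket_zone_lookup() are hypothetical stand-ins and the sizes are illustrative only:

    #include <stddef.h>

    /* Hypothetical stand-in for one entry of the kernel's bucket-zone table. */
    struct bucket_zone {
        int bz_entries;     /* pointer slots per bucket */
    };

    /* Illustrative sizes; chosen so header + slots pack into power-of-two sizes. */
    static struct bucket_zone bucket_zones[] = {
        { 2 }, { 4 }, { 8 }, { 16 }, { 32 }, { 64 }, { 128 }, { 256 },
        { 0 }               /* sentinel */
    };

    /*
     * Return the first (smallest) bucket zone whose capacity is at least the
     * requested number of entries; fall back to the largest available zone.
     */
    static struct bucket_zone *
    my_bucket_zone_lookup(int entries)
    {
        struct bucket_zone *bz;

        for (bz = bucket_zones; bz->bz_entries != 0; bz++)
            if (bz->bz_entries >= entries)
                return (bz);
        return (bz - 1);
    }
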
467 uma_bucket_t bucket;
476 * To limit bucket recursion we store the original zone flags
480 * a bucket for a bucket zone so we do not allow infinite bucket
482 * buckets via the allocation path or bucket allocations in the
497 bucket = uma_zalloc_arg(ubz->ubz_zone, udata, flags);
498 if (bucket) {
500 bzero(bucket->ub_bucket, sizeof(void *) * ubz->ubz_entries);
502 bucket->ub_cnt = 0;
503 bucket->ub_entries = min(ubz->ubz_entries,
505 bucket->ub_seq = SMR_SEQ_INVALID;
506 CTR3(KTR_UMA, "bucket_alloc: zone %s(%p) allocated bucket %p",
507 zone->uz_name, zone, bucket);
510 return (bucket);
514 bucket_free(uma_zone_t zone, uma_bucket_t bucket, void *udata)
518 if (bucket->ub_cnt != 0)
519 bucket_drain(zone, bucket);
521 KASSERT(bucket->ub_cnt == 0,
522 ("bucket_free: Freeing a non free bucket."));
523 KASSERT(bucket->ub_seq == SMR_SEQ_INVALID,
524 ("bucket_free: Freeing an SMR bucket."));
527 ubz = bucket_zone_lookup(bucket->ub_entries);
528 uma_zfree_arg(ubz->ubz_zone, bucket, udata);
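
bucket_alloc() and bucket_free() above manage the bucket containers themselves: a fresh bucket comes back zeroed with ub_cnt == 0, and a bucket may only be returned to its bucket zone once it holds no items. A simplified userland sketch of that lifecycle, using malloc/free in place of the bucket zones and hypothetical names (struct bucket, bucket_alloc_sketch, bucket_free_sketch):

    #include <assert.h>
    #include <stdlib.h>
    #include <string.h>

    /* Simplified model of a UMA bucket: a counted array of item pointers. */
    struct bucket {
        int     ub_cnt;         /* valid items, packed at the front */
        int     ub_entries;     /* capacity of ub_bucket[] */
        void    *ub_bucket[];   /* item pointers */
    };

    /* Allocate an empty bucket with the requested capacity; malloc stands in
     * for allocating from the matching bucket zone. */
    static struct bucket *
    bucket_alloc_sketch(int entries)
    {
        struct bucket *b;

        b = malloc(sizeof(*b) + entries * sizeof(void *));
        if (b == NULL)
            return (NULL);
        memset(b->ub_bucket, 0, entries * sizeof(void *));
        b->ub_cnt = 0;
        b->ub_entries = entries;
        return (b);
    }

    /* Free a bucket; the caller must have drained any items first. */
    static void
    bucket_free_sketch(struct bucket *b)
    {
        assert(b->ub_cnt == 0 && "freeing a bucket that still holds items");
        free(b);
    }
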
787 * Attempt to satisfy an allocation by retrieving a full bucket from one of the
788 * zone's caches. If a bucket is found the zone is not locked on return.
793 uma_bucket_t bucket;
800 if ((bucket = STAILQ_FIRST(&zdom->uzd_buckets)) == NULL)
805 bucket->ub_seq != SMR_SEQ_INVALID) {
806 if (!smr_poll(zone->uz_smr, bucket->ub_seq, false))
808 bucket->ub_seq = SMR_SEQ_INVALID;
810 if (STAILQ_NEXT(bucket, ub_link) != NULL)
811 zdom->uzd_seq = STAILQ_NEXT(bucket, ub_link)->ub_seq;
815 KASSERT(zdom->uzd_nitems >= bucket->ub_cnt,
817 __func__, zdom->uzd_nitems, bucket->ub_cnt));
818 KASSERT(bucket->ub_cnt > 0,
819 ("%s: empty bucket in bucket cache", __func__));
820 zdom->uzd_nitems -= bucket->ub_cnt;
827 cnt = lmin(zdom->uzd_bimin, bucket->ub_cnt);
830 zdom->uzd_imin -= lmin(zdom->uzd_imin, bucket->ub_cnt);
831 if (zdom->uzd_limin >= bucket->ub_cnt) {
832 zdom->uzd_limin -= bucket->ub_cnt;
845 for (i = 0; i < bucket->ub_cnt; i++)
846 item_dtor(zone, bucket->ub_bucket[i], zone->uz_size,
849 return (bucket);
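
zone_fetch_bucket() pulls the first full bucket off a per-domain list and keeps uzd_nitems consistent; SMR buckets are only handed out once smr_poll() says their sequence has passed. A reduced sketch of just the list manipulation, assuming FreeBSD's <sys/queue.h> and hypothetical simplified types; the SMR check and working-set bookkeeping are noted in the comment but omitted:

    #include <stddef.h>
    #include <sys/queue.h>      /* FreeBSD STAILQ macros */

    struct bucket {
        STAILQ_ENTRY(bucket) ub_link;
        int     ub_cnt;         /* only full buckets sit in this cache */
    };

    struct zone_domain {
        STAILQ_HEAD(, bucket) uzd_buckets;
        long    uzd_nitems;     /* total items across cached buckets */
    };

    /*
     * Pop the first bucket off the domain cache and keep the item count in
     * sync; NULL when the cache is empty.  (The kernel version also polls
     * the bucket's SMR sequence and updates the working-set estimates
     * before handing the bucket out.)
     */
    static struct bucket *
    fetch_bucket_sketch(struct zone_domain *zdom)
    {
        struct bucket *b;

        b = STAILQ_FIRST(&zdom->uzd_buckets);
        if (b == NULL)
            return (NULL);
        STAILQ_REMOVE_HEAD(&zdom->uzd_buckets, ub_link);
        zdom->uzd_nitems -= b->ub_cnt;
        return (b);
    }
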
853 * Insert a full bucket into the specified cache. The "ws" parameter indicates
854 * whether the bucket's contents should be counted as part of the zone's working
855 * set. The bucket may be freed if it exceeds the bucket limit.
858 zone_put_bucket(uma_zone_t zone, int domain, uma_bucket_t bucket, void *udata,
864 if (bucket->ub_cnt == 0)
871 zdom->uzd_nitems += bucket->ub_cnt;
880 atomic_add_long(&zdom->uzd_imax, bucket->ub_cnt);
881 zdom->uzd_imin += bucket->ub_cnt;
882 zdom->uzd_bimin += bucket->ub_cnt;
883 zdom->uzd_limin += bucket->ub_cnt;
886 zdom->uzd_seq = bucket->ub_seq;
892 if (bucket->ub_seq == SMR_SEQ_INVALID)
893 STAILQ_INSERT_HEAD(&zdom->uzd_buckets, bucket, ub_link);
895 STAILQ_INSERT_TAIL(&zdom->uzd_buckets, bucket, ub_link);
899 zdom->uzd_nitems -= bucket->ub_cnt;
902 bucket_free(zone, bucket, udata);
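
zone_put_bucket() is the matching insert path: immediately reusable buckets go to the head of the domain list, SMR-deferred buckets to the tail, and a bucket that would push the cache past its limit is freed instead. A hedged sketch of that placement decision; ub_smr_deferred and uzd_limit are simplifications of ub_seq and the real per-domain limit fields:

    #include <stdbool.h>
    #include <sys/queue.h>      /* FreeBSD STAILQ macros */

    struct bucket {
        STAILQ_ENTRY(bucket) ub_link;
        int     ub_cnt;
        bool    ub_smr_deferred;    /* stands in for ub_seq != SMR_SEQ_INVALID */
    };

    struct zone_domain {
        STAILQ_HEAD(, bucket) uzd_buckets;
        long    uzd_nitems;
        long    uzd_limit;          /* max cached items per domain */
    };

    /*
     * Cache a full bucket.  Immediately reusable buckets go to the head so
     * they are handed out first; buckets still waiting on an SMR grace
     * period go to the tail so readers have time to pass.  Returns false
     * when the bucket does not fit and must be freed by the caller.
     */
    static bool
    put_bucket_sketch(struct zone_domain *zdom, struct bucket *b)
    {
        if (zdom->uzd_nitems + b->ub_cnt > zdom->uzd_limit)
            return (false);
        zdom->uzd_nitems += b->ub_cnt;
        if (!b->ub_smr_deferred)
            STAILQ_INSERT_HEAD(&zdom->uzd_buckets, b, ub_link);
        else
            STAILQ_INSERT_TAIL(&zdom->uzd_buckets, b, ub_link);
        return (true);
    }
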
905 /* Pops an item out of a per-cpu cache bucket. */
907 cache_bucket_pop(uma_cache_t cache, uma_cache_bucket_t bucket)
913 bucket->ucb_cnt--;
914 item = bucket->ucb_bucket->ub_bucket[bucket->ucb_cnt];
916 bucket->ucb_bucket->ub_bucket[bucket->ucb_cnt] = NULL;
924 /* Pushes an item into a per-cpu cache bucket. */
926 cache_bucket_push(uma_cache_t cache, uma_cache_bucket_t bucket, void *item)
930 KASSERT(bucket->ucb_bucket->ub_bucket[bucket->ucb_cnt] == NULL,
931 ("uma_zfree: Freeing to non free bucket index."));
933 bucket->ucb_bucket->ub_bucket[bucket->ucb_cnt] = item;
934 bucket->ucb_cnt++;
939 * Unload a UMA bucket from a per-cpu cache.
942 cache_bucket_unload(uma_cache_bucket_t bucket)
946 b = bucket->ucb_bucket;
948 MPASS(b->ub_entries == bucket->ucb_entries);
949 b->ub_cnt = bucket->ucb_cnt;
950 bucket->ucb_bucket = NULL;
951 bucket->ucb_entries = bucket->ucb_cnt = 0;
979 * Load a bucket into a per-cpu cache bucket.
982 cache_bucket_load(uma_cache_bucket_t bucket, uma_bucket_t b)
986 MPASS(bucket->ucb_bucket == NULL);
989 bucket->ucb_bucket = b;
990 bucket->ucb_cnt = b->ub_cnt;
991 bucket->ucb_entries = b->ub_entries;
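
cache_bucket_pop/push/load/unload above work on a per-CPU mirror of the loaded bucket: the count and capacity live in the small cache_bucket structure on the hot path and are written back into the bucket itself only on unload. A self-contained sketch of those four operations with hypothetical names (cache_pop_sketch and friends) and a fixed-size bucket for brevity:

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    struct bucket {
        int     ub_cnt;
        int     ub_entries;         /* must be <= 16 in this sketch */
        void    *ub_bucket[16];
    };

    /* Per-CPU mirror of the loaded bucket's count and capacity. */
    struct cache_bucket {
        struct bucket   *ucb_bucket;
        int16_t         ucb_cnt;
        int16_t         ucb_entries;
    };

    /* LIFO pop: the most recently freed item is handed out first. */
    static void *
    cache_pop_sketch(struct cache_bucket *cb)
    {
        void *item;

        assert(cb->ucb_cnt > 0);
        cb->ucb_cnt--;
        item = cb->ucb_bucket->ub_bucket[cb->ucb_cnt];
        cb->ucb_bucket->ub_bucket[cb->ucb_cnt] = NULL;
        return (item);
    }

    /* LIFO push into the next free slot. */
    static void
    cache_push_sketch(struct cache_bucket *cb, void *item)
    {
        assert(cb->ucb_cnt < cb->ucb_entries);
        assert(cb->ucb_bucket->ub_bucket[cb->ucb_cnt] == NULL);
        cb->ucb_bucket->ub_bucket[cb->ucb_cnt] = item;
        cb->ucb_cnt++;
    }

    /* Attach a bucket to the cache slot, mirroring its count and capacity. */
    static void
    cache_load_sketch(struct cache_bucket *cb, struct bucket *b)
    {
        assert(cb->ucb_bucket == NULL);
        cb->ucb_bucket = b;
        cb->ucb_cnt = b->ub_cnt;
        cb->ucb_entries = b->ub_entries;
    }

    /* Detach the bucket, writing the mirrored count back into it. */
    static struct bucket *
    cache_unload_sketch(struct cache_bucket *cb)
    {
        struct bucket *b;

        b = cb->ucb_bucket;
        if (b != NULL)
            b->ub_cnt = cb->ucb_cnt;
        cb->ucb_bucket = NULL;
        cb->ucb_entries = cb->ucb_cnt = 0;
        return (b);
    }
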
1045 * Attempt to fetch a bucket from a zone on behalf of the current cpu cache.
1051 uma_bucket_t bucket;
1070 if ((bucket = zone_fetch_bucket(zone, zdom, false)) != NULL)
1071 return (bucket);
1119 * Update the working set size estimates for the zone's bucket cache.
1314 * Free the hash bucket to the appropriate backing store.
1317 * slab_hash The hash bucket we're freeing
1318 * hashsize The number of entries in that hash bucket
1335 * Frees all outstanding items in a bucket
1339 * bucket The free/alloc bucket with items.
1345 bucket_drain(uma_zone_t zone, uma_bucket_t bucket)
1349 if (bucket->ub_cnt == 0)
1353 bucket->ub_seq != SMR_SEQ_INVALID) {
1354 smr_wait(zone->uz_smr, bucket->ub_seq);
1355 bucket->ub_seq = SMR_SEQ_INVALID;
1356 for (i = 0; i < bucket->ub_cnt; i++)
1357 item_dtor(zone, bucket->ub_bucket[i],
1361 for (i = 0; i < bucket->ub_cnt; i++) {
1362 kasan_mark_item_valid(zone, bucket->ub_bucket[i]);
1363 zone->uz_fini(bucket->ub_bucket[i], zone->uz_size);
1364 kasan_mark_item_invalid(zone, bucket->ub_bucket[i]);
1366 zone->uz_release(zone->uz_arg, bucket->ub_bucket, bucket->ub_cnt);
1368 zone_free_limit(zone, bucket->ub_cnt);
1370 bzero(bucket->ub_bucket, sizeof(void *) * bucket->ub_cnt);
1372 bucket->ub_cnt = 0;
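
bucket_drain() returns every item in a bucket to the backing layer, running the zone's fini hook per item (after an SMR wait, for SMR zones) before handing the whole pointer array to uz_release and zeroing the bucket. A simplified sketch with hypothetical callback signatures standing in for the kernel's uz_fini/uz_release:

    #include <stddef.h>
    #include <string.h>

    struct bucket {
        int     ub_cnt;
        void    *ub_bucket[16];
    };

    struct zone {
        size_t  uz_size;
        void    (*uz_fini)(void *item, size_t size);    /* optional */
        void    (*uz_release)(void **items, int cnt);   /* back to the slab layer */
    };

    /*
     * Return every item held in a bucket to the backing layer: run the
     * per-item fini hook if the zone has one, hand the whole pointer array
     * to the release callback, then reset the bucket to empty.
     */
    static void
    bucket_drain_sketch(struct zone *z, struct bucket *b)
    {
        int i;

        if (b->ub_cnt == 0)
            return;
        if (z->uz_fini != NULL)
            for (i = 0; i < b->ub_cnt; i++)
                z->uz_fini(b->ub_bucket[i], z->uz_size);
        z->uz_release(b->ub_bucket, b->ub_cnt);
        memset(b->ub_bucket, 0, sizeof(void *) * b->ub_cnt);
        b->ub_cnt = 0;
    }
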
1392 uma_bucket_t bucket;
1409 bucket = cache_bucket_unload_alloc(cache);
1410 if (bucket != NULL)
1411 bucket_free(zone, bucket, NULL);
1412 bucket = cache_bucket_unload_free(cache);
1413 if (bucket != NULL) {
1414 bucket->ub_seq = seq;
1415 bucket_free(zone, bucket, NULL);
1417 bucket = cache_bucket_unload_cross(cache);
1418 if (bucket != NULL) {
1419 bucket->ub_seq = seq;
1420 bucket_free(zone, bucket, NULL);
1457 * bucket and forces every free to synchronize().
1477 * Safely drain per-CPU caches of a zone(s) to alloc bucket.
1489 * Polite bucket sizes shrinking was not enough, shrink aggressively.
1520 uma_bucket_t bucket;
1525 * The cross bucket is partially filled and not part of
1531 bucket = zdom->uzd_cross;
1534 if (bucket != NULL)
1535 bucket_free(zone, bucket, NULL);
1540 * this bucket cache is empty. If trim, we reclaim items in
1558 while ((bucket = STAILQ_FIRST(&zdom->uzd_buckets)) != NULL &&
1559 zdom->uzd_nitems >= target + bucket->ub_cnt) {
1560 bucket = zone_fetch_bucket(zone, zdom, true);
1561 if (bucket == NULL)
1563 bucket_free(zone, bucket, NULL);
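
The reclaim loop above keeps removing head buckets while doing so leaves at least "target" items cached, so a trim never cuts below the working-set estimate it was handed. A compact sketch of that loop, with free() standing in for bucket_drain plus bucket_free:

    #include <stdlib.h>
    #include <sys/queue.h>      /* FreeBSD STAILQ macros */

    struct bucket {
        STAILQ_ENTRY(bucket) ub_link;
        int     ub_cnt;
    };

    struct zone_domain {
        STAILQ_HEAD(, bucket) uzd_buckets;
        long    uzd_nitems;
    };

    /*
     * Trim the domain's bucket cache toward "target" cached items
     * (target == 0 models a full drain).  A bucket is removed only when
     * the cache would still hold at least "target" items without it.
     */
    static void
    reclaim_domain_sketch(struct zone_domain *zdom, long target)
    {
        struct bucket *b;

        while ((b = STAILQ_FIRST(&zdom->uzd_buckets)) != NULL &&
            zdom->uzd_nitems >= target + b->ub_cnt) {
            STAILQ_REMOVE_HEAD(&zdom->uzd_buckets, ub_link);
            zdom->uzd_nitems -= b->ub_cnt;
            free(b);    /* stands in for bucket_drain + bucket_free */
        }
    }
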
1577 * Shrink the zone bucket size to ensure that the per-CPU caches
2746 "Maximum number of items in each domain's bucket cache");
3674 cache_alloc_item(uma_zone_t zone, uma_cache_t cache, uma_cache_bucket_t bucket,
3680 item = cache_bucket_pop(cache, bucket);
3690 uma_cache_bucket_t bucket;
3695 bucket = &cache->uc_allocbucket;
3696 if (__predict_false(bucket->ucb_cnt == 0))
3698 return (cache_alloc_item(zone, cache, bucket, udata, flags));
3703 * We can not get a bucket so try to return a single item.
3716 uma_cache_bucket_t bucket;
3733 bucket = &cache->uc_allocbucket;
3734 if (__predict_false(bucket->ucb_cnt == 0))
3736 return (cache_alloc_item(zone, cache, bucket, NULL, flags));
3743 uma_cache_bucket_t bucket;
3775 bucket = &cache->uc_allocbucket;
3776 if (__predict_false(bucket->ucb_cnt == 0))
3778 return (cache_alloc_item(zone, cache, bucket, udata, flags));
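
Each of the allocation entry points above shares the same fast path: if the per-CPU alloc bucket has items, pop the top one without taking any lock; only an empty bucket forces the refill path. A userland sketch of that check, where __builtin_expect models __predict_false and cache_alloc_slow() is a stub for the refill path:

    #include <stddef.h>

    struct bucket {
        int     ub_cnt;
        void    *ub_bucket[16];
    };

    struct cache_bucket {
        struct bucket   *ucb_bucket;
        int             ucb_cnt;
    };

    struct cache {
        struct cache_bucket uc_allocbucket;
    };

    /* Stub for the refill path (zone bucket cache / keg import). */
    static void *
    cache_alloc_slow(struct cache *c)
    {
        (void)c;
        return (NULL);
    }

    /*
     * Fast-path allocation: pop the top item of the per-CPU alloc bucket
     * without touching zone-global state; fall back to the refill path
     * only when the bucket is empty.
     */
    static void *
    zalloc_fastpath_sketch(struct cache *c)
    {
        struct cache_bucket *cb;
        void *item;

        cb = &c->uc_allocbucket;
        if (__builtin_expect(cb->ucb_cnt == 0, 0))
            return (cache_alloc_slow(c));
        cb->ucb_cnt--;
        item = cb->ucb_bucket->ub_bucket[cb->ucb_cnt];
        cb->ucb_bucket->ub_bucket[cb->ucb_cnt] = NULL;
        return (item);
    }
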
3782 * Replenish an alloc bucket and possibly restore an old one. Called in
3791 uma_bucket_t bucket;
3798 * If we have run out of items in our alloc bucket see
3799 * if we can switch with the free bucket.
3801 * SMR Zones can't re-use the free bucket until the sequence has
3812 * Discard any empty allocation bucket while we hold no locks.
3814 bucket = cache_bucket_unload_alloc(cache);
3817 if (bucket != NULL) {
3818 KASSERT(bucket->ub_cnt == 0,
3819 ("cache_alloc: Entered with non-empty alloc bucket."));
3820 bucket_free(zone, bucket, udata);
3836 bucket = cache_fetch_bucket(zone, cache, domain);
3837 if (bucket == NULL && zone->uz_bucket_size != 0 && !bucketdisable) {
3838 bucket = zone_alloc_bucket(zone, udata, domain, flags);
3844 CTR3(KTR_UMA, "uma_zalloc: zone %s(%p) bucket zone returned %p",
3845 zone->uz_name, zone, bucket);
3846 if (bucket == NULL) {
3853 * initialized bucket to make this less likely or claim
3864 bucket->ub_cnt);
3865 cache_bucket_load_alloc(cache, bucket);
3870 * We lost the race, release this bucket and start over.
3873 zone_put_bucket(zone, domain, bucket, udata, !new);
3883 uma_bucket_t bucket;
3910 * Try to allocate from the bucket cache before falling back to the keg.
3917 if ((bucket = zone_fetch_bucket(zone, zdom, false)) != NULL) {
3918 item = bucket->ub_bucket[bucket->ub_cnt - 1];
3920 bucket->ub_bucket[bucket->ub_cnt - 1] = NULL;
3922 bucket->ub_cnt--;
3923 zone_put_bucket(zone, domain, bucket, udata, true);
3928 ("%s: bucket cache item %p from wrong domain",
4110 zone_import(void *arg, void **bucket, int max, int domain, int flags)
4133 bucket[i++] = slab_alloc_item(keg, slab);
4148 * instead pick a new domain for each bucket rather
4149 * than stripe within each bucket. The current option
4324 uma_bucket_t bucket;
4345 bucket = bucket_alloc(zone, udata, M_NOWAIT | (flags & M_NOVM));
4346 if (bucket == NULL) {
4351 bucket->ub_cnt = zone->uz_import(zone->uz_arg, bucket->ub_bucket,
4352 MIN(maxbucket, bucket->ub_entries), domain, flags);
4357 if (bucket->ub_cnt != 0 && zone->uz_init != NULL) {
4360 for (i = 0; i < bucket->ub_cnt; i++) {
4361 kasan_mark_item_valid(zone, bucket->ub_bucket[i]);
4362 error = zone->uz_init(bucket->ub_bucket[i],
4364 kasan_mark_item_invalid(zone, bucket->ub_bucket[i]);
4370 * If we couldn't initialize the whole bucket, put the
4373 if (i != bucket->ub_cnt) {
4374 zone->uz_release(zone->uz_arg, &bucket->ub_bucket[i],
4375 bucket->ub_cnt - i);
4377 bzero(&bucket->ub_bucket[i],
4378 sizeof(void *) * (bucket->ub_cnt - i));
4380 bucket->ub_cnt = i;
4384 cnt = bucket->ub_cnt;
4385 if (bucket->ub_cnt == 0) {
4386 bucket_free(zone, bucket, udata);
4388 bucket = NULL;
4394 return (bucket);
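
zone_alloc_bucket() imports up to a bucket's worth of raw items and then runs the zone's init hook over them; if init fails partway, the uninitialized tail is released back and the bucket is shrunk to the successful prefix (or freed entirely if nothing succeeded). A sketch of that fill-and-rollback logic with simplified callback signatures:

    #include <stdlib.h>
    #include <string.h>

    struct bucket {
        int     ub_cnt;
        int     ub_entries;
        void    *ub_bucket[];
    };

    struct zone {
        size_t  uz_size;
        int     (*uz_import)(void **store, int max);    /* returns items filled */
        int     (*uz_init)(void *item, size_t size);    /* nonzero on failure */
        void    (*uz_release)(void **items, int cnt);
    };

    /*
     * Fill a fresh bucket from the zone's import function and run the init
     * hook on each item.  On partial init failure, release the
     * uninitialized tail and keep only the successfully set up prefix.
     */
    static struct bucket *
    alloc_bucket_sketch(struct zone *z, int entries)
    {
        struct bucket *b;
        int i;

        b = calloc(1, sizeof(*b) + entries * sizeof(void *));
        if (b == NULL)
            return (NULL);
        b->ub_entries = entries;
        b->ub_cnt = z->uz_import(b->ub_bucket, entries);

        if (b->ub_cnt != 0 && z->uz_init != NULL) {
            for (i = 0; i < b->ub_cnt; i++)
                if (z->uz_init(b->ub_bucket[i], z->uz_size) != 0)
                    break;
            if (i != b->ub_cnt) {
                z->uz_release(&b->ub_bucket[i], b->ub_cnt - i);
                memset(&b->ub_bucket[i], 0,
                    sizeof(void *) * (b->ub_cnt - i));
                b->ub_cnt = i;
            }
        }
        if (b->ub_cnt == 0) {
            free(b);
            return (NULL);
        }
        return (b);
    }
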
4472 uma_cache_bucket_t bucket;
4499 /* SMR Zones must free to the free bucket. */
4500 bucket = &cache->uc_freebucket;
4504 bucket = &cache->uc_crossbucket;
4507 if (__predict_true(bucket->ucb_cnt < bucket->ucb_entries)) {
4508 cache_bucket_push(cache, bucket, item);
4526 uma_cache_bucket_t bucket;
4590 bucket = &cache->uc_allocbucket;
4594 bucket = &cache->uc_crossbucket;
4597 if (bucket->ucb_cnt == bucket->ucb_entries &&
4602 if (__predict_true(bucket->ucb_cnt < bucket->ucb_entries)) {
4603 cache_bucket_push(cache, bucket, item);
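
The free fast path above picks a destination bucket first: SMR and cross-domain frees go to the free or cross bucket respectively, and the push only happens while the chosen bucket has room. A reduced sketch of that selection; the cross_domain flag stands in for the itemdomain/current-domain comparison:

    #include <stdbool.h>
    #include <stddef.h>

    struct bucket {
        void    *ub_bucket[16];
    };

    struct cache_bucket {
        struct bucket   *ucb_bucket;
        int             ucb_cnt;
        int             ucb_entries;
    };

    struct cache {
        struct cache_bucket uc_freebucket;
        struct cache_bucket uc_crossbucket;
    };

    /*
     * Fast-path free: an item that belongs to another NUMA domain goes to
     * the cross bucket so it can later be returned to its home domain;
     * everything else goes to the regular free bucket.  Returns false when
     * the chosen bucket is absent or full and the slow path must run.
     */
    static bool
    zfree_fastpath_sketch(struct cache *c, void *item, bool cross_domain)
    {
        struct cache_bucket *cb;

        cb = cross_domain ? &c->uc_crossbucket : &c->uc_freebucket;
        if (cb->ucb_bucket == NULL || cb->ucb_cnt >= cb->ucb_entries)
            return (false);
        cb->ucb_bucket->ub_bucket[cb->ucb_cnt] = item;
        cb->ucb_cnt++;
        return (true);
    }
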
4623 zone_free_cross(uma_zone_t zone, uma_bucket_t bucket, void *udata)
4633 "uma_zfree: zone %s(%p) draining cross bucket %p",
4634 zone->uz_name, zone, bucket);
4638 * the current smr seq rather than accepting the bucket's.
4646 * lock on the current crossfree bucket. A full matrix with
4652 for (; bucket->ub_cnt > 0; bucket->ub_cnt--) {
4653 item = bucket->ub_bucket[bucket->ub_cnt - 1];
4662 * Avoid allocating a bucket with the cross lock
4664 * cross-domain free and bucket zones may
4692 if (bucket->ub_cnt == 0)
4693 bucket->ub_seq = SMR_SEQ_INVALID;
4694 bucket_free(zone, bucket, udata);
4709 zone_free_bucket(uma_zone_t zone, uma_bucket_t bucket, void *udata,
4722 zone_free_cross(zone, bucket, udata);
4728 * Attempt to save the bucket in the zone's domain bucket cache.
4731 "uma_zfree: zone %s(%p) putting bucket %p on free list",
4732 zone->uz_name, zone, bucket);
4736 zone_put_bucket(zone, itemdomain, bucket, udata, ws);
4740 * Populate a free or cross bucket for the current cpu cache. Free any
4741 * existing full bucket either to the zone cache or back to the slab layer.
4751 uma_bucket_t newbucket, bucket;
4763 * enabled this is the zdom of the item. The bucket is the
4764 * cross bucket if the current domain and itemdomain do not match.
4777 bucket = cache_bucket_unload(cbucket);
4778 KASSERT(bucket == NULL || bucket->ub_cnt == bucket->ub_entries,
4779 ("cache_free: Entered with non-full free bucket."));
4785 * Don't let SMR zones operate without a free bucket. Force
4788 * item if we fail to allocate a bucket.
4791 if (bucket != NULL)
4792 bucket->ub_seq = smr_advance(zone->uz_smr);
4794 if (newbucket == NULL && bucket != NULL) {
4795 bucket_drain(zone, bucket);
4796 newbucket = bucket;
4797 bucket = NULL;
4802 if (bucket != NULL)
4803 zone_free_bucket(zone, bucket, udata, itemdomain, true);
4806 if ((bucket = newbucket) == NULL)
4811 * Check to see if we should be populating the cross bucket. If it
4813 * the free bucket.
4818 cache_bucket_load_cross(cache, bucket);
4824 * We may have lost the race to fill the bucket or switched CPUs.
4828 bucket_free(zone, bucket, udata);
4831 cache_bucket_load_free(cache, bucket);
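
For SMR zones, cache_free() stamps the retired full free bucket with smr_advance() so its items are not recycled before readers move past that sequence, and if no empty replacement bucket can be allocated it drains the old bucket synchronously and reuses it rather than leaving the zone without a free bucket. A loose sketch of that decision, with a plain counter standing in for the SMR clock and stub helpers for bucket allocation and draining:

    #include <stdlib.h>
    #include <string.h>

    static unsigned long smr_wr_seq;    /* stand-in for the zone's SMR clock */

    struct bucket {
        int             ub_cnt;
        unsigned long   ub_seq;         /* 0 models SMR_SEQ_INVALID */
        void            *ub_bucket[16];
    };

    /* Stub: allocate an empty replacement bucket. */
    static struct bucket *
    new_empty_bucket(void)
    {
        return (calloc(1, sizeof(struct bucket)));
    }

    /* Stub: in the kernel this would smr_wait() and run item dtors. */
    static void
    drain_bucket_sync(struct bucket *b)
    {
        memset(b->ub_bucket, 0, sizeof(b->ub_bucket));
        b->ub_cnt = 0;
    }

    /*
     * Retire a full per-CPU free bucket from an SMR zone.  The bucket is
     * stamped with the next write sequence (modeling smr_advance()).  If no
     * empty replacement can be allocated, the full bucket is drained
     * synchronously and recycled in place; otherwise it goes to the zone's
     * domain cache via *to_cache and the fresh bucket is returned for the
     * per-CPU free slot.
     */
    static struct bucket *
    retire_free_bucket_sketch(struct bucket *full, struct bucket **to_cache)
    {
        struct bucket *fresh;

        *to_cache = NULL;
        if (full != NULL)
            full->ub_seq = ++smr_wr_seq;
        fresh = new_empty_bucket();
        if (fresh == NULL && full != NULL) {
            drain_bucket_sync(full);
            full->ub_seq = 0;
            return (full);
        }
        *to_cache = full;
        return (fresh);
    }
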
4867 zone_release(void *arg, void **bucket, int cnt)
4883 item = bucket[i];
4985 * frees we use an additional bucket per CPU and per domain. Select the
4986 * largest bucket size that does not exceed half of the requested limit,
4987 * with the left over space given to the full bucket cache.
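
The closing comment describes splitting a zone's item limit between the per-CPU buckets and the per-domain full-bucket cache: pick the largest bucket size whose per-CPU footprint stays within half the limit and give the remainder to the bucket cache. A small arithmetic sketch of that split; the three-buckets-per-CPU factor (alloc, free, cross) and the size table are assumptions for illustration, not the kernel's exact formula:

    #include <stdio.h>

    /* Illustrative bucket-zone capacities (see the lookup sketch above). */
    static const int bucket_sizes[] = { 2, 4, 8, 16, 32, 64, 128, 256 };
    static const int nbucket_sizes =
        sizeof(bucket_sizes) / sizeof(bucket_sizes[0]);

    /*
     * Split an item limit: choose the largest bucket size whose per-CPU
     * footprint (assumed here to be three buckets per CPU) stays within
     * half the limit, and give the remaining items to the full-bucket
     * cache shared per domain.
     */
    static void
    split_limit_sketch(int limit, int ncpus, int *bucket_size, int *cache_budget)
    {
        int i, size;

        size = 0;
        for (i = 0; i < nbucket_sizes; i++) {
            if (ncpus * 3 * bucket_sizes[i] <= limit / 2)
                size = bucket_sizes[i];
            else
                break;
        }
        *bucket_size = size;
        *cache_budget = limit - ncpus * 3 * size;
    }

    int
    main(void)
    {
        int bsize, budget;

        split_limit_sketch(4096, 8, &bsize, &budget);
        printf("bucket size %d, bucket-cache budget %d items\n", bsize, budget);
        return (0);
    }
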