Lines Matching full:size
38 * allocator. A set of fixed-size buckets is used for smaller allocations,
130 * When realloc() is called, if the new size is sufficiently smaller than
131 * the old size, realloc() will allocate a new, smaller block to avoid
136 #define REALLOC_FRACTION 1 /* new block if <= half the size */
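Taken together, the two comment lines above and REALLOC_FRACTION describe the reuse test realloc() applies further down in this listing: the old block is kept unless the requested size fits into half of it (or the block is already the smallest bucket). A minimal userland sketch of that test, assuming a MINALLOCSIZE of 16 and a hypothetical reuse_block() helper:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define REALLOC_FRACTION 1   /* new block if <= half the size */
#define MINALLOCSIZE     16  /* assumed smallest bucket, example only */

/* Keep the old block when the new size still fits and uses more than half of it. */
static bool
reuse_block(size_t newsize, size_t alloc)
{
        return (newsize <= alloc &&
            (newsize > (alloc >> REALLOC_FRACTION) || alloc == MINALLOCSIZE));
}

int
main(void)
{
        printf("%d\n", reuse_block(100, 128));  /* 1: still fits, over half used */
        printf("%d\n", reuse_block(40, 128));   /* 0: <= half, allocate a smaller block */
        return (0);
}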
196 "Size of kernel memory");
200 "Maximum allocation size that malloc(9) would use UMA as backend");
204 "Minimum size of kernel memory");
208 "Maximum size of kernel memory");
212 "Scale factor for kernel memory size");
217 sysctl_kmem_map_size, "LU", "Current kmem allocation size");
266 u_long size;
268 size = uma_size();
269 return (sysctl_handle_long(oidp, &size, 0, req));
275 u_long size, limit;
278 size = uma_size();
280 if (size > limit)
281 size = 0;
283 size = limit - size;
284 return (sysctl_handle_long(oidp, &size, 0, req));
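These lines report the kmem space still available as a saturating difference between the limit and what UMA has consumed, so the unsigned sysctl never returns a wrapped value. A small sketch of the same saturation, with made-up byte counts:

#include <stdio.h>

/* Saturating "free" computation: never report a negative (wrapped) value. */
static unsigned long
kmem_free_est(unsigned long used, unsigned long limit)
{
        return (used > limit ? 0 : limit - used);
}

int
main(void)
{
        printf("%lu\n", kmem_free_est(3UL << 30, 4UL << 30));  /* 1 GiB free */
        printf("%lu\n", kmem_free_est(5UL << 30, 4UL << 30));  /* clamped to 0 */
        return (0);
}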
383 * amount of bucket size. Occurs within a critical section so that the
388 malloc_type_zone_allocated(struct malloc_type *mtp, unsigned long size,
397 if (size > 0) {
398 mtsp->mts_memalloced += size;
410 (uintptr_t) mtsp, size, zindx);
418 malloc_type_allocated(struct malloc_type *mtp, unsigned long size)
421 if (size > 0)
422 malloc_type_zone_allocated(mtp, size, -1);
427 * amount of the bucket size. Occurs within a critical section so that the
432 malloc_type_freed(struct malloc_type *mtp, unsigned long size)
440 mtsp->mts_memfreed += size;
449 (uintptr_t) mtsp, size, 0);
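The statistics paths above only accumulate two byte counters per malloc type, allocated (mts_memalloced) and freed (mts_memfreed); memory currently in use is their difference. A toy model of that bookkeeping, not the kernel's structures:

#include <stdio.h>

struct type_stats {
        unsigned long memalloced;       /* mirrors mts_memalloced, illustrative */
        unsigned long memfreed;         /* mirrors mts_memfreed, illustrative */
};

int
main(void)
{
        struct type_stats st = { 0, 0 };

        st.memalloced += 128;           /* a 128-byte allocation is charged */
        st.memalloced += 256;
        st.memfreed += 128;             /* the first block is freed */
        printf("in use: %lu bytes\n", st.memalloced - st.memfreed);    /* 256 */
        return (0);
}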
480 contigmalloc(unsigned long size, struct malloc_type *type, int flags,
486 ret = (void *)kmem_alloc_contig(size, flags, low, high, alignment,
490 vsetzoneslab((uintptr_t)ret, NULL, CONTIG_MALLOC_SLAB(size));
491 malloc_type_allocated(type, round_page(size));
497 contigmalloc_domainset(unsigned long size, struct malloc_type *type,
503 ret = (void *)kmem_alloc_contig_domainset(ds, size, flags, low, high,
507 vsetzoneslab((uintptr_t)ret, NULL, CONTIG_MALLOC_SLAB(size));
508 malloc_type_allocated(type, round_page(size));
517 contigfree(void *addr, unsigned long size __unused, struct malloc_type *type)
594 malloc_large(size_t size, struct malloc_type *mtp, struct domainset *policy,
599 size = roundup(size, PAGE_SIZE);
600 va = kmem_malloc_domainset(policy, size, flags);
603 vsetzoneslab((uintptr_t)va, NULL, MALLOC_LARGE_SLAB(size));
604 uma_total_inc(size);
606 malloc_type_allocated(mtp, va == NULL ? 0 : size);
614 kasan_mark(va, osize, size, KASAN_MALLOC_REDZONE);
620 free_large(void *addr, size_t size)
623 kmem_free(addr, size);
624 uma_total_dec(size);
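malloc_large() rounds the request up to whole pages before asking the kmem layer for memory, and that rounded size is what gets charged to the type and later handed back to free_large(). A sketch of the rounding, assuming a 4 KiB page size:

#include <stdio.h>

#define PAGE_SIZE 4096UL        /* assumed page size for the example */

/* Same effect as roundup(size, PAGE_SIZE) for a power-of-two page size. */
static unsigned long
round_to_pages(unsigned long size)
{
        return ((size + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1));
}

int
main(void)
{
        printf("%lu\n", round_to_pages(70000)); /* 73728 = 18 pages */
        printf("%lu\n", round_to_pages(8192));  /* already page-aligned */
        return (0);
}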
638 (malloc)(size_t size, struct malloc_type *mtp, int flags)
644 unsigned long osize = size;
651 if (malloc_dbg(&va, &size, mtp, flags) != 0)
655 if (__predict_false(size > kmem_zmax))
656 return (malloc_large(size, mtp, DOMAINSET_RR(), flags
659 if (size & KMEM_ZMASK)
660 size = (size & ~KMEM_ZMASK) + KMEM_ZBASE;
661 indx = kmemsize[size >> KMEM_ZSHIFT];
665 size = zone->uz_size;
667 kmsan_mark(va, size, KMSAN_STATE_UNINIT);
668 kmsan_orig(va, size, KMSAN_TYPE_MALLOC, KMSAN_RET_ADDR);
671 malloc_type_zone_allocated(mtp, va == NULL ? 0 : size, indx);
682 kasan_mark((void *)va, osize, size, KASAN_MALLOC_REDZONE);
693 size_t size;
696 size = *sizep;
697 KASSERT(size <= kmem_zmax && (flags & M_EXEC) == 0,
698 ("malloc_domain: Called with bad flag / size combination"));
699 if (size & KMEM_ZMASK)
700 size = (size & ~KMEM_ZMASK) + KMEM_ZBASE;
701 indx = kmemsize[size >> KMEM_ZSHIFT];
711 malloc_domainset(size_t size, struct malloc_type *mtp, struct domainset *ds,
719 unsigned long osize = size;
726 if (malloc_dbg(&va, &size, mtp, flags) != 0)
730 if (__predict_false(size > kmem_zmax))
731 return (malloc_large(size, mtp, DOMAINSET_RR(), flags
736 va = malloc_domain(&size, &indx, mtp, domain, flags);
738 malloc_type_zone_allocated(mtp, va == NULL ? 0 : size, indx);
749 kasan_mark((void *)va, osize, size, KASAN_MALLOC_REDZONE);
753 kmsan_mark(va, size, KMSAN_STATE_UNINIT);
754 kmsan_orig(va, size, KMSAN_TYPE_MALLOC, KMSAN_RET_ADDR);
764 malloc_exec(size_t size, struct malloc_type *mtp, int flags)
767 return (malloc_domainset_exec(size, mtp, DOMAINSET_RR(), flags));
771 malloc_domainset_exec(size_t size, struct malloc_type *mtp, struct domainset *ds,
775 unsigned long osize = size;
785 if (malloc_dbg(&va, &size, mtp, flags) != 0)
789 return (malloc_large(size, mtp, ds, flags DEBUG_REDZONE_ARG));
793 malloc_aligned(size_t size, size_t align, struct malloc_type *type, int flags)
795 return (malloc_domainset_aligned(size, align, type, DOMAINSET_RR(),
800 malloc_domainset_aligned(size_t size, size_t align,
807 ("malloc_domainset_aligned: wrong align %#zx size %#zx",
808 align, size));
810 ("malloc_domainset_aligned: align %#zx (size %#zx) too large",
811 align, size));
814 * Round the allocation size up to the next power of 2,
817 * allocation size to align if the rounded size is less than
819 * size.
821 if (size == 0)
822 size = 1;
823 asize = size <= align ? align : 1UL << flsl(size - 1);
827 ("malloc_domainset_aligned: result not aligned %p size %#zx "
828 "allocsize %#zx align %#zx", res, size, asize, align));
833 mallocarray(size_t nmemb, size_t size, struct malloc_type *type, int flags)
836 if (WOULD_OVERFLOW(nmemb, size))
837 panic("mallocarray: %zu * %zu overflowed", nmemb, size);
839 return (malloc(size * nmemb, type, flags));
843 mallocarray_domainset(size_t nmemb, size_t size, struct malloc_type *type,
847 if (WOULD_OVERFLOW(nmemb, size))
848 panic("mallocarray_domainset: %zu * %zu overflowed", nmemb, size);
850 return (malloc_domainset(size * nmemb, type, ds, flags));
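Both mallocarray() variants refuse multiplications that would wrap before handing the product to malloc(). The kernel uses its WOULD_OVERFLOW() macro; the check below is an equivalent illustration, not the macro itself:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* True if nmemb * size cannot be represented in a size_t. */
static bool
mul_would_overflow(size_t nmemb, size_t size)
{
        return (size != 0 && nmemb > SIZE_MAX / size);
}

int
main(void)
{
        printf("%d\n", mul_would_overflow(1000, 1000));         /* 0: fits */
        printf("%d\n", mul_would_overflow(SIZE_MAX / 2, 3));    /* 1: would wrap */
        return (0);
}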
855 free_save_type(void *addr, struct malloc_type *mtp, u_long size)
864 * This code assumes that size is a multiple of 8 bytes for
868 mtpp += (size - sizeof(struct malloc_type *)) /
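free_save_type() stashes the freeing malloc_type pointer in the last pointer-sized slot of the block so later corruption reports can name the most recent freer; as the comment notes, this assumes the size is a multiple of the pointer size. A userland sketch of the same trick with a hypothetical owner tag:

#include <stdio.h>
#include <stdlib.h>

struct owner {
        const char *name;
};

/* Stash an owner pointer in the final pointer-sized slot of the block. */
static void
save_owner(void *addr, struct owner *who, size_t size)
{
        struct owner **slot = addr;

        slot += (size - sizeof(struct owner *)) / sizeof(struct owner *);
        *slot = who;
}

int
main(void)
{
        static struct owner me = { "M_TEMP" };
        size_t size = 64;
        void *p = malloc(size);

        if (p == NULL)
                return (1);
        save_owner(p, &me, size);
        /* A later diagnostic can read the tag back from the end of the block. */
        printf("last freer: %s\n", (*((struct owner **)((char *)p + size) - 1))->name);
        free(p);
        return (0);
}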
910 u_long size;
927 size = zone->uz_size;
929 free_save_type(addr, mtp, size);
932 kasan_mark(addr, size, size, 0);
933 explicit_bzero(addr, size);
938 size = malloc_large_size(slab);
940 kasan_mark(addr, size, size, 0);
941 explicit_bzero(addr, size);
943 free_large(addr, size);
946 size = round_page(contigmalloc_size(slab));
948 explicit_bzero(addr, size);
949 kmem_free(addr, size);
956 malloc_type_freed(mtp, size);
982 * realloc: change the size of a memory block
985 realloc(void *addr, size_t size, struct malloc_type *mtp, int flags)
1001 return (malloc(size, mtp, flags));
1010 return (memguard_realloc(addr, size, mtp, flags));
1022 /* Get the size of the original block */
1039 if (size <= alloc &&
1040 (size > (alloc >> REALLOC_FRACTION) || alloc == MINALLOCSIZE)) {
1041 kasan_mark((void *)addr, size, alloc, KASAN_MALLOC_REDZONE);
1047 if ((newaddr = malloc(size, mtp, flags)) == NULL)
1055 bcopy(addr, newaddr, min(size, alloc));
1064 reallocf(void *addr, size_t size, struct malloc_type *mtp, int flags)
1068 if ((mem = realloc(addr, size, mtp, flags)) == NULL)
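reallocf() differs from realloc() only in that it frees the original block when the resize fails, removing the classic `p = realloc(p, ...)` leak. A kernel-style sketch (not standalone; M_TEMP, M_NOWAIT, and the error handling are chosen for illustration):

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>

static int
grow_buffer(char **bufp, size_t newsize)
{
        char *p;

        p = reallocf(*bufp, newsize, M_TEMP, M_NOWAIT);
        if (p == NULL) {
                *bufp = NULL;   /* old buffer was already freed by reallocf() */
                return (ENOMEM);
        }
        *bufp = p;
        return (0);
}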
1075 * specified size
1078 malloc_size(size_t size)
1082 if (size > kmem_zmax)
1083 return (round_page(size));
1084 if (size & KMEM_ZMASK)
1085 size = (size & ~KMEM_ZMASK) + KMEM_ZBASE;
1086 indx = kmemsize[size >> KMEM_ZSHIFT];
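malloc_size() applies the same rounding malloc() would: page rounding above kmem_zmax, otherwise stepping up to the next bucket boundary and indexing kmemsize[]. A sketch of the small-size step, assuming the in-tree KMEM_ZBASE of 16:

#include <stdio.h>

#define KMEM_ZBASE      16
#define KMEM_ZMASK      (KMEM_ZBASE - 1)

/*
 * Round a small request up to the next 16-byte step; the real function then
 * maps the rounded size to a zone via kmemsize[] (e.g. 112 lands in the
 * 128-byte bucket).
 */
static unsigned long
round_small(unsigned long size)
{
        if (size & KMEM_ZMASK)
                size = (size & ~KMEM_ZMASK) + KMEM_ZBASE;
        return (size);
}

int
main(void)
{
        printf("%lu\n", round_small(100));      /* 112: next 16-byte step */
        printf("%lu\n", round_small(128));      /* 128: already on a step boundary */
        return (0);
}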
1091 * malloc_usable_size: returns the usable size of the allocation.
1100 u_long size;
1111 size = redzone_get_size(__DECONST(void *, addr));
1120 size = zone->uz_size;
1123 size = malloc_large_size(slab);
1127 size = 0;
1134 * (presumably) about to use the full allocation size.
1136 kasan_mark(addr, size, size, 0);
1138 return (size);
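malloc_usable_size() reports the size of the backing bucket or page run rather than the original request, and the kasan_mark() call above re-validates the whole allocation on the assumption the caller is about to use it all. A kernel-style sketch (not standalone):

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>

static void
usable_size_demo(void)
{
        void *p = malloc(100, M_TEMP, M_WAITOK);

        /* For a 100-byte request this reports the backing bucket, e.g. 128. */
        printf("usable: %zu bytes\n", malloc_usable_size(p));
        free(p, M_TEMP);
}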
1167 * of machines, it is a function of the physical memory size,
1170 * min(max(physical memory size / VM_KMEM_SIZE_SCALE,
1207 * twice the physical memory size, which has been sufficient to handle
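The comment fragments above give the default sizing rule for the kmem arena: physical memory divided by VM_KMEM_SIZE_SCALE, clamped between a minimum and a maximum, and ultimately bounded by roughly twice physical memory. A worked example of the clamp with illustrative floor, ceiling, and scale values (64-bit long assumed):

#include <stdio.h>

/* min(max(phys / scale, vmin), vmax) */
static unsigned long
kmem_size_est(unsigned long phys, unsigned long scale,
    unsigned long vmin, unsigned long vmax)
{
        unsigned long sz = phys / scale;

        if (sz < vmin)
                sz = vmin;
        if (sz > vmax)
                sz = vmax;
        return (sz);
}

int
main(void)
{
        /* 8 GiB of RAM, scale 1, floor 12 MiB, ceiling 16 GiB (illustrative). */
        printf("%lu MiB\n", kmem_size_est(8UL << 30, 1, 12UL << 20, 16UL << 30) >> 20);
        return (0);
}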
1261 int size = kmemzones[indx].kz_size;
1267 if (powerof2(size) && size > sizeof(void *))
1268 align = MIN(size, PAGE_SIZE) - 1;
1271 uma_zcreate(name, size,
1279 for (; i <= size; i += KMEM_ZBASE)
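When the fixed-size zones are created, a power-of-two bucket larger than a pointer is given natural alignment capped at the page size; uma_zcreate() takes the alignment as a mask. A sketch of that mask computation, assuming a 4 KiB page and the pointer-alignment fallback (UMA_ALIGN_PTR) used by the surrounding code:

#include <stdio.h>

#define PAGE_SIZE       4096UL                  /* assumed for the example */
#define MIN(a, b)       ((a) < (b) ? (a) : (b))

/* Power-of-two buckets get natural alignment, capped at the page size. */
static unsigned long
zone_align_mask(unsigned long size)
{
        if ((size & (size - 1)) == 0 && size > sizeof(void *))
                return (MIN(size, PAGE_SIZE) - 1);
        return (sizeof(void *) - 1);            /* fallback: pointer alignment */
}

int
main(void)
{
        printf("%#lx\n", zone_align_mask(64));          /* 0x3f */
        printf("%#lx\n", zone_align_mask(8192));        /* 0xfff, page-capped */
        return (0);
}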
1507 int64_t size;
1523 /* Select sort, largest size first. */
1533 * In the case of size ties, print out mtypes
1543 size = get_malloc_stats(&mtp->ks_mti, &allocs,
1545 if (size > cur_size && size < last_size + ties) {
1546 cur_size = size;
1553 size = get_malloc_stats(&cur_mtype->ks_mti, &allocs, &inuse);
1555 howmany(size, 1024), allocs);