/*	$OpenBSD: kern_malloc.c,v 1.136 2018/07/10 10:17:42 bluhm Exp $	*/
/*	$NetBSD: kern_malloc.c,v 1.15.4.2 1996/06/13 17:10:56 cgd Exp $	*/

/*
 * Copyright (c) 1987, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_malloc.c	8.3 (Berkeley) 1/4/94
 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/stdint.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/time.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>

#include <uvm/uvm_extern.h>

static
#ifndef SMALL_KERNEL
__inline__
#endif
long BUCKETINDX(size_t sz)
{
	long b, d;

	/* note that this relies upon MINALLOCSIZE being 1 << MINBUCKET */
	b = 7 + MINBUCKET; d = 4;
	while (d != 0) {
		if (sz <= (1 << b))
			b -= d;
		else
			b += d;
		d >>= 1;
	}
	if (sz <= (1 << b))
		b += 0;
	else
		b += 1;
	return b;
}
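
/*
 * A worked example of the binary search above, assuming MINBUCKET == 4
 * (a 16 byte minimum bucket): for sz == 100 the search starts at
 * b == 11 (2048) and narrows through b == 7 (128), b == 5 (32) and
 * b == 6 (64); the final comparison sees 100 > 64 and settles on
 * b == 7, the 128 byte bucket.
 */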

static struct vm_map kmem_map_store;
struct vm_map *kmem_map = NULL;

/*
 * Default number of pages in kmem_map.  We attempt to calculate this
 * at run-time, but allow it to be either patched or set in the kernel
 * config file.
 */
#ifndef NKMEMPAGES
#define	NKMEMPAGES	0
#endif
u_int	nkmempages = NKMEMPAGES;

/*
 * Defaults for lower- and upper-bounds for the kmem_map page count.
 * Can be overridden by kernel config options.
 */
#ifndef	NKMEMPAGES_MIN
#define	NKMEMPAGES_MIN	0
#endif
u_int	nkmempages_min = 0;

#ifndef NKMEMPAGES_MAX
#define	NKMEMPAGES_MAX	NKMEMPAGES_MAX_DEFAULT
#endif
u_int	nkmempages_max = 0;

struct mutex malloc_mtx = MUTEX_INITIALIZER(IPL_VM);
struct kmembuckets bucket[MINBUCKET + 16];
#ifdef KMEMSTATS
struct kmemstats kmemstats[M_LAST];
#endif
struct kmemusage *kmemusage;
char *kmembase, *kmemlimit;
char buckstring[16 * sizeof("123456,")];
int buckstring_init = 0;
#if defined(KMEMSTATS) || defined(DIAGNOSTIC) || defined(FFS_SOFTUPDATES)
char *memname[] = INITKMEMNAMES;
char *memall = NULL;
struct rwlock sysctl_kmemlock = RWLOCK_INITIALIZER("sysctlklk");
#endif

/*
 * Normally the freelist structure is used only to hold the list pointer
 * for free objects.  However, when running with diagnostics, the first
 * 8 bytes of the structure are unused except for diagnostic information,
 * and the free list pointer is at offset 8 in the structure.  Since the
 * first 8 bytes are the portion of the structure most often modified, this
 * helps to detect memory reuse problems and avoid free list corruption.
 */
struct kmem_freelist {
	int32_t	kf_spare0;
	int16_t	kf_type;
	int16_t	kf_spare1;
	XSIMPLEQ_ENTRY(kmem_freelist) kf_flist;
};

#ifdef DIAGNOSTIC
/*
 * This array provides a set of masks to catch unaligned frees.
 */
const long addrmask[] = { 0,
	0x00000001, 0x00000003, 0x00000007, 0x0000000f,
	0x0000001f, 0x0000003f, 0x0000007f, 0x000000ff,
	0x000001ff, 0x000003ff, 0x000007ff, 0x00000fff,
	0x00001fff, 0x00003fff, 0x00007fff, 0x0000ffff,
};
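
/*
 * For example, a chunk from the 32 byte bucket (index 5, since
 * 1 << 5 == 32) is always 32 byte aligned, so free() below checks
 * (addr & addrmask[5]), i.e. (addr & 0x1f), and panics on a pointer
 * into the middle of a chunk.
 */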

#endif /* DIAGNOSTIC */

#ifndef SMALL_KERNEL
struct timeval malloc_errintvl = { 5, 0 };
struct timeval malloc_lasterr;
#endif

/*
 * Allocate a block of memory
 */
void *
malloc(size_t size, int type, int flags)
{
	struct kmembuckets *kbp;
	struct kmemusage *kup;
	struct kmem_freelist *freep;
	long indx, npg, allocsize;
	caddr_t va, cp;
	int s;
#ifdef DIAGNOSTIC
	int freshalloc;
	char *savedtype;
#endif
#ifdef KMEMSTATS
	struct kmemstats *ksp = &kmemstats[type];
	int wake;

	if (((unsigned long)type) <= 1 || ((unsigned long)type) >= M_LAST)
		panic("malloc: bogus type %d", type);
#endif

	KASSERT(flags & (M_WAITOK | M_NOWAIT));

#ifdef DIAGNOSTIC
	if ((flags & M_NOWAIT) == 0) {
		extern int pool_debug;
		assertwaitok();
		if (pool_debug == 2)
			yield();
	}
#endif

	if (size > 65535 * PAGE_SIZE) {
		if (flags & M_CANFAIL) {
#ifndef SMALL_KERNEL
			/* XXX lock */
			if (ratecheck(&malloc_lasterr, &malloc_errintvl))
				printf("malloc(): allocation too large, "
				    "type = %d, size = %lu\n", type, size);
#endif
			return (NULL);
		} else
			panic("malloc: allocation too large, "
			    "type = %d, size = %lu\n", type, size);
	}

	indx = BUCKETINDX(size);
	if (size > MAXALLOCSAVE)
		allocsize = round_page(size);
	else
		allocsize = 1 << indx;
	kbp = &bucket[indx];
	mtx_enter(&malloc_mtx);
#ifdef KMEMSTATS
	while (ksp->ks_memuse >= ksp->ks_limit) {
		if (flags & M_NOWAIT) {
			mtx_leave(&malloc_mtx);
			return (NULL);
		}
#ifdef DIAGNOSTIC
		if (ISSET(flags, M_WAITOK) && curproc == &proc0)
			panic("%s: cannot sleep for memory during boot",
			    __func__);
#endif
		if (ksp->ks_limblocks < 65535)
			ksp->ks_limblocks++;
		msleep(ksp, &malloc_mtx, PSWP+2, memname[type], 0);
	}
	ksp->ks_memuse += allocsize; /* account for this early */
	ksp->ks_size |= 1 << indx;
#endif
	if (XSIMPLEQ_FIRST(&kbp->kb_freelist) == NULL) {
		mtx_leave(&malloc_mtx);
		npg = atop(round_page(allocsize));
		s = splvm();
		va = (caddr_t)uvm_km_kmemalloc_pla(kmem_map, NULL,
		    (vsize_t)ptoa(npg), 0,
		    ((flags & M_NOWAIT) ? UVM_KMF_NOWAIT : 0) |
		    ((flags & M_CANFAIL) ? UVM_KMF_CANFAIL : 0),
		    no_constraint.ucr_low, no_constraint.ucr_high,
		    0, 0, 0);
		splx(s);
		if (va == NULL) {
			/*
			 * uvm_km_kmemalloc_pla() can return NULL, even
			 * if it can wait, if there is no map space
			 * available, because it can't fix that problem.
			 * Neither can we, right now.  (We should release
			 * pages which are completely free and which are
			 * in buckets with too many free elements.)
			 */
			if ((flags & (M_NOWAIT|M_CANFAIL)) == 0)
				panic("malloc: out of space in kmem_map");

#ifdef KMEMSTATS
			mtx_enter(&malloc_mtx);
			ksp->ks_memuse -= allocsize;
			wake = ksp->ks_memuse + allocsize >= ksp->ks_limit &&
			    ksp->ks_memuse < ksp->ks_limit;
			mtx_leave(&malloc_mtx);
			if (wake)
				wakeup(ksp);
#endif
			return (NULL);
		}
		mtx_enter(&malloc_mtx);
#ifdef KMEMSTATS
		kbp->kb_total += kbp->kb_elmpercl;
#endif
		kup = btokup(va);
		kup->ku_indx = indx;
#ifdef DIAGNOSTIC
		freshalloc = 1;
#endif
		if (allocsize > MAXALLOCSAVE) {
			kup->ku_pagecnt = npg;
			goto out;
		}
#ifdef KMEMSTATS
		kup->ku_freecnt = kbp->kb_elmpercl;
		kbp->kb_totalfree += kbp->kb_elmpercl;
#endif
		cp = va + (npg * PAGE_SIZE) - allocsize;
		for (;;) {
			freep = (struct kmem_freelist *)cp;
#ifdef DIAGNOSTIC
			/*
			 * Copy in known text to detect modification
			 * after freeing.
			 */
			poison_mem(cp, allocsize);
			freep->kf_type = M_FREE;
#endif /* DIAGNOSTIC */
			XSIMPLEQ_INSERT_HEAD(&kbp->kb_freelist, freep,
			    kf_flist);
			if (cp <= va)
				break;
			cp -= allocsize;
		}
	} else {
#ifdef DIAGNOSTIC
		freshalloc = 0;
#endif
	}
	freep = XSIMPLEQ_FIRST(&kbp->kb_freelist);
	XSIMPLEQ_REMOVE_HEAD(&kbp->kb_freelist, kf_flist);
	va = (caddr_t)freep;
#ifdef DIAGNOSTIC
	savedtype = (unsigned)freep->kf_type < M_LAST ?
		memname[freep->kf_type] : "???";
	if (freshalloc == 0 && XSIMPLEQ_FIRST(&kbp->kb_freelist)) {
		int rv;
		vaddr_t addr = (vaddr_t)XSIMPLEQ_FIRST(&kbp->kb_freelist);

		vm_map_lock(kmem_map);
		rv = uvm_map_checkprot(kmem_map, addr,
		    addr + sizeof(struct kmem_freelist), PROT_WRITE);
		vm_map_unlock(kmem_map);

		if (!rv) {
			printf("%s %zd of object %p size 0x%lx %s %s"
			    " (invalid addr %p)\n",
			    "Data modified on freelist: word",
			    (int32_t *)&addr - (int32_t *)kbp, va, size,
			    "previous type", savedtype, (void *)addr);
		}
	}

	/* Fill the fields that we've used with poison */
	poison_mem(freep, sizeof(*freep));

	/* and check that the data hasn't been modified. */
	if (freshalloc == 0) {
		size_t pidx;
		uint32_t pval;
		if (poison_check(va, allocsize, &pidx, &pval)) {
			panic("%s %zd of object %p size 0x%lx %s %s"
			    " (0x%x != 0x%x)\n",
			    "Data modified on freelist: word",
			    pidx, va, size, "previous type",
			    savedtype, ((int32_t*)va)[pidx], pval);
		}
	}

	freep->kf_spare0 = 0;
#endif /* DIAGNOSTIC */
#ifdef KMEMSTATS
	kup = btokup(va);
	if (kup->ku_indx != indx)
		panic("malloc: wrong bucket");
	if (kup->ku_freecnt == 0)
		panic("malloc: lost data");
	kup->ku_freecnt--;
	kbp->kb_totalfree--;
out:
	kbp->kb_calls++;
	ksp->ks_inuse++;
	ksp->ks_calls++;
	if (ksp->ks_memuse > ksp->ks_maxused)
		ksp->ks_maxused = ksp->ks_memuse;
#else
out:
#endif
	mtx_leave(&malloc_mtx);

	if ((flags & M_ZERO) && va != NULL)
		memset(va, 0, size);
	return (va);
}
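
/*
 * A typical call, sketched: callers name a malloc type for accounting
 * and pass M_WAITOK or M_NOWAIT, e.g.
 *
 *	buf = malloc(len, M_TEMP, M_NOWAIT | M_ZERO);
 *	if (buf == NULL)
 *		return (ENOMEM);
 *
 * (buf and len are hypothetical caller-side names.)
 */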

/*
 * Free a block of memory allocated by malloc.
 */
void
free(void *addr, int type, size_t freedsize)
{
	struct kmembuckets *kbp;
	struct kmemusage *kup;
	struct kmem_freelist *freep;
	long size;
	int s;
#ifdef DIAGNOSTIC
	long alloc;
#endif
#ifdef KMEMSTATS
	struct kmemstats *ksp = &kmemstats[type];
	int wake;
#endif

	if (addr == NULL)
		return;

#ifdef DIAGNOSTIC
	if (addr < (void *)kmembase || addr >= (void *)kmemlimit)
		panic("free: non-malloced addr %p type %s", addr,
		    memname[type]);
#endif

	mtx_enter(&malloc_mtx);
	kup = btokup(addr);
	size = 1 << kup->ku_indx;
	kbp = &bucket[kup->ku_indx];
	if (size > MAXALLOCSAVE)
		size = kup->ku_pagecnt << PAGE_SHIFT;
#ifdef DIAGNOSTIC
	if (freedsize != 0 && freedsize > size)
		panic("free: size too large %zu > %ld (%p) type %s",
		    freedsize, size, addr, memname[type]);
	if (freedsize != 0 && size > MINALLOCSIZE && freedsize <= size / 2)
		panic("free: size too small %zu <= %ld / 2 (%p) type %s",
		    freedsize, size, addr, memname[type]);
	/*
	 * Check for returns of data that do not point to the
	 * beginning of the allocation.
	 */
	if (size > PAGE_SIZE)
		alloc = addrmask[BUCKETINDX(PAGE_SIZE)];
	else
		alloc = addrmask[kup->ku_indx];
	if (((u_long)addr & alloc) != 0)
		panic("free: unaligned addr %p, size %ld, type %s, mask %ld",
			addr, size, memname[type], alloc);
#endif /* DIAGNOSTIC */
	if (size > MAXALLOCSAVE) {
		u_short pagecnt = kup->ku_pagecnt;

		kup->ku_indx = 0;
		kup->ku_pagecnt = 0;
		mtx_leave(&malloc_mtx);
		s = splvm();
		uvm_km_free(kmem_map, (vaddr_t)addr, ptoa(pagecnt));
		splx(s);
#ifdef KMEMSTATS
		mtx_enter(&malloc_mtx);
		ksp->ks_memuse -= size;
		wake = ksp->ks_memuse + size >= ksp->ks_limit &&
		    ksp->ks_memuse < ksp->ks_limit;
		ksp->ks_inuse--;
		kbp->kb_total -= 1;
		mtx_leave(&malloc_mtx);
		if (wake)
			wakeup(ksp);
#endif
		return;
	}
	freep = (struct kmem_freelist *)addr;
#ifdef DIAGNOSTIC
	/*
	 * Check for multiple frees. Use a quick check to see if
	 * it looks free before laboriously searching the freelist.
	 */
	if (freep->kf_spare0 == poison_value(freep)) {
		struct kmem_freelist *fp;
		XSIMPLEQ_FOREACH(fp, &kbp->kb_freelist, kf_flist) {
			if (addr != fp)
				continue;
			printf("multiply freed item %p\n", addr);
			panic("free: duplicated free");
		}
	}
	/*
	 * Copy in known text to detect modification after freeing
	 * and to make it look free. Also, save the type being freed
	 * so we can list the likely culprit if modification is detected
	 * when the object is reallocated.
	 */
	poison_mem(addr, size);
	freep->kf_spare0 = poison_value(freep);

	freep->kf_type = type;
#endif /* DIAGNOSTIC */
#ifdef KMEMSTATS
	kup->ku_freecnt++;
	if (kup->ku_freecnt >= kbp->kb_elmpercl) {
		if (kup->ku_freecnt > kbp->kb_elmpercl)
			panic("free: multiple frees");
		else if (kbp->kb_totalfree > kbp->kb_highwat)
			kbp->kb_couldfree++;
	}
	kbp->kb_totalfree++;
	ksp->ks_memuse -= size;
	wake = ksp->ks_memuse + size >= ksp->ks_limit &&
	    ksp->ks_memuse < ksp->ks_limit;
	ksp->ks_inuse--;
#endif
	XSIMPLEQ_INSERT_TAIL(&kbp->kb_freelist, freep, kf_flist);
	mtx_leave(&malloc_mtx);
#ifdef KMEMSTATS
	if (wake)
		wakeup(ksp);
#endif
}
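
/*
 * Callers pass the same malloc type and, where known, the original
 * allocation size so DIAGNOSTIC kernels can cross-check it, e.g.
 *
 *	free(buf, M_TEMP, len);
 *
 * A freedsize of 0 skips the size sanity checks above.
 * (buf and len are hypothetical caller-side names.)
 */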

/*
 * Compute the number of pages that kmem_map will map, that is,
 * the size of the kernel malloc arena.
 */
void
kmeminit_nkmempages(void)
{
	u_int npages;

	if (nkmempages != 0) {
		/*
		 * It's already been set (by us being here before, or
		 * by patching or kernel config options); bail out now.
		 */
		return;
	}

	/*
	 * We can't initialize these variables at compilation time, since
	 * the page size may not be known (on sparc GENERIC kernels, for
	 * example). But we still want the MD code to be able to provide
	 * better values.
	 */
	if (nkmempages_min == 0)
		nkmempages_min = NKMEMPAGES_MIN;
	if (nkmempages_max == 0)
		nkmempages_max = NKMEMPAGES_MAX;

	/*
	 * We use the following (simple) formula:
	 *
	 *	- Starting point is physical memory / 4.
	 *
	 *	- Clamp it down to nkmempages_max.
	 *
	 *	- Round it up to nkmempages_min.
	 */
	npages = physmem / 4;

	if (npages > nkmempages_max)
		npages = nkmempages_max;

	if (npages < nkmempages_min)
		npages = nkmempages_min;

	nkmempages = npages;
}
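
/*
 * A worked example, assuming 4 KB pages and 1 GB of RAM: physmem is
 * 262144 pages, so the starting point is 65536 pages (256 MB of
 * kmem_map virtual space), which is then clamped to the machine-
 * dependent nkmempages_max and raised to at least nkmempages_min.
 */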

/*
 * Initialize the kernel memory allocator
 */
void
kmeminit(void)
{
	vaddr_t base, limit;
	long indx;

#ifdef DIAGNOSTIC
	if (sizeof(struct kmem_freelist) > (1 << MINBUCKET))
		panic("kmeminit: minbucket too small/struct freelist too big");
#endif

	/*
	 * Compute the number of kmem_map pages, if we have not
	 * done so already.
	 */
	kmeminit_nkmempages();
	base = vm_map_min(kernel_map);
	kmem_map = uvm_km_suballoc(kernel_map, &base, &limit,
	    (vsize_t)nkmempages << PAGE_SHIFT,
#ifdef KVA_GUARDPAGES
	    VM_MAP_INTRSAFE | VM_MAP_GUARDPAGES,
#else
	    VM_MAP_INTRSAFE,
#endif
	    FALSE, &kmem_map_store);
	kmembase = (char *)base;
	kmemlimit = (char *)limit;
	kmemusage = (struct kmemusage *) uvm_km_zalloc(kernel_map,
		(vsize_t)(nkmempages * sizeof(struct kmemusage)));
	for (indx = 0; indx < MINBUCKET + 16; indx++) {
		XSIMPLEQ_INIT(&bucket[indx].kb_freelist);
	}
#ifdef KMEMSTATS
	for (indx = 0; indx < MINBUCKET + 16; indx++) {
		if (1 << indx >= PAGE_SIZE)
			bucket[indx].kb_elmpercl = 1;
		else
			bucket[indx].kb_elmpercl = PAGE_SIZE / (1 << indx);
		bucket[indx].kb_highwat = 5 * bucket[indx].kb_elmpercl;
	}
	for (indx = 0; indx < M_LAST; indx++)
		kmemstats[indx].ks_limit = nkmempages * PAGE_SIZE * 6 / 10;
#endif
}

/*
 * Return kernel malloc statistics information.
 */
int
sysctl_malloc(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen, struct proc *p)
{
	struct kmembuckets kb;
#ifdef KMEMSTATS
	struct kmemstats km;
#endif
#if defined(KMEMSTATS) || defined(DIAGNOSTIC) || defined(FFS_SOFTUPDATES)
	int error;
#endif
	int i, siz;

	if (namelen != 2 && name[0] != KERN_MALLOC_BUCKETS &&
	    name[0] != KERN_MALLOC_KMEMNAMES)
		return (ENOTDIR);		/* overloaded */

	switch (name[0]) {
	case KERN_MALLOC_BUCKETS:
		/* Initialize the first time */
		if (buckstring_init == 0) {
			buckstring_init = 1;
			memset(buckstring, 0, sizeof(buckstring));
			for (siz = 0, i = MINBUCKET; i < MINBUCKET + 16; i++) {
				snprintf(buckstring + siz,
				    sizeof buckstring - siz,
				    "%d,", (u_int)(1<<i));
				siz += strlen(buckstring + siz);
			}
			/* Remove trailing comma */
			if (siz)
				buckstring[siz - 1] = '\0';
		}
		return (sysctl_rdstring(oldp, oldlenp, newp, buckstring));

	case KERN_MALLOC_BUCKET:
		mtx_enter(&malloc_mtx);
		memcpy(&kb, &bucket[BUCKETINDX(name[1])], sizeof(kb));
		mtx_leave(&malloc_mtx);
		memset(&kb.kb_freelist, 0, sizeof(kb.kb_freelist));
		return (sysctl_rdstruct(oldp, oldlenp, newp, &kb, sizeof(kb)));
	case KERN_MALLOC_KMEMSTATS:
#ifdef KMEMSTATS
		if ((name[1] < 0) || (name[1] >= M_LAST))
			return (EINVAL);
		mtx_enter(&malloc_mtx);
		memcpy(&km, &kmemstats[name[1]], sizeof(km));
		mtx_leave(&malloc_mtx);
		return (sysctl_rdstruct(oldp, oldlenp, newp, &km, sizeof(km)));
#else
		return (EOPNOTSUPP);
#endif
	case KERN_MALLOC_KMEMNAMES:
#if defined(KMEMSTATS) || defined(DIAGNOSTIC) || defined(FFS_SOFTUPDATES)
		error = rw_enter(&sysctl_kmemlock, RW_WRITE|RW_INTR);
		if (error)
			return (error);
		if (memall == NULL) {
			int totlen;

			/* Figure out how large a buffer we need */
			for (totlen = 0, i = 0; i < M_LAST; i++) {
				if (memname[i])
					totlen += strlen(memname[i]);
				totlen++;
			}
			memall = malloc(totlen + M_LAST, M_SYSCTL,
			    M_WAITOK|M_ZERO);
			for (siz = 0, i = 0; i < M_LAST; i++) {
				snprintf(memall + siz,
				    totlen + M_LAST - siz,
				    "%s,", memname[i] ? memname[i] : "");
				siz += strlen(memall + siz);
			}
			/* Remove trailing comma */
			if (siz)
				memall[siz - 1] = '\0';

			/* Now, convert all spaces to underscores */
			for (i = 0; i < totlen; i++)
				if (memall[i] == ' ')
					memall[i] = '_';
		}
		rw_exit_write(&sysctl_kmemlock);
		return (sysctl_rdstring(oldp, oldlenp, newp, memall));
#else
		return (EOPNOTSUPP);
#endif
	default:
		return (EOPNOTSUPP);
	}
	/* NOTREACHED */
}
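
/*
 * From userland these nodes surface under the kern.malloc sysctl
 * tree, e.g. (hypothetical shell session):
 *
 *	$ sysctl kern.malloc.buckets
 *	$ sysctl kern.malloc.bucket.16
 *
 * where the second form reports the kmembuckets struct of the bucket
 * that BUCKETINDX() selects for a 16 byte request.
 */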

/*
 * Round up a size to how much malloc would actually allocate.
 */
size_t
malloc_roundup(size_t sz)
{
	if (sz > MAXALLOCSAVE)
		return round_page(sz);

	return (1 << BUCKETINDX(sz));
}
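
/*
 * For example, assuming MINBUCKET == 4, 4 KB pages and
 * MAXALLOCSAVE == 2 * PAGE_SIZE: malloc_roundup(17) is 32 and
 * malloc_roundup(100) is 128, matching the bucket sizes malloc()
 * would pick, while malloc_roundup(9000) rounds up to whole pages,
 * i.e. 12288.
 */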

#if defined(DDB)
#include <machine/db_machdep.h>
#include <ddb/db_output.h>

void
malloc_printit(
    int (*pr)(const char *, ...) __attribute__((__format__(__kprintf__,1,2))))
{
#ifdef KMEMSTATS
	struct kmemstats *km;
	int i;

	(*pr)("%15s %5s  %6s  %7s  %6s %9s %8s %8s\n",
	    "Type", "InUse", "MemUse", "HighUse", "Limit", "Requests",
	    "Type Lim", "Kern Lim");
	for (i = 0, km = kmemstats; i < M_LAST; i++, km++) {
		if (!km->ks_calls || !memname[i])
			continue;

		(*pr)("%15s %5ld %6ldK %7ldK %6ldK %9ld %8d %8d\n",
		    memname[i], km->ks_inuse, km->ks_memuse / 1024,
		    km->ks_maxused / 1024, km->ks_limit / 1024,
		    km->ks_calls, km->ks_limblocks, km->ks_mapblocks);
	}
#else
	(*pr)("No KMEMSTATS compiled in\n");
#endif
}
#endif /* DDB */

/*
 * Copyright (c) 2008 Otto Moerbeek <otto@drijf.net>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * This is sqrt(SIZE_MAX+1), as s1*s2 <= SIZE_MAX
 * if both s1 < MUL_NO_OVERFLOW and s2 < MUL_NO_OVERFLOW
 */
#define MUL_NO_OVERFLOW	(1UL << (sizeof(size_t) * 4))
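
/*
 * On a 64 bit size_t this is 1UL << 32: when both operands are below
 * 2^32 their product is below 2^64 and cannot wrap, so the division
 * in mallocarray() is only needed when at least one operand is large.
 */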

void *
mallocarray(size_t nmemb, size_t size, int type, int flags)
{
	if ((nmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
	    nmemb > 0 && SIZE_MAX / nmemb < size) {
		if (flags & M_CANFAIL)
			return (NULL);
		panic("mallocarray: overflow %zu * %zu", nmemb, size);
	}
	return (malloc(size * nmemb, type, flags));
}
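
/*
 * Typical use, sketched: replacing a multiplication that could wrap,
 *
 *	sc->sc_slots = mallocarray(nslots, sizeof(*sc->sc_slots),
 *	    M_DEVBUF, M_WAITOK | M_ZERO);
 *
 * where a bare malloc(nslots * sizeof(*sc->sc_slots), ...) would pass
 * a silently truncated size if the product overflowed size_t.
 * (sc and nslots are hypothetical caller-side names.)
 */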