/*	$OpenBSD: kern_malloc.c,v 1.143 2020/12/31 11:04:35 claudio Exp $	*/
/*	$NetBSD: kern_malloc.c,v 1.15.4.2 1996/06/13 17:10:56 cgd Exp $	*/

/*
 * Copyright (c) 1987, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_malloc.c	8.3 (Berkeley) 1/4/94
 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/stdint.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/time.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/tracepoint.h>

#include <uvm/uvm_extern.h>

#if defined(DDB)
#include <machine/db_machdep.h>
#include <ddb/db_output.h>
#endif

static
#ifndef SMALL_KERNEL
__inline__
#endif
long BUCKETINDX(size_t sz)
{
	long b, d;

	/* note that this relies upon MINALLOCSIZE being 1 << MINBUCKET */
	b = 7 + MINBUCKET; d = 4;
	while (d != 0) {
		if (sz <= (1 << b))
			b -= d;
		else
			b += d;
		d >>= 1;
	}
	if (sz <= (1 << b))
		b += 0;
	else
		b += 1;
	return b;
}
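
/*
 * A worked example of the binary search above (illustrative, and
 * assuming MINBUCKET == 4, i.e. MINALLOCSIZE == 16): BUCKETINDX(100)
 * starts with b = 11, d = 4.  100 <= 2048 gives b = 7; 100 <= 128
 * gives b = 5; 100 > 32 gives b = 6; the final fix-up sees 100 > 64
 * and returns 7, selecting the 128-byte bucket, the smallest
 * power-of-two size that holds the request.
 */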

static struct vm_map kmem_map_store;
struct vm_map *kmem_map = NULL;

/*
 * Default number of pages in kmem_map.  We attempt to calculate this
 * at run-time, but allow it to be either patched or set in the kernel
 * config file.
 */
#ifndef NKMEMPAGES
#define	NKMEMPAGES	0
#endif
u_int	nkmempages = NKMEMPAGES;

/*
 * Defaults for lower- and upper-bounds for the kmem_map page count.
 * Can be overridden by kernel config options.
 */
#ifndef	NKMEMPAGES_MIN
#define	NKMEMPAGES_MIN	0
#endif
u_int	nkmempages_min = 0;

#ifndef NKMEMPAGES_MAX
#define	NKMEMPAGES_MAX	NKMEMPAGES_MAX_DEFAULT
#endif
u_int	nkmempages_max = 0;

struct mutex malloc_mtx = MUTEX_INITIALIZER(IPL_VM);
struct kmembuckets bucket[MINBUCKET + 16];
#ifdef KMEMSTATS
struct kmemstats kmemstats[M_LAST];
#endif
struct kmemusage *kmemusage;
char *kmembase, *kmemlimit;
char buckstring[16 * sizeof("123456,")];
int buckstring_init = 0;
#if defined(KMEMSTATS) || defined(DIAGNOSTIC)
char *memname[] = INITKMEMNAMES;
char *memall = NULL;
struct rwlock sysctl_kmemlock = RWLOCK_INITIALIZER("sysctlklk");
#endif

/*
 * Normally the freelist structure is used only to hold the list pointer
 * for free objects.  However, when running with diagnostics, the first
 * 8 bytes of the structure are unused except for diagnostic information,
 * and the free list pointer is at offset 8 in the structure.  Since the
 * first 8 bytes are the portion of the structure most often modified, this
 * helps to detect memory reuse problems and avoid free list corruption.
 */
struct kmem_freelist {
	int32_t	kf_spare0;
	int16_t	kf_type;
	int16_t	kf_spare1;
	XSIMPLEQ_ENTRY(kmem_freelist) kf_flist;
};

#ifdef DIAGNOSTIC
/*
 * This array provides a set of masks to catch unaligned frees.
 */
const long addrmask[] = { 0,
	0x00000001, 0x00000003, 0x00000007, 0x0000000f,
	0x0000001f, 0x0000003f, 0x0000007f, 0x000000ff,
	0x000001ff, 0x000003ff, 0x000007ff, 0x00000fff,
	0x00001fff, 0x00003fff, 0x00007fff, 0x0000ffff,
};
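
/*
 * For example, an allocation from the 64-byte bucket (index 6) must
 * start on a 64-byte boundary, so free() below checks that
 * (addr & addrmask[6]), i.e. (addr & 0x3f), is zero; a non-zero
 * result means the caller passed a pointer into the middle of an
 * allocation.
 */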

#endif /* DIAGNOSTIC */

#ifndef SMALL_KERNEL
struct timeval malloc_errintvl = { 5, 0 };
struct timeval malloc_lasterr;
#endif

/*
 * Allocate a block of memory
 */
void *
malloc(size_t size, int type, int flags)
{
	struct kmembuckets *kbp;
	struct kmemusage *kup;
	struct kmem_freelist *freep;
	long indx, npg, allocsize;
	caddr_t va, cp;
	int s;
#ifdef DIAGNOSTIC
	int freshalloc;
	char *savedtype;
#endif
#ifdef KMEMSTATS
	struct kmemstats *ksp = &kmemstats[type];
	int wake;

	if (((unsigned long)type) <= 1 || ((unsigned long)type) >= M_LAST)
		panic("malloc: bogus type %d", type);
#endif

	KASSERT(flags & (M_WAITOK | M_NOWAIT));

#ifdef DIAGNOSTIC
	if ((flags & M_NOWAIT) == 0) {
		extern int pool_debug;
		assertwaitok();
		if (pool_debug == 2)
			yield();
	}
#endif

	if (size > 65535 * PAGE_SIZE) {
		if (flags & M_CANFAIL) {
#ifndef SMALL_KERNEL
			/* XXX lock */
			if (ratecheck(&malloc_lasterr, &malloc_errintvl))
				printf("malloc(): allocation too large, "
				    "type = %d, size = %lu\n", type, size);
#endif
			return (NULL);
		} else
			panic("malloc: allocation too large, "
			    "type = %d, size = %lu\n", type, size);
	}

	indx = BUCKETINDX(size);
	if (size > MAXALLOCSAVE)
		allocsize = round_page(size);
	else
		allocsize = 1 << indx;
	kbp = &bucket[indx];
	mtx_enter(&malloc_mtx);
#ifdef KMEMSTATS
	while (ksp->ks_memuse >= ksp->ks_limit) {
		if (flags & M_NOWAIT) {
			mtx_leave(&malloc_mtx);
			return (NULL);
		}
#ifdef DIAGNOSTIC
		if (ISSET(flags, M_WAITOK) && curproc == &proc0)
			panic("%s: cannot sleep for memory during boot",
			    __func__);
#endif
		if (ksp->ks_limblocks < 65535)
			ksp->ks_limblocks++;
		msleep_nsec(ksp, &malloc_mtx, PSWP+2, memname[type], INFSLP);
	}
	ksp->ks_memuse += allocsize; /* account for this early */
	ksp->ks_size |= 1 << indx;
#endif
	if (XSIMPLEQ_FIRST(&kbp->kb_freelist) == NULL) {
		mtx_leave(&malloc_mtx);
		npg = atop(round_page(allocsize));
		s = splvm();
		va = (caddr_t)uvm_km_kmemalloc_pla(kmem_map, NULL,
		    (vsize_t)ptoa(npg), 0,
		    ((flags & M_NOWAIT) ? UVM_KMF_NOWAIT : 0) |
		    ((flags & M_CANFAIL) ? UVM_KMF_CANFAIL : 0),
		    no_constraint.ucr_low, no_constraint.ucr_high,
		    0, 0, 0);
		splx(s);
		if (va == NULL) {
			/*
			 * uvm_km_kmemalloc_pla() can return NULL, even if it
			 * can wait, if there is no map space available,
			 * because it can't fix that problem.  Neither can we,
			 * right now.  (We should release pages which are
			 * completely free and which are in buckets with too
			 * many free elements.)
			 */
			if ((flags & (M_NOWAIT|M_CANFAIL)) == 0)
				panic("malloc: out of space in kmem_map");

#ifdef KMEMSTATS
			mtx_enter(&malloc_mtx);
			ksp->ks_memuse -= allocsize;
			wake = ksp->ks_memuse + allocsize >= ksp->ks_limit &&
			    ksp->ks_memuse < ksp->ks_limit;
			mtx_leave(&malloc_mtx);
			if (wake)
				wakeup(ksp);
#endif
			return (NULL);
		}
		mtx_enter(&malloc_mtx);
#ifdef KMEMSTATS
		kbp->kb_total += kbp->kb_elmpercl;
#endif
		kup = btokup(va);
		kup->ku_indx = indx;
#ifdef DIAGNOSTIC
		freshalloc = 1;
#endif
		if (allocsize > MAXALLOCSAVE) {
			kup->ku_pagecnt = npg;
			goto out;
		}
#ifdef KMEMSTATS
		kup->ku_freecnt = kbp->kb_elmpercl;
		kbp->kb_totalfree += kbp->kb_elmpercl;
#endif
		cp = va + (npg * PAGE_SIZE) - allocsize;
		for (;;) {
			freep = (struct kmem_freelist *)cp;
#ifdef DIAGNOSTIC
			/*
			 * Copy in known text to detect modification
			 * after freeing.
			 */
			poison_mem(cp, allocsize);
			freep->kf_type = M_FREE;
#endif /* DIAGNOSTIC */
			XSIMPLEQ_INSERT_HEAD(&kbp->kb_freelist, freep,
			    kf_flist);
			if (cp <= va)
				break;
			cp -= allocsize;
		}
	} else {
#ifdef DIAGNOSTIC
		freshalloc = 0;
#endif
	}
	freep = XSIMPLEQ_FIRST(&kbp->kb_freelist);
	XSIMPLEQ_REMOVE_HEAD(&kbp->kb_freelist, kf_flist);
	va = (caddr_t)freep;
#ifdef DIAGNOSTIC
	savedtype = (unsigned)freep->kf_type < M_LAST ?
		memname[freep->kf_type] : "???";
	if (freshalloc == 0 && XSIMPLEQ_FIRST(&kbp->kb_freelist)) {
		int rv;
		vaddr_t addr = (vaddr_t)XSIMPLEQ_FIRST(&kbp->kb_freelist);

		vm_map_lock(kmem_map);
		rv = uvm_map_checkprot(kmem_map, addr,
		    addr + sizeof(struct kmem_freelist), PROT_WRITE);
		vm_map_unlock(kmem_map);

		if (!rv) {
			printf("%s %zd of object %p size 0x%lx %s %s"
			    " (invalid addr %p)\n",
			    "Data modified on freelist: word",
			    (int32_t *)&addr - (int32_t *)kbp, va, size,
			    "previous type", savedtype, (void *)addr);
		}
	}

	/* Fill the fields that we've used with poison */
	poison_mem(freep, sizeof(*freep));

	/* and check that the data hasn't been modified. */
	if (freshalloc == 0) {
		size_t pidx;
		uint32_t pval;
		if (poison_check(va, allocsize, &pidx, &pval)) {
			panic("%s %zd of object %p size 0x%lx %s %s"
			    " (0x%x != 0x%x)\n",
			    "Data modified on freelist: word",
			    pidx, va, size, "previous type",
			    savedtype, ((int32_t*)va)[pidx], pval);
		}
	}

	freep->kf_spare0 = 0;
#endif /* DIAGNOSTIC */
#ifdef KMEMSTATS
	kup = btokup(va);
	if (kup->ku_indx != indx)
		panic("malloc: wrong bucket");
	if (kup->ku_freecnt == 0)
		panic("malloc: lost data");
	kup->ku_freecnt--;
	kbp->kb_totalfree--;
out:
	kbp->kb_calls++;
	ksp->ks_inuse++;
	ksp->ks_calls++;
	if (ksp->ks_memuse > ksp->ks_maxused)
		ksp->ks_maxused = ksp->ks_memuse;
#else
out:
#endif
	mtx_leave(&malloc_mtx);

	if ((flags & M_ZERO) && va != NULL)
		memset(va, 0, size);

	TRACEPOINT(uvm, malloc, type, va, size, flags);

	return (va);
}
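
/*
 * Example usage, per malloc(9) (a hypothetical caller, not part of
 * this file):
 *
 *	struct foo *fp;
 *
 *	fp = malloc(sizeof(*fp), M_DEVBUF, M_WAITOK | M_ZERO);
 *	...
 *	free(fp, M_DEVBUF, sizeof(*fp));
 *
 * M_WAITOK callers may sleep until memory is available; M_NOWAIT
 * callers must be prepared for a NULL return.
 */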

/*
 * Free a block of memory allocated by malloc.
 */
void
free(void *addr, int type, size_t freedsize)
{
	struct kmembuckets *kbp;
	struct kmemusage *kup;
	struct kmem_freelist *freep;
	long size;
	int s;
#ifdef DIAGNOSTIC
	long alloc;
#endif
#ifdef KMEMSTATS
	struct kmemstats *ksp = &kmemstats[type];
	int wake;
#endif

	if (addr == NULL)
		return;

#ifdef DIAGNOSTIC
	if (addr < (void *)kmembase || addr >= (void *)kmemlimit)
		panic("free: non-malloced addr %p type %s", addr,
		    memname[type]);
#endif

	TRACEPOINT(uvm, free, type, addr, freedsize);

	mtx_enter(&malloc_mtx);
	kup = btokup(addr);
	size = 1 << kup->ku_indx;
	kbp = &bucket[kup->ku_indx];
	if (size > MAXALLOCSAVE)
		size = kup->ku_pagecnt << PAGE_SHIFT;
#ifdef DIAGNOSTIC
#if 0
	if (freedsize == 0) {
		static int zerowarnings;
		if (zerowarnings < 5) {
			zerowarnings++;
			printf("free with zero size: (%d)\n", type);
#ifdef DDB
			db_stack_dump();
#endif
		}
	}
#endif
	if (freedsize != 0 && freedsize > size)
		panic("free: size too large %zu > %ld (%p) type %s",
		    freedsize, size, addr, memname[type]);
	if (freedsize != 0 && size > MINALLOCSIZE && freedsize <= size / 2)
		panic("free: size too small %zu <= %ld / 2 (%p) type %s",
		    freedsize, size, addr, memname[type]);
	/*
	 * Check for returns of data that do not point to the
	 * beginning of the allocation.
	 */
	if (size > PAGE_SIZE)
		alloc = addrmask[BUCKETINDX(PAGE_SIZE)];
	else
		alloc = addrmask[kup->ku_indx];
	if (((u_long)addr & alloc) != 0)
		panic("free: unaligned addr %p, size %ld, type %s, mask %ld",
			addr, size, memname[type], alloc);
#endif /* DIAGNOSTIC */
	if (size > MAXALLOCSAVE) {
		u_short pagecnt = kup->ku_pagecnt;

		kup->ku_indx = 0;
		kup->ku_pagecnt = 0;
		mtx_leave(&malloc_mtx);
		s = splvm();
		uvm_km_free(kmem_map, (vaddr_t)addr, ptoa(pagecnt));
		splx(s);
#ifdef KMEMSTATS
		mtx_enter(&malloc_mtx);
		ksp->ks_memuse -= size;
		wake = ksp->ks_memuse + size >= ksp->ks_limit &&
		    ksp->ks_memuse < ksp->ks_limit;
		ksp->ks_inuse--;
		kbp->kb_total -= 1;
		mtx_leave(&malloc_mtx);
		if (wake)
			wakeup(ksp);
#endif
		return;
	}
	freep = (struct kmem_freelist *)addr;
#ifdef DIAGNOSTIC
	/*
	 * Check for multiple frees. Use a quick check to see if
	 * it looks free before laboriously searching the freelist.
	 */
	if (freep->kf_spare0 == poison_value(freep)) {
		struct kmem_freelist *fp;
		XSIMPLEQ_FOREACH(fp, &kbp->kb_freelist, kf_flist) {
			if (addr != fp)
				continue;
			printf("multiply freed item %p\n", addr);
			panic("free: duplicated free");
		}
	}
	/*
	 * Copy in known text to detect modification after freeing
	 * and to make it look free. Also, save the type being freed
	 * so we can list likely culprit if modification is detected
	 * when the object is reallocated.
	 */
	poison_mem(addr, size);
	freep->kf_spare0 = poison_value(freep);

	freep->kf_type = type;
#endif /* DIAGNOSTIC */
#ifdef KMEMSTATS
	kup->ku_freecnt++;
	if (kup->ku_freecnt >= kbp->kb_elmpercl) {
		if (kup->ku_freecnt > kbp->kb_elmpercl)
			panic("free: multiple frees");
		else if (kbp->kb_totalfree > kbp->kb_highwat)
			kbp->kb_couldfree++;
	}
	kbp->kb_totalfree++;
	ksp->ks_memuse -= size;
	wake = ksp->ks_memuse + size >= ksp->ks_limit &&
	    ksp->ks_memuse < ksp->ks_limit;
	ksp->ks_inuse--;
#endif
	XSIMPLEQ_INSERT_TAIL(&kbp->kb_freelist, freep, kf_flist);
	mtx_leave(&malloc_mtx);
#ifdef KMEMSTATS
	if (wake)
		wakeup(ksp);
#endif
}

/*
 * Compute the number of pages that kmem_map will map, that is,
 * the size of the kernel malloc arena.
 */
void
kmeminit_nkmempages(void)
{
	u_int npages;

	if (nkmempages != 0) {
		/*
		 * It's already been set (by us being here before, or
		 * by patching or kernel config options), bail out now.
		 */
		return;
	}

	/*
	 * We can't initialize these variables at compilation time, since
	 * the page size may not be known (on sparc GENERIC kernels, for
	 * example). But we still want the MD code to be able to provide
	 * better values.
	 */
	if (nkmempages_min == 0)
		nkmempages_min = NKMEMPAGES_MIN;
	if (nkmempages_max == 0)
		nkmempages_max = NKMEMPAGES_MAX;

	/*
	 * We use the following (simple) formula:
	 *
	 *	- Starting point is physical memory / 4.
	 *
	 *	- Clamp it down to nkmempages_max.
	 *
	 *	- Round it up to nkmempages_min.
	 */
	npages = physmem / 4;

	if (npages > nkmempages_max)
		npages = nkmempages_max;

	if (npages < nkmempages_min)
		npages = nkmempages_min;

	nkmempages = npages;
}
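
/*
 * A worked example with illustrative numbers: on a machine with 4 GB
 * of RAM and 4 KB pages, physmem is 1048576 pages, so the starting
 * point is 262144 pages (1 GB of kmem_map).  Whether that value
 * survives depends on the MD NKMEMPAGES_MAX_DEFAULT; if the platform
 * caps the map at, say, 65536 pages, the clamp wins and nkmempages
 * ends up at 65536.
 */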

/*
 * Initialize the kernel memory allocator
 */
void
kmeminit(void)
{
	vaddr_t base, limit;
	long indx;

#ifdef DIAGNOSTIC
	if (sizeof(struct kmem_freelist) > (1 << MINBUCKET))
		panic("kmeminit: minbucket too small/struct freelist too big");
#endif

	/*
	 * Compute the number of kmem_map pages, if we have not
	 * done so already.
	 */
	kmeminit_nkmempages();
	base = vm_map_min(kernel_map);
	kmem_map = uvm_km_suballoc(kernel_map, &base, &limit,
	    (vsize_t)nkmempages << PAGE_SHIFT,
#ifdef KVA_GUARDPAGES
	    VM_MAP_INTRSAFE | VM_MAP_GUARDPAGES,
#else
	    VM_MAP_INTRSAFE,
#endif
	    FALSE, &kmem_map_store);
	kmembase = (char *)base;
	kmemlimit = (char *)limit;
	kmemusage = (struct kmemusage *) uvm_km_zalloc(kernel_map,
		(vsize_t)(nkmempages * sizeof(struct kmemusage)));
	for (indx = 0; indx < MINBUCKET + 16; indx++) {
		XSIMPLEQ_INIT(&bucket[indx].kb_freelist);
	}
#ifdef KMEMSTATS
	for (indx = 0; indx < MINBUCKET + 16; indx++) {
		if (1 << indx >= PAGE_SIZE)
			bucket[indx].kb_elmpercl = 1;
		else
			bucket[indx].kb_elmpercl = PAGE_SIZE / (1 << indx);
		bucket[indx].kb_highwat = 5 * bucket[indx].kb_elmpercl;
	}
	for (indx = 0; indx < M_LAST; indx++)
		kmemstats[indx].ks_limit = nkmempages * PAGE_SIZE * 6 / 10;
#endif
}
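
/*
 * The ks_limit computed above lets each malloc type consume at most
 * 60% of the arena.  For example, with nkmempages = 65536 and 4 KB
 * pages the arena is 256 MB, so every kmemstats entry starts out with
 * a limit of roughly 153 MB.
 */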

/*
 * Return kernel malloc statistics information.
 */
int
sysctl_malloc(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen, struct proc *p)
{
	struct kmembuckets kb;
#ifdef KMEMSTATS
	struct kmemstats km;
#endif
#if defined(KMEMSTATS) || defined(DIAGNOSTIC)
	int error;
#endif
	int i, siz;

	if (namelen != 2 && name[0] != KERN_MALLOC_BUCKETS &&
	    name[0] != KERN_MALLOC_KMEMNAMES)
		return (ENOTDIR);		/* overloaded */

	switch (name[0]) {
	case KERN_MALLOC_BUCKETS:
		/* Initialize the first time */
		if (buckstring_init == 0) {
			buckstring_init = 1;
			memset(buckstring, 0, sizeof(buckstring));
			for (siz = 0, i = MINBUCKET; i < MINBUCKET + 16; i++) {
				snprintf(buckstring + siz,
				    sizeof buckstring - siz,
				    "%d,", (u_int)(1<<i));
				siz += strlen(buckstring + siz);
			}
			/* Remove trailing comma */
			if (siz)
				buckstring[siz - 1] = '\0';
		}
		return (sysctl_rdstring(oldp, oldlenp, newp, buckstring));

	case KERN_MALLOC_BUCKET:
		mtx_enter(&malloc_mtx);
		memcpy(&kb, &bucket[BUCKETINDX(name[1])], sizeof(kb));
		mtx_leave(&malloc_mtx);
		memset(&kb.kb_freelist, 0, sizeof(kb.kb_freelist));
		return (sysctl_rdstruct(oldp, oldlenp, newp, &kb, sizeof(kb)));
	case KERN_MALLOC_KMEMSTATS:
#ifdef KMEMSTATS
		if ((name[1] < 0) || (name[1] >= M_LAST))
			return (EINVAL);
		mtx_enter(&malloc_mtx);
		memcpy(&km, &kmemstats[name[1]], sizeof(km));
		mtx_leave(&malloc_mtx);
		return (sysctl_rdstruct(oldp, oldlenp, newp, &km, sizeof(km)));
#else
		return (EOPNOTSUPP);
#endif
	case KERN_MALLOC_KMEMNAMES:
#if defined(KMEMSTATS) || defined(DIAGNOSTIC)
		error = rw_enter(&sysctl_kmemlock, RW_WRITE|RW_INTR);
		if (error)
			return (error);
		if (memall == NULL) {
			int totlen;

			/* Figure out how large a buffer we need */
			for (totlen = 0, i = 0; i < M_LAST; i++) {
				if (memname[i])
					totlen += strlen(memname[i]);
				totlen++;
			}
			memall = malloc(totlen + M_LAST, M_SYSCTL,
			    M_WAITOK|M_ZERO);
			for (siz = 0, i = 0; i < M_LAST; i++) {
				snprintf(memall + siz,
				    totlen + M_LAST - siz,
				    "%s,", memname[i] ? memname[i] : "");
				siz += strlen(memall + siz);
			}
			/* Remove trailing comma */
			if (siz)
				memall[siz - 1] = '\0';

			/* Now, convert all spaces to underscores */
			for (i = 0; i < totlen; i++)
				if (memall[i] == ' ')
					memall[i] = '_';
		}
		rw_exit_write(&sysctl_kmemlock);
		return (sysctl_rdstring(oldp, oldlenp, newp, memall));
#else
		return (EOPNOTSUPP);
#endif
	default:
		return (EOPNOTSUPP);
	}
	/* NOTREACHED */
}
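
/*
 * A sketch of reaching these nodes from userland (hypothetical
 * caller; it assumes the usual CTL_KERN/KERN_MALLOCSTATS MIB path
 * that vmstat(8) uses to read these statistics):
 *
 *	int mib[4] = { CTL_KERN, KERN_MALLOCSTATS, KERN_MALLOC_BUCKET,
 *	    128 };
 *	struct kmembuckets kb;
 *	size_t len = sizeof(kb);
 *
 *	if (sysctl(mib, 4, &kb, &len, NULL, 0) == -1)
 *		err(1, "sysctl");
 *	printf("128-byte bucket: %llu calls\n",
 *	    (unsigned long long)kb.kb_calls);
 */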

/*
 * Round up a size to how much malloc would actually allocate.
 */
size_t
malloc_roundup(size_t sz)
{
	if (sz > MAXALLOCSAVE)
		return round_page(sz);

	return (1 << BUCKETINDX(sz));
}
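
/*
 * For example, malloc_roundup(100) returns 128: requests at or below
 * MAXALLOCSAVE come from power-of-two buckets, so 100 bytes lands in
 * the 2^7 bucket, while anything larger is rounded up to a whole
 * number of pages.
 */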

#if defined(DDB)

void
malloc_printit(
    int (*pr)(const char *, ...) __attribute__((__format__(__kprintf__,1,2))))
{
#ifdef KMEMSTATS
	struct kmemstats *km;
	int i;

	(*pr)("%15s %5s  %6s  %7s  %6s %9s %8s\n",
	    "Type", "InUse", "MemUse", "HighUse", "Limit", "Requests",
	    "Type Lim");
	for (i = 0, km = kmemstats; i < M_LAST; i++, km++) {
		if (!km->ks_calls || !memname[i])
			continue;

		(*pr)("%15s %5ld %6ldK %7ldK %6ldK %9ld %8d\n",
		    memname[i], km->ks_inuse, km->ks_memuse / 1024,
		    km->ks_maxused / 1024, km->ks_limit / 1024,
		    km->ks_calls, km->ks_limblocks);
	}
#else
	(*pr)("No KMEMSTATS compiled in\n");
#endif
}
#endif /* DDB */

/*
 * Copyright (c) 2008 Otto Moerbeek <otto@drijf.net>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * This is sqrt(SIZE_MAX+1), as s1*s2 <= SIZE_MAX
 * if both s1 < MUL_NO_OVERFLOW and s2 < MUL_NO_OVERFLOW
 */
#define MUL_NO_OVERFLOW	(1UL << (sizeof(size_t) * 4))

void *
mallocarray(size_t nmemb, size_t size, int type, int flags)
{
	if ((nmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
	    nmemb > 0 && SIZE_MAX / nmemb < size) {
		if (flags & M_CANFAIL)
			return (NULL);
		panic("mallocarray: overflow %zu * %zu", nmemb, size);
	}
	return (malloc(size * nmemb, type, flags));
}
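
/*
 * A quick check of the guard above (illustrative, 64-bit size_t):
 * MUL_NO_OVERFLOW is 2^32, so nmemb = 2^20 and size = 2^20 skip the
 * division entirely because their product (2^40) cannot exceed
 * SIZE_MAX.  With nmemb = 2^33 and size = 3, the first test fires,
 * but SIZE_MAX / 2^33 (about 2^31) is not less than 3, so the
 * multiplication is still safe and malloc() is called normally.
 */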