/*	$OpenBSD: kern_malloc.c,v 1.69 2007/04/12 21:47:45 miod Exp $	*/
/*	$NetBSD: kern_malloc.c,v 1.15.4.2 1996/06/13 17:10:56 cgd Exp $	*/

/*
 * Copyright (c) 1987, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_malloc.c	8.3 (Berkeley) 1/4/94
 */

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/time.h>
#include <sys/rwlock.h>

#include <uvm/uvm_extern.h>

static struct vm_map kmem_map_store;
struct vm_map *kmem_map = NULL;

#ifdef NKMEMCLUSTERS
#error NKMEMCLUSTERS is obsolete; remove it from your kernel config file and use NKMEMPAGES instead or let the kernel auto-size
#endif

/*
 * Default number of pages in kmem_map.  We attempt to calculate this
 * at run-time, but allow it to be either patched or set in the kernel
 * config file.
 */
#ifndef NKMEMPAGES
#define	NKMEMPAGES	0
#endif
u_int	nkmempages = NKMEMPAGES;

/*
 * Defaults for lower- and upper-bounds for the kmem_map page count.
 * Can be overridden by kernel config options.
 */
#ifndef	NKMEMPAGES_MIN
#define	NKMEMPAGES_MIN	NKMEMPAGES_MIN_DEFAULT
#endif
u_int	nkmempages_min = 0;

#ifndef NKMEMPAGES_MAX
#define	NKMEMPAGES_MAX	NKMEMPAGES_MAX_DEFAULT
#endif
u_int	nkmempages_max = 0;

struct kmembuckets bucket[MINBUCKET + 16];
struct kmemstats kmemstats[M_LAST];
struct kmemusage *kmemusage;
char *kmembase, *kmemlimit;
char buckstring[16 * sizeof("123456,")];
int buckstring_init = 0;
#if defined(KMEMSTATS) || defined(DIAGNOSTIC) || defined(FFS_SOFTUPDATES)
char *memname[] = INITKMEMNAMES;
char *memall = NULL;
struct rwlock sysctl_kmemlock = RWLOCK_INITIALIZER;
#endif

#ifdef DIAGNOSTIC
/*
 * This structure provides a set of masks to catch unaligned frees.
 */
const long addrmask[] = { 0,
	0x00000001, 0x00000003, 0x00000007, 0x0000000f,
	0x0000001f, 0x0000003f, 0x0000007f, 0x000000ff,
	0x000001ff, 0x000003ff, 0x000007ff, 0x00000fff,
	0x00001fff, 0x00003fff, 0x00007fff, 0x0000ffff,
};

/*
 * The WEIRD_ADDR is used as known text to copy into free objects so
 * that modifications after frees can be detected.
 */
#ifdef DEADBEEF0
#define WEIRD_ADDR	((unsigned) DEADBEEF0)
#else
#define WEIRD_ADDR	((unsigned) 0xdeadbeef)
#endif
#define MAX_COPY	32
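/*
 * Only the first MAX_COPY bytes of each free object are poisoned and
 * later re-checked; a freed 64-byte object, for example, has just its
 * first 32 bytes filled with WEIRD_ADDR.
 */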

/*
 * Normally the freelist structure is used only to hold the list pointer
 * for free objects.  However, when running with diagnostics, the first
 * 8 bytes of the structure hold only diagnostic information, and the
 * free list pointer is at offset 8 in the structure.  Since the first
 * 8 bytes are the portion of the structure most often modified, this
 * helps to detect memory reuse problems and avoid free list corruption.
 */
struct freelist {
	int32_t	spare0;
	int16_t	type;
	int16_t	spare1;
	caddr_t	next;
};
#else /* !DIAGNOSTIC */
struct freelist {
	caddr_t	next;
};
#endif /* DIAGNOSTIC */

#ifndef SMALL_KERNEL
struct timeval malloc_errintvl = { 5, 0 };
struct timeval malloc_lasterr;
#endif

/*
 * Allocate a block of memory
 */
void *
malloc(unsigned long size, int type, int flags)
{
	struct kmembuckets *kbp;
	struct kmemusage *kup;
	struct freelist *freep;
	long indx, npg, allocsize;
	int s;
	caddr_t va, cp, savedlist;
#ifdef DIAGNOSTIC
	int32_t *end, *lp;
	int copysize;
	char *savedtype;
#endif
#ifdef KMEMSTATS
	struct kmemstats *ksp = &kmemstats[type];

	if (((unsigned long)type) >= M_LAST)
		panic("malloc - bogus type");
#endif

#ifdef MALLOC_DEBUG
	if (debug_malloc(size, type, flags, (void **)&va))
		return (va);
#endif

	if (size > 65535 * PAGE_SIZE) {
		if (flags & M_CANFAIL) {
#ifndef SMALL_KERNEL
			if (ratecheck(&malloc_lasterr, &malloc_errintvl))
				printf("malloc(): allocation too large, "
				    "type = %d, size = %lu\n", type, size);
#endif
			return (NULL);
		} else
			panic("malloc: allocation too large");
	}

	indx = BUCKETINDX(size);
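	/*
	 * E.g. with MINBUCKET == 4 (16-byte minimum allocation), a
	 * 100-byte request maps to the 128-byte bucket, indx == 7.
	 */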
	kbp = &bucket[indx];
	s = splvm();
#ifdef KMEMSTATS
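	/*
	 * If this type has hit its ks_limit quota, fail M_NOWAIT
	 * requests outright and put other callers to sleep until a
	 * free() of this type drops usage below the limit and issues
	 * a wakeup(ksp).
	 */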
	while (ksp->ks_memuse >= ksp->ks_limit) {
		if (flags & M_NOWAIT) {
			splx(s);
			return (NULL);
		}
		if (ksp->ks_limblocks < 65535)
			ksp->ks_limblocks++;
		tsleep(ksp, PSWP+2, memname[type], 0);
	}
	ksp->ks_size |= 1 << indx;
#endif
#ifdef DIAGNOSTIC
	copysize = 1 << indx < MAX_COPY ? 1 << indx : MAX_COPY;
#endif
	if (kbp->kb_next == NULL) {
		kbp->kb_last = NULL;
		if (size > MAXALLOCSAVE)
			allocsize = round_page(size);
		else
			allocsize = 1 << indx;
		npg = btoc(allocsize);
		va = (caddr_t) uvm_km_kmemalloc(kmem_map, NULL,
		    (vsize_t)ctob(npg),
		    ((flags & M_NOWAIT) ? UVM_KMF_NOWAIT : 0) |
		    ((flags & M_CANFAIL) ? UVM_KMF_CANFAIL : 0));
		if (va == NULL) {
			/*
			 * uvm_km_kmemalloc() can return NULL, even if it
			 * can wait, if there is no map space available,
			 * because it can't fix that problem.  Neither can
			 * we, right now.  (We should release pages which
			 * are completely free and which are in buckets
			 * with too many free elements.)
			 */
			if ((flags & (M_NOWAIT|M_CANFAIL)) == 0)
				panic("malloc: out of space in kmem_map");
			splx(s);
			return (NULL);
		}
#ifdef KMEMSTATS
		kbp->kb_total += kbp->kb_elmpercl;
#endif
		kup = btokup(va);
		kup->ku_indx = indx;
		if (allocsize > MAXALLOCSAVE) {
			kup->ku_pagecnt = npg;
#ifdef KMEMSTATS
			ksp->ks_memuse += allocsize;
#endif
			goto out;
		}
#ifdef KMEMSTATS
		kup->ku_freecnt = kbp->kb_elmpercl;
		kbp->kb_totalfree += kbp->kb_elmpercl;
#endif
		/*
		 * Just in case we blocked while allocating memory,
		 * and someone else also allocated memory for this
		 * bucket, don't assume the list is still empty.
		 */
		savedlist = kbp->kb_next;
		kbp->kb_next = cp = va + (npg * PAGE_SIZE) - allocsize;
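		/*
		 * Carve the new pages into allocsize-byte objects from
		 * the highest address down, linking each object to the
		 * one below it, until the object at va itself has been
		 * initialized.
		 */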
		for (;;) {
			freep = (struct freelist *)cp;
#ifdef DIAGNOSTIC
			/*
			 * Copy in known text to detect modification
			 * after freeing.
			 */
			end = (int32_t *)&cp[copysize];
			for (lp = (int32_t *)cp; lp < end; lp++)
				*lp = WEIRD_ADDR;
			freep->type = M_FREE;
#endif /* DIAGNOSTIC */
			if (cp <= va)
				break;
			cp -= allocsize;
			freep->next = cp;
		}
		freep->next = savedlist;
		if (kbp->kb_last == NULL)
			kbp->kb_last = (caddr_t)freep;
	}
	va = kbp->kb_next;
	kbp->kb_next = ((struct freelist *)va)->next;
#ifdef DIAGNOSTIC
	freep = (struct freelist *)va;
	savedtype = (unsigned)freep->type < M_LAST ?
		memname[freep->type] : "???";
	if (kbp->kb_next) {
		int rv;
		vaddr_t addr = (vaddr_t)kbp->kb_next;

		vm_map_lock(kmem_map);
		rv = uvm_map_checkprot(kmem_map, addr,
		    addr + sizeof(struct freelist), VM_PROT_WRITE);
		vm_map_unlock(kmem_map);

		if (!rv) {
			printf("%s %d of object %p size 0x%lx %s %s "
			    "(invalid addr %p)\n",
			    "Data modified on freelist: word",
			    (int32_t *)&kbp->kb_next - (int32_t *)kbp, va,
			    size, "previous type", savedtype, kbp->kb_next);
			kbp->kb_next = NULL;
		}
	}

	/* Fill the fields that we've used with WEIRD_ADDR */
#if BYTE_ORDER == BIG_ENDIAN
	freep->type = WEIRD_ADDR >> 16;
#endif
#if BYTE_ORDER == LITTLE_ENDIAN
	freep->type = (short)WEIRD_ADDR;
#endif
	end = (int32_t *)&freep->next +
	    (sizeof(freep->next) / sizeof(int32_t));
	for (lp = (int32_t *)&freep->next; lp < end; lp++)
		*lp = WEIRD_ADDR;

	/* and check that the data hasn't been modified. */
	end = (int32_t *)&va[copysize];
	for (lp = (int32_t *)va; lp < end; lp++) {
		if (*lp == WEIRD_ADDR)
			continue;
		printf("%s %d of object %p size 0x%lx %s %s (0x%x != 0x%x)\n",
			"Data modified on freelist: word", lp - (int32_t *)va,
			va, size, "previous type", savedtype, *lp, WEIRD_ADDR);
		break;
	}

	freep->spare0 = 0;
#endif /* DIAGNOSTIC */
#ifdef KMEMSTATS
	kup = btokup(va);
	if (kup->ku_indx != indx)
		panic("malloc: wrong bucket");
	if (kup->ku_freecnt == 0)
		panic("malloc: lost data");
	kup->ku_freecnt--;
	kbp->kb_totalfree--;
	ksp->ks_memuse += 1 << indx;
out:
	kbp->kb_calls++;
	ksp->ks_inuse++;
	ksp->ks_calls++;
	if (ksp->ks_memuse > ksp->ks_maxused)
		ksp->ks_maxused = ksp->ks_memuse;
#else
out:
#endif
	splx(s);
	return (va);
}

/*
 * Free a block of memory allocated by malloc.
 */
void
free(void *addr, int type)
{
	struct kmembuckets *kbp;
	struct kmemusage *kup;
	struct freelist *freep;
	long size;
	int s;
#ifdef DIAGNOSTIC
	caddr_t cp;
	int32_t *end, *lp;
	long alloc, copysize;
#endif
#ifdef KMEMSTATS
	struct kmemstats *ksp = &kmemstats[type];
#endif

#ifdef MALLOC_DEBUG
	if (debug_free(addr, type))
		return;
#endif

#ifdef DIAGNOSTIC
	if (addr < (void *)kmembase || addr >= (void *)kmemlimit)
		panic("free: non-malloced addr %p type %s", addr,
		    memname[type]);
#endif

	kup = btokup(addr);
	size = 1 << kup->ku_indx;
	kbp = &bucket[kup->ku_indx];
	s = splvm();
#ifdef DIAGNOSTIC
	/*
	 * Check for returns of data that do not point to the
	 * beginning of the allocation.
	 */
	if (size > PAGE_SIZE)
		alloc = addrmask[BUCKETINDX(PAGE_SIZE)];
	else
		alloc = addrmask[kup->ku_indx];
	if (((u_long)addr & alloc) != 0)
		panic("free: unaligned addr %p, size %ld, type %s, mask %ld",
			addr, size, memname[type], alloc);
#endif /* DIAGNOSTIC */
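	/*
	 * Allocations larger than MAXALLOCSAVE were taken as whole pages
	 * straight from kmem_map, so return them there instead of
	 * caching them on a bucket freelist.
	 */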
	if (size > MAXALLOCSAVE) {
		uvm_km_free(kmem_map, (vaddr_t)addr, ctob(kup->ku_pagecnt));
#ifdef KMEMSTATS
		size = kup->ku_pagecnt << PGSHIFT;
		ksp->ks_memuse -= size;
		kup->ku_indx = 0;
		kup->ku_pagecnt = 0;
		if (ksp->ks_memuse + size >= ksp->ks_limit &&
		    ksp->ks_memuse < ksp->ks_limit)
			wakeup(ksp);
		ksp->ks_inuse--;
		kbp->kb_total -= 1;
#endif
		splx(s);
		return;
	}
	freep = (struct freelist *)addr;
#ifdef DIAGNOSTIC
	/*
	 * Check for multiple frees. Use a quick check to see if
	 * it looks free before laboriously searching the freelist.
	 */
	if (freep->spare0 == WEIRD_ADDR) {
		for (cp = kbp->kb_next; cp;
		    cp = ((struct freelist *)cp)->next) {
			if (addr != cp)
				continue;
			printf("multiply freed item %p\n", addr);
			panic("free: duplicated free");
		}
	}
	/*
	 * Copy in known text to detect modification after freeing
	 * and to make it look free.  Also, save the type being freed
	 * so we can list the likely culprit if modification is detected
	 * when the object is reallocated.
	 */
	copysize = size < MAX_COPY ? size : MAX_COPY;
	end = (int32_t *)&((caddr_t)addr)[copysize];
	for (lp = (int32_t *)addr; lp < end; lp++)
		*lp = WEIRD_ADDR;
	freep->type = type;
#endif /* DIAGNOSTIC */
#ifdef KMEMSTATS
	kup->ku_freecnt++;
	if (kup->ku_freecnt >= kbp->kb_elmpercl) {
		if (kup->ku_freecnt > kbp->kb_elmpercl)
			panic("free: multiple frees");
		else if (kbp->kb_totalfree > kbp->kb_highwat)
			kbp->kb_couldfree++;
	}
	kbp->kb_totalfree++;
	ksp->ks_memuse -= size;
	if (ksp->ks_memuse + size >= ksp->ks_limit &&
	    ksp->ks_memuse < ksp->ks_limit)
		wakeup(ksp);
	ksp->ks_inuse--;
#endif
	if (kbp->kb_next == NULL)
		kbp->kb_next = addr;
	else
		((struct freelist *)kbp->kb_last)->next = addr;
	freep->next = NULL;
	kbp->kb_last = addr;
	splx(s);
}

/*
 * Compute the number of pages that kmem_map will map, that is,
 * the size of the kernel malloc arena.
 */
void
kmeminit_nkmempages(void)
{
	u_int npages;

	if (nkmempages != 0) {
		/*
		 * It's already been set (by us being here before, or
		 * by patching or kernel config options); bail out now.
		 */
		return;
	}

	/*
	 * We can't initialize these variables at compilation time, since
	 * the page size may not be known (on sparc GENERIC kernels, for
	 * example). But we still want the MD code to be able to provide
	 * better values.
	 */
	if (nkmempages_min == 0)
		nkmempages_min = NKMEMPAGES_MIN;
	if (nkmempages_max == 0)
		nkmempages_max = NKMEMPAGES_MAX;

	/*
	 * We use the following (simple) formula:
	 *
	 *	- Starting point is physical memory / 4.
	 *
	 *	- Clamp it down to nkmempages_max.
	 *
	 *	- Round it up to nkmempages_min.
	 */
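	/*
	 * For example, with 4KB pages and 512MB of physical memory,
	 * physmem is 131072 pages, so the starting point is 32768 pages
	 * (128MB of kmem_map) before the clamps below apply.
	 */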
	npages = physmem / 4;

	if (npages > nkmempages_max)
		npages = nkmempages_max;

	if (npages < nkmempages_min)
		npages = nkmempages_min;

	nkmempages = npages;
}

/*
 * Initialize the kernel memory allocator
 */
void
kmeminit(void)
{
	vaddr_t base, limit;
#ifdef KMEMSTATS
	long indx;
#endif

#ifdef DIAGNOSTIC
	if (sizeof(struct freelist) > (1 << MINBUCKET))
		panic("kmeminit: minbucket too small/struct freelist too big");
#endif

	/*
	 * Compute the number of kmem_map pages, if we have not
	 * done so already.
	 */
	kmeminit_nkmempages();
	base = vm_map_min(kernel_map);
	kmem_map = uvm_km_suballoc(kernel_map, &base, &limit,
	    (vsize_t)(nkmempages * PAGE_SIZE), VM_MAP_INTRSAFE, FALSE,
	    &kmem_map_store);
	kmembase = (char *)base;
	kmemlimit = (char *)limit;
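	/*
	 * One kmemusage entry is kept for each kmem_map page; btokup()
	 * maps an allocated address back to its entry.
	 */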
	kmemusage = (struct kmemusage *) uvm_km_zalloc(kernel_map,
		(vsize_t)(nkmempages * sizeof(struct kmemusage)));
#ifdef KMEMSTATS
	for (indx = 0; indx < MINBUCKET + 16; indx++) {
		if (1 << indx >= PAGE_SIZE)
			bucket[indx].kb_elmpercl = 1;
		else
			bucket[indx].kb_elmpercl = PAGE_SIZE / (1 << indx);
		bucket[indx].kb_highwat = 5 * bucket[indx].kb_elmpercl;
	}
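	/* Allow each type to use at most 60% of the kmem_map by default. */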
	for (indx = 0; indx < M_LAST; indx++)
		kmemstats[indx].ks_limit = nkmempages * PAGE_SIZE * 6 / 10;
#endif
#ifdef MALLOC_DEBUG
	debug_malloc_init();
#endif
}

/*
 * Return kernel malloc statistics information.
 */
int
sysctl_malloc(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen, struct proc *p)
{
	struct kmembuckets kb;
	int i, siz;

	if (namelen != 2 && name[0] != KERN_MALLOC_BUCKETS &&
	    name[0] != KERN_MALLOC_KMEMNAMES)
		return (ENOTDIR);		/* overloaded */

	switch (name[0]) {
	case KERN_MALLOC_BUCKETS:
		/* Initialize the first time */
		if (buckstring_init == 0) {
			buckstring_init = 1;
			bzero(buckstring, sizeof(buckstring));
			for (siz = 0, i = MINBUCKET; i < MINBUCKET + 16; i++) {
				snprintf(buckstring + siz,
				    sizeof buckstring - siz,
				    "%d,", (u_int)(1<<i));
				siz += strlen(buckstring + siz);
			}
			/* Remove trailing comma */
			if (siz)
				buckstring[siz - 1] = '\0';
		}
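		/* The result looks like "16,32,64,...,524288" with MINBUCKET == 4. */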
		return (sysctl_rdstring(oldp, oldlenp, newp, buckstring));

	case KERN_MALLOC_BUCKET:
		bcopy(&bucket[BUCKETINDX(name[1])], &kb, sizeof(kb));
		kb.kb_next = kb.kb_last = 0;
		return (sysctl_rdstruct(oldp, oldlenp, newp, &kb, sizeof(kb)));
	case KERN_MALLOC_KMEMSTATS:
#ifdef KMEMSTATS
		if ((name[1] < 0) || (name[1] >= M_LAST))
			return (EINVAL);
		return (sysctl_rdstruct(oldp, oldlenp, newp,
		    &kmemstats[name[1]], sizeof(struct kmemstats)));
#else
		return (EOPNOTSUPP);
#endif
	case KERN_MALLOC_KMEMNAMES:
#if defined(KMEMSTATS) || defined(DIAGNOSTIC) || defined(FFS_SOFTUPDATES)
		if (memall == NULL) {
			int totlen;

			i = rw_enter(&sysctl_kmemlock, RW_WRITE|RW_INTR);
			if (i)
				return (i);

			/* Figure out how large a buffer we need */
			for (totlen = 0, i = 0; i < M_LAST; i++) {
				if (memname[i])
					totlen += strlen(memname[i]);
				totlen++;
			}
			memall = malloc(totlen + M_LAST, M_SYSCTL, M_WAITOK);
			bzero(memall, totlen + M_LAST);
			for (siz = 0, i = 0; i < M_LAST; i++) {
				snprintf(memall + siz,
				    totlen + M_LAST - siz,
				    "%s,", memname[i] ? memname[i] : "");
				siz += strlen(memall + siz);
			}
			/* Remove trailing comma */
			if (siz)
				memall[siz - 1] = '\0';

			/* Now, convert all spaces to underscores */
			for (i = 0; i < totlen; i++)
				if (memall[i] == ' ')
					memall[i] = '_';
			rw_exit_write(&sysctl_kmemlock);
		}
		return (sysctl_rdstring(oldp, oldlenp, newp, memall));
#else
		return (EOPNOTSUPP);
#endif
	default:
		return (EOPNOTSUPP);
	}
	/* NOTREACHED */
}

/*
 * Round up a size to how much malloc would actually allocate.
 */
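/*
 * For example, malloc_roundup(100) returns 128 (the smallest bucket
 * that holds 100 bytes), while a size above MAXALLOCSAVE is rounded
 * up to a whole number of pages.
 */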
size_t
malloc_roundup(size_t sz)
{
	if (sz > MAXALLOCSAVE)
		return round_page(sz);

	return (1 << BUCKETINDX(sz));
}

#if defined(DDB)
#include <machine/db_machdep.h>
#include <ddb/db_interface.h>
#include <ddb/db_output.h>

void
malloc_printit(int (*pr)(const char *, ...))
{
#ifdef KMEMSTATS
	struct kmemstats *km;
	int i;

	(*pr)("%15s %5s  %6s  %7s  %6s %9s %8s %8s\n",
	    "Type", "InUse", "MemUse", "HighUse", "Limit", "Requests",
	    "Type Lim", "Kern Lim");
	for (i = 0, km = kmemstats; i < M_LAST; i++, km++) {
		if (!km->ks_calls || !memname[i])
			continue;

		(*pr)("%15s %5ld %6ldK %7ldK %6ldK %9ld %8d %8d\n",
		    memname[i], km->ks_inuse, km->ks_memuse / 1024,
		    km->ks_maxused / 1024, km->ks_limit / 1024,
		    km->ks_calls, km->ks_limblocks, km->ks_mapblocks);
	}
#else
	(*pr)("No KMEMSTATS compiled in\n");
#endif
}
#endif /* DDB */