/*	$OpenBSD: kern_malloc.c,v 1.125 2014/11/18 02:37:31 tedu Exp $	*/
/*	$NetBSD: kern_malloc.c,v 1.15.4.2 1996/06/13 17:10:56 cgd Exp $	*/

/*
 * Copyright (c) 1987, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_malloc.c	8.3 (Berkeley) 1/4/94
 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/time.h>
#include <sys/rwlock.h>

#include <uvm/uvm_extern.h>

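/*
 * BUCKETINDX() maps a request size to the index of the smallest
 * power-of-two bucket that can hold it, using a four-step binary
 * search over the 16 buckets instead of a shift loop.  A worked
 * example, assuming MINBUCKET is 4 (16-byte minimum allocation, as
 * on most ports): for sz = 100, b starts at 11 and d at 4; the
 * probes 100 <= 2048 (b = 7), 100 <= 128 (b = 5), 100 <= 32 (b = 6)
 * and the final 100 <= 64 correction leave b = 7, i.e. the 128-byte
 * bucket.
 */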
static
#ifndef SMALL_KERNEL
__inline__
#endif
long BUCKETINDX(size_t sz)
{
	long b, d;

	/* note that this relies upon MINALLOCSIZE being 1 << MINBUCKET */
	b = 7 + MINBUCKET; d = 4;
	while (d != 0) {
		if (sz <= (1 << b))
			b -= d;
		else
			b += d;
		d >>= 1;
	}
	if (sz > (1 << b))
		b += 1;
	return b;
}

static struct vm_map kmem_map_store;
struct vm_map *kmem_map = NULL;

/*
 * Default number of pages in kmem_map.  We attempt to calculate this
 * at run-time, but allow it to be either patched or set in the kernel
 * config file.
 */
#ifndef NKMEMPAGES
#define	NKMEMPAGES	0
#endif
u_int	nkmempages = NKMEMPAGES;

/*
 * Defaults for lower- and upper-bounds for the kmem_map page count.
 * Can be overridden by kernel config options.
 */
#ifndef	NKMEMPAGES_MIN
#define	NKMEMPAGES_MIN	0
#endif
u_int	nkmempages_min = 0;

#ifndef NKMEMPAGES_MAX
#define	NKMEMPAGES_MAX	NKMEMPAGES_MAX_DEFAULT
#endif
u_int	nkmempages_max = 0;

struct kmembuckets bucket[MINBUCKET + 16];
#ifdef KMEMSTATS
struct kmemstats kmemstats[M_LAST];
#endif
struct kmemusage *kmemusage;
char *kmembase, *kmemlimit;
char buckstring[16 * sizeof("123456,")];
int buckstring_init = 0;
#if defined(KMEMSTATS) || defined(DIAGNOSTIC) || defined(FFS_SOFTUPDATES)
char *memname[] = INITKMEMNAMES;
char *memall = NULL;
struct rwlock sysctl_kmemlock = RWLOCK_INITIALIZER("sysctlklk");
#endif

/*
 * Normally the freelist structure is used only to hold the list pointer
 * for free objects.  However, when running with diagnostics, the first
 * 8 bytes of the structure are reserved for diagnostic information,
 * and the free list pointer is moved to offset 8.  Since the first
 * 8 bytes are the portion of the structure most often modified, this
 * helps to detect memory reuse problems and avoid free list corruption.
 */
struct kmem_freelist {
	int32_t	kf_spare0;
	int16_t	kf_type;
	int16_t	kf_spare1;
	XSIMPLEQ_ENTRY(kmem_freelist) kf_flist;
};
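
/*
 * A note on the list type: XSIMPLEQ is the XOR'd variant of SIMPLEQ
 * from <sys/queue.h>; its link pointers are stored XOR'd with a
 * random per-queue cookie, so a stray write through a dangling
 * pointer into freed memory tends to produce a wild, detectable
 * address rather than a silently corrupted freelist.
 */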

#ifdef DIAGNOSTIC
/*
 * This structure provides a set of masks to catch unaligned frees.
 */
const long addrmask[] = { 0,
	0x00000001, 0x00000003, 0x00000007, 0x0000000f,
	0x0000001f, 0x0000003f, 0x0000007f, 0x000000ff,
	0x000001ff, 0x000003ff, 0x000007ff, 0x00000fff,
	0x00001fff, 0x00003fff, 0x00007fff, 0x0000ffff,
};

#endif /* DIAGNOSTIC */
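
/*
 * For example, a chunk from the 64-byte bucket must start on a
 * 64-byte boundary, so free() checks the pointer against
 * addrmask[6] == 0x3f; any nonzero low bits mean the caller is
 * returning an interior pointer rather than the allocation start.
 */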

#ifndef SMALL_KERNEL
struct timeval malloc_errintvl = { 5, 0 };
struct timeval malloc_lasterr;
#endif

/*
 * Allocate a block of memory
 */
void *
malloc(size_t size, int type, int flags)
{
	struct kmembuckets *kbp;
	struct kmemusage *kup;
	struct kmem_freelist *freep;
	long indx, npg, allocsize;
	int s;
	caddr_t va, cp;
#ifdef DIAGNOSTIC
	int freshalloc;
	char *savedtype;
#endif
#ifdef KMEMSTATS
	struct kmemstats *ksp = &kmemstats[type];

	if (((unsigned long)type) <= 1 || ((unsigned long)type) >= M_LAST)
		panic("malloc: bogus type %d", type);
#endif

	KASSERT(flags & (M_WAITOK | M_NOWAIT));

	if ((flags & M_NOWAIT) == 0) {
		extern int pool_debug;
#ifdef DIAGNOSTIC
		assertwaitok();
		if (pool_debug == 2)
			yield();
#endif
		if (!cold && pool_debug) {
			KERNEL_UNLOCK();
			KERNEL_LOCK();
		}
	}

#ifdef MALLOC_DEBUG
	if (debug_malloc(size, type, flags, (void **)&va)) {
		if ((flags & M_ZERO) && va != NULL)
			memset(va, 0, size);
		return (va);
	}
#endif

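	/*
	 * The cap below matches what the allocator can track: a large
	 * allocation's page count is stored in kup->ku_pagecnt, and
	 * 65535 pages is the most that counter can record (a 16-bit
	 * field, judging by the limit).
	 */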
	if (size > 65535 * PAGE_SIZE) {
		if (flags & M_CANFAIL) {
#ifndef SMALL_KERNEL
			if (ratecheck(&malloc_lasterr, &malloc_errintvl))
				printf("malloc(): allocation too large, "
				    "type = %d, size = %lu\n", type, size);
#endif
			return (NULL);
		} else
			panic("malloc: allocation too large, "
			    "type = %d, size = %lu\n", type, size);
	}

	indx = BUCKETINDX(size);
	kbp = &bucket[indx];
	s = splvm();
#ifdef KMEMSTATS
	while (ksp->ks_memuse >= ksp->ks_limit) {
		if (flags & M_NOWAIT) {
			splx(s);
			return (NULL);
		}
		if (ksp->ks_limblocks < 65535)
			ksp->ks_limblocks++;
		tsleep(ksp, PSWP+2, memname[type], 0);
	}
	ksp->ks_size |= 1 << indx;
#endif
	if (size > MAXALLOCSAVE)
		allocsize = round_page(size);
	else
		allocsize = 1 << indx;
	if (XSIMPLEQ_FIRST(&kbp->kb_freelist) == NULL) {
		npg = atop(round_page(allocsize));
		va = (caddr_t)uvm_km_kmemalloc_pla(kmem_map, NULL,
		    (vsize_t)ptoa(npg), 0,
		    ((flags & M_NOWAIT) ? UVM_KMF_NOWAIT : 0) |
		    ((flags & M_CANFAIL) ? UVM_KMF_CANFAIL : 0),
		    no_constraint.ucr_low, no_constraint.ucr_high,
		    0, 0, 0);
		if (va == NULL) {
			/*
			 * uvm_km_kmemalloc_pla() can return NULL, even if
			 * it can wait, if there is no map space available,
			 * because it can't fix that problem.  Neither can
			 * we, right now.  (We should release pages which
			 * are completely free and which are in buckets
			 * with too many free elements.)
			 */
			if ((flags & (M_NOWAIT|M_CANFAIL)) == 0)
				panic("malloc: out of space in kmem_map");
			splx(s);
			return (NULL);
		}
#ifdef KMEMSTATS
		kbp->kb_total += kbp->kb_elmpercl;
#endif
		kup = btokup(va);
		kup->ku_indx = indx;
#ifdef DIAGNOSTIC
		freshalloc = 1;
#endif
		if (allocsize > MAXALLOCSAVE) {
			kup->ku_pagecnt = npg;
#ifdef KMEMSTATS
			ksp->ks_memuse += allocsize;
#endif
			goto out;
		}
#ifdef KMEMSTATS
		kup->ku_freecnt = kbp->kb_elmpercl;
		kbp->kb_totalfree += kbp->kb_elmpercl;
#endif
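		/*
		 * Carve the fresh page run into allocsize-sized chunks,
		 * pushing them onto the freelist from the highest address
		 * down so that the first chunk handed out below is the
		 * lowest one.
		 */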
		cp = va + (npg * PAGE_SIZE) - allocsize;
		for (;;) {
			freep = (struct kmem_freelist *)cp;
#ifdef DIAGNOSTIC
			/*
			 * Copy in known text to detect modification
			 * after freeing.
			 */
			poison_mem(cp, allocsize);
			freep->kf_type = M_FREE;
#endif /* DIAGNOSTIC */
			XSIMPLEQ_INSERT_HEAD(&kbp->kb_freelist, freep, kf_flist);
			if (cp <= va)
				break;
			cp -= allocsize;
		}
	} else {
#ifdef DIAGNOSTIC
		freshalloc = 0;
#endif
	}
	freep = XSIMPLEQ_FIRST(&kbp->kb_freelist);
	XSIMPLEQ_REMOVE_HEAD(&kbp->kb_freelist, kf_flist);
	va = (caddr_t)freep;
#ifdef DIAGNOSTIC
	savedtype = (unsigned)freep->kf_type < M_LAST ?
		memname[freep->kf_type] : "???";
	if (freshalloc == 0 && XSIMPLEQ_FIRST(&kbp->kb_freelist)) {
		int rv;
		vaddr_t addr = (vaddr_t)XSIMPLEQ_FIRST(&kbp->kb_freelist);

		vm_map_lock(kmem_map);
		rv = uvm_map_checkprot(kmem_map, addr,
		    addr + sizeof(struct kmem_freelist), PROT_WRITE);
		vm_map_unlock(kmem_map);

		if (!rv)  {
			printf("%s %zd of object %p size 0x%lx %s %s"
			    " (invalid addr %p)\n",
			    "Data modified on freelist: word",
			    (int32_t *)&addr - (int32_t *)kbp, va, size,
			    "previous type", savedtype, (void *)addr);
		}
	}

	/* Fill the fields that we've used with poison */
	poison_mem(freep, sizeof(*freep));

	/* and check that the data hasn't been modified. */
	if (freshalloc == 0) {
		size_t pidx;
		uint32_t pval;
		if (poison_check(va, allocsize, &pidx, &pval)) {
			panic("%s %zd of object %p size 0x%lx %s %s"
			    " (0x%x != 0x%x)\n",
			    "Data modified on freelist: word",
			    pidx, va, size, "previous type",
			    savedtype, ((int32_t*)va)[pidx], pval);
		}
	}

	freep->kf_spare0 = 0;
#endif /* DIAGNOSTIC */
#ifdef KMEMSTATS
	kup = btokup(va);
	if (kup->ku_indx != indx)
		panic("malloc: wrong bucket");
	if (kup->ku_freecnt == 0)
		panic("malloc: lost data");
	kup->ku_freecnt--;
	kbp->kb_totalfree--;
	ksp->ks_memuse += 1 << indx;
out:
	kbp->kb_calls++;
	ksp->ks_inuse++;
	ksp->ks_calls++;
	if (ksp->ks_memuse > ksp->ks_maxused)
		ksp->ks_maxused = ksp->ks_memuse;
#else
out:
#endif
	splx(s);

	if ((flags & M_ZERO) && va != NULL)
		memset(va, 0, size);
	return (va);
}
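
/*
 * A typical (hypothetical) caller, for illustration: allocate one
 * zeroed softc-style structure, willing to sleep for the memory:
 *
 *	struct foo_softc *sc;
 *
 *	sc = malloc(sizeof(*sc), M_DEVBUF, M_WAITOK | M_ZERO);
 *
 * M_NOWAIT callers must instead check for a NULL return.
 */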

/*
 * Free a block of memory allocated by malloc.
 */
void
free(void *addr, int type, size_t freedsize)
{
	struct kmembuckets *kbp;
	struct kmemusage *kup;
	struct kmem_freelist *freep;
	long size;
	int s;
#ifdef DIAGNOSTIC
	long alloc;
#endif
#ifdef KMEMSTATS
	struct kmemstats *ksp = &kmemstats[type];
#endif

	if (addr == NULL)
		return;

#ifdef MALLOC_DEBUG
	if (debug_free(addr, type))
		return;
#endif

#ifdef DIAGNOSTIC
	if (addr < (void *)kmembase || addr >= (void *)kmemlimit)
		panic("free: non-malloced addr %p type %s", addr,
		    memname[type]);
#endif

	kup = btokup(addr);
	size = 1 << kup->ku_indx;
	kbp = &bucket[kup->ku_indx];
	if (size > MAXALLOCSAVE)
		size = kup->ku_pagecnt << PAGE_SHIFT;
	s = splvm();
#ifdef DIAGNOSTIC
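	/*
	 * Sanity-check the size the caller claims it is freeing: it
	 * must fit in this bucket, and for buckets above the minimum
	 * it must also exceed half the bucket size, since any smaller
	 * request would have been served from a smaller bucket.
	 */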
	if (freedsize != 0 && freedsize > size)
		panic("free: size too large %zu > %ld (%p) type %s",
		    freedsize, size, addr, memname[type]);
	if (freedsize != 0 && size > MINALLOCSIZE && freedsize < size / 2)
		panic("free: size too small %zu < %ld / 2 (%p) type %s",
		    freedsize, size, addr, memname[type]);
	/*
	 * Check for returns of data that do not point to the
	 * beginning of the allocation.
	 */
	if (size > PAGE_SIZE)
		alloc = addrmask[BUCKETINDX(PAGE_SIZE)];
	else
		alloc = addrmask[kup->ku_indx];
	if (((u_long)addr & alloc) != 0)
		panic("free: unaligned addr %p, size %ld, type %s, mask %ld",
			addr, size, memname[type], alloc);
#endif /* DIAGNOSTIC */
	if (size > MAXALLOCSAVE) {
		uvm_km_free(kmem_map, (vaddr_t)addr, ptoa(kup->ku_pagecnt));
#ifdef KMEMSTATS
		ksp->ks_memuse -= size;
		kup->ku_indx = 0;
		kup->ku_pagecnt = 0;
		if (ksp->ks_memuse + size >= ksp->ks_limit &&
		    ksp->ks_memuse < ksp->ks_limit)
			wakeup(ksp);
		ksp->ks_inuse--;
		kbp->kb_total -= 1;
#endif
		splx(s);
		return;
	}
	freep = (struct kmem_freelist *)addr;
#ifdef DIAGNOSTIC
	/*
	 * Check for multiple frees. Use a quick check to see if
	 * it looks free before laboriously searching the freelist.
	 */
	if (freep->kf_spare0 == poison_value(freep)) {
		struct kmem_freelist *fp;
		XSIMPLEQ_FOREACH(fp, &kbp->kb_freelist, kf_flist) {
			if (addr != fp)
				continue;
			printf("multiply freed item %p\n", addr);
			panic("free: duplicated free");
		}
	}
	/*
	 * Copy in known text to detect modification after freeing
	 * and to make it look free. Also, save the type being freed
	 * so we can list the likely culprit if modification is
	 * detected when the object is reallocated.
	 */
	poison_mem(addr, size);
	freep->kf_spare0 = poison_value(freep);

	freep->kf_type = type;
#endif /* DIAGNOSTIC */
#ifdef KMEMSTATS
	kup->ku_freecnt++;
	if (kup->ku_freecnt >= kbp->kb_elmpercl) {
		if (kup->ku_freecnt > kbp->kb_elmpercl)
			panic("free: multiple frees");
		else if (kbp->kb_totalfree > kbp->kb_highwat)
			kbp->kb_couldfree++;
	}
	kbp->kb_totalfree++;
	ksp->ks_memuse -= size;
	if (ksp->ks_memuse + size >= ksp->ks_limit &&
	    ksp->ks_memuse < ksp->ks_limit)
		wakeup(ksp);
	ksp->ks_inuse--;
#endif
	XSIMPLEQ_INSERT_TAIL(&kbp->kb_freelist, freep, kf_flist);
	splx(s);
}
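
/*
 * The matching (hypothetical) release for the malloc() example
 * above; passing the original size lets DIAGNOSTIC kernels verify
 * the claim, while 0 skips the check:
 *
 *	free(sc, M_DEVBUF, sizeof(*sc));
 */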

/*
 * Compute the number of pages that kmem_map will map, that is,
 * the size of the kernel malloc arena.
 */
void
kmeminit_nkmempages(void)
{
	u_int npages;

	if (nkmempages != 0) {
		/*
		 * It's already been set (by us being here before, or
		 * by patching or kernel config options), so bail out
		 * now.
		 */
		return;
	}

	/*
	 * We can't initialize these variables at compilation time, since
	 * the page size may not be known (on sparc GENERIC kernels, for
	 * example). But we still want the MD code to be able to provide
	 * better values.
	 */
	if (nkmempages_min == 0)
		nkmempages_min = NKMEMPAGES_MIN;
	if (nkmempages_max == 0)
		nkmempages_max = NKMEMPAGES_MAX;

	/*
	 * We use the following (simple) formula:
	 *
	 *	- Starting point is physical memory / 4.
	 *
	 *	- Clamp it down to nkmempages_max.
	 *
	 *	- Round it up to nkmempages_min.
	 */
	npages = physmem / 4;

	if (npages > nkmempages_max)
		npages = nkmempages_max;

	if (npages < nkmempages_min)
		npages = nkmempages_min;

	nkmempages = npages;
}
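
/*
 * A worked example, assuming 4 KB pages and 1 GB of RAM: physmem is
 * 262144 pages, so the starting point is 65536 pages (256 MB of
 * kmem_map virtual space), which is then clamped to the
 * port-specific nkmempages_max.
 */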

/*
 * Initialize the kernel memory allocator
 */
void
kmeminit(void)
{
	vaddr_t base, limit;
	long indx;

#ifdef DIAGNOSTIC
	if (sizeof(struct kmem_freelist) > (1 << MINBUCKET))
		panic("kmeminit: minbucket too small/struct freelist too big");
#endif

	/*
	 * Compute the number of kmem_map pages, if we have not
	 * done so already.
	 */
	kmeminit_nkmempages();
	base = vm_map_min(kernel_map);
	kmem_map = uvm_km_suballoc(kernel_map, &base, &limit,
	    (vsize_t)nkmempages << PAGE_SHIFT,
#ifdef KVA_GUARDPAGES
	    VM_MAP_INTRSAFE | VM_MAP_GUARDPAGES,
#else
	    VM_MAP_INTRSAFE,
#endif
	    FALSE, &kmem_map_store);
	kmembase = (char *)base;
	kmemlimit = (char *)limit;
	kmemusage = (struct kmemusage *) uvm_km_zalloc(kernel_map,
		(vsize_t)(nkmempages * sizeof(struct kmemusage)));
	for (indx = 0; indx < MINBUCKET + 16; indx++) {
		XSIMPLEQ_INIT(&bucket[indx].kb_freelist);
	}
#ifdef KMEMSTATS
	for (indx = 0; indx < MINBUCKET + 16; indx++) {
		if (1 << indx >= PAGE_SIZE)
			bucket[indx].kb_elmpercl = 1;
		else
			bucket[indx].kb_elmpercl = PAGE_SIZE / (1 << indx);
		bucket[indx].kb_highwat = 5 * bucket[indx].kb_elmpercl;
	}
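	/*
	 * Cap each type at 60% of the whole arena, presumably so
	 * that no single malloc type can exhaust kmem_map by itself.
	 */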
	for (indx = 0; indx < M_LAST; indx++)
		kmemstats[indx].ks_limit = nkmempages * PAGE_SIZE * 6 / 10;
#endif
#ifdef MALLOC_DEBUG
	debug_malloc_init();
#endif
}

/*
 * Return kernel malloc statistics information.
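 * Userland statistics tools (vmstat(8) with -m, for example) read
 * the bucket and per-type data through this sysctl node.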
 */
int
sysctl_malloc(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen, struct proc *p)
{
	struct kmembuckets kb;
#if defined(KMEMSTATS) || defined(DIAGNOSTIC) || defined(FFS_SOFTUPDATES)
	int error;
#endif
	int i, siz;

	if (namelen != 2 && name[0] != KERN_MALLOC_BUCKETS &&
	    name[0] != KERN_MALLOC_KMEMNAMES)
		return (ENOTDIR);		/* overloaded */

	switch (name[0]) {
	case KERN_MALLOC_BUCKETS:
		/* Initialize the first time */
		if (buckstring_init == 0) {
			buckstring_init = 1;
			memset(buckstring, 0, sizeof(buckstring));
			for (siz = 0, i = MINBUCKET; i < MINBUCKET + 16; i++) {
				snprintf(buckstring + siz,
				    sizeof buckstring - siz,
				    "%d,", (u_int)(1<<i));
				siz += strlen(buckstring + siz);
			}
			/* Remove trailing comma */
			if (siz)
				buckstring[siz - 1] = '\0';
		}
		return (sysctl_rdstring(oldp, oldlenp, newp, buckstring));

	case KERN_MALLOC_BUCKET:
		bcopy(&bucket[BUCKETINDX(name[1])], &kb, sizeof(kb));
		memset(&kb.kb_freelist, 0, sizeof(kb.kb_freelist));
		return (sysctl_rdstruct(oldp, oldlenp, newp, &kb, sizeof(kb)));
	case KERN_MALLOC_KMEMSTATS:
#ifdef KMEMSTATS
		if ((name[1] < 0) || (name[1] >= M_LAST))
			return (EINVAL);
		return (sysctl_rdstruct(oldp, oldlenp, newp,
		    &kmemstats[name[1]], sizeof(struct kmemstats)));
#else
		return (EOPNOTSUPP);
#endif
	case KERN_MALLOC_KMEMNAMES:
#if defined(KMEMSTATS) || defined(DIAGNOSTIC) || defined(FFS_SOFTUPDATES)
		error = rw_enter(&sysctl_kmemlock, RW_WRITE|RW_INTR);
		if (error)
			return (error);
		if (memall == NULL) {
			int totlen;

			/* Figure out how large a buffer we need */
			for (totlen = 0, i = 0; i < M_LAST; i++) {
				if (memname[i])
					totlen += strlen(memname[i]);
				totlen++;
			}
			memall = malloc(totlen + M_LAST, M_SYSCTL,
			    M_WAITOK|M_ZERO);
			for (siz = 0, i = 0; i < M_LAST; i++) {
				snprintf(memall + siz,
				    totlen + M_LAST - siz,
				    "%s,", memname[i] ? memname[i] : "");
				siz += strlen(memall + siz);
			}
			/* Remove trailing comma */
			if (siz)
				memall[siz - 1] = '\0';

			/* Now, convert all spaces to underscores */
			for (i = 0; i < totlen; i++)
				if (memall[i] == ' ')
					memall[i] = '_';
		}
		rw_exit_write(&sysctl_kmemlock);
		return (sysctl_rdstring(oldp, oldlenp, newp, memall));
#else
		return (EOPNOTSUPP);
#endif
	default:
		return (EOPNOTSUPP);
	}
	/* NOTREACHED */
}

/*
 * Round up a size to how much malloc would actually allocate.
 */
size_t
malloc_roundup(size_t sz)
{
	if (sz > MAXALLOCSAVE)
		return round_page(sz);

	return (1 << BUCKETINDX(sz));
}
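
/*
 * For instance, with a 16-byte minimum bucket, malloc_roundup(100)
 * is 128; requests above MAXALLOCSAVE are simply rounded up to a
 * whole number of pages.
 */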

#if defined(DDB)
#include <machine/db_machdep.h>
#include <ddb/db_interface.h>
#include <ddb/db_output.h>

void
malloc_printit(
    int (*pr)(const char *, ...) __attribute__((__format__(__kprintf__,1,2))))
{
#ifdef KMEMSTATS
	struct kmemstats *km;
	int i;

	(*pr)("%15s %5s  %6s  %7s  %6s %9s %8s %8s\n",
	    "Type", "InUse", "MemUse", "HighUse", "Limit", "Requests",
	    "Type Lim", "Kern Lim");
	for (i = 0, km = kmemstats; i < M_LAST; i++, km++) {
		if (!km->ks_calls || !memname[i])
			continue;

		(*pr)("%15s %5ld %6ldK %7ldK %6ldK %9ld %8d %8d\n",
		    memname[i], km->ks_inuse, km->ks_memuse / 1024,
		    km->ks_maxused / 1024, km->ks_limit / 1024,
		    km->ks_calls, km->ks_limblocks, km->ks_mapblocks);
	}
#else
	(*pr)("No KMEMSTATS compiled in\n");
#endif
}
#endif /* DDB */

/*
 * Copyright (c) 2008 Otto Moerbeek <otto@drijf.net>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * This is sqrt(SIZE_MAX+1), as s1*s2 <= SIZE_MAX
 * if both s1 < MUL_NO_OVERFLOW and s2 < MUL_NO_OVERFLOW
 */
#define MUL_NO_OVERFLOW	(1UL << (sizeof(size_t) * 4))
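
/*
 * On LP64, for example, sizeof(size_t) * 4 is 32, so MUL_NO_OVERFLOW
 * is 2^32 == sqrt(2^64): if both factors are below 2^32 their product
 * cannot wrap a 64-bit size_t, and the (more expensive) division test
 * below can be skipped entirely.
 */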

void *
mallocarray(size_t nmemb, size_t size, int type, int flags)
{
	if ((nmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
	    nmemb > 0 && SIZE_MAX / nmemb < size) {
		if (flags & M_CANFAIL)
			return (NULL);
		panic("mallocarray: overflow %zu * %zu", nmemb, size);
	}
	return (malloc(size * nmemb, type, flags));
}
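
/*
 * A typical (hypothetical) caller, for illustration: allocating a
 * caller-controlled number of fixed-size entries without writing the
 * overflow check by hand:
 *
 *	tbl = mallocarray(count, sizeof(*tbl), M_TEMP,
 *	    M_WAITOK | M_CANFAIL | M_ZERO);
 *	if (tbl == NULL)
 *		return (ENOMEM);
 */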