/*	$NetBSD: kern_malloc.c,v 1.59 2001/06/05 04:39:02 thorpej Exp $	*/

/*
 * Copyright (c) 1996 Christopher G. Demetriou.  All rights reserved.
 * Copyright (c) 1987, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_malloc.c	8.4 (Berkeley) 5/20/95
 */

#include "opt_lockdebug.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/map.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/systm.h>

#include <uvm/uvm_extern.h>

static struct vm_map_intrsafe kmem_map_store;
struct vm_map *kmem_map = NULL;

#include "opt_kmempages.h"

#ifdef NKMEMCLUSTERS
#error NKMEMCLUSTERS is obsolete; remove it from your kernel config file and use NKMEMPAGES instead or let the kernel auto-size
#endif

/*
 * Default number of pages in kmem_map.  We attempt to calculate this
 * at run-time, but allow it to be either patched or set in the kernel
 * config file.
 */
#ifndef NKMEMPAGES
#define	NKMEMPAGES	0
#endif
int	nkmempages = NKMEMPAGES;
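/*
 * A value of 0 means "size kmem_map automatically in
 * kmeminit_nkmempages()"; a non-zero value (patched into the kernel
 * image or set via, e.g., an ``options NKMEMPAGES=8192'' line in the
 * kernel config file) is used as-is.
 */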

/*
 * Defaults for lower- and upper-bounds for the kmem_map page count.
 * Can be overridden by kernel config options.
 */
#ifndef	NKMEMPAGES_MIN
#define	NKMEMPAGES_MIN	NKMEMPAGES_MIN_DEFAULT
#endif

#ifndef NKMEMPAGES_MAX
#define	NKMEMPAGES_MAX	NKMEMPAGES_MAX_DEFAULT
#endif

#include "opt_kmemstats.h"
#include "opt_malloclog.h"

struct kmembuckets bucket[MINBUCKET + 16];
struct kmemstats kmemstats[M_LAST];
struct kmemusage *kmemusage;
char *kmembase, *kmemlimit;
const char * const memname[] = INITKMEMNAMES;

#ifdef MALLOCLOG
#ifndef MALLOCLOGSIZE
#define	MALLOCLOGSIZE	100000
#endif

struct malloclog {
	void *addr;
	long size;
	int type;
	int action;
	const char *file;
	long line;
} malloclog[MALLOCLOGSIZE];

long	malloclogptr;

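/*
 * domlog() records each allocation (action 1) and free (action 2) in
 * the circular malloclog ring; hitmlog() prints every logged entry for
 * a given address when freelist corruption is detected.
 */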
static void domlog __P((void *a, long size, int type, int action,
	const char *file, long line));
static void hitmlog __P((void *a));

static void
domlog(a, size, type, action, file, line)
	void *a;
	long size;
	int type;
	int action;
	const char *file;
	long line;
{

	malloclog[malloclogptr].addr = a;
	malloclog[malloclogptr].size = size;
	malloclog[malloclogptr].type = type;
	malloclog[malloclogptr].action = action;
	malloclog[malloclogptr].file = file;
	malloclog[malloclogptr].line = line;
	malloclogptr++;
	if (malloclogptr >= MALLOCLOGSIZE)
		malloclogptr = 0;
}

static void
hitmlog(a)
	void *a;
{
	struct malloclog *lp;
	long l;

#define	PRT \
	if (malloclog[l].addr == a && malloclog[l].action) { \
		lp = &malloclog[l]; \
		printf("malloc log entry %ld:\n", l); \
		printf("\taddr = %p\n", lp->addr); \
		printf("\tsize = %ld\n", lp->size); \
		printf("\ttype = %s\n", memname[lp->type]); \
		printf("\taction = %s\n", lp->action == 1 ? "alloc" : "free"); \
		printf("\tfile = %s\n", lp->file); \
		printf("\tline = %ld\n", lp->line); \
	}

	for (l = malloclogptr; l < MALLOCLOGSIZE; l++)
		PRT

	for (l = 0; l < malloclogptr; l++)
		PRT
}
#endif /* MALLOCLOG */

#ifdef DIAGNOSTIC
/*
 * This structure provides a set of masks to catch unaligned frees.
 */
const long addrmask[] = { 0,
	0x00000001, 0x00000003, 0x00000007, 0x0000000f,
	0x0000001f, 0x0000003f, 0x0000007f, 0x000000ff,
	0x000001ff, 0x000003ff, 0x000007ff, 0x00000fff,
	0x00001fff, 0x00003fff, 0x00007fff, 0x0000ffff,
};
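/*
 * addrmask[] is indexed by bucket index: an address handed to free()
 * must have none of the masked low bits set, e.g. an object from the
 * 128-byte bucket (index 7) is checked against mask 0x7f.
 */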

/*
 * The WEIRD_ADDR is used as known text to copy into free objects so
 * that modifications after frees can be detected.
 */
#define WEIRD_ADDR	((unsigned) 0xdeadbeef)
#ifdef DEBUG
#define MAX_COPY	PAGE_SIZE
#else
#define MAX_COPY	32
#endif
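/*
 * Only the first MAX_COPY bytes of each free object are filled with
 * the WEIRD_ADDR pattern and later checked, so in the non-DEBUG case
 * a 512-byte object, for example, has only its first 32 bytes covered.
 */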

/*
 * Normally the freelist structure is used only to hold the list pointer
 * for free objects.  However, when running with diagnostics, the first
 * 8 bytes of the structure are unused except for diagnostic information,
 * and the free list pointer is at offset 8 in the structure.  Since the
 * first 8 bytes are the portion of the structure most often modified, this
 * helps to detect memory reuse problems and avoid free list corruption.
 */
struct freelist {
	int32_t	spare0;
	int16_t	type;
	int16_t	spare1;
	caddr_t	next;
};
#else /* !DIAGNOSTIC */
struct freelist {
	caddr_t	next;
};
#endif /* DIAGNOSTIC */

/*
 * Allocate a block of memory
 */
#ifdef MALLOCLOG
void *
_malloc(size, type, flags, file, line)
	unsigned long size;
	int type, flags;
	const char *file;
	long line;
#else
void *
malloc(size, type, flags)
	unsigned long size;
	int type, flags;
#endif /* MALLOCLOG */
{
	struct kmembuckets *kbp;
	struct kmemusage *kup;
	struct freelist *freep;
	long indx, npg, allocsize;
	int s;
	caddr_t va, cp, savedlist;
#ifdef DIAGNOSTIC
	int32_t *end, *lp;
	int copysize;
	const char *savedtype;
#endif
#ifdef KMEMSTATS
	struct kmemstats *ksp = &kmemstats[type];

	if (__predict_false(((unsigned long)type) > M_LAST))
		panic("malloc - bogus type");
#endif
#ifdef LOCKDEBUG
	if ((flags & M_NOWAIT) == 0)
		simple_lock_only_held(NULL, "malloc");
#endif
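	/*
	 * BUCKETINDX(size) selects the smallest power-of-two bucket that
	 * will hold the request, e.g. a 100-byte request is served from
	 * the 128-byte bucket.
	 */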
	indx = BUCKETINDX(size);
	kbp = &bucket[indx];
	s = splvm();
#ifdef KMEMSTATS
	while (ksp->ks_memuse >= ksp->ks_limit) {
		if (flags & M_NOWAIT) {
			splx(s);
			return ((void *) NULL);
		}
		if (ksp->ks_limblocks < 65535)
			ksp->ks_limblocks++;
		tsleep((caddr_t)ksp, PSWP+2, memname[type], 0);
	}
	ksp->ks_size |= 1 << indx;
#endif
#ifdef DIAGNOSTIC
	copysize = 1 << indx < MAX_COPY ? 1 << indx : MAX_COPY;
#endif
	if (kbp->kb_next == NULL) {
		kbp->kb_last = NULL;
		if (size > MAXALLOCSAVE)
			allocsize = roundup(size, PAGE_SIZE);
		else
			allocsize = 1 << indx;
		npg = btoc(allocsize);
		va = (caddr_t) uvm_km_kmemalloc(kmem_map, uvmexp.kmem_object,
				(vsize_t)ctob(npg),
				(flags & M_NOWAIT) ? UVM_KMF_NOWAIT : 0);
		if (__predict_false(va == NULL)) {
			/*
			 * uvm_km_kmemalloc() can return NULL, even if it
			 * can wait, if there is no map space available,
			 * because it can't fix that problem.  Neither can
			 * we, right now.  (We should release pages which
			 * are completely free and which are in buckets
			 * with too many free elements.)
			 */
			if ((flags & M_NOWAIT) == 0)
				panic("malloc: out of space in kmem_map");
			splx(s);
			return ((void *) NULL);
		}
#ifdef KMEMSTATS
		kbp->kb_total += kbp->kb_elmpercl;
#endif
		kup = btokup(va);
		kup->ku_indx = indx;
		if (allocsize > MAXALLOCSAVE) {
			if (npg > 65535)
				panic("malloc: allocation too large");
			kup->ku_pagecnt = npg;
#ifdef KMEMSTATS
			ksp->ks_memuse += allocsize;
#endif
			goto out;
		}
#ifdef KMEMSTATS
		kup->ku_freecnt = kbp->kb_elmpercl;
		kbp->kb_totalfree += kbp->kb_elmpercl;
#endif
		/*
		 * Just in case we blocked while allocating memory,
		 * and someone else also allocated memory for this
		 * bucket, don't assume the list is still empty.
		 */
		savedlist = kbp->kb_next;
		kbp->kb_next = cp = va + (npg << PAGE_SHIFT) - allocsize;
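		/*
		 * Carve the new cluster into allocsize-byte objects,
		 * working from the top of the cluster down to va and
		 * linking each object onto the bucket's free list; the
		 * previously saved list is re-attached at the end.
		 */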
		for (;;) {
			freep = (struct freelist *)cp;
#ifdef DIAGNOSTIC
			/*
			 * Copy in known text to detect modification
			 * after freeing.
			 */
			end = (int32_t *)&cp[copysize];
			for (lp = (int32_t *)cp; lp < end; lp++)
				*lp = WEIRD_ADDR;
			freep->type = M_FREE;
#endif /* DIAGNOSTIC */
			if (cp <= va)
				break;
			cp -= allocsize;
			freep->next = cp;
		}
		freep->next = savedlist;
		if (kbp->kb_last == NULL)
			kbp->kb_last = (caddr_t)freep;
	}
	va = kbp->kb_next;
	kbp->kb_next = ((struct freelist *)va)->next;
#ifdef DIAGNOSTIC
	freep = (struct freelist *)va;
	savedtype = (unsigned)freep->type < M_LAST ?
		memname[freep->type] : "???";
	if (kbp->kb_next) {
		int rv;
		vaddr_t addr = (vaddr_t)kbp->kb_next;

		vm_map_lock(kmem_map);
		rv = uvm_map_checkprot(kmem_map, addr,
				       addr + sizeof(struct freelist),
				       VM_PROT_WRITE);
		vm_map_unlock(kmem_map);

		if (__predict_false(rv == 0)) {
			printf(
		    "%s %ld of object %p size %ld %s %s (invalid addr %p)\n",
			    "Data modified on freelist: word",
			    (long)((int32_t *)&kbp->kb_next - (int32_t *)kbp),
			    va, size, "previous type", savedtype, kbp->kb_next);
#ifdef MALLOCLOG
			hitmlog(va);
#endif
			kbp->kb_next = NULL;
		}
	}

	/* Fill the fields that we've used with WEIRD_ADDR */
#if BYTE_ORDER == BIG_ENDIAN
	freep->type = WEIRD_ADDR >> 16;
#endif
#if BYTE_ORDER == LITTLE_ENDIAN
	freep->type = (short)WEIRD_ADDR;
#endif
	end = (int32_t *)&freep->next +
	    (sizeof(freep->next) / sizeof(int32_t));
	for (lp = (int32_t *)&freep->next; lp < end; lp++)
		*lp = WEIRD_ADDR;

	/* and check that the data hasn't been modified. */
	end = (int32_t *)&va[copysize];
	for (lp = (int32_t *)va; lp < end; lp++) {
		if (__predict_true(*lp == WEIRD_ADDR))
			continue;
		printf("%s %ld of object %p size %ld %s %s (0x%x != 0x%x)\n",
		    "Data modified on freelist: word",
		    (long)(lp - (int32_t *)va), va, size, "previous type",
		    savedtype, *lp, WEIRD_ADDR);
#ifdef MALLOCLOG
		hitmlog(va);
#endif
		break;
	}

	freep->spare0 = 0;
#endif /* DIAGNOSTIC */
#ifdef KMEMSTATS
	kup = btokup(va);
	if (kup->ku_indx != indx)
		panic("malloc: wrong bucket");
	if (kup->ku_freecnt == 0)
		panic("malloc: lost data");
	kup->ku_freecnt--;
	kbp->kb_totalfree--;
	ksp->ks_memuse += 1 << indx;
out:
	kbp->kb_calls++;
	ksp->ks_inuse++;
	ksp->ks_calls++;
	if (ksp->ks_memuse > ksp->ks_maxused)
		ksp->ks_maxused = ksp->ks_memuse;
#else
out:
#endif
#ifdef MALLOCLOG
	domlog(va, size, type, 1, file, line);
#endif
	splx(s);
	return ((void *) va);
}

/*
 * Free a block of memory allocated by malloc.
 */
#ifdef MALLOCLOG
void
_free(addr, type, file, line)
	void *addr;
	int type;
	const char *file;
	long line;
#else
void
free(addr, type)
	void *addr;
	int type;
#endif /* MALLOCLOG */
{
	struct kmembuckets *kbp;
	struct kmemusage *kup;
	struct freelist *freep;
	long size;
	int s;
#ifdef DIAGNOSTIC
	caddr_t cp;
	int32_t *end, *lp;
	long alloc, copysize;
#endif
#ifdef KMEMSTATS
	struct kmemstats *ksp = &kmemstats[type];
#endif

#ifdef DIAGNOSTIC
	/*
	 * Ensure that we're free'ing something that we could
	 * have allocated in the first place.  That is, check
	 * to see that the address is within kmem_map.
	 */
	if (__predict_false((vaddr_t)addr < kmem_map->header.start ||
			    (vaddr_t)addr >= kmem_map->header.end))
		panic("free: addr %p not within kmem_map", addr);
#endif

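	/*
	 * btokup() maps the address to its kmemusage entry, which records
	 * the bucket index (and, for large allocations, the page count),
	 * so the original allocation size can be recovered without the
	 * caller having to pass it in.
	 */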
	kup = btokup(addr);
	size = 1 << kup->ku_indx;
	kbp = &bucket[kup->ku_indx];
	s = splvm();
#ifdef MALLOCLOG
	domlog(addr, 0, type, 2, file, line);
#endif
#ifdef DIAGNOSTIC
	/*
	 * Check for returns of data that do not point to the
	 * beginning of the allocation.
	 */
	if (size > PAGE_SIZE)
		alloc = addrmask[BUCKETINDX(PAGE_SIZE)];
	else
		alloc = addrmask[kup->ku_indx];
	if (((u_long)addr & alloc) != 0)
		panic("free: unaligned addr %p, size %ld, type %s, mask %ld\n",
			addr, size, memname[type], alloc);
#endif /* DIAGNOSTIC */
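	/*
	 * Allocations larger than MAXALLOCSAVE were taken as whole pages
	 * straight from kmem_map rather than from a bucket, so hand the
	 * pages back to the map instead of threading the object onto a
	 * free list.
	 */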
	if (size > MAXALLOCSAVE) {
		uvm_km_free(kmem_map, (vaddr_t)addr, ctob(kup->ku_pagecnt));
#ifdef KMEMSTATS
		size = kup->ku_pagecnt << PGSHIFT;
		ksp->ks_memuse -= size;
		kup->ku_indx = 0;
		kup->ku_pagecnt = 0;
		if (ksp->ks_memuse + size >= ksp->ks_limit &&
		    ksp->ks_memuse < ksp->ks_limit)
			wakeup((caddr_t)ksp);
		ksp->ks_inuse--;
		kbp->kb_total -= 1;
#endif
		splx(s);
		return;
	}
	freep = (struct freelist *)addr;
#ifdef DIAGNOSTIC
	/*
	 * Check for multiple frees. Use a quick check to see if
	 * it looks free before laboriously searching the freelist.
	 */
	if (__predict_false(freep->spare0 == WEIRD_ADDR)) {
		for (cp = kbp->kb_next; cp;
		    cp = ((struct freelist *)cp)->next) {
			if (addr != cp)
				continue;
			printf("multiply freed item %p\n", addr);
#ifdef MALLOCLOG
			hitmlog(addr);
#endif
			panic("free: duplicated free");
		}
	}
#ifdef LOCKDEBUG
	/*
	 * Check if we're freeing a locked simple lock.
	 */
	simple_lock_freecheck(addr, (char *)addr + size);
#endif
	/*
	 * Copy in known text to detect modification after freeing
	 * and to make it look free. Also, save the type being freed
	 * so we can list the likely culprit if modification is detected
	 * when the object is reallocated.
	 */
	copysize = size < MAX_COPY ? size : MAX_COPY;
	end = (int32_t *)&((caddr_t)addr)[copysize];
	for (lp = (int32_t *)addr; lp < end; lp++)
		*lp = WEIRD_ADDR;
	freep->type = type;
#endif /* DIAGNOSTIC */
#ifdef KMEMSTATS
	kup->ku_freecnt++;
	if (kup->ku_freecnt >= kbp->kb_elmpercl) {
		if (kup->ku_freecnt > kbp->kb_elmpercl)
			panic("free: multiple frees");
		else if (kbp->kb_totalfree > kbp->kb_highwat)
			kbp->kb_couldfree++;
	}
	kbp->kb_totalfree++;
	ksp->ks_memuse -= size;
	if (ksp->ks_memuse + size >= ksp->ks_limit &&
	    ksp->ks_memuse < ksp->ks_limit)
		wakeup((caddr_t)ksp);
	ksp->ks_inuse--;
#endif
	if (kbp->kb_next == NULL)
		kbp->kb_next = addr;
	else
		((struct freelist *)kbp->kb_last)->next = addr;
	freep->next = NULL;
	kbp->kb_last = addr;
	splx(s);
}

/*
 * Change the size of a block of memory.
 */
void *
realloc(curaddr, newsize, type, flags)
	void *curaddr;
	unsigned long newsize;
	int type, flags;
{
	struct kmemusage *kup;
	long cursize;
	void *newaddr;
#ifdef DIAGNOSTIC
	long alloc;
#endif

	/*
	 * Realloc() with a NULL pointer is the same as malloc().
	 */
	if (curaddr == NULL)
		return (malloc(newsize, type, flags));

	/*
	 * Realloc() with zero size is the same as free().
	 */
	if (newsize == 0) {
		free(curaddr, type);
		return (NULL);
	}

#ifdef LOCKDEBUG
	if ((flags & M_NOWAIT) == 0)
		simple_lock_only_held(NULL, "realloc");
#endif

	/*
	 * Find out how large the old allocation was (and do some
	 * sanity checking).
	 */
	kup = btokup(curaddr);
	cursize = 1 << kup->ku_indx;

#ifdef DIAGNOSTIC
	/*
	 * Check for returns of data that do not point to the
	 * beginning of the allocation.
	 */
	if (cursize > PAGE_SIZE)
		alloc = addrmask[BUCKETINDX(PAGE_SIZE)];
	else
		alloc = addrmask[kup->ku_indx];
	if (((u_long)curaddr & alloc) != 0)
		panic("realloc: unaligned addr %p, size %ld, type %s, mask %ld\n",
			curaddr, cursize, memname[type], alloc);
#endif /* DIAGNOSTIC */

	if (cursize > MAXALLOCSAVE)
		cursize = ctob(kup->ku_pagecnt);

	/*
	 * If the existing allocation is already at least as large as
	 * the caller wants, we're done.
	 */
	if (newsize <= cursize)
		return (curaddr);

	/*
	 * Can't satisfy the allocation with the existing block.
	 * Allocate a new one and copy the data.
	 */
	newaddr = malloc(newsize, type, flags);
	if (__predict_false(newaddr == NULL)) {
		/*
		 * Malloc() failed, because flags included M_NOWAIT.
		 * Return NULL to indicate that failure.  The old
		 * pointer is still valid.
		 */
		return NULL;
	}
	memcpy(newaddr, curaddr, cursize);

	/*
	 * We were successful: free the old allocation and return
	 * the new one.
	 */
	free(curaddr, type);
	return (newaddr);
}

/*
 * Compute the number of pages that kmem_map will map, that is,
 * the size of the kernel malloc arena.
 */
void
kmeminit_nkmempages()
{
	int npages;

	if (nkmempages != 0) {
		/*
		 * It's already been set (by us being here before, or
		 * by patching or kernel config options), bail out now.
		 */
		return;
	}

	/*
	 * We use the following (simple) formula:
	 *
	 *	- Starting point is physical memory / 4.
	 *
	 *	- Clamp it down to NKMEMPAGES_MAX.
	 *
	 *	- Round it up to NKMEMPAGES_MIN.
	 */
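	/*
	 * Illustrative example (assuming 4 KB pages): with 128 MB of RAM,
	 * physmem is 32768 pages, so the starting point is 8192 pages
	 * (32 MB), which is then clamped to the [NKMEMPAGES_MIN,
	 * NKMEMPAGES_MAX] range below.
	 */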
	npages = physmem / 4;

	if (npages > NKMEMPAGES_MAX)
		npages = NKMEMPAGES_MAX;

	if (npages < NKMEMPAGES_MIN)
		npages = NKMEMPAGES_MIN;

	nkmempages = npages;
}

/*
 * Initialize the kernel memory allocator
 */
void
kmeminit()
{
#ifdef KMEMSTATS
	long indx;
#endif

#if	((MAXALLOCSAVE & (MAXALLOCSAVE - 1)) != 0)
		ERROR!_kmeminit:_MAXALLOCSAVE_not_power_of_2
#endif
#if	(MAXALLOCSAVE > MINALLOCSIZE * 32768)
		ERROR!_kmeminit:_MAXALLOCSAVE_too_big
#endif
#if	(MAXALLOCSAVE < NBPG)
		ERROR!_kmeminit:_MAXALLOCSAVE_too_small
#endif

	if (sizeof(struct freelist) > (1 << MINBUCKET))
		panic("minbucket too small/struct freelist too big");

	/*
	 * Compute the number of kmem_map pages, if we have not
	 * done so already.
	 */
	kmeminit_nkmempages();

	kmemusage = (struct kmemusage *) uvm_km_zalloc(kernel_map,
		(vsize_t)(nkmempages * sizeof(struct kmemusage)));
	kmem_map = uvm_km_suballoc(kernel_map, (vaddr_t *)&kmembase,
		(vaddr_t *)&kmemlimit, (vsize_t)(nkmempages << PAGE_SHIFT),
			VM_MAP_INTRSAFE, FALSE, &kmem_map_store.vmi_map);
#ifdef KMEMSTATS
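	/*
	 * For each bucket, kb_elmpercl is the number of elements obtained
	 * per cluster (a single element for buckets of a page or more),
	 * and kb_highwat is the free-element level above which clusters
	 * could, in principle, be given back (see kb_couldfree in free()).
	 */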
	for (indx = 0; indx < MINBUCKET + 16; indx++) {
		if (1 << indx >= PAGE_SIZE)
			bucket[indx].kb_elmpercl = 1;
		else
			bucket[indx].kb_elmpercl = PAGE_SIZE / (1 << indx);
		bucket[indx].kb_highwat = 5 * bucket[indx].kb_elmpercl;
	}
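	/*
	 * Each malloc type may use at most 60% of the kmem_map arena;
	 * malloc() sleeps (or fails, with M_NOWAIT) once a type reaches
	 * its ks_limit.
	 */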
	for (indx = 0; indx < M_LAST; indx++)
		kmemstats[indx].ks_limit = (nkmempages << PAGE_SHIFT) * 6 / 10;
#endif
}

#ifdef DDB
#include <ddb/db_output.h>

/*
 * Dump kmem statistics from ddb.
 *
 * usage: call dump_kmemstats
 */
void	dump_kmemstats __P((void));

void
dump_kmemstats()
{
#ifdef KMEMSTATS
	const char *name;
	int i;

	for (i = 0; i < M_LAST; i++) {
		name = memname[i] ? memname[i] : "";

		db_printf("%2d %s%.*s %ld\n", i, name,
		    (int)(20 - strlen(name)), "                    ",
		    kmemstats[i].ks_memuse);
	}
#else
	db_printf("Kmem stats are not being collected.\n");
#endif /* KMEMSTATS */
}
#endif /* DDB */