/*	$OpenBSD: kern_malloc.c,v 1.47 2002/02/12 17:19:41 provos Exp $	*/
/*	$NetBSD: kern_malloc.c,v 1.15.4.2 1996/06/13 17:10:56 cgd Exp $	*/

/*
 * Copyright (c) 1987, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_malloc.c	8.3 (Berkeley) 1/4/94
 */

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/systm.h>
#include <sys/sysctl.h>

#include <uvm/uvm_extern.h>

static struct vm_map_intrsafe kmem_map_store;
struct vm_map *kmem_map = NULL;

#ifdef NKMEMCLUSTERS
#error NKMEMCLUSTERS is obsolete; remove it from your kernel config file and use NKMEMPAGES instead or let the kernel auto-size
#endif

/*
 * Default number of pages in kmem_map.  We attempt to calculate this
 * at run-time, but allow it to be either patched or set in the kernel
 * config file.
 */
#ifndef NKMEMPAGES
#define	NKMEMPAGES	0
#endif
int	nkmempages = NKMEMPAGES;

/*
 * Defaults for lower- and upper-bounds for the kmem_map page count.
 * Can be overridden by kernel config options.
 */
#ifndef	NKMEMPAGES_MIN
#define	NKMEMPAGES_MIN	NKMEMPAGES_MIN_DEFAULT
#endif

#ifndef NKMEMPAGES_MAX
#define	NKMEMPAGES_MAX	NKMEMPAGES_MAX_DEFAULT
#endif

struct kmembuckets bucket[MINBUCKET + 16];
struct kmemstats kmemstats[M_LAST];
struct kmemusage *kmemusage;
char *kmembase, *kmemlimit;
char buckstring[16 * sizeof("123456,")];
int buckstring_init = 0;
#if defined(KMEMSTATS) || defined(DIAGNOSTIC) || defined(FFS_SOFTUPDATES)
char *memname[] = INITKMEMNAMES;
char *memall = NULL;
extern struct lock sysctl_kmemlock;
#endif

#ifdef DIAGNOSTIC
/*
 * This structure provides a set of masks to catch unaligned frees.
 */
long addrmask[] = { 0,
	0x00000001, 0x00000003, 0x00000007, 0x0000000f,
	0x0000001f, 0x0000003f, 0x0000007f, 0x000000ff,
	0x000001ff, 0x000003ff, 0x000007ff, 0x00000fff,
	0x00001fff, 0x00003fff, 0x00007fff, 0x0000ffff,
};

/*
 * The WEIRD_ADDR is used as known text to copy into free objects so
 * that modifications after frees can be detected.
 */
#define WEIRD_ADDR	((unsigned) 0xdeadbeef)
#define MAX_COPY	32
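
/*
 * For example: free() fills the first MAX_COPY bytes of an object with
 * WEIRD_ADDR words, and malloc() verifies the pattern is still intact
 * before handing the object out again; any other value there means the
 * object was written to after it was freed.
 */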

/*
 * Normally the freelist structure is used only to hold the list pointer
 * for free objects.  However, when running with diagnostics, the first
 * 8 bytes of the structure are unused except for diagnostic information,
 * and the free list pointer is at offset 8 in the structure.  Since the
 * first 8 bytes are the portion of the structure most often modified, this
 * helps to detect memory reuse problems and avoid free list corruption.
 */
struct freelist {
	int32_t	spare0;
	int16_t	type;
	int16_t	spare1;
	caddr_t	next;
};
#else /* !DIAGNOSTIC */
struct freelist {
	caddr_t	next;
};
#endif /* DIAGNOSTIC */

/*
 * Allocate a block of memory.
 */
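/*
 * Example usage (a sketch, not from this file; M_TEMP is one of the
 * allocator types defined in <sys/malloc.h>):
 *
 *	struct foo *fp;
 *
 *	fp = malloc(sizeof(*fp), M_TEMP, M_WAITOK);
 *	...
 *	free(fp, M_TEMP);
 */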
void *
malloc(size, type, flags)
	unsigned long size;
	int type, flags;
{
	register struct kmembuckets *kbp;
	register struct kmemusage *kup;
	register struct freelist *freep;
	long indx, npg, allocsize;
	int s;
	caddr_t va, cp, savedlist;
#ifdef DIAGNOSTIC
	int32_t *end, *lp;
	int copysize;
	char *savedtype;
#endif
#ifdef KMEMSTATS
	register struct kmemstats *ksp = &kmemstats[type];

	if (((unsigned long)type) >= M_LAST)
		panic("malloc: bogus type");
#endif

#ifdef MALLOC_DEBUG
	if (debug_malloc(size, type, flags, (void **)&va))
		return ((void *) va);
#endif

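	/*
	 * BUCKETINDX(size) picks the smallest power-of-two bucket that
	 * holds the request; e.g. with the usual MINBUCKET of 4, a
	 * 100-byte request lands in the 128-byte bucket.
	 */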
	indx = BUCKETINDX(size);
	kbp = &bucket[indx];
	s = splimp();
#ifdef KMEMSTATS
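	/*
	 * If this type is over its limit, fail immediately for M_NOWAIT
	 * callers; otherwise sleep until a free() of the same type
	 * drops the usage back under the limit (see the wakeup() calls
	 * in free()).
	 */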
	while (ksp->ks_memuse >= ksp->ks_limit) {
		if (flags & M_NOWAIT) {
			splx(s);
			return ((void *) NULL);
		}
		if (ksp->ks_limblocks < 65535)
			ksp->ks_limblocks++;
		tsleep((caddr_t)ksp, PSWP+2, memname[type], 0);
	}
	ksp->ks_size |= 1 << indx;
#endif
#ifdef DIAGNOSTIC
	copysize = 1 << indx < MAX_COPY ? 1 << indx : MAX_COPY;
#endif
	if (kbp->kb_next == NULL) {
		kbp->kb_last = NULL;
		if (size > MAXALLOCSAVE)
			allocsize = round_page(size);
		else
			allocsize = 1 << indx;
		npg = btoc(allocsize);
		va = (caddr_t) uvm_km_kmemalloc(kmem_map, uvmexp.kmem_object,
				(vsize_t)ctob(npg),
				(flags & M_NOWAIT) ? UVM_KMF_NOWAIT : 0);
		if (va == NULL) {
			/*
			 * uvm_km_kmemalloc() can return NULL, even if it
			 * can wait, if there is no map space available,
			 * because it can't fix that problem.  Neither can
			 * we, right now.  (We should release pages which
			 * are completely free and which are in buckets
			 * with too many free elements.)
			 */
			if ((flags & M_NOWAIT) == 0)
				panic("malloc: out of space in kmem_map");
			splx(s);
			return ((void *) NULL);
		}
#ifdef KMEMSTATS
		kbp->kb_total += kbp->kb_elmpercl;
#endif
		kup = btokup(va);
		kup->ku_indx = indx;
		if (allocsize > MAXALLOCSAVE) {
			if (npg > 65535)
				panic("malloc: allocation too large");
			kup->ku_pagecnt = npg;
#ifdef KMEMSTATS
			ksp->ks_memuse += allocsize;
#endif
			goto out;
		}
#ifdef KMEMSTATS
		kup->ku_freecnt = kbp->kb_elmpercl;
		kbp->kb_totalfree += kbp->kb_elmpercl;
#endif
		/*
		 * Just in case we blocked while allocating memory,
		 * and someone else also allocated memory for this
		 * bucket, don't assume the list is still empty.
		 */
		savedlist = kbp->kb_next;
		kbp->kb_next = cp = va + (npg * PAGE_SIZE) - allocsize;
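		/*
		 * Carve the new cluster into allocsize-byte pieces,
		 * working down from the top; each piece is linked to
		 * the one below it, and the loop stops once cp reaches
		 * the base of the cluster.
		 */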
		for (;;) {
			freep = (struct freelist *)cp;
#ifdef DIAGNOSTIC
			/*
			 * Copy in known text to detect modification
			 * after freeing.
			 */
			end = (int32_t *)&cp[copysize];
			for (lp = (int32_t *)cp; lp < end; lp++)
				*lp = WEIRD_ADDR;
			freep->type = M_FREE;
#endif /* DIAGNOSTIC */
			if (cp <= va)
				break;
			cp -= allocsize;
			freep->next = cp;
		}
		freep->next = savedlist;
		if (kbp->kb_last == NULL)
			kbp->kb_last = (caddr_t)freep;
	}
	va = kbp->kb_next;
	kbp->kb_next = ((struct freelist *)va)->next;
#ifdef DIAGNOSTIC
	freep = (struct freelist *)va;
	savedtype = (unsigned)freep->type < M_LAST ?
		memname[freep->type] : "???";
	if (kbp->kb_next) {
		int rv;
		vaddr_t addr = (vaddr_t)kbp->kb_next;

		vm_map_lock(kmem_map);
		rv = uvm_map_checkprot(kmem_map, addr,
		    addr + sizeof(struct freelist), VM_PROT_WRITE);
		vm_map_unlock(kmem_map);

		if (!rv) {
			printf("%s %d of object %p size 0x%lx %s %s (invalid addr %p)\n",
			    "Data modified on freelist: word",
			    (int32_t *)&kbp->kb_next - (int32_t *)kbp, va, size,
			    "previous type", savedtype, kbp->kb_next);
			kbp->kb_next = NULL;
		}
	}

	/* Fill the fields that we've used with WEIRD_ADDR */
#if BYTE_ORDER == BIG_ENDIAN
	freep->type = WEIRD_ADDR >> 16;
#endif
#if BYTE_ORDER == LITTLE_ENDIAN
	freep->type = (short)WEIRD_ADDR;
#endif
	end = (int32_t *)&freep->next +
	    (sizeof(freep->next) / sizeof(int32_t));
	for (lp = (int32_t *)&freep->next; lp < end; lp++)
		*lp = WEIRD_ADDR;

	/* and check that the data hasn't been modified. */
	end = (int32_t *)&va[copysize];
	for (lp = (int32_t *)va; lp < end; lp++) {
		if (*lp == WEIRD_ADDR)
			continue;
		printf("%s %d of object %p size 0x%lx %s %s (0x%x != 0x%x)\n",
			"Data modified on freelist: word", lp - (int32_t *)va,
			va, size, "previous type", savedtype, *lp, WEIRD_ADDR);
		break;
	}

	freep->spare0 = 0;
#endif /* DIAGNOSTIC */
#ifdef KMEMSTATS
	kup = btokup(va);
	if (kup->ku_indx != indx)
		panic("malloc: wrong bucket");
	if (kup->ku_freecnt == 0)
		panic("malloc: lost data");
	kup->ku_freecnt--;
	kbp->kb_totalfree--;
	ksp->ks_memuse += 1 << indx;
out:
	kbp->kb_calls++;
	ksp->ks_inuse++;
	ksp->ks_calls++;
	if (ksp->ks_memuse > ksp->ks_maxused)
		ksp->ks_maxused = ksp->ks_memuse;
#else
out:
#endif
	splx(s);
	return ((void *) va);
}

/*
 * Free a block of memory allocated by malloc.
 */
void
free(addr, type)
	void *addr;
	int type;
{
	register struct kmembuckets *kbp;
	register struct kmemusage *kup;
	register struct freelist *freep;
	long size;
	int s;
#ifdef DIAGNOSTIC
	caddr_t cp;
	int32_t *end, *lp;
	long alloc, copysize;
#endif
#ifdef KMEMSTATS
	register struct kmemstats *ksp = &kmemstats[type];
#endif

#ifdef MALLOC_DEBUG
	if (debug_free(addr, type))
		return;
#endif

#ifdef DIAGNOSTIC
	if (addr < (void *)kmembase || addr >= (void *)kmemlimit)
		panic("free: non-malloced addr %p type %s", addr,
		    memname[type]);
#endif

	kup = btokup(addr);
	size = 1 << kup->ku_indx;
	kbp = &bucket[kup->ku_indx];
	s = splimp();
#ifdef DIAGNOSTIC
	/*
	 * Check for returns of data that do not point to the
	 * beginning of the allocation.
	 */
	if (size > PAGE_SIZE)
		alloc = addrmask[BUCKETINDX(PAGE_SIZE)];
	else
		alloc = addrmask[kup->ku_indx];
	if (((u_long)addr & alloc) != 0)
		panic("free: unaligned addr %p, size %ld, type %s, mask %ld",
			addr, size, memname[type], alloc);
#endif /* DIAGNOSTIC */
	if (size > MAXALLOCSAVE) {
		uvm_km_free(kmem_map, (vaddr_t)addr, ctob(kup->ku_pagecnt));
#ifdef KMEMSTATS
		size = kup->ku_pagecnt << PGSHIFT;
		ksp->ks_memuse -= size;
		kup->ku_indx = 0;
		kup->ku_pagecnt = 0;
		if (ksp->ks_memuse + size >= ksp->ks_limit &&
		    ksp->ks_memuse < ksp->ks_limit)
			wakeup((caddr_t)ksp);
		ksp->ks_inuse--;
		kbp->kb_total -= 1;
#endif
		splx(s);
		return;
	}
	freep = (struct freelist *)addr;
#ifdef DIAGNOSTIC
	/*
	 * Check for multiple frees. Use a quick check to see if
	 * it looks free before laboriously searching the freelist.
	 */
	if (freep->spare0 == WEIRD_ADDR) {
		for (cp = kbp->kb_next; cp;
		    cp = ((struct freelist *)cp)->next) {
			if (addr != cp)
				continue;
			printf("multiply freed item %p\n", addr);
			panic("free: duplicated free");
		}
	}
	/*
	 * Copy in known text to detect modification after freeing
	 * and to make it look free. Also, save the type being freed
	 * so we can list the likely culprit if modification is detected
	 * when the object is reallocated.
	 */
	copysize = size < MAX_COPY ? size : MAX_COPY;
	end = (int32_t *)&((caddr_t)addr)[copysize];
	for (lp = (int32_t *)addr; lp < end; lp++)
		*lp = WEIRD_ADDR;
	freep->type = type;
#endif /* DIAGNOSTIC */
#ifdef KMEMSTATS
	kup->ku_freecnt++;
	if (kup->ku_freecnt >= kbp->kb_elmpercl) {
		if (kup->ku_freecnt > kbp->kb_elmpercl)
			panic("free: multiple frees");
		else if (kbp->kb_totalfree > kbp->kb_highwat)
			kbp->kb_couldfree++;
	}
	kbp->kb_totalfree++;
	ksp->ks_memuse -= size;
	if (ksp->ks_memuse + size >= ksp->ks_limit &&
	    ksp->ks_memuse < ksp->ks_limit)
		wakeup((caddr_t)ksp);
	ksp->ks_inuse--;
#endif
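	/*
	 * Queue the object at the tail of the bucket's free list, so
	 * free objects are reused in FIFO order; the longer a freed
	 * object sits on the list, the better the chance that the
	 * diagnostics above catch a write made after the free.
	 */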
	if (kbp->kb_next == NULL)
		kbp->kb_next = addr;
	else
		((struct freelist *)kbp->kb_last)->next = addr;
	freep->next = NULL;
	kbp->kb_last = addr;
	splx(s);
}

/*
 * Compute the number of pages that kmem_map will map, that is,
 * the size of the kernel malloc arena.
 */
void
kmeminit_nkmempages()
{
	int npages;

	if (nkmempages != 0) {
		/*
		 * It's already been set (by us being here before, or
		 * by patching or kernel config options), so bail out now.
		 */
		return;
	}

	/*
	 * We use the following (simple) formula:
	 *
	 *	- Start with physical memory / 4.
	 *
	 *	- Clamp it down to NKMEMPAGES_MAX.
	 *
	 *	- Clamp it up to NKMEMPAGES_MIN.
	 */
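	/*
	 * Worked example (an illustration, not from the source): with
	 * 4 KB pages and 128 MB of RAM, physmem is 32768 pages, so we
	 * start with 8192 pages (32 MB of kmem_map) and then apply the
	 * NKMEMPAGES_MAX and NKMEMPAGES_MIN bounds.
	 */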
	npages = physmem / 4;

	if (npages > NKMEMPAGES_MAX)
		npages = NKMEMPAGES_MAX;

	if (npages < NKMEMPAGES_MIN)
		npages = NKMEMPAGES_MIN;

	nkmempages = npages;
}

/*
 * Initialize the kernel memory allocator.
 */
void
kmeminit()
{
#ifdef KMEMSTATS
	long indx;
#endif

#ifdef DIAGNOSTIC
	if (sizeof(struct freelist) > (1 << MINBUCKET))
		panic("kmeminit: minbucket too small/struct freelist too big");
#endif

	/*
	 * Compute the number of kmem_map pages, if we have not
	 * done so already.
	 */
	kmeminit_nkmempages();

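	/*
	 * Carve kmem_map out of kernel_map as an interrupt-safe submap.
	 * kmembase and kmemlimit record its bounds; free() uses them to
	 * reject addresses that never came from malloc().
	 */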
	kmem_map = uvm_km_suballoc(kernel_map, (vaddr_t *)&kmembase,
	    (vaddr_t *)&kmemlimit, (vsize_t)(nkmempages * PAGE_SIZE),
	    VM_MAP_INTRSAFE, FALSE, &kmem_map_store.vmi_map);
	kmemusage = (struct kmemusage *) uvm_km_zalloc(kernel_map,
	    (vsize_t)(nkmempages * sizeof(struct kmemusage)));
#ifdef KMEMSTATS
	for (indx = 0; indx < MINBUCKET + 16; indx++) {
		if (1 << indx >= PAGE_SIZE)
			bucket[indx].kb_elmpercl = 1;
		else
			bucket[indx].kb_elmpercl = PAGE_SIZE / (1 << indx);
		bucket[indx].kb_highwat = 5 * bucket[indx].kb_elmpercl;
	}
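	/*
	 * Let each type use at most 60% of the arena, so that no single
	 * type can exhaust kmem_map by itself.
	 */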
	for (indx = 0; indx < M_LAST; indx++)
		kmemstats[indx].ks_limit = nkmempages * PAGE_SIZE * 6 / 10;
#endif
#ifdef MALLOC_DEBUG
	debug_malloc_init();
#endif
}

/*
 * Return kernel malloc statistics information.
 */
int
sysctl_malloc(name, namelen, oldp, oldlenp, newp, newlen, p)
	int *name;
	u_int namelen;
	void *oldp;
	size_t *oldlenp;
	void *newp;
	size_t newlen;
	struct proc *p;
{
	struct kmembuckets kb;
	int i, siz;

	if (namelen != 2 && name[0] != KERN_MALLOC_BUCKETS &&
	    name[0] != KERN_MALLOC_KMEMNAMES)
		return (ENOTDIR);		/* overloaded */

	switch (name[0]) {
	case KERN_MALLOC_BUCKETS:
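		/*
		 * Report the bucket sizes as a comma-separated string,
		 * e.g. "16,32,64,..." when MINBUCKET is 4.
		 */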
		/* Initialize the first time */
		if (buckstring_init == 0) {
			buckstring_init = 1;
			bzero(buckstring, sizeof(buckstring));
			for (siz = 0, i = MINBUCKET; i < MINBUCKET + 16; i++)
				siz += sprintf(buckstring + siz,
				    "%d,", (u_int)(1<<i));
			/* Remove trailing comma */
			if (siz)
				buckstring[siz - 1] = '\0';
		}
		return (sysctl_rdstring(oldp, oldlenp, newp, buckstring));

	case KERN_MALLOC_BUCKET:
		bcopy(&bucket[BUCKETINDX(name[1])], &kb, sizeof(kb));
		kb.kb_next = kb.kb_last = 0;
		return (sysctl_rdstruct(oldp, oldlenp, newp, &kb, sizeof(kb)));
	case KERN_MALLOC_KMEMSTATS:
#ifdef KMEMSTATS
		if ((name[1] < 0) || (name[1] >= M_LAST))
			return (EINVAL);
		return (sysctl_rdstruct(oldp, oldlenp, newp,
		    &kmemstats[name[1]], sizeof(struct kmemstats)));
#else
		return (EOPNOTSUPP);
#endif
	case KERN_MALLOC_KMEMNAMES:
#if defined(KMEMSTATS) || defined(DIAGNOSTIC) || defined(FFS_SOFTUPDATES)
		if (memall == NULL) {
			int totlen;

			i = lockmgr(&sysctl_kmemlock, LK_EXCLUSIVE, NULL, p);
			if (i)
				return (i);

			/* Figure out how large a buffer we need */
			for (totlen = 0, i = 0; i < M_LAST; i++) {
				if (memname[i])
					totlen += strlen(memname[i]);
				totlen++;
			}
			memall = malloc(totlen + M_LAST, M_SYSCTL, M_WAITOK);
			bzero(memall, totlen + M_LAST);
			for (siz = 0, i = 0; i < M_LAST; i++)
				siz += sprintf(memall + siz, "%s,",
				    memname[i] ? memname[i] : "");

			/* Remove trailing comma */
			if (siz)
				memall[siz - 1] = '\0';

			/* Now, convert all spaces to underscores */
			for (i = 0; i < totlen; i++)
				if (memall[i] == ' ')
					memall[i] = '_';
			lockmgr(&sysctl_kmemlock, LK_RELEASE, NULL, p);
		}
		return (sysctl_rdstring(oldp, oldlenp, newp, memall));
#else
		return (EOPNOTSUPP);
#endif
	default:
		return (EOPNOTSUPP);
	}
	/* NOTREACHED */
}

/*
 * Round up a size to how much malloc would actually allocate.
 */
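/*
 * For example: with the usual power-of-two buckets, malloc_roundup(100)
 * returns 128, while any size above MAXALLOCSAVE is rounded up to a
 * whole number of pages.
 */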
size_t
malloc_roundup(size_t sz)
{
	if (sz > MAXALLOCSAVE)
		return round_page(sz);

	return (1 << BUCKETINDX(sz));
}