xref: /netbsd-src/sys/kern/subr_kmem.c (revision 796c32c94f6e154afc9de0f63da35c91bb739b45)
1 /*	$NetBSD: subr_kmem.c,v 1.65 2017/11/09 23:20:12 riastradh Exp $	*/
2 
3 /*-
4  * Copyright (c) 2009-2015 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Andrew Doran and Maxime Villard.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 /*-
33  * Copyright (c)2006 YAMAMOTO Takashi,
34  * All rights reserved.
35  *
36  * Redistribution and use in source and binary forms, with or without
37  * modification, are permitted provided that the following conditions
38  * are met:
39  * 1. Redistributions of source code must retain the above copyright
40  *    notice, this list of conditions and the following disclaimer.
41  * 2. Redistributions in binary form must reproduce the above copyright
42  *    notice, this list of conditions and the following disclaimer in the
43  *    documentation and/or other materials provided with the distribution.
44  *
45  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
46  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
47  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
48  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
49  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
50  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
51  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
52  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
53  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
54  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
55  * SUCH DAMAGE.
56  */
57 
58 /*
59  * Allocator of kernel wired memory. This allocator has some debug features
60  * enabled with "option DIAGNOSTIC" and "option DEBUG".
61  */
62 
63 /*
64  * KMEM_SIZE: detect alloc/free size mismatch bugs.
65  *	Prefix each allocation with a fixed-size, aligned header and record
66  *	the exact user-requested allocation size in it. When freeing, compare
67  *	it with kmem_free's "size" argument.
68  *
69  * KMEM_REDZONE: detect overrun bugs.
70  *	Add a 2-byte pattern (allocate one more memory chunk if needed) at the
71  *	end of each allocated buffer. Check this pattern on kmem_free.
72  *
73  * These options are enabled with "option DIAGNOSTIC".
74  *
75  *  |CHUNK|CHUNK|CHUNK|CHUNK|CHUNK|CHUNK|CHUNK|CHUNK|CHUNK|CHUNK|CHUNK|
76  *  +-----+-----+-----+-----+-----+-----+-----+-----+-----+---+-+--+--+
77  *  |/////|     |     |     |     |     |     |     |     |   |*|**|UU|
78  *  |/HSZ/|     |     |     |     |     |     |     |     |   |*|**|UU|
79  *  |/////|     |     |     |     |     |     |     |     |   |*|**|UU|
80  *  +-----+-----+-----+-----+-----+-----+-----+-----+-----+---+-+--+--+
81  *  |Size |    Buffer usable by the caller (requested size)   |RedZ|Unused\
82  */
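
/*
 * Illustrative walk-through (assuming SIZE_SIZE == 8 and REDZONE_SIZE == 2,
 * as defined below): kmem_alloc(20, KM_SLEEP) rounds the request up to
 * size = 24, so allocsz = 24 + 8 = 32 and the request is served from the
 * "kmem-32" cache. The size header occupies bytes 0-7 and records the
 * requested size (20), the caller gets bytes 8-27, the red zone covers
 * bytes 28-29, and bytes 30-31 are unused padding.
 */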
83 
84 /*
85  * KMEM_POISON: detect modify-after-free bugs.
86  *	Fill freed (in the sense of kmem_free) memory with a garbage pattern.
87  *	Check the pattern on allocation.
88  *
89  * KMEM_GUARD
90  *	A kernel with "option DEBUG" has the "kmem_guard" debugging feature compiled
91  *	in. See the comment below for what kind of bugs it tries to detect. Even
92  *	if compiled in, it's disabled by default because it's very expensive.
93  *	You can enable it on boot by:
94  *		boot -d
95  *		db> w kmem_guard_depth 0t30000
96  *		db> c
97  *
98  *	The default value of kmem_guard_depth is 0, which means disabled.
99  *	It can be changed with the KMEM_GUARD_DEPTH kernel config option.
100  */
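
/*
 * For example (a sketch, assuming KMEM_GUARD_DEPTH is declared as a kernel
 * config option for opt_kmem.h), a config line such as:
 *	options 	KMEM_GUARD_DEPTH=30000
 * would give a DEBUG kernel a non-zero default depth, enabling the guard
 * from boot.
 */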
101 
102 #include <sys/cdefs.h>
103 __KERNEL_RCSID(0, "$NetBSD: subr_kmem.c,v 1.65 2017/11/09 23:20:12 riastradh Exp $");
104 
105 #ifdef _KERNEL_OPT
106 #include "opt_kmem.h"
107 #endif
108 
109 #include <sys/param.h>
110 #include <sys/callback.h>
111 #include <sys/kmem.h>
112 #include <sys/pool.h>
113 #include <sys/debug.h>
114 #include <sys/lockdebug.h>
115 #include <sys/cpu.h>
116 
117 #include <uvm/uvm_extern.h>
118 #include <uvm/uvm_map.h>
119 
120 #include <lib/libkern/libkern.h>
121 
122 struct kmem_cache_info {
123 	size_t		kc_size;
124 	const char *	kc_name;
125 };
126 
127 static const struct kmem_cache_info kmem_cache_sizes[] = {
128 	{  8, "kmem-8" },
129 	{ 16, "kmem-16" },
130 	{ 24, "kmem-24" },
131 	{ 32, "kmem-32" },
132 	{ 40, "kmem-40" },
133 	{ 48, "kmem-48" },
134 	{ 56, "kmem-56" },
135 	{ 64, "kmem-64" },
136 	{ 80, "kmem-80" },
137 	{ 96, "kmem-96" },
138 	{ 112, "kmem-112" },
139 	{ 128, "kmem-128" },
140 	{ 160, "kmem-160" },
141 	{ 192, "kmem-192" },
142 	{ 224, "kmem-224" },
143 	{ 256, "kmem-256" },
144 	{ 320, "kmem-320" },
145 	{ 384, "kmem-384" },
146 	{ 448, "kmem-448" },
147 	{ 512, "kmem-512" },
148 	{ 768, "kmem-768" },
149 	{ 1024, "kmem-1024" },
150 	{ 0, NULL }
151 };
152 
153 static const struct kmem_cache_info kmem_cache_big_sizes[] = {
154 	{ 2048, "kmem-2048" },
155 	{ 4096, "kmem-4096" },
156 	{ 8192, "kmem-8192" },
157 	{ 16384, "kmem-16384" },
158 	{ 0, NULL }
159 };
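
/*
 * Note that kmem_init() creates the big caches only up to PAGE_SIZE; table
 * entries larger than the page size are skipped, and requests beyond the
 * largest cache bypass the pool caches entirely and are handed directly to
 * uvm_km_kmem_alloc().
 */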
160 
161 /*
162  * KMEM_ALIGN is the smallest guaranteed alignment and also the
163  * smallest allocatable quantum.
164  * Every cache size >= CACHE_LINE_SIZE gets CACHE_LINE_SIZE alignment.
165  */
166 #define	KMEM_ALIGN		8
167 #define	KMEM_SHIFT		3
168 #define	KMEM_MAXSIZE		1024
169 #define	KMEM_CACHE_COUNT	(KMEM_MAXSIZE >> KMEM_SHIFT)
170 
171 static pool_cache_t kmem_cache[KMEM_CACHE_COUNT] __cacheline_aligned;
172 static size_t kmem_cache_maxidx __read_mostly;
173 
174 #define	KMEM_BIG_ALIGN		2048
175 #define	KMEM_BIG_SHIFT		11
176 #define	KMEM_BIG_MAXSIZE	16384
177 #define	KMEM_CACHE_BIG_COUNT	(KMEM_BIG_MAXSIZE >> KMEM_BIG_SHIFT)
178 
179 static pool_cache_t kmem_cache_big[KMEM_CACHE_BIG_COUNT] __cacheline_aligned;
180 static size_t kmem_cache_big_maxidx __read_mostly;
181 
182 #if defined(DIAGNOSTIC) && defined(_HARDKERNEL)
183 #define	KMEM_SIZE
184 #define	KMEM_REDZONE
185 #endif /* defined(DIAGNOSTIC) && defined(_HARDKERNEL) */
186 
187 #if defined(DEBUG) && defined(_HARDKERNEL)
188 #define	KMEM_SIZE
189 #define	KMEM_POISON
190 #define	KMEM_GUARD
191 static void *kmem_freecheck;
192 #endif /* defined(DEBUG) && defined(_HARDKERNEL) */
193 
194 #if defined(KMEM_POISON)
195 static int kmem_poison_ctor(void *, void *, int);
196 static void kmem_poison_fill(void *, size_t);
197 static void kmem_poison_check(void *, size_t);
198 #else /* defined(KMEM_POISON) */
199 #define	kmem_poison_fill(p, sz)		/* nothing */
200 #define	kmem_poison_check(p, sz)	/* nothing */
201 #endif /* defined(KMEM_POISON) */
202 
203 #if defined(KMEM_REDZONE)
204 #define	REDZONE_SIZE	2
205 static void kmem_redzone_fill(void *, size_t);
206 static void kmem_redzone_check(void *, size_t);
207 #else /* defined(KMEM_REDZONE) */
208 #define	REDZONE_SIZE	0
209 #define	kmem_redzone_fill(p, sz)		/* nothing */
210 #define	kmem_redzone_check(p, sz)	/* nothing */
211 #endif /* defined(KMEM_REDZONE) */
212 
213 #if defined(KMEM_SIZE)
214 struct kmem_header {
215 	size_t		size;
216 } __aligned(KMEM_ALIGN);
217 #define	SIZE_SIZE	sizeof(struct kmem_header)
218 static void kmem_size_set(void *, size_t);
219 static void kmem_size_check(void *, size_t);
220 #else
221 #define	SIZE_SIZE	0
222 #define	kmem_size_set(p, sz)	/* nothing */
223 #define	kmem_size_check(p, sz)	/* nothing */
224 #endif
225 
226 #if defined(KMEM_GUARD)
227 #ifndef KMEM_GUARD_DEPTH
228 #define KMEM_GUARD_DEPTH 0
229 #endif
230 struct kmem_guard {
231 	u_int		kg_depth;
232 	intptr_t *	kg_fifo;
233 	u_int		kg_rotor;
234 	vmem_t *	kg_vmem;
235 };
236 
237 static bool	kmem_guard_init(struct kmem_guard *, u_int, vmem_t *);
238 static void *kmem_guard_alloc(struct kmem_guard *, size_t, bool);
239 static void kmem_guard_free(struct kmem_guard *, size_t, void *);
240 
241 int kmem_guard_depth = KMEM_GUARD_DEPTH;
242 static bool kmem_guard_enabled;
243 static struct kmem_guard kmem_guard;
244 #endif /* defined(KMEM_GUARD) */
245 
246 CTASSERT(KM_SLEEP == PR_WAITOK);
247 CTASSERT(KM_NOSLEEP == PR_NOWAIT);
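
/*
 * kmem_intr_alloc() hands its km_flag_t argument directly to
 * pool_cache_get(), so the KM_* values must be numerically identical to the
 * corresponding PR_* values, which the compile-time assertions above verify.
 */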
248 
249 /*
250  * kmem_intr_alloc: allocate wired memory.
251  */
252 
253 void *
254 kmem_intr_alloc(size_t requested_size, km_flag_t kmflags)
255 {
256 	size_t allocsz, index;
257 	size_t size;
258 	pool_cache_t pc;
259 	uint8_t *p;
260 
261 	KASSERT(requested_size > 0);
262 
263 	KASSERT((kmflags & KM_SLEEP) || (kmflags & KM_NOSLEEP));
264 	KASSERT(!(kmflags & KM_SLEEP) || !(kmflags & KM_NOSLEEP));
265 
266 #ifdef KMEM_GUARD
267 	if (kmem_guard_enabled) {
268 		return kmem_guard_alloc(&kmem_guard, requested_size,
269 		    (kmflags & KM_SLEEP) != 0);
270 	}
271 #endif
272 	size = kmem_roundup_size(requested_size);
273 	allocsz = size + SIZE_SIZE;
274 
275 #ifdef KMEM_REDZONE
276 	if (size - requested_size < REDZONE_SIZE) {
277 		/* If there isn't enough space in the padding, allocate
278 		 * one more memory chunk for the red zone. */
279 		allocsz += kmem_roundup_size(REDZONE_SIZE);
280 	}
281 #endif
282 
283 	if ((index = ((allocsz - 1) >> KMEM_SHIFT))
284 	    < kmem_cache_maxidx) {
285 		pc = kmem_cache[index];
286 	} else if ((index = ((allocsz - 1) >> KMEM_BIG_SHIFT))
287 	    < kmem_cache_big_maxidx) {
288 		pc = kmem_cache_big[index];
289 	} else {
290 		int ret = uvm_km_kmem_alloc(kmem_va_arena,
291 		    (vsize_t)round_page(size),
292 		    ((kmflags & KM_SLEEP) ? VM_SLEEP : VM_NOSLEEP)
293 		     | VM_INSTANTFIT, (vmem_addr_t *)&p);
294 		if (ret) {
295 			return NULL;
296 		}
297 		FREECHECK_OUT(&kmem_freecheck, p);
298 		return p;
299 	}
300 
301 	p = pool_cache_get(pc, kmflags);
302 
303 	if (__predict_true(p != NULL)) {
304 		kmem_poison_check(p, allocsz);
305 		FREECHECK_OUT(&kmem_freecheck, p);
306 		kmem_size_set(p, requested_size);
307 		kmem_redzone_fill(p, requested_size + SIZE_SIZE);
308 
309 		return p + SIZE_SIZE;
310 	}
311 	return p;
312 }
313 
314 /*
315  * kmem_intr_zalloc: allocate zeroed wired memory.
316  */
317 
318 void *
319 kmem_intr_zalloc(size_t size, km_flag_t kmflags)
320 {
321 	void *p;
322 
323 	p = kmem_intr_alloc(size, kmflags);
324 	if (p != NULL) {
325 		memset(p, 0, size);
326 	}
327 	return p;
328 }
329 
330 /*
331  * kmem_intr_free: free wired memory allocated by kmem_alloc.
332  */
333 
334 void
335 kmem_intr_free(void *p, size_t requested_size)
336 {
337 	size_t allocsz, index;
338 	size_t size;
339 	pool_cache_t pc;
340 
341 	KASSERT(p != NULL);
342 	KASSERT(requested_size > 0);
343 
344 #ifdef KMEM_GUARD
345 	if (kmem_guard_enabled) {
346 		kmem_guard_free(&kmem_guard, requested_size, p);
347 		return;
348 	}
349 #endif
350 
351 	size = kmem_roundup_size(requested_size);
352 	allocsz = size + SIZE_SIZE;
353 
354 #ifdef KMEM_REDZONE
355 	if (size - requested_size < REDZONE_SIZE) {
356 		allocsz += kmem_roundup_size(REDZONE_SIZE);
357 	}
358 #endif
359 
360 	if ((index = ((allocsz - 1) >> KMEM_SHIFT))
361 	    < kmem_cache_maxidx) {
362 		pc = kmem_cache[index];
363 	} else if ((index = ((allocsz - 1) >> KMEM_BIG_SHIFT))
364 	    < kmem_cache_big_maxidx) {
365 		pc = kmem_cache_big[index];
366 	} else {
367 		FREECHECK_IN(&kmem_freecheck, p);
368 		uvm_km_kmem_free(kmem_va_arena, (vaddr_t)p,
369 		    round_page(size));
370 		return;
371 	}
372 
373 	p = (uint8_t *)p - SIZE_SIZE;
374 	kmem_size_check(p, requested_size);
375 	kmem_redzone_check(p, requested_size + SIZE_SIZE);
376 	FREECHECK_IN(&kmem_freecheck, p);
377 	LOCKDEBUG_MEM_CHECK(p, size);
378 	kmem_poison_fill(p, allocsz);
379 
380 	pool_cache_put(pc, p);
381 }
382 
383 /* ---- kmem API */
384 
385 /*
386  * kmem_alloc: allocate wired memory.
387  * => must not be called from interrupt context.
388  */
389 
390 void *
391 kmem_alloc(size_t size, km_flag_t kmflags)
392 {
393 	void *v;
394 
395 	KASSERTMSG((!cpu_intr_p() && !cpu_softintr_p()),
396 	    "kmem(9) should not be used from interrupt context");
397 	v = kmem_intr_alloc(size, kmflags);
398 	KASSERT(v || (kmflags & KM_NOSLEEP) != 0);
399 	return v;
400 }
401 
402 /*
403  * kmem_zalloc: allocate zeroed wired memory.
404  * => must not be called from interrupt context.
405  */
406 
407 void *
408 kmem_zalloc(size_t size, km_flag_t kmflags)
409 {
410 	void *v;
411 
412 	KASSERTMSG((!cpu_intr_p() && !cpu_softintr_p()),
413 	    "kmem(9) should not be used from interrupt context");
414 	v = kmem_intr_zalloc(size, kmflags);
415 	KASSERT(v || (kmflags & KM_NOSLEEP) != 0);
416 	return v;
417 }
418 
419 /*
420  * kmem_free: free wired memory allocated by kmem_alloc.
421  * => must not be called from interrupt context.
422  */
423 
424 void
425 kmem_free(void *p, size_t size)
426 {
427 	KASSERT(!cpu_intr_p());
428 	KASSERT(!cpu_softintr_p());
429 	kmem_intr_free(p, size);
430 }
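
/*
 * Example usage (sketch). Unlike free(3), kmem_free() must be given the
 * same size that was passed to the matching allocation:
 *
 *	struct foo *f;
 *
 *	f = kmem_zalloc(sizeof(*f), KM_SLEEP);
 *	...
 *	kmem_free(f, sizeof(*f));
 */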
431 
432 static size_t
433 kmem_create_caches(const struct kmem_cache_info *array,
434     pool_cache_t alloc_table[], size_t maxsize, int shift, int ipl)
435 {
436 	size_t maxidx = 0;
437 	size_t table_unit = (1 << shift);
438 	size_t size = table_unit;
439 	int i;
440 
441 	for (i = 0; array[i].kc_size != 0 ; i++) {
442 		const char *name = array[i].kc_name;
443 		size_t cache_size = array[i].kc_size;
444 		struct pool_allocator *pa;
445 		int flags = PR_NOALIGN;
446 		pool_cache_t pc;
447 		size_t align;
448 
449 		if ((cache_size & (CACHE_LINE_SIZE - 1)) == 0)
450 			align = CACHE_LINE_SIZE;
451 		else if ((cache_size & (PAGE_SIZE - 1)) == 0)
452 			align = PAGE_SIZE;
453 		else
454 			align = KMEM_ALIGN;
455 
456 		if (cache_size < CACHE_LINE_SIZE)
457 			flags |= PR_NOTOUCH;
458 
459 		/* check if we reached the requested size */
460 		if (cache_size > maxsize || cache_size > PAGE_SIZE) {
461 			break;
462 		}
463 		if ((cache_size >> shift) > maxidx) {
464 			maxidx = cache_size >> shift;
465 		}
466 
471 		pa = &pool_allocator_kmem;
472 #if defined(KMEM_POISON)
473 		pc = pool_cache_init(cache_size, align, 0, flags,
474 		    name, pa, ipl, kmem_poison_ctor,
475 		    NULL, (void *)cache_size);
476 #else /* defined(KMEM_POISON) */
477 		pc = pool_cache_init(cache_size, align, 0, flags,
478 		    name, pa, ipl, NULL, NULL, NULL);
479 #endif /* defined(KMEM_POISON) */
480 
481 		while (size <= cache_size) {
482 			alloc_table[(size - 1) >> shift] = pc;
483 			size += table_unit;
484 		}
485 	}
486 	return maxidx;
487 }
488 
489 void
490 kmem_init(void)
491 {
492 #ifdef KMEM_GUARD
493 	kmem_guard_enabled = kmem_guard_init(&kmem_guard, kmem_guard_depth,
494 	    kmem_va_arena);
495 #endif
496 	kmem_cache_maxidx = kmem_create_caches(kmem_cache_sizes,
497 	    kmem_cache, KMEM_MAXSIZE, KMEM_SHIFT, IPL_VM);
498 	kmem_cache_big_maxidx = kmem_create_caches(kmem_cache_big_sizes,
499 	    kmem_cache_big, PAGE_SIZE, KMEM_BIG_SHIFT, IPL_VM);
500 }
501 
502 size_t
503 kmem_roundup_size(size_t size)
504 {
505 	return (size + (KMEM_ALIGN - 1)) & ~(KMEM_ALIGN - 1);
506 }
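
/*
 * For example, with KMEM_ALIGN == 8: sizes 1-8 round up to 8, 13 rounds up
 * to 16, and multiples of 8 are returned unchanged.
 */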
507 
508 /*
509  * Used to dynamically allocate a string with kmem(9), formatted according to fmt.
510  */
511 char *
512 kmem_asprintf(const char *fmt, ...)
513 {
514 	int size __diagused, len;
515 	va_list va;
516 	char *str;
517 
518 	va_start(va, fmt);
519 	len = vsnprintf(NULL, 0, fmt, va);
520 	va_end(va);
521 
522 	str = kmem_alloc(len + 1, KM_SLEEP);
523 
524 	va_start(va, fmt);
525 	size = vsnprintf(str, len + 1, fmt, va);
526 	va_end(va);
527 
528 	KASSERT(size == len);
529 
530 	return str;
531 }
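
/*
 * Example usage (sketch). The result is NUL-terminated and is meant to be
 * released with kmem_strfree() below (or kmem_free() with strlen(str) + 1):
 *
 *	char *name;
 *
 *	name = kmem_asprintf("%s%d", "queue", 3);
 *	...
 *	kmem_strfree(name);
 */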
532 
533 char *
534 kmem_strdupsize(const char *str, size_t *lenp, km_flag_t flags)
535 {
536 	size_t len = strlen(str) + 1;
537 	char *ptr = kmem_alloc(len, flags);
538 	if (ptr == NULL)
539 		return NULL;
540 
541 	if (lenp)
542 		*lenp = len;
543 	memcpy(ptr, str, len);
544 	return ptr;
545 }
546 
547 void
548 kmem_strfree(char *str)
549 {
550 	if (str == NULL)
551 		return;
552 
553 	kmem_free(str, strlen(str) + 1);
554 }
555 
556 /* ------------------ DEBUG / DIAGNOSTIC ------------------ */
557 
558 #if defined(KMEM_POISON) || defined(KMEM_REDZONE)
559 #if defined(_LP64)
560 #define PRIME 0x9e37fffffffc0000UL
561 #else /* defined(_LP64) */
562 #define PRIME 0x9e3779b1
563 #endif /* defined(_LP64) */
564 
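/*
 * The poison/red zone pattern for a byte is derived from that byte's own
 * address: the address is multiplied by a large prime and the top byte of
 * the product is taken, so nearby bytes generally get different, yet
 * reproducible, values.
 */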
565 static inline uint8_t
566 kmem_pattern_generate(const void *p)
567 {
568 	return (uint8_t)(((uintptr_t)p) * PRIME
569 	   >> ((sizeof(uintptr_t) - sizeof(uint8_t))) * CHAR_BIT);
570 }
571 #endif /* defined(KMEM_POISON) || defined(KMEM_REDZONE) */
572 
573 #if defined(KMEM_POISON)
574 static int
575 kmem_poison_ctor(void *arg, void *obj, int flag)
576 {
577 	size_t sz = (size_t)arg;
578 
579 	kmem_poison_fill(obj, sz);
580 
581 	return 0;
582 }
583 
584 static void
585 kmem_poison_fill(void *p, size_t sz)
586 {
587 	uint8_t *cp;
588 	const uint8_t *ep;
589 
590 	cp = p;
591 	ep = cp + sz;
592 	while (cp < ep) {
593 		*cp = kmem_pattern_generate(cp);
594 		cp++;
595 	}
596 }
597 
598 static void
599 kmem_poison_check(void *p, size_t sz)
600 {
601 	uint8_t *cp;
602 	const uint8_t *ep;
603 
604 	cp = p;
605 	ep = cp + sz;
606 	while (cp < ep) {
607 		const uint8_t expected = kmem_pattern_generate(cp);
608 
609 		if (*cp != expected) {
610 			panic("%s: %p: 0x%02x != 0x%02x\n",
611 			   __func__, cp, *cp, expected);
612 		}
613 		cp++;
614 	}
615 }
616 #endif /* defined(KMEM_POISON) */
617 
618 #if defined(KMEM_SIZE)
619 static void
620 kmem_size_set(void *p, size_t sz)
621 {
622 	struct kmem_header *hd;
623 	hd = (struct kmem_header *)p;
624 	hd->size = sz;
625 }
626 
627 static void
628 kmem_size_check(void *p, size_t sz)
629 {
630 	struct kmem_header *hd;
631 	size_t hsz;
632 
633 	hd = (struct kmem_header *)p;
634 	hsz = hd->size;
635 
636 	if (hsz != sz) {
637 		panic("kmem_free(%p, %zu) != allocated size %zu",
638 		    (const uint8_t *)p + SIZE_SIZE, sz, hsz);
639 	}
640 }
641 #endif /* defined(KMEM_SIZE) */
642 
643 #if defined(KMEM_REDZONE)
644 #define STATIC_BYTE	0xFE
645 CTASSERT(REDZONE_SIZE > 1);
646 static void
647 kmem_redzone_fill(void *p, size_t sz)
648 {
649 	uint8_t *cp, pat;
650 	const uint8_t *ep;
651 
652 	cp = (uint8_t *)p + sz;
653 	ep = cp + REDZONE_SIZE;
654 
655 	/*
656 	 * We really don't want the first byte of the red zone to be '\0';
657 	 * an off-by-one in a string may not be properly detected.
658 	 */
659 	pat = kmem_pattern_generate(cp);
660 	*cp = (pat == '\0') ? STATIC_BYTE: pat;
661 	cp++;
662 
663 	while (cp < ep) {
664 		*cp = kmem_pattern_generate(cp);
665 		cp++;
666 	}
667 }
668 
669 static void
670 kmem_redzone_check(void *p, size_t sz)
671 {
672 	uint8_t *cp, pat, expected;
673 	const uint8_t *ep;
674 
675 	cp = (uint8_t *)p + sz;
676 	ep = cp + REDZONE_SIZE;
677 
678 	pat = kmem_pattern_generate(cp);
679 	expected = (pat == '\0') ? STATIC_BYTE: pat;
680 	if (expected != *cp) {
681 		panic("%s: %p: 0x%02x != 0x%02x\n",
682 		   __func__, cp, *cp, expected);
683 	}
684 	cp++;
685 
686 	while (cp < ep) {
687 		expected = kmem_pattern_generate(cp);
688 		if (*cp != expected) {
689 			panic("%s: %p: 0x%02x != 0x%02x\n",
690 			   __func__, cp, *cp, expected);
691 		}
692 		cp++;
693 	}
694 }
695 #endif /* defined(KMEM_REDZONE) */
696 
697 
698 #if defined(KMEM_GUARD)
699 /*
700  * The ultimate memory allocator for debugging, baby.  It tries to catch:
701  *
702  * 1. Overflow, in realtime. A guard page sits immediately after the
703  *    requested area; a read/write overflow therefore triggers a page
704  *    fault.
705  * 2. Invalid pointer/size passed, at free. A kmem_header structure sits
706  *    just before the requested area, and holds the allocated size. Any
707  *    difference with what is given at free triggers a panic.
708  * 3. Underflow, at free. If an underflow occurs, the kmem header will be
709  *    modified, and 2. will trigger a panic.
710  * 4. Use-after-free. When freeing, the memory is unmapped, and depending
711  *    on the value of kmem_guard_depth, the kernel will more or less delay
712  *    the recycling of that memory. This means that any subsequent read/write
713  *    access to the memory will trigger a page fault, provided it has not been
714  *    recycled yet.
715  */
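
/*
 * Rough layout of a guarded allocation (sketch):
 *
 *	| mapped pages ........ [size header][object] | unmapped guard page |
 *	^ start of the vmem allocation                  ^ faults on overflow
 *
 * The object is pushed towards the end of the mapped area so that the
 * unmapped page sits immediately after it.
 */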
716 
717 #include <sys/atomic.h>
718 #include <uvm/uvm.h>
719 
720 static bool
721 kmem_guard_init(struct kmem_guard *kg, u_int depth, vmem_t *vm)
722 {
723 	vaddr_t va;
724 
725 	/* If not enabled, we have nothing to do. */
726 	if (depth == 0) {
727 		return false;
728 	}
729 	depth = roundup(depth, PAGE_SIZE / sizeof(void *));
730 	KASSERT(depth != 0);
731 
732 	/*
733 	 * Allocate fifo.
734 	 */
735 	va = uvm_km_alloc(kernel_map, depth * sizeof(void *), PAGE_SIZE,
736 	    UVM_KMF_WIRED | UVM_KMF_ZERO);
737 	if (va == 0) {
738 		return false;
739 	}
740 
741 	/*
742 	 * Init object.
743 	 */
744 	kg->kg_vmem = vm;
745 	kg->kg_fifo = (void *)va;
746 	kg->kg_depth = depth;
747 	kg->kg_rotor = 0;
748 
749 	printf("kmem_guard(%p): depth %u\n", kg, depth);
750 	return true;
751 }
752 
753 static void *
754 kmem_guard_alloc(struct kmem_guard *kg, size_t requested_size, bool waitok)
755 {
756 	struct vm_page *pg;
757 	vm_flag_t flags;
758 	vmem_addr_t va;
759 	vaddr_t loopva;
760 	vsize_t loopsize;
761 	size_t size;
762 	void **p;
763 
764 	/*
765 	 * Compute the size: take the kmem header into account, and add a guard
766 	 * page at the end.
767 	 */
768 	size = round_page(requested_size + SIZE_SIZE) + PAGE_SIZE;
769 
770 	/* Allocate pages of kernel VA, but do not map anything in yet. */
771 	flags = VM_BESTFIT | (waitok ? VM_SLEEP : VM_NOSLEEP);
772 	if (vmem_alloc(kg->kg_vmem, size, flags, &va) != 0) {
773 		return NULL;
774 	}
775 
776 	loopva = va;
777 	loopsize = size - PAGE_SIZE;
778 
779 	while (loopsize) {
780 		pg = uvm_pagealloc(NULL, loopva, NULL, 0);
781 		if (__predict_false(pg == NULL)) {
782 			if (waitok) {
783 				uvm_wait("kmem_guard");
784 				continue;
785 			} else {
786 				uvm_km_pgremove_intrsafe(kernel_map, va,
787 				    va + size);
788 				vmem_free(kg->kg_vmem, va, size);
789 				return NULL;
790 			}
791 		}
792 
793 		pg->flags &= ~PG_BUSY;	/* new page */
794 		UVM_PAGE_OWN(pg, NULL);
795 		pmap_kenter_pa(loopva, VM_PAGE_TO_PHYS(pg),
796 		    VM_PROT_READ|VM_PROT_WRITE, PMAP_KMPAGE);
797 
798 		loopva += PAGE_SIZE;
799 		loopsize -= PAGE_SIZE;
800 	}
801 
802 	pmap_update(pmap_kernel());
803 
804 	/*
805 	 * Offset the returned pointer so that the unmapped guard page sits
806 	 * immediately after the returned object.
807 	 */
808 	p = (void **)((va + (size - PAGE_SIZE) - requested_size) & ~(uintptr_t)ALIGNBYTES);
809 	kmem_size_set((uint8_t *)p - SIZE_SIZE, requested_size);
810 	return (void *)p;
811 }
812 
813 static void
814 kmem_guard_free(struct kmem_guard *kg, size_t requested_size, void *p)
815 {
816 	vaddr_t va;
817 	u_int rotor;
818 	size_t size;
819 	uint8_t *ptr;
820 
821 	ptr = (uint8_t *)p - SIZE_SIZE;
822 	kmem_size_check(ptr, requested_size);
823 	va = trunc_page((vaddr_t)ptr);
824 	size = round_page(requested_size + SIZE_SIZE) + PAGE_SIZE;
825 
826 	KASSERT(pmap_extract(pmap_kernel(), va, NULL));
827 	KASSERT(!pmap_extract(pmap_kernel(), va + (size - PAGE_SIZE), NULL));
828 
829 	/*
830 	 * Unmap and free the pages. The last one is never allocated.
831 	 */
832 	uvm_km_pgremove_intrsafe(kernel_map, va, va + size);
833 	pmap_update(pmap_kernel());
834 
835 #if 0
836 	/*
837 	 * XXX: Here, we need to atomically register the va and its size in the
838 	 * fifo.
839 	 */
840 
841 	/*
842 	 * Put the VA allocation into the list and swap an old one out to free.
843 	 * This behaves mostly like a fifo.
844 	 */
845 	rotor = atomic_inc_uint_nv(&kg->kg_rotor) % kg->kg_depth;
846 	va = (vaddr_t)atomic_swap_ptr(&kg->kg_fifo[rotor], (void *)va);
847 	if (va != 0) {
848 		vmem_free(kg->kg_vmem, va, size);
849 	}
850 #else
851 	(void)rotor;
852 	vmem_free(kg->kg_vmem, va, size);
853 #endif
854 }
855 
856 #endif /* defined(KMEM_GUARD) */
857