/*	$NetBSD: subr_kmem.c,v 1.11 2006/11/01 10:17:58 yamt Exp $	*/

/*-
 * Copyright (c)2006 YAMAMOTO Takashi,
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Allocator of kernel wired memory.
 *
 * TODO:
 * -	would an "intrsafe" (interrupt-safe) version be worth having?  maybe..
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_kmem.c,v 1.11 2006/11/01 10:17:58 yamt Exp $");

#include <sys/param.h>
#include <sys/callback.h>
#include <sys/kmem.h>
#include <sys/vmem.h>

#include <uvm/uvm_extern.h>
#include <uvm/uvm_map.h>

#include <lib/libkern/libkern.h>

#define	KMEM_QUANTUM_SIZE	(ALIGNBYTES + 1)

static vmem_t *kmem_arena;
static struct callback_entry kmem_kva_reclaim_entry;

#if defined(DEBUG)
static void kmem_poison_fill(void *, size_t);
static void kmem_poison_check(void *, size_t);
#else /* defined(DEBUG) */
#define	kmem_poison_fill(p, sz)		/* nothing */
#define	kmem_poison_check(p, sz)	/* nothing */
#endif /* defined(DEBUG) */

static vmem_addr_t kmem_backend_alloc(vmem_t *, vmem_size_t, vmem_size_t *,
    vm_flag_t);
static void kmem_backend_free(vmem_t *, vmem_addr_t, vmem_size_t);
static int kmem_kva_reclaim_callback(struct callback_entry *, void *, void *);

static inline vm_flag_t
kmf_to_vmf(km_flag_t kmflags)
{
	vm_flag_t vmflags;

	KASSERT((kmflags & (KM_SLEEP|KM_NOSLEEP)) != 0);
	KASSERT((~kmflags & (KM_SLEEP|KM_NOSLEEP)) != 0);

	vmflags = 0;
	if ((kmflags & KM_SLEEP) != 0) {
		vmflags |= VM_SLEEP;
	}
	if ((kmflags & KM_NOSLEEP) != 0) {
		vmflags |= VM_NOSLEEP;
	}

	return vmflags;
}
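
/*
 * Added note (not in the original source): the two KASSERTs above
 * together require that exactly one of KM_SLEEP and KM_NOSLEEP is set,
 * so the translation is one-to-one:
 *
 *	kmf_to_vmf(KM_SLEEP)   == VM_SLEEP
 *	kmf_to_vmf(KM_NOSLEEP) == VM_NOSLEEP
 */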

/* ---- kmem API */

/*
 * kmem_alloc: allocate wired memory.
 *
 * => must not be called from interrupt context.
 */

void *
kmem_alloc(size_t size, km_flag_t kmflags)
{
	void *p;

	p = (void *)vmem_alloc(kmem_arena, size,
	    kmf_to_vmf(kmflags) | VM_INSTANTFIT);
	if (p != NULL) {
		/* don't poison-check a failed (NULL) allocation */
		kmem_poison_check(p, size);
	}
	return p;
}
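
/*
 * Illustrative sketch (added; not part of the original file, and
 * "example_softc"/"example_attach" are hypothetical names): a typical
 * caller pairs kmem_alloc with a kmem_free of the same size.
 */
#if 0 /* example only */
struct example_softc {
	int sc_state;
};

static int
example_attach(void)
{
	struct example_softc *sc;

	sc = kmem_alloc(sizeof(*sc), KM_SLEEP);
	if (sc == NULL) {
		/* can fail: the backend passes UVM_KMF_CANFAIL */
		return ENOMEM;
	}
	sc->sc_state = 0;
	/* ... */
	kmem_free(sc, sizeof(*sc));	/* size must match the allocation */
	return 0;
}
#endif /* example only */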

/*
 * kmem_zalloc: allocate zeroed wired memory.
 *
 * => must not be called from interrupt context.
 */

void *
kmem_zalloc(size_t size, km_flag_t kmflags)
{
	void *p;

	p = kmem_alloc(size, kmflags);
	if (p != NULL) {
		memset(p, 0, size);
	}
	return p;
}
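
/*
 * Illustrative sketch (added; "example_stats" and the field name are
 * hypothetical): kmem_zalloc is the convenient way to get a structure
 * with every field starting at zero, instead of calling memset by hand.
 */
#if 0 /* example only */
static int
example_counters_init(struct example_stats **stp)
{
	struct example_stats *st;

	st = kmem_zalloc(sizeof(*st), KM_SLEEP);
	if (st == NULL) {
		return ENOMEM;
	}
	/* every field of *st is zero here; no memset needed */
	*stp = st;
	return 0;
}
#endif /* example only */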

/*
 * kmem_free: free wired memory allocated by kmem_alloc.
 *
 * => must not be called from interrupt context.
 */

void
kmem_free(void *p, size_t size)
{

	kmem_poison_fill(p, size);
	vmem_free(kmem_arena, (vmem_addr_t)p, size);
}
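
/*
 * Added note (not in the original): the caller must pass the same size
 * it gave to kmem_alloc; the interface does not record per-allocation
 * sizes.  Under DEBUG the freed region is filled with a poison pattern
 * here, and the pattern is verified again in kmem_alloc and
 * kmem_backend_free, so a write through a stale pointer shows up as a
 * kmem_poison_check panic later.
 */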

void
kmem_init(void)
{

	kmem_arena = vmem_create("kmem", 0, 0, KMEM_QUANTUM_SIZE,
	    kmem_backend_alloc, kmem_backend_free, NULL,
	    KMEM_QUANTUM_SIZE * 32, VM_SLEEP);
	callback_register(&vm_map_to_kernel(kernel_map)->vmk_reclaim_callback,
	    &kmem_kva_reclaim_entry, kmem_arena, kmem_kva_reclaim_callback);
}
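
/*
 * Added note (not in the original): kmem_init must run once, early at
 * boot, after UVM has set up kernel_map and before the first
 * kmem_alloc.  The arena imports backing pages on demand through
 * kmem_backend_alloc; the KMEM_QUANTUM_SIZE * 32 argument is vmem's
 * quantum-cache limit, so allocations up to 32 quanta are served from
 * per-size caches rather than the general arena.
 */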

size_t
kmem_roundup_size(size_t size)
{

	return vmem_roundup_size(kmem_arena, size);
}
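
/*
 * Worked example (added; assumes a port where ALIGNBYTES is 7, giving
 * an 8-byte quantum): sizes round up to a multiple of the arena
 * quantum, so
 *
 *	kmem_roundup_size(1)  == 8
 *	kmem_roundup_size(8)  == 8
 *	kmem_roundup_size(9)  == 16
 *
 * Callers can use this to learn how much usable space an allocation of
 * a given size really provides.
 */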

/* ---- uvm glue */

static vmem_addr_t
kmem_backend_alloc(vmem_t *dummy, vmem_size_t size, vmem_size_t *resultsize,
    vm_flag_t vmflags)
{
	uvm_flag_t uflags;
	vaddr_t va;

	KASSERT(dummy == NULL);
	KASSERT(size != 0);
	KASSERT((vmflags & (VM_SLEEP|VM_NOSLEEP)) != 0);
	KASSERT((~vmflags & (VM_SLEEP|VM_NOSLEEP)) != 0);

	if ((vmflags & VM_NOSLEEP) != 0) {
		uflags = UVM_KMF_TRYLOCK | UVM_KMF_NOWAIT;
	} else {
		uflags = UVM_KMF_WAITVA;
	}
	*resultsize = size = round_page(size);
	va = uvm_km_alloc(kernel_map, size, 0,
	    uflags | UVM_KMF_WIRED | UVM_KMF_CANFAIL);
	if (va != 0) {
		/* don't poison-fill a failed (zero) allocation */
		kmem_poison_fill((void *)va, size);
	}
	return (vmem_addr_t)va;
}
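
/*
 * Worked example (added): the backend deals in whole pages.  With
 * PAGE_SIZE == 4096, a request to import 100 bytes is rounded up to
 * 4096, *resultsize reports the full page back to the vmem arena, and
 * the arena then carves quantum-sized pieces out of that page for
 * individual kmem_alloc callers.
 */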

static void
kmem_backend_free(vmem_t *dummy, vmem_addr_t addr, vmem_size_t size)
{

	KASSERT(dummy == NULL);
	KASSERT(addr != 0);
	KASSERT(size != 0);
	KASSERT(size == round_page(size));

	kmem_poison_check((void *)addr, size);
	uvm_km_free(kernel_map, (vaddr_t)addr, size, UVM_KMF_WIRED);
}

static int
kmem_kva_reclaim_callback(struct callback_entry *ce, void *obj, void *arg)
{
	vmem_t *vm = obj;

	vmem_reap(vm);
	return CALLBACK_CHAIN_CONTINUE;
}
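
/*
 * Added note (not in the original): kmem_init registers this callback
 * on kernel_map's reclaim chain.  When the kernel runs short of
 * virtual address space, the chain is walked; vmem_reap releases
 * whatever cached free space the kmem arena can give back, and
 * returning CALLBACK_CHAIN_CONTINUE lets any other registered reclaim
 * callbacks run as well.
 */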

/* ---- debug */

#if defined(DEBUG)

#if defined(_LP64)
#define	PRIME	0x9e37fffffffc0001UL
#else /* defined(_LP64) */
#define	PRIME	0x9e3779b1
#endif /* defined(_LP64) */

static inline uint8_t
kmem_poison_pattern(const void *p)
{

	return (uint8_t)((((uintptr_t)p) * PRIME)
	    >> ((sizeof(uintptr_t) - sizeof(uint8_t)) * CHAR_BIT));
}
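
/*
 * Worked example (added; assumes a 32-bit port, so PRIME is
 * 0x9e3779b1): for p == (void *)0x1000, the product
 * 0x1000 * 0x9e3779b1 truncates to 0x779b1000 in 32 bits, and shifting
 * right by (sizeof(uintptr_t) - sizeof(uint8_t)) * CHAR_BIT == 24 bits
 * keeps the top byte, 0x77.  Because every byte address hashes to its
 * own pattern value, a block that is copied, shifted, or only partly
 * overwritten fails the check.
 */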

static void
kmem_poison_fill(void *p, size_t sz)
{
	uint8_t *cp;
	const uint8_t *ep;

	cp = p;
	ep = cp + sz;
	while (cp < ep) {
		*cp = kmem_poison_pattern(cp);
		cp++;
	}
}

static void
kmem_poison_check(void *p, size_t sz)
{
	uint8_t *cp;
	const uint8_t *ep;

	cp = p;
	ep = cp + sz;
	while (cp < ep) {
		const uint8_t expected = kmem_poison_pattern(cp);

		if (*cp != expected) {
			panic("%s: %p: 0x%02x != 0x%02x\n",
			    __func__, cp, *cp, expected);
		}
		cp++;
	}
}
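
/*
 * Illustrative sketch (added, hypothetical code): how the poisoning
 * catches a use-after-free.  kmem_free pattern-fills the region; the
 * first write through the stale pointer breaks the pattern, and the
 * next check of that region panics with the message above.
 */
#if 0 /* example only */
static void
example_use_after_free(void)
{
	char *p;

	p = kmem_alloc(64, KM_SLEEP);
	if (p == NULL) {
		return;
	}
	kmem_free(p, 64);
	p[0] = 'x';	/* use-after-free: clobbers one poison byte */
	/*
	 * the next check of this region (kmem_alloc reusing the chunk,
	 * or kmem_backend_free releasing the page) panics on the
	 * mismatch.
	 */
}
#endif /* example only */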

#endif /* defined(DEBUG) */