/*	$NetBSD: subr_kmem.c,v 1.22 2008/12/15 11:42:34 ad Exp $	*/

/*-
 * Copyright (c)2006 YAMAMOTO Takashi,
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Allocator of kernel wired memory.
 *
 * TODO:
 * - is an "intrsafe" (interrupt-safe) version worth having?  maybe..
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_kmem.c,v 1.22 2008/12/15 11:42:34 ad Exp $");

#include <sys/param.h>
#include <sys/callback.h>
#include <sys/kmem.h>
#include <sys/vmem.h>
#include <sys/debug.h>
#include <sys/lockdebug.h>

#include <uvm/uvm_extern.h>
#include <uvm/uvm_map.h>

#include <lib/libkern/libkern.h>

#define	KMEM_QUANTUM_SIZE	(ALIGNBYTES + 1)

static vmem_t *kmem_arena;
static struct callback_entry kmem_kva_reclaim_entry;

#if defined(DEBUG)
static void *kmem_freecheck;
#define	KMEM_POISON
#define	KMEM_REDZONE
#endif /* defined(DEBUG) */

#if defined(KMEM_POISON)
static void kmem_poison_fill(void *, size_t);
static void kmem_poison_check(void *, size_t);
#else /* defined(KMEM_POISON) */
#define	kmem_poison_fill(p, sz)		/* nothing */
#define	kmem_poison_check(p, sz)	/* nothing */
#endif /* defined(KMEM_POISON) */

#if defined(KMEM_REDZONE)
#define	REDZONE_SIZE	1
#else /* defined(KMEM_REDZONE) */
#define	REDZONE_SIZE	0
#endif /* defined(KMEM_REDZONE) */

static vmem_addr_t kmem_backend_alloc(vmem_t *, vmem_size_t, vmem_size_t *,
    vm_flag_t);
static void kmem_backend_free(vmem_t *, vmem_addr_t, vmem_size_t);
static int kmem_kva_reclaim_callback(struct callback_entry *, void *, void *);

/*
 * kmf_to_vmf: convert KM_* allocation flags to the corresponding VM_* flags.
 */

static inline vm_flag_t
kmf_to_vmf(km_flag_t kmflags)
{
	vm_flag_t vmflags;

	KASSERT((kmflags & (KM_SLEEP|KM_NOSLEEP)) != 0);
	KASSERT((~kmflags & (KM_SLEEP|KM_NOSLEEP)) != 0);

	vmflags = 0;
	if ((kmflags & KM_SLEEP) != 0) {
		vmflags |= VM_SLEEP;
	}
	if ((kmflags & KM_NOSLEEP) != 0) {
		vmflags |= VM_NOSLEEP;
	}

	return vmflags;
}

/* ---- kmem API */
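/*
 * Illustrative usage sketch (hypothetical caller, not part of this file):
 * callers pair kmem_alloc()/kmem_zalloc() with kmem_free(), pass the same
 * size to both, and only call them from thread context.  The
 * "example_softc" names below are made up for illustration.
 */
#if 0
struct example_softc {
	int	sc_state;
};

static struct example_softc *
example_create(void)
{
	struct example_softc *sc;

	sc = kmem_zalloc(sizeof(*sc), KM_SLEEP);	/* zeroed, may sleep */
	sc->sc_state = 1;
	return sc;
}

static void
example_destroy(struct example_softc *sc)
{

	kmem_free(sc, sizeof(*sc));	/* size must match the allocation */
}
#endif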
/*
 * kmem_alloc: allocate wired memory.
 *
 * => must not be called from interrupt context.
 */

void *
kmem_alloc(size_t size, km_flag_t kmflags)
{
	void *p;

	size += REDZONE_SIZE;
	p = (void *)vmem_alloc(kmem_arena, size,
	    kmf_to_vmf(kmflags) | VM_INSTANTFIT);
	if (p != NULL) {
		kmem_poison_check(p, kmem_roundup_size(size));
		FREECHECK_OUT(&kmem_freecheck, p);
	}
	return p;
}

/*
 * kmem_zalloc: allocate zeroed wired memory.
 *
 * => must not be called from interrupt context.
 */

void *
kmem_zalloc(size_t size, km_flag_t kmflags)
{
	void *p;

	p = kmem_alloc(size, kmflags);
	if (p != NULL) {
		memset(p, 0, size);
	}
	return p;
}

/*
 * kmem_free: free wired memory allocated by kmem_alloc.
 *
 * => must not be called from interrupt context.
 * => the size must be the same as the one passed to kmem_alloc.
 */

void
kmem_free(void *p, size_t size)
{

	FREECHECK_IN(&kmem_freecheck, p);
	LOCKDEBUG_MEM_CHECK(p, size);
	kmem_poison_check((char *)p + size,
	    kmem_roundup_size(size + REDZONE_SIZE) - size);
	kmem_poison_fill(p, size);
	vmem_free(kmem_arena, (vmem_addr_t)p, size + REDZONE_SIZE);
}
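/*
 * Illustrative sketch (hypothetical, not compiled): on a DEBUG kernel,
 * where KMEM_POISON and KMEM_REDZONE are defined above, the byte just past
 * the requested size still holds the poison pattern while the block is
 * allocated, so a one-byte overrun like the one below is expected to be
 * caught by the kmem_poison_check() call in kmem_free().
 */
#if 0
static void
example_overrun(void)
{
	char *p;

	p = kmem_alloc(7, KM_SLEEP);
	p[7] = '\0';		/* writes into the poisoned redzone */
	kmem_free(p, 7);	/* should panic in kmem_poison_check() */
}
#endif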
/*
 * kmem_init: create the kmem arena, backed by uvm_km, and register a
 * callback so that KVA cached by the arena can be reclaimed when the
 * kernel map runs short of virtual addresses.
 */

void
kmem_init(void)
{

	kmem_arena = vmem_create("kmem", 0, 0, KMEM_QUANTUM_SIZE,
	    kmem_backend_alloc, kmem_backend_free, NULL,
	    KMEM_QUANTUM_SIZE * 32, VM_SLEEP, IPL_NONE);
	callback_register(&vm_map_to_kernel(kernel_map)->vmk_reclaim_callback,
	    &kmem_kva_reclaim_entry, kmem_arena, kmem_kva_reclaim_callback);
}

/*
 * kmem_roundup_size: return the size that kmem_alloc would round the
 * given request up to.
 */

size_t
kmem_roundup_size(size_t size)
{

	return vmem_roundup_size(kmem_arena, size);
}

/* ---- uvm glue */

/*
 * kmem_backend_alloc: import a page-aligned chunk of wired kernel memory
 * into the kmem arena.
 */

static vmem_addr_t
kmem_backend_alloc(vmem_t *dummy, vmem_size_t size, vmem_size_t *resultsize,
    vm_flag_t vmflags)
{
	uvm_flag_t uflags;
	vaddr_t va;

	KASSERT(dummy == NULL);
	KASSERT(size != 0);
	KASSERT((vmflags & (VM_SLEEP|VM_NOSLEEP)) != 0);
	KASSERT((~vmflags & (VM_SLEEP|VM_NOSLEEP)) != 0);

	if ((vmflags & VM_NOSLEEP) != 0) {
		uflags = UVM_KMF_TRYLOCK | UVM_KMF_NOWAIT;
	} else {
		uflags = UVM_KMF_WAITVA;
	}
	*resultsize = size = round_page(size);
	va = uvm_km_alloc(kernel_map, size, 0,
	    uflags | UVM_KMF_WIRED | UVM_KMF_CANFAIL);
	if (va != 0) {
		kmem_poison_fill((void *)va, size);
	}
	return (vmem_addr_t)va;
}

/*
 * kmem_backend_free: release a chunk previously imported by
 * kmem_backend_alloc back to uvm.
 */

static void
kmem_backend_free(vmem_t *dummy, vmem_addr_t addr, vmem_size_t size)
{

	KASSERT(dummy == NULL);
	KASSERT(addr != 0);
	KASSERT(size != 0);
	KASSERT(size == round_page(size));

	kmem_poison_check((void *)addr, size);
	uvm_km_free(kernel_map, (vaddr_t)addr, size, UVM_KMF_WIRED);
}

/*
 * kmem_kva_reclaim_callback: invoked when kernel VA is scarce; reap unused
 * resources cached by the arena.
 */

static int
kmem_kva_reclaim_callback(struct callback_entry *ce, void *obj, void *arg)
{
	vmem_t *vm = obj;

	vmem_reap(vm);
	return CALLBACK_CHAIN_CONTINUE;
}

/* ---- debug */

#if defined(KMEM_POISON)

#if defined(_LP64)
#define	PRIME	0x9e37fffffffc0001UL
#else /* defined(_LP64) */
#define	PRIME	0x9e3779b1
#endif /* defined(_LP64) */

/*
 * kmem_poison_pattern: compute the poison byte expected at address p.
 * The pattern depends on the address, so a stray write is detected even
 * if it happens to store the same value at every location.
 */

static inline uint8_t
kmem_poison_pattern(const void *p)
{

	return (uint8_t)((((uintptr_t)p) * PRIME)
	    >> ((sizeof(uintptr_t) - sizeof(uint8_t))) * CHAR_BIT);
}

/*
 * kmem_poison_fill: fill [p, p + sz) with the per-address poison pattern.
 */

static void
kmem_poison_fill(void *p, size_t sz)
{
	uint8_t *cp;
	const uint8_t *ep;

	cp = p;
	ep = cp + sz;
	while (cp < ep) {
		*cp = kmem_poison_pattern(cp);
		cp++;
	}
}

/*
 * kmem_poison_check: verify that [p, p + sz) still holds the poison
 * pattern; panic if any byte has been modified.
 */

static void
kmem_poison_check(void *p, size_t sz)
{
	uint8_t *cp;
	const uint8_t *ep;

	cp = p;
	ep = cp + sz;
	while (cp < ep) {
		const uint8_t expected = kmem_poison_pattern(cp);

		if (*cp != expected) {
			panic("%s: %p: 0x%02x != 0x%02x\n",
			    __func__, cp, *cp, expected);
		}
		cp++;
	}
}

#endif /* defined(KMEM_POISON) */
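/*
 * Hypothetical sanity-check sketch (not part of this file, not compiled;
 * assumes a DEBUG kernel where KMEM_POISON is defined): the poison pattern
 * is a function of the address, so adjacent bytes are filled with different
 * values.  The function name is made up for illustration.
 */
#if 0
static void
kmem_poison_selftest(void)
{
	uint8_t buf[2];

	kmem_poison_fill(buf, sizeof(buf));
	KASSERT(buf[0] == kmem_poison_pattern(&buf[0]));
	KASSERT(buf[1] == kmem_poison_pattern(&buf[1]));
	KASSERT(buf[0] != buf[1]);	/* patterns differ between addresses */
}
#endif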