/*	$NetBSD: subr_kmem.c,v 1.17 2007/11/07 00:23:23 ad Exp $	*/

/*-
 * Copyright (c)2006 YAMAMOTO Takashi,
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * allocator of kernel wired memory.
 *
 * TODO:
 * - is an "intrsafe" version worth having? maybe..
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_kmem.c,v 1.17 2007/11/07 00:23:23 ad Exp $");

#include <sys/param.h>
#include <sys/callback.h>
#include <sys/kmem.h>
#include <sys/vmem.h>
#include <sys/debug.h>
#include <sys/lockdebug.h>

#include <uvm/uvm_extern.h>
#include <uvm/uvm_map.h>

#include <lib/libkern/libkern.h>

#define	KMEM_QUANTUM_SIZE	(ALIGNBYTES + 1)

static vmem_t *kmem_arena;
static struct callback_entry kmem_kva_reclaim_entry;

#if defined(DEBUG)
static void *kmem_freecheck;
static void kmem_poison_fill(void *, size_t);
static void kmem_poison_check(void *, size_t);
#else /* defined(DEBUG) */
#define	kmem_poison_fill(p, sz)		/* nothing */
#define	kmem_poison_check(p, sz)	/* nothing */
#endif /* defined(DEBUG) */

static vmem_addr_t kmem_backend_alloc(vmem_t *, vmem_size_t, vmem_size_t *,
    vm_flag_t);
static void kmem_backend_free(vmem_t *, vmem_addr_t, vmem_size_t);
static int kmem_kva_reclaim_callback(struct callback_entry *, void *, void *);

/*
 * kmf_to_vmf: convert kmem flags to the corresponding vmem flags.
 */

static inline vm_flag_t
kmf_to_vmf(km_flag_t kmflags)
{
	vm_flag_t vmflags;

	/* exactly one of KM_SLEEP and KM_NOSLEEP must be specified. */
	KASSERT((kmflags & (KM_SLEEP|KM_NOSLEEP)) != 0);
	KASSERT((~kmflags & (KM_SLEEP|KM_NOSLEEP)) != 0);

	vmflags = 0;
	if ((kmflags & KM_SLEEP) != 0) {
		vmflags |= VM_SLEEP;
	}
	if ((kmflags & KM_NOSLEEP) != 0) {
		vmflags |= VM_NOSLEEP;
	}

	return vmflags;
}

/* ---- kmem API */

/*
 * kmem_alloc: allocate wired memory.
 *
 * => must not be called from interrupt context.
 */

void *
kmem_alloc(size_t size, km_flag_t kmflags)
{
	void *p;

	p = (void *)vmem_alloc(kmem_arena, size,
	    kmf_to_vmf(kmflags) | VM_INSTANTFIT);
	if (p != NULL) {
		kmem_poison_check(p, size);
		FREECHECK_OUT(&kmem_freecheck, p);
	}
	return p;
}
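
/*
 * Usage sketch (illustrative only; "struct frobber" and the error
 * handling are hypothetical, not part of this file).  The caller must
 * remember the allocation size and pass it back to kmem_free.  Note
 * that the backend passes UVM_KMF_CANFAIL, so the result should be
 * checked for NULL even with KM_SLEEP:
 *
 *	struct frobber *f;
 *
 *	f = kmem_alloc(sizeof(*f), KM_NOSLEEP);
 *	if (f == NULL)
 *		return ENOMEM;
 *	...
 *	kmem_free(f, sizeof(*f));
 */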

/*
 * kmem_zalloc: allocate zeroed wired memory.
 *
 * => must not be called from interrupt context.
 */

void *
kmem_zalloc(size_t size, km_flag_t kmflags)
{
	void *p;

	p = kmem_alloc(size, kmflags);
	if (p != NULL) {
		memset(p, 0, size);
	}
	return p;
}

/*
 * kmem_free: free wired memory allocated by kmem_alloc.
 *
 * => must not be called from interrupt context.
 */

void
kmem_free(void *p, size_t size)
{

	FREECHECK_IN(&kmem_freecheck, p);
	LOCKDEBUG_MEM_CHECK(p, size);
	kmem_poison_fill(p, size);
	vmem_free(kmem_arena, (vmem_addr_t)p, size);
}

/*
 * kmem_init: initialize the allocator.  called once during kernel startup.
 */

void
kmem_init(void)
{

	kmem_arena = vmem_create("kmem", 0, 0, KMEM_QUANTUM_SIZE,
	    kmem_backend_alloc, kmem_backend_free, NULL,
	    KMEM_QUANTUM_SIZE * 32, VM_SLEEP, IPL_NONE);
	callback_register(&vm_map_to_kernel(kernel_map)->vmk_reclaim_callback,
	    &kmem_kva_reclaim_entry, kmem_arena, kmem_kva_reclaim_callback);
}

/*
 * kmem_roundup_size: return the size to which the allocator would round
 * the given allocation size up.
 */

size_t
kmem_roundup_size(size_t size)
{

	return vmem_roundup_size(kmem_arena, size);
}

/* ---- uvm glue */

/*
 * kmem_backend_alloc: grab wired pages from uvm to back the arena.
 */

static vmem_addr_t
kmem_backend_alloc(vmem_t *dummy, vmem_size_t size, vmem_size_t *resultsize,
    vm_flag_t vmflags)
{
	uvm_flag_t uflags;
	vaddr_t va;

	KASSERT(dummy == NULL);
	KASSERT(size != 0);
	KASSERT((vmflags & (VM_SLEEP|VM_NOSLEEP)) != 0);
	KASSERT((~vmflags & (VM_SLEEP|VM_NOSLEEP)) != 0);

	if ((vmflags & VM_NOSLEEP) != 0) {
		uflags = UVM_KMF_TRYLOCK | UVM_KMF_NOWAIT;
	} else {
		uflags = UVM_KMF_WAITVA;
	}
	*resultsize = size = round_page(size);
	va = uvm_km_alloc(kernel_map, size, 0,
	    uflags | UVM_KMF_WIRED | UVM_KMF_CANFAIL);
	if (va != 0) {
		kmem_poison_fill((void *)va, size);
	}
	return (vmem_addr_t)va;
}

/*
 * kmem_backend_free: return pages to uvm.
 */

static void
kmem_backend_free(vmem_t *dummy, vmem_addr_t addr, vmem_size_t size)
{

	KASSERT(dummy == NULL);
	KASSERT(addr != 0);
	KASSERT(size != 0);
	KASSERT(size == round_page(size));

	kmem_poison_check((void *)addr, size);
	uvm_km_free(kernel_map, (vaddr_t)addr, size, UVM_KMF_WIRED);
}

/*
 * kmem_kva_reclaim_callback: called when kernel_map is short of KVA;
 * ask the arena to release its cached free space.
 */

static int
kmem_kva_reclaim_callback(struct callback_entry *ce, void *obj, void *arg)
{
	vmem_t *vm = obj;

	vmem_reap(vm);
	return CALLBACK_CHAIN_CONTINUE;
}

/* ---- debug */

#if defined(DEBUG)

#if defined(_LP64)
#define	PRIME	0x9e37fffffffc0001UL
#else /* defined(_LP64) */
#define	PRIME	0x9e3779b1
#endif /* defined(_LP64) */

/*
 * kmem_poison_pattern: derive the poison byte for an address by taking
 * the most significant byte of the address multiplied by PRIME, so that
 * neighbouring bytes get different patterns.
 */

static inline uint8_t
kmem_poison_pattern(const void *p)
{

	return (uint8_t)((((uintptr_t)p) * PRIME)
	    >> ((sizeof(uintptr_t) - sizeof(uint8_t)) * CHAR_BIT));
}

static void
kmem_poison_fill(void *p, size_t sz)
{
	uint8_t *cp;
	const uint8_t *ep;

	cp = p;
	ep = cp + sz;
	while (cp < ep) {
		*cp = kmem_poison_pattern(cp);
		cp++;
	}
}

static void
kmem_poison_check(void *p, size_t sz)
{
	uint8_t *cp;
	const uint8_t *ep;

	cp = p;
	ep = cp + sz;
	while (cp < ep) {
		const uint8_t expected = kmem_poison_pattern(cp);

		if (*cp != expected) {
			panic("%s: %p: 0x%02x != 0x%02x\n",
			    __func__, cp, *cp, expected);
		}
		cp++;
	}
}

#endif /* defined(DEBUG) */
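
/*
 * Poisoning sketch (illustrative, hypothetical code; DEBUG kernels only).
 * kmem_free fills the freed region with the per-address pattern, and
 * kmem_alloc verifies it on the next hand-out of the same region, so a
 * write after free may be caught later rather than at the buggy store:
 *
 *	char *p = kmem_alloc(16, KM_SLEEP);
 *
 *	kmem_free(p, 16);
 *	p[0] = 'x';			use after free: clobbers the poison
 *	p = kmem_alloc(16, KM_SLEEP);	may panic in kmem_poison_check()
 */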