/*
 * (MPSAFE)
 *
 * KERN_SLABALLOC.C - Kernel SLAB memory allocator
 *
 * Copyright (c) 2003,2004,2010 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/kern/kern_slaballoc.c,v 1.55 2008/10/22 01:42:17 dillon Exp $
 *
 * This module implements a slab allocator drop-in replacement for the
 * kernel malloc().
 *
 * A slab allocator reserves a ZONE for each chunk size, then lays the
 * chunks out in an array within the zone.  Allocation and deallocation
 * are nearly instantaneous, and fragmentation/overhead losses are limited
 * to a fixed worst-case amount.
 *
 * The downside of this slab implementation is the zone size multiplied
 * by the number of zones.  ~80 zones * 128K = 10MB of VM per cpu.
 * In a kernel implementation all this memory will be physical, so
 * the zone size is adjusted downward on machines with less physical
 * memory.  The upside is that overhead is bounded... this is the *worst*
 * case overhead.
 *
 * Slab management is done on a per-cpu basis and no locking or mutexes
 * are required, only a critical section.  When one cpu frees memory
 * belonging to another cpu's slab manager an asynchronous IPI message
 * will be queued to execute the operation.  In addition, both the
 * high level slab allocator and the low level zone allocator optimize
 * M_ZERO requests, and the slab allocator does not have to pre-initialize
 * the linked list of chunks.
 *
 * XXX Balancing is needed between cpus.  Balance will be handled through
 * asynchronous IPIs primarily by reassigning the z_Cpu ownership of chunks.
 *
 * XXX If we have to allocate a new zone and M_USE_RESERVE is set, use of
 * the new zone should be restricted to M_USE_RESERVE requests only.
 *
 *	Alloc Size	Chunking	Number of zones
 *	0-127		8		16
 *	128-255		16		8
 *	256-511		32		8
 *	512-1023	64		8
 *	1024-2047	128		8
 *	2048-4095	256		8
 *	4096-8191	512		8
 *	8192-16383	1024		8
 *	16384-32767	2048		8
 *	(if PAGE_SIZE is 4K the maximum zone allocation is 16383)
 *
 * Allocations >= ZoneLimit go directly to kmem.
 *
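 * Worked example (illustrative, derived from the chunking table above and
 * from zoneindex() below): a request for 100 bytes falls in the 0-127
 * band, is rounded up to a 104-byte chunk and is served from zone index
 * 104/8 - 1 = 12; a request for 200 bytes falls in the 128-255 band, is
 * rounded up to 208 bytes and uses zone index 208/16 + 7 = 20.  A request
 * of ZoneLimit bytes or more bypasses the zones entirely and is handed to
 * kmem_slab_alloc().
 *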
 * API REQUIREMENTS AND SIDE EFFECTS
 *
 *    To operate as a drop-in replacement for the FreeBSD-4.x malloc() we
 *    have remained compatible with the following API requirements:
 *
 *    + small power-of-2 sized allocations are power-of-2 aligned (kern_tty)
 *    + all power-of-2 sized allocations are power-of-2 aligned (twe)
 *    + malloc(0) is allowed and returns non-NULL (ahc driver)
 *    + ability to allocate arbitrarily large chunks of memory
 */

#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/slaballoc.h>
#include <sys/mbuf.h>
#include <sys/vmmeter.h>
#include <sys/lock.h>
#include <sys/thread.h>
#include <sys/globaldata.h>
#include <sys/sysctl.h>
#include <sys/ktr.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>

#include <machine/cpu.h>

#include <sys/thread2.h>

#define arysize(ary)	(sizeof(ary)/sizeof((ary)[0]))

#define MEMORY_STRING	"ptr=%p type=%p size=%d flags=%04x"
#define MEMORY_ARG_SIZE	(sizeof(void *) * 2 + sizeof(unsigned long) + \
			 sizeof(int))

#if !defined(KTR_MEMORY)
#define KTR_MEMORY	KTR_ALL
#endif
KTR_INFO_MASTER(memory);
KTR_INFO(KTR_MEMORY, memory, malloc, 0, MEMORY_STRING, MEMORY_ARG_SIZE);
KTR_INFO(KTR_MEMORY, memory, free_zero, 1, MEMORY_STRING, MEMORY_ARG_SIZE);
KTR_INFO(KTR_MEMORY, memory, free_ovsz, 2, MEMORY_STRING, MEMORY_ARG_SIZE);
KTR_INFO(KTR_MEMORY, memory, free_ovsz_delayed, 3, MEMORY_STRING, MEMORY_ARG_SIZE);
KTR_INFO(KTR_MEMORY, memory, free_chunk, 4, MEMORY_STRING, MEMORY_ARG_SIZE);
#ifdef SMP
KTR_INFO(KTR_MEMORY, memory, free_request, 5, MEMORY_STRING, MEMORY_ARG_SIZE);
KTR_INFO(KTR_MEMORY, memory, free_remote, 6, MEMORY_STRING, MEMORY_ARG_SIZE);
#endif
KTR_INFO(KTR_MEMORY, memory, malloc_beg, 0, "malloc begin", 0);
KTR_INFO(KTR_MEMORY, memory, free_beg, 0, "free begin", 0);
KTR_INFO(KTR_MEMORY, memory, free_end, 0, "free end", 0);

#define logmemory(name, ptr, type, size, flags)	\
	KTR_LOG(memory_ ## name, ptr, type, size, flags)
#define logmemory_quick(name)	\
	KTR_LOG(memory_ ## name)

/*
 * Fixed globals (not per-cpu)
 */
static int ZoneSize;
static int ZoneLimit;
static int ZonePageCount;
static int ZoneMask;
static int ZoneBigAlloc;		/* in KB */
static int ZoneGenAlloc;		/* in KB */
struct malloc_type *kmemstatistics;	/* exported to vmstat */
static struct kmemusage *kmemusage;
static int32_t weirdary[16];

static void *kmem_slab_alloc(vm_size_t bytes, vm_offset_t align, int flags);
static void kmem_slab_free(void *ptr, vm_size_t bytes);
#if defined(INVARIANTS)
static void chunk_mark_allocated(SLZone *z, void *chunk);
static void chunk_mark_free(SLZone *z, void *chunk);
#endif

/*
 * Misc constants.  Note that allocations that are exact multiples of
 * PAGE_SIZE, or exceed the zone limit, fall through to the kmem module.
 * IN_SAME_PAGE_MASK is used to sanity-check the per-page free lists.
 */
#define MIN_CHUNK_SIZE		8		/* in bytes */
#define MIN_CHUNK_MASK		(MIN_CHUNK_SIZE - 1)
#define ZONE_RELS_THRESH	2		/* threshold number of zones */
#define IN_SAME_PAGE_MASK	(~(intptr_t)PAGE_MASK | MIN_CHUNK_MASK)

/*
 * The WEIRD_ADDR is used as known text to copy into free objects to
 * try to create deterministic failure cases if the data is accessed after
 * free.
 */
#define WEIRD_ADDR	0xdeadc0de
#define MAX_COPY	sizeof(weirdary)
#define ZERO_LENGTH_PTR	((void *)-8)

/*
 * Misc global malloc buckets
 */

MALLOC_DEFINE(M_CACHE, "cache", "Various Dynamically allocated caches");
MALLOC_DEFINE(M_DEVBUF, "devbuf", "device driver memory");
MALLOC_DEFINE(M_TEMP, "temp", "misc temporary data buffers");

MALLOC_DEFINE(M_IP6OPT, "ip6opt", "IPv6 options");
MALLOC_DEFINE(M_IP6NDP, "ip6ndp", "IPv6 Neighbor Discovery");

/*
 * Initialize the slab memory allocator.  We have to choose a zone size based
 * on available physical memory.  We choose a zone size which is approximately
 * 1/1024th of our memory, so if we have 128MB of ram we have a zone size of
 * 128K.  The zone size is limited to the bounds set in slaballoc.h
 * (typically 32K min, 128K max).
 */
static void kmeminit(void *dummy);

char *ZeroPage;

SYSINIT(kmem, SI_BOOT1_ALLOCATOR, SI_ORDER_FIRST, kmeminit, NULL)

#ifdef INVARIANTS
/*
 * If enabled any memory allocated without M_ZERO is initialized to -1.
 */
static int use_malloc_pattern;
SYSCTL_INT(_debug, OID_AUTO, use_malloc_pattern, CTLFLAG_RW,
	   &use_malloc_pattern, 0, "");
#endif

SYSCTL_INT(_kern, OID_AUTO, zone_big_alloc, CTLFLAG_RD, &ZoneBigAlloc, 0, "");
SYSCTL_INT(_kern, OID_AUTO, zone_gen_alloc, CTLFLAG_RD, &ZoneGenAlloc, 0, "");

static void
kmeminit(void *dummy)
{
    size_t limsize;
    int usesize;
    int i;
    vm_offset_t npg;

    limsize = (size_t)vmstats.v_page_count * PAGE_SIZE;
    if (limsize > KvaSize)
	limsize = KvaSize;

    usesize = (int)(limsize / 1024);	/* convert to KB */

    ZoneSize = ZALLOC_MIN_ZONE_SIZE;
    while (ZoneSize < ZALLOC_MAX_ZONE_SIZE && (ZoneSize << 1) < usesize)
	ZoneSize <<= 1;
    ZoneLimit = ZoneSize / 4;
    if (ZoneLimit > ZALLOC_ZONE_LIMIT)
	ZoneLimit = ZALLOC_ZONE_LIMIT;
    ZoneMask = ZoneSize - 1;
    ZonePageCount = ZoneSize / PAGE_SIZE;

    npg = KvaSize / PAGE_SIZE;
    kmemusage = kmem_slab_alloc(npg * sizeof(struct kmemusage),
				PAGE_SIZE, M_WAITOK|M_ZERO);

    for (i = 0; i < arysize(weirdary); ++i)
	weirdary[i] = WEIRD_ADDR;

    ZeroPage = kmem_slab_alloc(PAGE_SIZE, PAGE_SIZE, M_WAITOK|M_ZERO);

    if (bootverbose)
	kprintf("Slab ZoneSize set to %dKB\n", ZoneSize / 1024);
}

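/*
 * Worked example of the sizing above (illustrative only, assuming the
 * typical 32K min / 128K max zone bounds mentioned earlier and 4K pages):
 * with 1GB of ram, limsize is 1GB and usesize is 1048576 (KB), so ZoneSize
 * doubles from 32K up to the 128K cap.  ZoneLimit then starts at
 * 128K / 4 = 32K and is clamped to ZALLOC_ZONE_LIMIT, ZoneMask is 0x1ffff,
 * and ZonePageCount is 128K / 4K = 32 pages per zone.
 */
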
/*
 * Initialize a malloc type tracking structure.
 */
void
malloc_init(void *data)
{
    struct malloc_type *type = data;
    size_t limsize;

    if (type->ks_magic != M_MAGIC)
	panic("malloc type lacks magic");

    if (type->ks_limit != 0)
	return;

    if (vmstats.v_page_count == 0)
	panic("malloc_init not allowed before vm init");

    limsize = (size_t)vmstats.v_page_count * PAGE_SIZE;
    if (limsize > KvaSize)
	limsize = KvaSize;
    type->ks_limit = limsize / 10;

    type->ks_next = kmemstatistics;
    kmemstatistics = type;
}

void
malloc_uninit(void *data)
{
    struct malloc_type *type = data;
    struct malloc_type *t;
#ifdef INVARIANTS
    int i;
    long ttl;
#endif

    if (type->ks_magic != M_MAGIC)
	panic("malloc type lacks magic");

    if (vmstats.v_page_count == 0)
	panic("malloc_uninit not allowed before vm init");

    if (type->ks_limit == 0)
	panic("malloc_uninit on uninitialized type");

#ifdef SMP
    /* Make sure that all pending kfree()s are finished. */
    lwkt_synchronize_ipiqs("muninit");
#endif

#ifdef INVARIANTS
    /*
     * memuse is only correct in aggregation.  Due to memory being allocated
     * on one cpu and freed on another, individual array entries may be
     * negative or positive (canceling each other out).
     */
    for (i = ttl = 0; i < ncpus; ++i)
	ttl += type->ks_memuse[i];
    if (ttl) {
	kprintf("malloc_uninit: %ld bytes of '%s' still allocated\n",
		ttl, type->ks_shortdesc);
    }
#endif
    if (type == kmemstatistics) {
	kmemstatistics = type->ks_next;
    } else {
	for (t = kmemstatistics; t->ks_next != NULL; t = t->ks_next) {
	    if (t->ks_next == type) {
		t->ks_next = type->ks_next;
		break;
	    }
	}
    }
    type->ks_next = NULL;
    type->ks_limit = 0;
}

/*
 * Increase the kmalloc pool limit for the specified pool.  No changes
 * are made if the pool would shrink.
 */
void
kmalloc_raise_limit(struct malloc_type *type, size_t bytes)
{
    if (type->ks_limit == 0)
	malloc_init(type);
    if (bytes == 0)
	bytes = KvaSize;
    if (type->ks_limit < bytes)
	type->ks_limit = bytes;
}

/*
 * Dynamically create a malloc pool.  This function is a NOP if *typep is
 * already non-NULL.
 */
void
kmalloc_create(struct malloc_type **typep, const char *descr)
{
    struct malloc_type *type;

    if (*typep == NULL) {
	type = kmalloc(sizeof(*type), M_TEMP, M_WAITOK | M_ZERO);
	type->ks_magic = M_MAGIC;
	type->ks_shortdesc = descr;
	malloc_init(type);
	*typep = type;
    }
}

/*
 * Destroy a dynamically created malloc pool.  This function is a NOP if
 * the pool has already been destroyed.
 */
void
kmalloc_destroy(struct malloc_type **typep)
{
    if (*typep != NULL) {
	malloc_uninit(*typep);
	kfree(*typep, M_TEMP);
	*typep = NULL;
    }
}

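/*
 * Illustrative usage of the dynamic pool API above (the "mymod" names are
 * hypothetical and not part of this file):
 *
 *	static struct malloc_type *mymod_mtype;
 *
 *	kmalloc_create(&mymod_mtype, "mymod data");
 *	p = kmalloc(len, mymod_mtype, M_WAITOK | M_ZERO);
 *	...
 *	kfree(p, mymod_mtype);
 *	kmalloc_destroy(&mymod_mtype);
 *
 * kmalloc_raise_limit(mymod_mtype, 0) would raise the pool's limit to
 * KvaSize if the default limit established by malloc_init() is too small.
 */
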
/*
 * Calculate the zone index for the allocation request size and set the
 * allocation request size to that particular zone's chunk size.
 */
static __inline int
zoneindex(unsigned long *bytes)
{
    unsigned int n = (unsigned int)*bytes;	/* unsigned for shift opt */
    if (n < 128) {
	*bytes = n = (n + 7) & ~7;
	return(n / 8 - 1);		/* 8 byte chunks, 16 zones */
    }
    if (n < 256) {
	*bytes = n = (n + 15) & ~15;
	return(n / 16 + 7);
    }
    if (n < 8192) {
	if (n < 512) {
	    *bytes = n = (n + 31) & ~31;
	    return(n / 32 + 15);
	}
	if (n < 1024) {
	    *bytes = n = (n + 63) & ~63;
	    return(n / 64 + 23);
	}
	if (n < 2048) {
	    *bytes = n = (n + 127) & ~127;
	    return(n / 128 + 31);
	}
	if (n < 4096) {
	    *bytes = n = (n + 255) & ~255;
	    return(n / 256 + 39);
	}
	*bytes = n = (n + 511) & ~511;
	return(n / 512 + 47);
    }
#if ZALLOC_ZONE_LIMIT > 8192
    if (n < 16384) {
	*bytes = n = (n + 1023) & ~1023;
	return(n / 1024 + 55);
    }
#endif
#if ZALLOC_ZONE_LIMIT > 16384
    if (n < 32768) {
	*bytes = n = (n + 2047) & ~2047;
	return(n / 2048 + 63);
    }
#endif
    panic("Unexpected byte count %d", n);
    return(0);
}

/*
 * malloc()	(SLAB ALLOCATOR)
 *
 *	Allocate memory via the slab allocator.  If the request is too large,
 *	or if it is page-aligned beyond a certain size, we fall back to the
 *	KMEM subsystem.  A SLAB tracking descriptor must be specified, use
 *	&SlabMisc if you don't care.
 *
 *	M_RNOWAIT	- don't block.
 *	M_NULLOK	- return NULL instead of blocking.
 *	M_ZERO		- zero the returned memory.
 *	M_USE_RESERVE	- allow greater drawdown of the free list
 *	M_USE_INTERRUPT_RESERVE - allow the freelist to be exhausted
 *
 * MPSAFE
 */

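/*
 * Illustrative flag combinations for the allocator below (based on the
 * flag descriptions above, not an exhaustive list):
 *
 *	p = kmalloc(size, M_DEVBUF, M_WAITOK | M_ZERO);
 *		blocks if necessary, returned memory is zeroed
 *	p = kmalloc(size, M_DEVBUF, M_RNOWAIT | M_NULLOK);
 *		never blocks, may return NULL (caller must check)
 */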
void *
kmalloc(unsigned long size, struct malloc_type *type, int flags)
{
    SLZone *z;
    SLChunk *chunk;
    SLGlobalData *slgd;
    struct globaldata *gd;
    int zi;
#ifdef INVARIANTS
    int i;
#endif

    logmemory_quick(malloc_beg);
    gd = mycpu;
    slgd = &gd->gd_slab;

    /*
     * XXX silly to have this in the critical path.
     */
    if (type->ks_limit == 0) {
	crit_enter();
	if (type->ks_limit == 0)
	    malloc_init(type);
	crit_exit();
    }
    ++type->ks_calls;

    /*
     * Handle the case where the limit is reached.  Panic if we can't return
     * NULL.  The original malloc code looped, but this tended to
     * simply deadlock the computer.
     *
     * ks_loosememuse is an up-only limit that is NOT MP-synchronized, used
     * to determine if a more complete limit check should be done.  The
     * actual memory use is tracked via ks_memuse[cpu].
     */
    while (type->ks_loosememuse >= type->ks_limit) {
	int i;
	long ttl;

	for (i = ttl = 0; i < ncpus; ++i)
	    ttl += type->ks_memuse[i];
	type->ks_loosememuse = ttl;	/* not MP synchronized */
	if (ttl >= type->ks_limit) {
	    if (flags & M_NULLOK) {
		logmemory(malloc, NULL, type, size, flags);
		return(NULL);
	    }
	    panic("%s: malloc limit exceeded", type->ks_shortdesc);
	}
    }

    /*
     * Handle the degenerate size == 0 case.  Yes, this does happen.
     * Return a special pointer.  This is to maintain compatibility with
     * the original malloc implementation.  Certain devices, such as the
     * adaptec driver, not only allocate 0 bytes, they check for NULL and
     * also realloc() later on.  Joy.
     */
    if (size == 0) {
	logmemory(malloc, ZERO_LENGTH_PTR, type, size, flags);
	return(ZERO_LENGTH_PTR);
    }

    /*
     * Handle hysteresis from prior frees here in malloc().  We cannot
     * safely manipulate the kernel_map in free() due to free() possibly
     * being called via an IPI message or from sensitive interrupt code.
     */
    while (slgd->NFreeZones > ZONE_RELS_THRESH && (flags & M_RNOWAIT) == 0) {
	crit_enter();
	if (slgd->NFreeZones > ZONE_RELS_THRESH) {	/* crit sect race */
	    z = slgd->FreeZones;
	    slgd->FreeZones = z->z_Next;
	    --slgd->NFreeZones;
	    kmem_slab_free(z, ZoneSize);	/* may block */
	    atomic_add_int(&ZoneGenAlloc, -(int)ZoneSize / 1024);
	}
	crit_exit();
    }
    /*
     * XXX handle oversized frees that were queued from free().
     */
    while (slgd->FreeOvZones && (flags & M_RNOWAIT) == 0) {
	crit_enter();
	if ((z = slgd->FreeOvZones) != NULL) {
	    vm_size_t tsize;

	    KKASSERT(z->z_Magic == ZALLOC_OVSZ_MAGIC);
	    slgd->FreeOvZones = z->z_Next;
	    tsize = z->z_ChunkSize;
	    kmem_slab_free(z, tsize);	/* may block */
	    atomic_add_int(&ZoneBigAlloc, -(int)tsize / 1024);
	}
	crit_exit();
    }

    /*
     * Handle large allocations directly.  There should not be very many of
     * these so performance is not a big issue.
     *
     * The backend allocator is pretty nasty on an SMP system.  Use the
     * slab allocator for one and two page-sized chunks even though we lose
     * some efficiency.  XXX maybe fix mmio and the elf loader instead.
     */
    if (size >= ZoneLimit || ((size & PAGE_MASK) == 0 && size > PAGE_SIZE*2)) {
	struct kmemusage *kup;

	size = round_page(size);
	chunk = kmem_slab_alloc(size, PAGE_SIZE, flags);
	if (chunk == NULL) {
	    logmemory(malloc, NULL, type, size, flags);
	    return(NULL);
	}
	atomic_add_int(&ZoneBigAlloc, (int)size / 1024);
	flags &= ~M_ZERO;	/* result already zero'd if M_ZERO was set */
	flags |= M_PASSIVE_ZERO;
	kup = btokup(chunk);
	kup->ku_pagecnt = size / PAGE_SIZE;
	crit_enter();
	goto done;
    }

    /*
     * Attempt to allocate out of an existing zone.  First try the free list,
     * then allocate out of unallocated space.  If we find a good zone move
     * it to the head of the list so later allocations find it quickly
     * (we might have thousands of zones in the list).
     *
     * Note: zoneindex() will panic if size is too large.
     */
    zi = zoneindex(&size);
    KKASSERT(zi < NZONES);
    crit_enter();
    if ((z = slgd->ZoneAry[zi]) != NULL) {
	KKASSERT(z->z_NFree > 0);

	/*
	 * Remove us from the ZoneAry[] when we become empty
	 */
	if (--z->z_NFree == 0) {
	    slgd->ZoneAry[zi] = z->z_Next;
	    z->z_Next = NULL;
	}

	/*
	 * Locate a chunk in a free page.  This attempts to localize
	 * reallocations into earlier pages without us having to sort
	 * the chunk list.  A chunk may still overlap a page boundary.
	 */
	while (z->z_FirstFreePg < ZonePageCount) {
	    if ((chunk = z->z_PageAry[z->z_FirstFreePg]) != NULL) {
#ifdef DIAGNOSTIC
		/*
		 * Diagnostic: c_Next is not total garbage.
		 */
		KKASSERT(chunk->c_Next == NULL ||
			 ((intptr_t)chunk->c_Next & IN_SAME_PAGE_MASK) ==
			 ((intptr_t)chunk & IN_SAME_PAGE_MASK));
#endif
#ifdef INVARIANTS
		if ((vm_offset_t)chunk < KvaStart || (vm_offset_t)chunk >= KvaEnd)
		    panic("chunk %p FFPG %d/%d", chunk, z->z_FirstFreePg, ZonePageCount);
		if (chunk->c_Next && (vm_offset_t)chunk->c_Next < KvaStart)
		    panic("chunkNEXT %p %p FFPG %d/%d", chunk, chunk->c_Next, z->z_FirstFreePg, ZonePageCount);
		chunk_mark_allocated(z, chunk);
#endif
		z->z_PageAry[z->z_FirstFreePg] = chunk->c_Next;
		goto done;
	    }
	    ++z->z_FirstFreePg;
	}

	/*
	 * No chunks are available but NFree said we had some memory, so
	 * it must be available in the never-before-used-memory area
	 * governed by UIndex.  The consequences are very serious if our zone
	 * got corrupted so we use an explicit panic rather than a KASSERT.
	 */
	if (z->z_UIndex + 1 != z->z_NMax)
	    z->z_UIndex = z->z_UIndex + 1;
	else
	    z->z_UIndex = 0;
	if (z->z_UIndex == z->z_UEndIndex)
	    panic("slaballoc: corrupted zone");
	chunk = (SLChunk *)(z->z_BasePtr + z->z_UIndex * size);
	if ((z->z_Flags & SLZF_UNOTZEROD) == 0) {
	    flags &= ~M_ZERO;
	    flags |= M_PASSIVE_ZERO;
	}
#if defined(INVARIANTS)
	chunk_mark_allocated(z, chunk);
#endif
	goto done;
    }

    /*
     * If all zones are exhausted we need to allocate a new zone for this
     * index.  Use M_ZERO to take advantage of pre-zeroed pages.  Also see
     * UAlloc use above in regards to M_ZERO.  Note that when we are reusing
     * a zone from the FreeZones list UAlloc'd data will not be zero'd, and
     * we do not pre-zero it because we do not want to mess up the L1 cache.
     *
     * At least one subsystem, the tty code (see CROUND) expects power-of-2
     * allocations to be power-of-2 aligned.  We maintain compatibility by
     * adjusting the base offset below.
     */
    {
	int off;

	if ((z = slgd->FreeZones) != NULL) {
	    slgd->FreeZones = z->z_Next;
	    --slgd->NFreeZones;
	    bzero(z, sizeof(SLZone));
	    z->z_Flags |= SLZF_UNOTZEROD;
	} else {
	    z = kmem_slab_alloc(ZoneSize, ZoneSize, flags|M_ZERO);
	    if (z == NULL)
		goto fail;
	    atomic_add_int(&ZoneGenAlloc, (int)ZoneSize / 1024);
	}

	/*
	 * How big is the base structure?
	 */
#if defined(INVARIANTS)
	/*
	 * Make room for z_Bitmap.  An exact calculation is somewhat more
	 * complicated so don't make an exact calculation.
	 */
	off = offsetof(SLZone, z_Bitmap[(ZoneSize / size + 31) / 32]);
	bzero(z->z_Bitmap, (ZoneSize / size + 31) / 8);
#else
	off = sizeof(SLZone);
#endif

	/*
	 * Guarantee power-of-2 alignment for power-of-2-sized chunks.
	 * Otherwise just 8-byte align the data.
	 */
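	/*
	 * Illustrative arithmetic for the power-of-2 test below: for
	 * size = 64, (64 | 63) + 1 == 128 == 64 << 1, so the test is true
	 * and 'off' is rounded up to a 64-byte boundary.  For size = 96,
	 * (96 | 95) + 1 == 128 != 192, so the chunk only gets the default
	 * MIN_CHUNK_SIZE (8-byte) alignment.
	 */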
	if ((size | (size - 1)) + 1 == (size << 1))
	    off = (off + size - 1) & ~(size - 1);
	else
	    off = (off + MIN_CHUNK_MASK) & ~MIN_CHUNK_MASK;
	z->z_Magic = ZALLOC_SLAB_MAGIC;
	z->z_ZoneIndex = zi;
	z->z_NMax = (ZoneSize - off) / size;
	z->z_NFree = z->z_NMax - 1;
	z->z_BasePtr = (char *)z + off;
	z->z_UIndex = z->z_UEndIndex = slgd->JunkIndex % z->z_NMax;
	z->z_ChunkSize = size;
	z->z_FirstFreePg = ZonePageCount;
	z->z_CpuGd = gd;
	z->z_Cpu = gd->gd_cpuid;
	chunk = (SLChunk *)(z->z_BasePtr + z->z_UIndex * size);
	z->z_Next = slgd->ZoneAry[zi];
	slgd->ZoneAry[zi] = z;
	if ((z->z_Flags & SLZF_UNOTZEROD) == 0) {
	    flags &= ~M_ZERO;	/* already zero'd */
	    flags |= M_PASSIVE_ZERO;
	}
#if defined(INVARIANTS)
	chunk_mark_allocated(z, chunk);
#endif

	/*
	 * Slide the base index for initial allocations out of the next
	 * zone we create so we do not over-weight the lower part of the
	 * cpu memory caches.
	 */
	slgd->JunkIndex = (slgd->JunkIndex + ZALLOC_SLAB_SLIDE)
			  & (ZALLOC_MAX_ZONE_SIZE - 1);
    }
done:
    ++type->ks_inuse[gd->gd_cpuid];
    type->ks_memuse[gd->gd_cpuid] += size;
    type->ks_loosememuse += size;	/* not MP synchronized */
    crit_exit();
    if (flags & M_ZERO)
	bzero(chunk, size);
#ifdef INVARIANTS
    else if ((flags & (M_ZERO|M_PASSIVE_ZERO)) == 0) {
	if (use_malloc_pattern) {
	    for (i = 0; i < size; i += sizeof(int)) {
		*(int *)((char *)chunk + i) = -1;
	    }
	}
	chunk->c_Next = (void *)-1;	/* avoid accidental double-free check */
    }
#endif
    logmemory(malloc, chunk, type, size, flags);
    return(chunk);
fail:
    crit_exit();
    logmemory(malloc, NULL, type, size, flags);
    return(NULL);
}

/*
 * kernel realloc.  (SLAB ALLOCATOR) (MP SAFE)
 *
 * Generally speaking this routine is not called very often and we do
 * not attempt to optimize it beyond reusing the same pointer if the
 * new size fits within the chunking of the old pointer's zone.
 */
void *
krealloc(void *ptr, unsigned long size, struct malloc_type *type, int flags)
{
    SLZone *z;
    void *nptr;
    unsigned long osize;

    KKASSERT((flags & M_ZERO) == 0);	/* not supported */

    if (ptr == NULL || ptr == ZERO_LENGTH_PTR)
	return(kmalloc(size, type, flags));
    if (size == 0) {
	kfree(ptr, type);
	return(NULL);
    }

    /*
     * Handle oversized allocations.  XXX we really should require that a
     * size be passed to free() instead of this nonsense.
     */
    {
	struct kmemusage *kup;

	kup = btokup(ptr);
	if (kup->ku_pagecnt) {
	    osize = kup->ku_pagecnt << PAGE_SHIFT;
	    if (osize == round_page(size))
		return(ptr);
	    if ((nptr = kmalloc(size, type, flags)) == NULL)
		return(NULL);
	    bcopy(ptr, nptr, min(size, osize));
	    kfree(ptr, type);
	    return(nptr);
	}
    }

    /*
     * Get the original allocation's zone.  If the new request winds up
     * using the same chunk size we do not have to do anything.
     */
    z = (SLZone *)((uintptr_t)ptr & ~(uintptr_t)ZoneMask);
    KKASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC);

    /*
     * Allocate memory for the new request size.  Note that zoneindex has
     * already adjusted the request size to the appropriate chunk size, which
     * should optimize our bcopy().  Then copy and return the new pointer.
     *
     * Resizing a non-power-of-2 allocation to a power-of-2 size does not
     * necessarily align the result.
     *
     * We can only zoneindex (to align size to the chunk size) if the new
     * size is not too large.
     */
    if (size < ZoneLimit) {
	zoneindex(&size);
	if (z->z_ChunkSize == size)
	    return(ptr);
    }
    if ((nptr = kmalloc(size, type, flags)) == NULL)
	return(NULL);
    bcopy(ptr, nptr, min(size, z->z_ChunkSize));
    kfree(ptr, type);
    return(nptr);
}

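/*
 * Illustrative krealloc() usage (hypothetical sizes, not taken from any
 * caller in this file):
 *
 *	p = kmalloc(100, M_TEMP, M_WAITOK);	  (served from the 104-byte zone)
 *	p = krealloc(p, 104, M_TEMP, M_WAITOK);	  (same chunk size, p unchanged)
 *	p = krealloc(p, 200, M_TEMP, M_WAITOK);	  (new chunk, data copied, old freed)
 *	kfree(p, M_TEMP);
 *
 * krealloc(NULL, size, ...) behaves like kmalloc() and krealloc(p, 0, ...)
 * frees p and returns NULL, as coded above.
 */
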
/*
 * Return the kmalloc limit for this type, in bytes.
 */
long
kmalloc_limit(struct malloc_type *type)
{
    if (type->ks_limit == 0) {
	crit_enter();
	if (type->ks_limit == 0)
	    malloc_init(type);
	crit_exit();
    }
    return(type->ks_limit);
}

/*
 * Allocate a copy of the specified string.
 *
 * (MP SAFE) (MAY BLOCK)
 */
char *
kstrdup(const char *str, struct malloc_type *type)
{
    int zlen;	/* length inclusive of terminating NUL */
    char *nstr;

    if (str == NULL)
	return(NULL);
    zlen = strlen(str) + 1;
    nstr = kmalloc(zlen, type, M_WAITOK);
    bcopy(str, nstr, zlen);
    return(nstr);
}

#ifdef SMP
/*
 * free()	(SLAB ALLOCATOR)
 *
 *	Free the specified chunk of memory.
 */
static
void
free_remote(void *ptr)
{
    logmemory(free_remote, ptr, *(struct malloc_type **)ptr, -1, 0);
    kfree(ptr, *(struct malloc_type **)ptr);
}

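/*
 * Illustration of the cross-cpu free protocol handled by free_remote()
 * (explanatory only; the authoritative code is in kfree() below).  When
 * kfree() runs on a cpu which does not own the chunk's zone it overwrites
 * the first pointer-sized word of the chunk with the malloc_type pointer
 * and forwards the chunk with a passive IPI:
 *
 *	*(struct malloc_type **)ptr = type;
 *	lwkt_send_ipiq_passive(z->z_CpuGd, free_remote, ptr);
 *
 * free_remote() then recovers the type from that word and re-runs kfree()
 * on the owning cpu.
 */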
#endif

/*
 * free (SLAB ALLOCATOR)
 *
 * Free a memory block previously allocated by malloc.  Note that we do not
 * attempt to update ks_loosememuse as MP races could prevent us from
 * checking memory limits in malloc.
 *
 * MPSAFE
 */
void
kfree(void *ptr, struct malloc_type *type)
{
    SLZone *z;
    SLChunk *chunk;
    SLGlobalData *slgd;
    struct globaldata *gd;
    int pgno;

    logmemory_quick(free_beg);
    gd = mycpu;
    slgd = &gd->gd_slab;

    if (ptr == NULL)
	panic("trying to free NULL pointer");

    /*
     * Handle special 0-byte allocations
     */
    if (ptr == ZERO_LENGTH_PTR) {
	logmemory(free_zero, ptr, type, -1, 0);
	logmemory_quick(free_end);
	return;
    }

    /*
     * Handle oversized allocations.  XXX we really should require that a
     * size be passed to free() instead of this nonsense.
     *
     * This code is never called via an ipi.
     */
    {
	struct kmemusage *kup;
	unsigned long size;

	kup = btokup(ptr);
	if (kup->ku_pagecnt) {
	    size = kup->ku_pagecnt << PAGE_SHIFT;
	    kup->ku_pagecnt = 0;
#ifdef INVARIANTS
	    KKASSERT(sizeof(weirdary) <= size);
	    bcopy(weirdary, ptr, sizeof(weirdary));
#endif
	    /*
	     * NOTE: For oversized allocations we do not record the
	     *	     originating cpu.  It gets freed on the cpu calling
	     *	     kfree().  The statistics are in aggregate.
	     *
	     * note: XXX we have still inherited the interrupts-can't-block
	     * assumption.  An interrupt thread does not bump
	     * gd_intr_nesting_level so check TDF_INTTHREAD.  This is
	     * primarily until we can fix softupdate's assumptions about free().
	     */
	    crit_enter();
	    --type->ks_inuse[gd->gd_cpuid];
	    type->ks_memuse[gd->gd_cpuid] -= size;
	    if (mycpu->gd_intr_nesting_level ||
		(gd->gd_curthread->td_flags & TDF_INTTHREAD))
	    {
		logmemory(free_ovsz_delayed, ptr, type, size, 0);
		z = (SLZone *)ptr;
		z->z_Magic = ZALLOC_OVSZ_MAGIC;
		z->z_Next = slgd->FreeOvZones;
		z->z_ChunkSize = size;
		slgd->FreeOvZones = z;
		crit_exit();
	    } else {
		crit_exit();
		logmemory(free_ovsz, ptr, type, size, 0);
		kmem_slab_free(ptr, size);	/* may block */
		atomic_add_int(&ZoneBigAlloc, -(int)size / 1024);
	    }
	    logmemory_quick(free_end);
	    return;
	}
    }

    /*
     * Zone case.  Figure out the zone based on the fact that it is
     * ZoneSize aligned.
     */
    z = (SLZone *)((uintptr_t)ptr & ~(uintptr_t)ZoneMask);
    KKASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC);

    /*
     * If we do not own the zone then forward the request to the
     * cpu that does.  Since the timing is non-critical, a passive
     * message is sent.
     */
    if (z->z_CpuGd != gd) {
	*(struct malloc_type **)ptr = type;
#ifdef SMP
	logmemory(free_request, ptr, type, z->z_ChunkSize, 0);
	lwkt_send_ipiq_passive(z->z_CpuGd, free_remote, ptr);
#else
	panic("Corrupt SLZone");
#endif
	logmemory_quick(free_end);
	return;
    }

    logmemory(free_chunk, ptr, type, z->z_ChunkSize, 0);

    if (type->ks_magic != M_MAGIC)
	panic("free: malloc type lacks magic");

    crit_enter();
    pgno = ((char *)ptr - (char *)z) >> PAGE_SHIFT;
    chunk = ptr;

#ifdef INVARIANTS
    /*
     * Attempt to detect a double-free.  To reduce overhead we only check
     * if there appears to be a link pointer at the base of the data.
     */
    if (((intptr_t)chunk->c_Next - (intptr_t)z) >> PAGE_SHIFT == pgno) {
	SLChunk *scan;
	for (scan = z->z_PageAry[pgno]; scan; scan = scan->c_Next) {
	    if (scan == chunk)
		panic("Double free at %p", chunk);
	}
    }
    chunk_mark_free(z, chunk);
#endif

    /*
     * Put weird data into the memory to detect modifications after freeing,
     * illegal pointer use after freeing (we should fault on the odd address),
     * and so forth.  XXX needs more work, see the old malloc code.
     */
#ifdef INVARIANTS
    if (z->z_ChunkSize < sizeof(weirdary))
	bcopy(weirdary, chunk, z->z_ChunkSize);
    else
	bcopy(weirdary, chunk, sizeof(weirdary));
#endif

    /*
     * Add this free non-zero'd chunk to a linked list for reuse, adjust
     * z_FirstFreePg.
     */
#ifdef INVARIANTS
    if ((vm_offset_t)chunk < KvaStart || (vm_offset_t)chunk >= KvaEnd)
	panic("BADFREE %p", chunk);
#endif
    chunk->c_Next = z->z_PageAry[pgno];
    z->z_PageAry[pgno] = chunk;
#ifdef INVARIANTS
    if (chunk->c_Next && (vm_offset_t)chunk->c_Next < KvaStart)
	panic("BADFREE2");
#endif
    if (z->z_FirstFreePg > pgno)
	z->z_FirstFreePg = pgno;

    /*
     * Bump the number of free chunks.  If it becomes non-zero the zone
     * must be added back onto the appropriate list.
     */
    if (z->z_NFree++ == 0) {
	z->z_Next = slgd->ZoneAry[z->z_ZoneIndex];
	slgd->ZoneAry[z->z_ZoneIndex] = z;
    }

    --type->ks_inuse[z->z_Cpu];
    type->ks_memuse[z->z_Cpu] -= z->z_ChunkSize;

    /*
     * If the zone becomes totally free, and there are other zones we
     * can allocate from, move this zone to the FreeZones list.  Since
     * this code can be called from an IPI callback, do *NOT* try to mess
     * with kernel_map here.  Hysteresis will be performed at malloc() time.
     */
    if (z->z_NFree == z->z_NMax &&
	(z->z_Next || slgd->ZoneAry[z->z_ZoneIndex] != z)
    ) {
	SLZone **pz;

	for (pz = &slgd->ZoneAry[z->z_ZoneIndex]; z != *pz; pz = &(*pz)->z_Next)
	    ;
	*pz = z->z_Next;
	z->z_Magic = -1;
	z->z_Next = slgd->FreeZones;
	slgd->FreeZones = z;
	++slgd->NFreeZones;
    }
    logmemory_quick(free_end);
    crit_exit();
}

#if defined(INVARIANTS)
/*
 * Helper routines for sanity checks
 */
static
void
chunk_mark_allocated(SLZone *z, void *chunk)
{
    int bitdex = ((char *)chunk - (char *)z->z_BasePtr) / z->z_ChunkSize;
    __uint32_t *bitptr;

    KASSERT(bitdex >= 0 && bitdex < z->z_NMax,
	    ("memory chunk %p bit index %d is illegal", chunk, bitdex));
    bitptr = &z->z_Bitmap[bitdex >> 5];
    bitdex &= 31;
    KASSERT((*bitptr & (1 << bitdex)) == 0,
	    ("memory chunk %p is already allocated!", chunk));
    *bitptr |= 1 << bitdex;
}

static
void
chunk_mark_free(SLZone *z, void *chunk)
{
    int bitdex = ((char *)chunk - (char *)z->z_BasePtr) / z->z_ChunkSize;
    __uint32_t *bitptr;

    KASSERT(bitdex >= 0 && bitdex < z->z_NMax,
	    ("memory chunk %p bit index %d is illegal!", chunk, bitdex));
    bitptr = &z->z_Bitmap[bitdex >> 5];
    bitdex &= 31;
    KASSERT((*bitptr & (1 << bitdex)) != 0,
	    ("memory chunk %p is already free!", chunk));
    *bitptr &= ~(1 << bitdex);
}

#endif

/*
 * kmem_slab_alloc()
 *
 *	Directly allocate and wire kernel memory in PAGE_SIZE chunks with the
 *	specified alignment.  M_* flags are expected in the flags field.
 *
 *	Alignment must be a multiple of PAGE_SIZE.
 *
 *	NOTE! XXX For the moment we use vm_map_entry_reserve/release(),
 *	but when we move zalloc() over to use this function as its backend
 *	we will have to switch to kreserve/krelease and call reserve(0)
 *	after the new space is made available.
 *
 *	Interrupt code which has preempted other code is not allowed to
 *	use PQ_CACHE pages.  However, if an interrupt thread is run
 *	non-preemptively or blocks and then runs non-preemptively, then
 *	it is free to use PQ_CACHE pages.
 */
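/*
 * Typical invocations from this file (shown for illustration): the zone
 * allocator requests ZoneSize bytes aligned to ZoneSize so that kfree()
 * can recover the zone header by masking a chunk pointer, and kmeminit()
 * requests page-aligned bulk memory:
 *
 *	z = kmem_slab_alloc(ZoneSize, ZoneSize, flags | M_ZERO);
 *	kmemusage = kmem_slab_alloc(npg * sizeof(struct kmemusage),
 *				    PAGE_SIZE, M_WAITOK | M_ZERO);
 */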
static void *
kmem_slab_alloc(vm_size_t size, vm_offset_t align, int flags)
{
    vm_size_t i;
    vm_offset_t addr;
    int count, vmflags, base_vmflags;
    thread_t td;

    size = round_page(size);
    addr = vm_map_min(&kernel_map);

    /*
     * Reserve properly aligned space from kernel_map.  RNOWAIT allocations
     * cannot block.
     */
    if (flags & M_RNOWAIT) {
	if (lwkt_trytoken(&vm_token) == 0)
	    return(NULL);
    } else {
	lwkt_gettoken(&vm_token);
    }
    count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
    crit_enter();
    vm_map_lock(&kernel_map);
    if (vm_map_findspace(&kernel_map, addr, size, align, 0, &addr)) {
	vm_map_unlock(&kernel_map);
	if ((flags & M_NULLOK) == 0)
	    panic("kmem_slab_alloc(): kernel_map ran out of space!");
	vm_map_entry_release(count);
	crit_exit();
	lwkt_reltoken(&vm_token);
	return(NULL);
    }

    /*
     * kernel_object maps 1:1 to kernel_map.
     */
    vm_object_reference(&kernel_object);
    vm_map_insert(&kernel_map, &count,
		  &kernel_object, addr, addr, addr + size,
		  VM_MAPTYPE_NORMAL,
		  VM_PROT_ALL, VM_PROT_ALL,
		  0);

    td = curthread;

    base_vmflags = 0;
    if (flags & M_ZERO)
	base_vmflags |= VM_ALLOC_ZERO;
    if (flags & M_USE_RESERVE)
	base_vmflags |= VM_ALLOC_SYSTEM;
    if (flags & M_USE_INTERRUPT_RESERVE)
	base_vmflags |= VM_ALLOC_INTERRUPT;
    if ((flags & (M_RNOWAIT|M_WAITOK)) == 0) {
	panic("kmem_slab_alloc: bad flags %08x (%p)",
	      flags, ((int **)&size)[-1]);
    }

    /*
     * Allocate the pages.  Do not mess with the PG_ZERO flag yet.
     */
    for (i = 0; i < size; i += PAGE_SIZE) {
	vm_page_t m;

	/*
	 * VM_ALLOC_NORMAL can only be set if we are not preempting.
	 *
	 * VM_ALLOC_SYSTEM is automatically set if we are preempting and
	 * M_WAITOK was specified as an alternative (i.e. M_USE_RESERVE is
	 * implied in this case), though I'm not sure if we really need to
	 * do that.
	 */
	vmflags = base_vmflags;
	if (flags & M_WAITOK) {
	    if (td->td_preempted)
		vmflags |= VM_ALLOC_SYSTEM;
	    else
		vmflags |= VM_ALLOC_NORMAL;
	}

	m = vm_page_alloc(&kernel_object, OFF_TO_IDX(addr + i), vmflags);

	/*
	 * If the allocation failed we either return NULL or we retry.
	 *
	 * If M_WAITOK is specified we wait for more memory and retry.
	 * If M_WAITOK is specified from a preemption we yield instead of
	 * wait.  Livelock will not occur because the interrupt thread
	 * will not be preempting anyone the second time around after the
	 * yield.
	 */
	if (m == NULL) {
	    if (flags & M_WAITOK) {
		if (td->td_preempted) {
		    vm_map_unlock(&kernel_map);
		    lwkt_switch();
		    vm_map_lock(&kernel_map);
		} else {
		    vm_map_unlock(&kernel_map);
		    vm_wait(0);
		    vm_map_lock(&kernel_map);
		}
		i -= PAGE_SIZE;	/* retry */
		continue;
	    }

	    /*
	     * We were unable to recover, clean up and return NULL
	     *
	     * (vm_token already held)
	     */
	    while (i != 0) {
		i -= PAGE_SIZE;
		m = vm_page_lookup(&kernel_object, OFF_TO_IDX(addr + i));
		/* page should already be busy */
		vm_page_free(m);
	    }
	    vm_map_delete(&kernel_map, addr, addr + size, &count);
	    vm_map_unlock(&kernel_map);
	    vm_map_entry_release(count);
	    crit_exit();
	    lwkt_reltoken(&vm_token);
	    return(NULL);
	}
    }

    /*
     * Success!
     *
     * Mark the map entry as non-pageable using a routine that allows us to
     * populate the underlying pages.
     *
     * The pages were busied by the allocations above.
     */
    vm_map_set_wired_quick(&kernel_map, addr, size, &count);
    crit_exit();

    /*
     * Enter the pages into the pmap and deal with PG_ZERO and M_ZERO.
     */
    lwkt_gettoken(&vm_token);
    for (i = 0; i < size; i += PAGE_SIZE) {
	vm_page_t m;

	m = vm_page_lookup(&kernel_object, OFF_TO_IDX(addr + i));
	m->valid = VM_PAGE_BITS_ALL;
	/* page should already be busy */
	vm_page_wire(m);
	vm_page_wakeup(m);
	pmap_enter(&kernel_pmap, addr + i, m, VM_PROT_ALL, 1);
	if ((m->flags & PG_ZERO) == 0 && (flags & M_ZERO))
	    bzero((char *)addr + i, PAGE_SIZE);
	vm_page_flag_clear(m, PG_ZERO);
	KKASSERT(m->flags & (PG_WRITEABLE | PG_MAPPED));
	vm_page_flag_set(m, PG_REFERENCED);
    }
    lwkt_reltoken(&vm_token);
    vm_map_unlock(&kernel_map);
    vm_map_entry_release(count);
    lwkt_reltoken(&vm_token);
    return((void *)addr);
}

/*
 * kmem_slab_free()
 */
static void
kmem_slab_free(void *ptr, vm_size_t size)
{
    crit_enter();
    lwkt_gettoken(&vm_token);
    vm_map_remove(&kernel_map, (vm_offset_t)ptr, (vm_offset_t)ptr + size);
    lwkt_reltoken(&vm_token);
    crit_exit();
}