1*1aba0c0bSmpi /* $OpenBSD: uvm_km.c,v 1.155 2024/11/01 20:26:18 mpi Exp $ */ 21414b0faSart /* $NetBSD: uvm_km.c,v 1.42 2001/01/14 02:10:01 thorpej Exp $ */ 3cd7ee8acSart 4cd7ee8acSart /* 5cd7ee8acSart * Copyright (c) 1997 Charles D. Cranor and Washington University. 6cd7ee8acSart * Copyright (c) 1991, 1993, The Regents of the University of California. 7cd7ee8acSart * 8cd7ee8acSart * All rights reserved. 9cd7ee8acSart * 10cd7ee8acSart * This code is derived from software contributed to Berkeley by 11cd7ee8acSart * The Mach Operating System project at Carnegie-Mellon University. 12cd7ee8acSart * 13cd7ee8acSart * Redistribution and use in source and binary forms, with or without 14cd7ee8acSart * modification, are permitted provided that the following conditions 15cd7ee8acSart * are met: 16cd7ee8acSart * 1. Redistributions of source code must retain the above copyright 17cd7ee8acSart * notice, this list of conditions and the following disclaimer. 18cd7ee8acSart * 2. Redistributions in binary form must reproduce the above copyright 19cd7ee8acSart * notice, this list of conditions and the following disclaimer in the 20cd7ee8acSart * documentation and/or other materials provided with the distribution. 21188f0ea4Sjsg * 3. Neither the name of the University nor the names of its contributors 22cd7ee8acSart * may be used to endorse or promote products derived from this software 23cd7ee8acSart * without specific prior written permission. 24cd7ee8acSart * 25cd7ee8acSart * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 26cd7ee8acSart * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 27cd7ee8acSart * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 28cd7ee8acSart * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 29cd7ee8acSart * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 30cd7ee8acSart * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 31cd7ee8acSart * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 32cd7ee8acSart * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 33cd7ee8acSart * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 34cd7ee8acSart * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 35cd7ee8acSart * SUCH DAMAGE. 36cd7ee8acSart * 37cd7ee8acSart * @(#)vm_kern.c 8.3 (Berkeley) 1/12/94 38cd7ee8acSart * from: Id: uvm_km.c,v 1.1.2.14 1998/02/06 05:19:27 chs Exp 39cd7ee8acSart * 40cd7ee8acSart * 41cd7ee8acSart * Copyright (c) 1987, 1990 Carnegie-Mellon University. 42cd7ee8acSart * All rights reserved. 43cd7ee8acSart * 44cd7ee8acSart * Permission to use, copy, modify and distribute this software and 45cd7ee8acSart * its documentation is hereby granted, provided that both the copyright 46cd7ee8acSart * notice and this permission notice appear in all copies of the 47cd7ee8acSart * software, derivative works or modified versions, and any portions 48cd7ee8acSart * thereof, and that both notices appear in supporting documentation. 49cd7ee8acSart * 50cd7ee8acSart * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" 51cd7ee8acSart * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND 52cd7ee8acSart * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 
53cd7ee8acSart * 54cd7ee8acSart * Carnegie Mellon requests users of this software to return to 55cd7ee8acSart * 56cd7ee8acSart * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU 57cd7ee8acSart * School of Computer Science 58cd7ee8acSart * Carnegie Mellon University 59cd7ee8acSart * Pittsburgh PA 15213-3890 60cd7ee8acSart * 61cd7ee8acSart * any improvements or extensions that they make and grant Carnegie the 62cd7ee8acSart * rights to redistribute these changes. 63cd7ee8acSart */ 64cd7ee8acSart 65cd7ee8acSart /* 66cd7ee8acSart * uvm_km.c: handle kernel memory allocation and management 67cd7ee8acSart */ 68cd7ee8acSart 69cd7ee8acSart /* 70cd7ee8acSart * overview of kernel memory management: 71cd7ee8acSart * 72cd7ee8acSart * the kernel virtual address space is mapped by "kernel_map." kernel_map 735de1d0f0Smiod * starts at a machine-dependent address and is VM_KERNEL_SPACE_SIZE bytes 745de1d0f0Smiod * large. 75cd7ee8acSart * 76cd7ee8acSart * the kernel_map has several "submaps." submaps can only appear in 77cd7ee8acSart * the kernel_map (user processes can't use them). submaps "take over" 78cd7ee8acSart * the management of a sub-range of the kernel's address space. submaps 79cd7ee8acSart * are typically allocated at boot time and are never released. kernel 80cd7ee8acSart * virtual address space that is mapped by a submap is locked by the 81cd7ee8acSart * submap's lock -- not the kernel_map's lock. 82cd7ee8acSart * 83cd7ee8acSart * thus, the useful feature of submaps is that they allow us to break 84cd7ee8acSart * up the locking and protection of the kernel address space into smaller 85cd7ee8acSart * chunks. 86cd7ee8acSart * 8769099dcdSthib * The VM system has several standard kernel submaps: 8869099dcdSthib * kmem_map: Contains only wired kernel memory for malloc(9). 8969099dcdSthib * Note: All access to this map must be protected by splvm as 9069099dcdSthib * calls to malloc(9) are allowed in interrupt handlers. 
 * exec_map: Memory to hold arguments to system calls is allocated from
 *  this map.
 *  XXX: This is primarily used to artificially limit the number
 *  of concurrent processes doing an exec.
 * phys_map: Buffers for vmapbuf (physio) are allocated from this map.
 *
 * the kernel allocates its private memory out of special uvm_objects whose
 * reference count is set to UVM_OBJ_KERN (thus indicating that the objects
 * are "special" and never die).   all kernel objects should be thought of
 * as large, fixed-sized, sparsely populated uvm_objects.   each kernel
 * object is equal to the size of kernel virtual address space (i.e.
 * VM_KERNEL_SPACE_SIZE).
 *
 * most kernel private memory lives in kernel_object.   the only exception
 * to this is for memory that belongs to submaps that must be protected
 * by splvm().  each of these submaps manages their own pages.
 *
 * note that just because a kernel object spans the entire kernel virtual
 * address space doesn't mean that it has to be mapped into the entire space.
 * large chunks of a kernel object's space go unused either because
 * that area of kernel VM is unmapped, or there is some other type of
 * object mapped into that range (e.g. a vnode).    for submap's kernel
 * objects, the only part of the object that can ever be populated is the
 * offsets that are managed by the submap.
 *
 * note that the "offset" in a kernel object is always the kernel virtual
 * address minus the vm_map_min(kernel_map).
 * example:
 *   suppose kernel_map starts at 0xf8000000 and the kernel does a
 *   km_alloc(PAGE_SIZE, &kv_any, &kp_none, &kd_waitok)) [allocate 1 wired
 *   down page in the kernel map].  if km_alloc() returns virtual address
 *   0xf8235000, then that means that the page at offset 0x235000 in
 *   kernel_object is mapped at 0xf8235000.
 *
 * kernel objects have one other special property: when the kernel virtual
 * memory mapping them is unmapped, the backing memory in the object is
 * freed right away.  this is done with the uvm_km_pgremove() function.
 * this has to be done because there is no backing store for kernel pages
 * and no need to save them after they are no longer referenced.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kthread.h>

#include <uvm/uvm.h>

/*
 * global data structures
 */

/* The map covering all of kernel virtual address space; set at the end
 * of uvm_km_init(). */
struct vm_map *kernel_map = NULL;

/* Unconstraint range: physical allocations may come from anywhere. */
struct uvm_constraint_range no_constraint = { 0x0, (paddr_t)-1 };

/*
 * local data structures
 */

/* Backing storage for kernel_map; never freed. */
static struct vm_map kernel_map_store;

/*
 * uvm_km_init: init kernel maps and objects to reflect reality (i.e.
 * KVM already allocated for text, data, bss, and static data structures).
 *
 * => KVM is defined by [base.. base + VM_KERNEL_SPACE_SIZE].
 *    we assume that [base -> start] has already been allocated and that
 *    "end" is the end of the kernel image span.
 * => base: lowest kernel virtual address managed by kernel_map
 * => start: first address available for dynamic allocation
 * => end: upper bound of the kernel map
 */
void
uvm_km_init(vaddr_t base, vaddr_t start, vaddr_t end)
{
	/* kernel_object: for pageable anonymous kernel memory */
	uao_init();
	uvm.kernel_object = uao_create(VM_KERNEL_SPACE_SIZE, UAO_FLAG_KERNOBJ);

	/*
	 * init the map and reserve already allocated kernel space
	 * before installing.
	 */
	uvm_map_setup(&kernel_map_store, pmap_kernel(), base, end,
#ifdef KVA_GUARDPAGES
	    VM_MAP_PAGEABLE | VM_MAP_GUARDPAGES
#else
	    VM_MAP_PAGEABLE
#endif
	    );
	/* Reserve [base, start) with a fixed mapping so nothing else can
	 * allocate the range already occupied by the kernel image. */
	if (base != start && uvm_map(&kernel_map_store, &base, start - base,
	    NULL, UVM_UNKNOWN_OFFSET, 0,
	    UVM_MAPFLAG(PROT_READ | PROT_WRITE, PROT_READ | PROT_WRITE,
	    MAP_INHERIT_NONE, MADV_RANDOM, UVM_FLAG_FIXED)) != 0)
		panic("uvm_km_init: could not reserve space for kernel");

	/* Only publish the map once it is fully initialized. */
	kernel_map = &kernel_map_store;

#ifndef __HAVE_PMAP_DIRECT
	/* allow km_alloc calls before uvm_km_thread starts */
	mtx_init(&uvm_km_pages.mtx, IPL_VM);
#endif
}

/*
 * uvm_km_suballoc: allocate a submap in the kernel map.  once a submap
 * is allocated all references to that area of VM must go through it.  this
 * allows the locking of VAs in kernel_map to be broken up into regions.
 *
 * => if `fixed' is true, *min specifies where the region described
 *	by the submap must start
 * => if submap is non NULL we use that as the submap, otherwise we
 *	alloc a new map
 * => on success *min/*max are filled with the submap's bounds; the
 *	function panics (never returns NULL) on any allocation failure
 */
struct vm_map *
uvm_km_suballoc(struct vm_map *map, vaddr_t *min, vaddr_t *max, vsize_t size,
    int flags, boolean_t fixed, struct vm_map *submap)
{
	int mapflags = UVM_FLAG_NOMERGE | (fixed ? UVM_FLAG_FIXED : 0);

	size = round_page(size);	/* round up to pagesize */

	/* first allocate a blank spot in the parent map */
	if (uvm_map(map, min, size, NULL, UVM_UNKNOWN_OFFSET, 0,
	    UVM_MAPFLAG(PROT_READ | PROT_WRITE, PROT_READ | PROT_WRITE,
	    MAP_INHERIT_NONE, MADV_RANDOM, mapflags)) != 0) {
		panic("uvm_km_suballoc: unable to allocate space in parent map");
	}

	/* set VM bounds (min is filled in by uvm_map) */
	*max = *min + size;

	/* add references to pmap and create or init the submap */
	pmap_reference(vm_map_pmap(map));
	if (submap == NULL) {
		submap = uvm_map_create(vm_map_pmap(map), *min, *max, flags);
		if (submap == NULL)
			panic("uvm_km_suballoc: unable to create submap");
	} else {
		/* caller supplied static storage; just initialize it */
		uvm_map_setup(submap, vm_map_pmap(map), *min, *max, flags);
	}

	/*
	 * now let uvm_map_submap plug in it...
	 */
	if (uvm_map_submap(map, *min, *max, submap) != 0)
		panic("uvm_km_suballoc: submap allocation failed");

	return(submap);
}

/*
 * uvm_km_pgremove: remove pages from a kernel uvm_object.
 *
 * => when you unmap a part of anonymous kernel memory you want to toss
 *    the pages right away.    (this gets called from uvm_unmap_...).
 * => caller must hold uobj->vmobjlock for writing (asserted below) and
 *    uobj must be an aobj.
 */
void
uvm_km_pgremove(struct uvm_object *uobj, vaddr_t startva, vaddr_t endva)
{
	/* object offsets are kernel VAs relative to the base of kernel_map */
	const voff_t start = startva - vm_map_min(kernel_map);
	const voff_t end = endva - vm_map_min(kernel_map);
	struct vm_page *pp;
	voff_t curoff;
	int slot;
	int swpgonlydelta = 0;	/* # of swap-only slots released */

	KASSERT(UVM_OBJ_IS_AOBJ(uobj));
	KASSERT(rw_write_held(uobj->vmobjlock));

	/* tear down the translations first, then reap the pages */
	pmap_remove(pmap_kernel(), startva, endva);
	for (curoff = start ; curoff < end ; curoff += PAGE_SIZE) {
		pp = uvm_pagelookup(uobj, curoff);
		if (pp && pp->pg_flags & PG_BUSY) {
			/* page owned by someone else; sleep and retry it */
			uvm_pagewait(pp, uobj->vmobjlock, "km_pgrm");
			rw_enter(uobj->vmobjlock, RW_WRITE);
			curoff -= PAGE_SIZE; /* loop back to us */
			continue;
		}

		/* free the swap slot, then the page */
		slot = uao_dropswap(uobj, curoff >> PAGE_SHIFT);

		if (pp != NULL) {
			uvm_lock_pageq();
			uvm_pagefree(pp);
			uvm_unlock_pageq();
		} else if (slot != 0) {
			/* page existed only in swap */
			swpgonlydelta++;
		}
	}

	if (swpgonlydelta > 0) {
		KASSERT(uvmexp.swpgonly >= swpgonlydelta);
		atomic_add_int(&uvmexp.swpgonly, -swpgonlydelta);
	}
}


/*
 * uvm_km_pgremove_intrsafe: like uvm_km_pgremove(), but for "intrsafe"
 * objects
 *
 * => when you unmap a part of anonymous kernel memory you want to toss
 *    the pages right away.    (this gets called from uvm_unmap_...).
 * => none of the pages will ever be busy, and none of them will ever
 *    be on the active or inactive queues (because these objects are
 *    never allowed to "page").
 */
void
uvm_km_pgremove_intrsafe(vaddr_t start, vaddr_t end)
{
	struct vm_page *pg;
	vaddr_t va;
	paddr_t pa;

	for (va = start; va < end; va += PAGE_SIZE) {
		if (!pmap_extract(pmap_kernel(), va, &pa))
			continue;	/* nothing mapped here */
		pg = PHYS_TO_VM_PAGE(pa);
		if (pg == NULL)
			panic("uvm_km_pgremove_intrsafe: no page");
		uvm_pagefree(pg);
	}
	/* drop the kernel translations in one sweep */
	pmap_kremove(start, end - start);
}

/*
 * uvm_km_kmemalloc: lower level kernel memory allocator for malloc()
 *
 * => we map wired memory into the specified map using the obj passed in
 * => NOTE: we can return NULL even if we can wait if there is not enough
 *	free VM space in the map... caller should be prepared to handle
 *	this case.
 * => we return KVA of memory allocated
 * => flags: NOWAIT, VALLOC - just allocate VA, TRYLOCK - fail if we can't
 *	lock the map
 * => low, high, alignment, boundary, nsegs are the corresponding parameters
 *	to uvm_pglistalloc
 * => flags: ZERO - correspond to uvm_pglistalloc flags
 * => returns 0 on failure
 */
vaddr_t
uvm_km_kmemalloc_pla(struct vm_map *map, struct uvm_object *obj, vsize_t size,
    vsize_t valign, int flags, paddr_t low, paddr_t high, paddr_t alignment,
    paddr_t boundary, int nsegs)
{
	vaddr_t kva, loopva;
	voff_t offset;
	struct vm_page *pg;
	struct pglist pgl;
	int pla_flags;

	KASSERT(vm_map_pmap(map) == pmap_kernel());
	/* UVM_KMF_VALLOC => !UVM_KMF_ZERO */
	KASSERT(!(flags & UVM_KMF_VALLOC) ||
	    !(flags & UVM_KMF_ZERO));

	/* setup for call */
	size = round_page(size);
	kva = vm_map_min(map);	/* hint */
	if (nsegs == 0)
		nsegs = atop(size);	/* no contiguity requirement */

	/* allocate some virtual space */
	if (__predict_false(uvm_map(map, &kva, size, obj, UVM_UNKNOWN_OFFSET,
	    valign, UVM_MAPFLAG(PROT_READ | PROT_WRITE, PROT_READ | PROT_WRITE,
	    MAP_INHERIT_NONE, MADV_RANDOM, (flags & UVM_KMF_TRYLOCK))) != 0)) {
		return 0;
	}

	/* if all we wanted was VA, return now */
	if (flags & UVM_KMF_VALLOC) {
		return kva;
	}

	/* recover object offset from virtual address */
	if (obj != NULL)
		offset = kva - vm_map_min(kernel_map);
	else
		offset = 0;

	/*
	 * now allocate and map in the memory... note that we are the only ones
	 * whom should ever get a handle on this area of VM.
	 */
	TAILQ_INIT(&pgl);
	pla_flags = 0;
	KASSERT(uvmexp.swpgonly <= uvmexp.swpages);
	/*
	 * NOWAIT always means no sleeping; CANFAIL also avoids sleeping
	 * when swap is (nearly) exhausted, so the caller fails instead of
	 * deadlocking waiting for memory that cannot be freed.
	 */
	if ((flags & UVM_KMF_NOWAIT) ||
	    ((flags & UVM_KMF_CANFAIL) &&
	    uvmexp.swpages - uvmexp.swpgonly <= atop(size)))
		pla_flags |= UVM_PLA_NOWAIT;
	else
		pla_flags |= UVM_PLA_WAITOK;
	if (flags & UVM_KMF_ZERO)
		pla_flags |= UVM_PLA_ZERO;
	if (uvm_pglistalloc(size, low, high, alignment, boundary, &pgl, nsegs,
	    pla_flags) != 0) {
		/* Failed: release the VA we already reserved. */
		uvm_unmap(map, kva, kva + size);
		return (0);
	}

	if (obj != NULL)
		rw_enter(obj->vmobjlock, RW_WRITE);

	loopva = kva;
	while (loopva != kva + size) {
		pg = TAILQ_FIRST(&pgl);
		TAILQ_REMOVE(&pgl, pg, pageq);
		uvm_pagealloc_pg(pg, obj, offset, NULL);
		atomic_clearbits_int(&pg->pg_flags, PG_BUSY);
		UVM_PAGE_OWN(pg, NULL);

		/*
		 * map it in: note that we call pmap_enter with the map and
		 * object unlocked in case we are kmem_map.
		 */
		if (obj == NULL) {
			pmap_kenter_pa(loopva, VM_PAGE_TO_PHYS(pg),
			    PROT_READ | PROT_WRITE);
		} else {
			pmap_enter(map->pmap, loopva, VM_PAGE_TO_PHYS(pg),
			    PROT_READ | PROT_WRITE,
			    PROT_READ | PROT_WRITE | PMAP_WIRED);
		}
		loopva += PAGE_SIZE;
		offset += PAGE_SIZE;
	}
	KASSERT(TAILQ_EMPTY(&pgl));
	pmap_update(pmap_kernel());

	if (obj != NULL)
		rw_exit(obj->vmobjlock);

	return kva;
}

/*
 * uvm_km_free: free an area of kernel memory
 */
void
uvm_km_free(struct vm_map *map, vaddr_t addr, vsize_t size)
{
	uvm_unmap(map, trunc_page(addr), round_page(addr+size));
}

#if defined(__HAVE_PMAP_DIRECT)
/*
 * uvm_km_page allocator, __HAVE_PMAP_DIRECT arch
 * On architectures with machine memory direct mapped into a portion
 * of KVM, we have very little work to do.  Just get a physical page,
 * and find and return its VA.
 */
void
uvm_km_page_init(void)
{
	/* nothing */
}

void
uvm_km_page_lateinit(void)
{
	/* nothing */
}

#else
/*
 * uvm_km_page allocator, non __HAVE_PMAP_DIRECT archs
 * This is a special allocator that uses a reserve of free pages
 * to fulfill requests.
It is fast and interrupt safe, but can only 4601fca5607Stedu * return page sized regions. Its primary use is as a backend for pool. 4611fca5607Stedu * 4621fca5607Stedu * The memory returned is allocated from the larger kernel_map, sparing 4631fca5607Stedu * pressure on the small interrupt-safe kmem_map. It is wired, but 4641fca5607Stedu * not zero filled. 4651fca5607Stedu */ 4661fca5607Stedu 467b426ab7bSthib struct uvm_km_pages uvm_km_pages; 468898645e1Stedu 469151c02abStedu void uvm_km_createthread(void *); 470151c02abStedu void uvm_km_thread(void *); 471a278a794Stedu struct uvm_km_free_page *uvm_km_doputpage(struct uvm_km_free_page *); 472151c02abStedu 4731fca5607Stedu /* 4741fca5607Stedu * Allocate the initial reserve, and create the thread which will 4751fca5607Stedu * keep the reserve full. For bootstrapping, we allocate more than 4761fca5607Stedu * the lowat amount, because it may be a while before the thread is 4771fca5607Stedu * running. 4781fca5607Stedu */ 479151c02abStedu void 480151c02abStedu uvm_km_page_init(void) 481151c02abStedu { 48260819449Smiod int lowat_min; 483151c02abStedu int i; 484181c6205Sariane int len, bulk; 485181c6205Sariane vaddr_t addr; 486151c02abStedu 487b426ab7bSthib if (!uvm_km_pages.lowat) { 488cbf7b8d1Stedu /* based on physmem, calculate a good value here */ 489b426ab7bSthib uvm_km_pages.lowat = physmem / 256; 49060819449Smiod lowat_min = physmem < atop(16 * 1024 * 1024) ? 32 : 128; 491b426ab7bSthib if (uvm_km_pages.lowat < lowat_min) 492b426ab7bSthib uvm_km_pages.lowat = lowat_min; 493cbf7b8d1Stedu } 494b426ab7bSthib if (uvm_km_pages.lowat > UVM_KM_PAGES_LOWAT_MAX) 495b426ab7bSthib uvm_km_pages.lowat = UVM_KM_PAGES_LOWAT_MAX; 496b426ab7bSthib uvm_km_pages.hiwat = 4 * uvm_km_pages.lowat; 497b426ab7bSthib if (uvm_km_pages.hiwat > UVM_KM_PAGES_HIWAT_MAX) 498b426ab7bSthib uvm_km_pages.hiwat = UVM_KM_PAGES_HIWAT_MAX; 499151c02abStedu 500181c6205Sariane /* Allocate all pages in as few allocations as possible. 
*/ 501181c6205Sariane len = 0; 502181c6205Sariane bulk = uvm_km_pages.hiwat; 503181c6205Sariane while (len < uvm_km_pages.hiwat && bulk > 0) { 504181c6205Sariane bulk = MIN(bulk, uvm_km_pages.hiwat - len); 505181c6205Sariane addr = vm_map_min(kernel_map); 506181c6205Sariane if (uvm_map(kernel_map, &addr, (vsize_t)bulk << PAGE_SHIFT, 507181c6205Sariane NULL, UVM_UNKNOWN_OFFSET, 0, 5081e8cdc2eSderaadt UVM_MAPFLAG(PROT_READ | PROT_WRITE, 509e087cc70Sguenther PROT_READ | PROT_WRITE, MAP_INHERIT_NONE, 51015cd8707Sguenther MADV_RANDOM, UVM_KMF_TRYLOCK)) != 0) { 511181c6205Sariane bulk /= 2; 512181c6205Sariane continue; 513151c02abStedu } 514181c6205Sariane 515181c6205Sariane for (i = len; i < len + bulk; i++, addr += PAGE_SIZE) 516181c6205Sariane uvm_km_pages.page[i] = addr; 517181c6205Sariane len += bulk; 518181c6205Sariane } 519181c6205Sariane 520181c6205Sariane uvm_km_pages.free = len; 521181c6205Sariane for (i = len; i < UVM_KM_PAGES_HIWAT_MAX; i++) 52288ee6abdSmiod uvm_km_pages.page[i] = 0; 523151c02abStedu 524cbf7b8d1Stedu /* tone down if really high */ 525b426ab7bSthib if (uvm_km_pages.lowat > 512) 526b426ab7bSthib uvm_km_pages.lowat = 512; 52718b79dc7Sdlg } 528cbf7b8d1Stedu 52918b79dc7Sdlg void 53018b79dc7Sdlg uvm_km_page_lateinit(void) 53118b79dc7Sdlg { 532151c02abStedu kthread_create_deferred(uvm_km_createthread, NULL); 533151c02abStedu } 534151c02abStedu 535151c02abStedu void 536151c02abStedu uvm_km_createthread(void *arg) 537151c02abStedu { 538b426ab7bSthib kthread_create(uvm_km_thread, NULL, &uvm_km_pages.km_proc, "kmthread"); 539151c02abStedu } 540151c02abStedu 5411fca5607Stedu /* 5421fca5607Stedu * Endless loop. We grab pages in increments of 16 pages, then 543bde9ef78Stedu * quickly swap them into the list. 
5441fca5607Stedu */ 545151c02abStedu void 546151c02abStedu uvm_km_thread(void *arg) 547151c02abStedu { 548b426ab7bSthib vaddr_t pg[16]; 549b426ab7bSthib int i; 550a278a794Stedu int allocmore = 0; 551c8a9920fSguenther int flags; 552a278a794Stedu struct uvm_km_free_page *fp = NULL; 553151c02abStedu 55430cd4535Skettenis KERNEL_UNLOCK(); 55530cd4535Skettenis 556b426ab7bSthib for (;;) { 557b426ab7bSthib mtx_enter(&uvm_km_pages.mtx); 558a278a794Stedu if (uvm_km_pages.free >= uvm_km_pages.lowat && 559a278a794Stedu uvm_km_pages.freelist == NULL) { 5602404448fSjsg msleep_nsec(&uvm_km_pages.km_proc, &uvm_km_pages.mtx, 5612404448fSjsg PVM, "kmalloc", INFSLP); 562b426ab7bSthib } 563a278a794Stedu allocmore = uvm_km_pages.free < uvm_km_pages.lowat; 564a278a794Stedu fp = uvm_km_pages.freelist; 565a278a794Stedu uvm_km_pages.freelist = NULL; 566a278a794Stedu uvm_km_pages.freelistlen = 0; 567d925b6f0Sthib mtx_leave(&uvm_km_pages.mtx); 568b426ab7bSthib 569a278a794Stedu if (allocmore) { 570c8a9920fSguenther /* 571c8a9920fSguenther * If there was nothing on the freelist, then we 572c8a9920fSguenther * must obtain at least one page to make progress. 573c8a9920fSguenther * So, only use UVM_KMF_TRYLOCK for the first page 574c8a9920fSguenther * if fp != NULL 575c8a9920fSguenther */ 5761e8cdc2eSderaadt flags = UVM_MAPFLAG(PROT_READ | PROT_WRITE, 57715cd8707Sguenther PROT_READ | PROT_WRITE, MAP_INHERIT_NONE, 57815cd8707Sguenther MADV_RANDOM, fp != NULL ? 
UVM_KMF_TRYLOCK : 0); 5796c0aa6dcStedu memset(pg, 0, sizeof(pg)); 580b426ab7bSthib for (i = 0; i < nitems(pg); i++) { 581181c6205Sariane pg[i] = vm_map_min(kernel_map); 582181c6205Sariane if (uvm_map(kernel_map, &pg[i], PAGE_SIZE, 583c8a9920fSguenther NULL, UVM_UNKNOWN_OFFSET, 0, flags) != 0) { 584181c6205Sariane pg[i] = 0; 585181c6205Sariane break; 586181c6205Sariane } 587c8a9920fSguenther 588c8a9920fSguenther /* made progress, so don't sleep for more */ 5891e8cdc2eSderaadt flags = UVM_MAPFLAG(PROT_READ | PROT_WRITE, 59015cd8707Sguenther PROT_READ | PROT_WRITE, MAP_INHERIT_NONE, 59115cd8707Sguenther MADV_RANDOM, UVM_KMF_TRYLOCK); 592b426ab7bSthib } 593b426ab7bSthib 594b426ab7bSthib mtx_enter(&uvm_km_pages.mtx); 595b426ab7bSthib for (i = 0; i < nitems(pg); i++) { 596a278a794Stedu if (uvm_km_pages.free == 597a278a794Stedu nitems(uvm_km_pages.page)) 598e2cd81a3Smickey break; 599181c6205Sariane else if (pg[i] != 0) 600a278a794Stedu uvm_km_pages.page[uvm_km_pages.free++] 601a278a794Stedu = pg[i]; 602151c02abStedu } 603b426ab7bSthib wakeup(&uvm_km_pages.free); 604b426ab7bSthib mtx_leave(&uvm_km_pages.mtx); 605151c02abStedu 606b426ab7bSthib /* Cleanup left-over pages (if any). 
*/ 607181c6205Sariane for (; i < nitems(pg); i++) { 608181c6205Sariane if (pg[i] != 0) { 609181c6205Sariane uvm_unmap(kernel_map, 610181c6205Sariane pg[i], pg[i] + PAGE_SIZE); 611181c6205Sariane } 612181c6205Sariane } 613b426ab7bSthib } 614a278a794Stedu while (fp) { 615a278a794Stedu fp = uvm_km_doputpage(fp); 616a278a794Stedu } 617a278a794Stedu } 618b426ab7bSthib } 619b426ab7bSthib 620a278a794Stedu struct uvm_km_free_page * 621a278a794Stedu uvm_km_doputpage(struct uvm_km_free_page *fp) 622a278a794Stedu { 623a278a794Stedu vaddr_t va = (vaddr_t)fp; 624a278a794Stedu struct vm_page *pg; 62528cfa0a3Sthib int freeva = 1; 626a278a794Stedu struct uvm_km_free_page *nextfp = fp->next; 627a278a794Stedu 62859f84dd2Soga pg = uvm_atopg(va); 629b426ab7bSthib 630d925b6f0Sthib pmap_kremove(va, PAGE_SIZE); 631b426ab7bSthib pmap_update(kernel_map->pmap); 632b426ab7bSthib 633b426ab7bSthib mtx_enter(&uvm_km_pages.mtx); 63428cfa0a3Sthib if (uvm_km_pages.free < uvm_km_pages.hiwat) { 635b426ab7bSthib uvm_km_pages.page[uvm_km_pages.free++] = va; 63628cfa0a3Sthib freeva = 0; 63728cfa0a3Sthib } 638b426ab7bSthib mtx_leave(&uvm_km_pages.mtx); 63928cfa0a3Sthib 64028cfa0a3Sthib if (freeva) 641181c6205Sariane uvm_unmap(kernel_map, va, va + PAGE_SIZE); 642b426ab7bSthib 643b426ab7bSthib uvm_pagefree(pg); 644a278a794Stedu return (nextfp); 645151c02abStedu } 646a278a794Stedu #endif /* !__HAVE_PMAP_DIRECT */ 647ea41019aSart 648ea41019aSart void * 649803ae8aaSart km_alloc(size_t sz, const struct kmem_va_mode *kv, 650803ae8aaSart const struct kmem_pa_mode *kp, const struct kmem_dyn_mode *kd) 651ea41019aSart { 652ea41019aSart struct vm_map *map; 653ea41019aSart struct vm_page *pg; 654ea41019aSart struct pglist pgl; 655ea41019aSart int mapflags = 0; 656ea41019aSart vm_prot_t prot; 6576d52d019Skettenis paddr_t pla_align; 658ea41019aSart int pla_flags; 65912e839c5Sariane int pla_maxseg; 660ede08b28Sjan vaddr_t va, sva = 0; 661ea41019aSart 662ea41019aSart KASSERT(sz == round_page(sz)); 663ea41019aSart 
664ea41019aSart TAILQ_INIT(&pgl); 665ea41019aSart 666ea41019aSart if (kp->kp_nomem || kp->kp_pageable) 667ea41019aSart goto alloc_va; 668ea41019aSart 669ea41019aSart pla_flags = kd->kd_waitok ? UVM_PLA_WAITOK : UVM_PLA_NOWAIT; 670ea41019aSart pla_flags |= UVM_PLA_TRYCONTIG; 671ea41019aSart if (kp->kp_zero) 672ea41019aSart pla_flags |= UVM_PLA_ZERO; 673ea41019aSart 6746d52d019Skettenis pla_align = kp->kp_align; 6756d52d019Skettenis #ifdef __HAVE_PMAP_DIRECT 6766d52d019Skettenis if (pla_align < kv->kv_align) 6776d52d019Skettenis pla_align = kv->kv_align; 6786d52d019Skettenis #endif 67912e839c5Sariane pla_maxseg = kp->kp_maxseg; 68012e839c5Sariane if (pla_maxseg == 0) 68112e839c5Sariane pla_maxseg = sz / PAGE_SIZE; 68212e839c5Sariane 683ea41019aSart if (uvm_pglistalloc(sz, kp->kp_constraint->ucr_low, 6846d52d019Skettenis kp->kp_constraint->ucr_high, pla_align, kp->kp_boundary, 68512e839c5Sariane &pgl, pla_maxseg, pla_flags)) { 686ea41019aSart return (NULL); 687ea41019aSart } 688ea41019aSart 689ea41019aSart #ifdef __HAVE_PMAP_DIRECT 690ae931b54Sart /* 6916d52d019Skettenis * Only use direct mappings for single page or single segment 6926d52d019Skettenis * allocations. 
693ae931b54Sart */ 6946d52d019Skettenis if (kv->kv_singlepage || kp->kp_maxseg == 1) { 695ea41019aSart TAILQ_FOREACH(pg, &pgl, pageq) { 6966d52d019Skettenis va = pmap_map_direct(pg); 697ea41019aSart if (pg == TAILQ_FIRST(&pgl)) 6986d52d019Skettenis sva = va; 699ea41019aSart } 7006d52d019Skettenis return ((void *)sva); 701ea41019aSart } 702ea41019aSart #endif 703ea41019aSart alloc_va: 7041e8cdc2eSderaadt prot = PROT_READ | PROT_WRITE; 705ea41019aSart 706ea41019aSart if (kp->kp_pageable) { 707ea41019aSart KASSERT(kp->kp_object); 708ea41019aSart KASSERT(!kv->kv_singlepage); 709ea41019aSart } else { 710ea41019aSart KASSERT(kp->kp_object == NULL); 711ea41019aSart } 712ea41019aSart 713ea41019aSart if (kv->kv_singlepage) { 714ea41019aSart KASSERT(sz == PAGE_SIZE); 715ea41019aSart #ifdef __HAVE_PMAP_DIRECT 716fd49af7aSart panic("km_alloc: DIRECT single page"); 717ea41019aSart #else 718ea41019aSart mtx_enter(&uvm_km_pages.mtx); 719ea41019aSart while (uvm_km_pages.free == 0) { 720ea41019aSart if (kd->kd_waitok == 0) { 721ea41019aSart mtx_leave(&uvm_km_pages.mtx); 722ec684ed3Sart uvm_pglistfree(&pgl); 723ea41019aSart return NULL; 724ea41019aSart } 7252404448fSjsg msleep_nsec(&uvm_km_pages.free, &uvm_km_pages.mtx, 7262404448fSjsg PVM, "getpage", INFSLP); 727ea41019aSart } 728ea41019aSart va = uvm_km_pages.page[--uvm_km_pages.free]; 729ea41019aSart if (uvm_km_pages.free < uvm_km_pages.lowat && 730ea41019aSart curproc != uvm_km_pages.km_proc) { 731ea41019aSart if (kd->kd_slowdown) 732ea41019aSart *kd->kd_slowdown = 1; 733ea41019aSart wakeup(&uvm_km_pages.km_proc); 734ea41019aSart } 735ea41019aSart mtx_leave(&uvm_km_pages.mtx); 736ea41019aSart #endif 737ea41019aSart } else { 738ea41019aSart struct uvm_object *uobj = NULL; 739ea41019aSart 740ea41019aSart if (kd->kd_trylock) 741ea41019aSart mapflags |= UVM_KMF_TRYLOCK; 742ea41019aSart 743ea41019aSart if (kp->kp_object) 744ea41019aSart uobj = *kp->kp_object; 745ea41019aSart try_map: 746ea41019aSart map = *kv->kv_map; 747ea41019aSart 
va = vm_map_min(map); 748ea41019aSart if (uvm_map(map, &va, sz, uobj, kd->kd_prefer, 749e087cc70Sguenther kv->kv_align, UVM_MAPFLAG(prot, prot, MAP_INHERIT_NONE, 75015cd8707Sguenther MADV_RANDOM, mapflags))) {
/* No VA available: sleep for space if the modes allow it, else fail. */
751ea41019aSart if (kv->kv_wait && kd->kd_waitok) { 752744ebc9fSmpi tsleep_nsec(map, PVM, "km_allocva", INFSLP); 753ea41019aSart goto try_map; 754ea41019aSart } 755ec684ed3Sart uvm_pglistfree(&pgl); 756ea41019aSart return (NULL); 757ea41019aSart } 758ea41019aSart }
/*
 * Enter a translation for each allocated page.  Pageable allocations
 * get managed, wired pmap_enter() mappings; everything else gets
 * unmanaged pmap_kenter_pa() mappings.
 */
759ea41019aSart sva = va; 760ea41019aSart TAILQ_FOREACH(pg, &pgl, pageq) { 761ea41019aSart if (kp->kp_pageable) 762ea41019aSart pmap_enter(pmap_kernel(), va, VM_PAGE_TO_PHYS(pg), 763ea41019aSart prot, prot | PMAP_WIRED); 764ea41019aSart else 765ea41019aSart pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg), prot); 766ea41019aSart va += PAGE_SIZE; 767ea41019aSart } 7687023c098Smatthew pmap_update(pmap_kernel()); 769ea41019aSart return ((void *)sva); 770ea41019aSart } 771ea41019aSart 
/*
 * km_free: release memory obtained from km_alloc().
 *
 * v/sz is the address and size of the original allocation.
 * NOTE(review): kv/kp appear to be required to match the VA/PA modes
 * used at allocation time (the free path mirrors each allocation
 * path) — confirm against callers.
 */
772ea41019aSart void 773803ae8aaSart km_free(void *v, size_t sz, const struct kmem_va_mode *kv, 774803ae8aaSart const struct kmem_pa_mode *kp) 775ea41019aSart { 776ea41019aSart vaddr_t sva, eva, va; 777ea41019aSart struct vm_page *pg; 778ea41019aSart struct pglist pgl; 779ea41019aSart 7806d52d019Skettenis sva = (vaddr_t)v; 7816d52d019Skettenis eva = sva + sz; 782ea41019aSart 
/* No physical pages behind this range; only the VA must be released. */
7836d52d019Skettenis if (kp->kp_nomem) 784ea41019aSart goto free_va; 785ea41019aSart 786fd49af7aSart #ifdef __HAVE_PMAP_DIRECT
/*
 * Direct-map case: recover each page from the direct mapping and
 * free it.  Returns without uvm_unmap() — no mapped kernel VA range
 * was consumed for these allocations.
 */
7876d52d019Skettenis if (kv->kv_singlepage || kp->kp_maxseg == 1) { 7886d52d019Skettenis TAILQ_INIT(&pgl); 7896d52d019Skettenis for (va = sva; va < eva; va += PAGE_SIZE) { 790ea41019aSart pg = pmap_unmap_direct(va); 7916d52d019Skettenis TAILQ_INSERT_TAIL(&pgl, pg, pageq); 7926d52d019Skettenis } 7936d52d019Skettenis uvm_pglistfree(&pgl); 7946d52d019Skettenis return; 7956d52d019Skettenis } 796fd49af7aSart #else
/*
 * No direct map: single pages are queued on uvm_km_pages.freelist
 * for deferred teardown; wake the km thread once the backlog has
 * grown past 16 entries.
 */
7976d52d019Skettenis if (kv->kv_singlepage) { 798fd49af7aSart struct uvm_km_free_page *fp = v; 7996d52d019Skettenis 800fd49af7aSart mtx_enter(&uvm_km_pages.mtx); 801fd49af7aSart fp->next = uvm_km_pages.freelist; 802a434b199Skettenis uvm_km_pages.freelist = fp; 803fd49af7aSart if (uvm_km_pages.freelistlen++ > 16) 804fd49af7aSart wakeup(&uvm_km_pages.km_proc); 805fd49af7aSart mtx_leave(&uvm_km_pages.mtx); 806ea41019aSart return; 807ea41019aSart } 8086d52d019Skettenis #endif 809ea41019aSart 810ea41019aSart if (kp->kp_pageable) {
/* Managed mappings (entered with pmap_enter()): just remove them. */
811ea41019aSart pmap_remove(pmap_kernel(), sva, eva); 812fd49af7aSart pmap_update(pmap_kernel()); 813ea41019aSart } else {
/*
 * Unmanaged pmap_kenter_pa() mappings: look up each backing
 * page so it can be freed, then drop the translations.
 * Holes (pmap_extract failure) are skipped silently.
 */
814ea41019aSart TAILQ_INIT(&pgl); 815ea41019aSart for (va = sva; va < eva; va += PAGE_SIZE) { 816ea41019aSart paddr_t pa; 817ea41019aSart 818ea41019aSart if (!pmap_extract(pmap_kernel(), va, &pa)) 819ea41019aSart continue; 820ea41019aSart 821ea41019aSart pg = PHYS_TO_VM_PAGE(pa); 822fd49af7aSart if (pg == NULL) { 8234123b6a7Sderaadt panic("km_free: unmanaged page 0x%lx", pa); 824ea41019aSart } 825fd49af7aSart TAILQ_INSERT_TAIL(&pgl, pg, pageq); 826ea41019aSart } 827ea41019aSart pmap_kremove(sva, sz); 828ea41019aSart pmap_update(pmap_kernel()); 829fd49af7aSart uvm_pglistfree(&pgl); 830fd49af7aSart } 831ea41019aSart free_va:
/* Release the VA range and wake anyone sleeping on this map. */
832ea41019aSart uvm_unmap(*kv->kv_map, sva, eva); 833ea41019aSart if (kv->kv_wait) 834ea41019aSart wakeup(*kv->kv_map); 835ea41019aSart } 836ea41019aSart 
/* VA modes: which map (or mechanism) supplies virtual space. */
837803ae8aaSart const struct kmem_va_mode kv_any = { 838ea41019aSart .kv_map = &kernel_map, 839ea41019aSart }; 840ea41019aSart 841803ae8aaSart const struct kmem_va_mode kv_intrsafe = { 842ea41019aSart .kv_map = &kmem_map, 843ea41019aSart }; 844ea41019aSart 
/* Single-page allocations served from the uvm_km_pages pool. */
845803ae8aaSart const struct kmem_va_mode kv_page = { 846ea41019aSart .kv_singlepage = 1 847ea41019aSart }; 848ea41019aSart 
/* PA modes: constraints on the backing physical pages. */
849803ae8aaSart const struct kmem_pa_mode kp_dirty = { 850ea41019aSart .kp_constraint = &no_constraint 851ea41019aSart }; 852ea41019aSart 853803ae8aaSart const struct kmem_pa_mode kp_dma = { 854ea41019aSart .kp_constraint = &dma_constraint 855ea41019aSart }; 856ea41019aSart 85712e839c5Sariane 
/* Pages within the dma_constraint range, in a single segment. */
const struct kmem_pa_mode kp_dma_contig = { 85812e839c5Sariane .kp_constraint = &dma_constraint, 85912e839c5Sariane .kp_maxseg = 1 86012e839c5Sariane }; 86112e839c5Sariane 
/* Pages within the dma_constraint range, zero-filled. */
862803ae8aaSart const struct kmem_pa_mode kp_dma_zero = { 863ea41019aSart .kp_constraint = &dma_constraint, 864ea41019aSart .kp_zero = 1 865ea41019aSart }; 866ea41019aSart 
/* Zero-filled pages with no physical-address constraint. */
867803ae8aaSart const struct kmem_pa_mode kp_zero = { 868ea41019aSart .kp_constraint = &no_constraint, 869ea41019aSart .kp_zero = 1 870ea41019aSart }; 871ea41019aSart 
/* Pageable memory backed by uvm.kernel_object. */
872803ae8aaSart const struct kmem_pa_mode kp_pageable = { 873ea41019aSart .kp_object = &uvm.kernel_object, 874ea41019aSart .kp_pageable = 1 875ea41019aSart /* XXX - kp_nomem, maybe, but we'll need to fix km_free. */ 876ea41019aSart }; 877ea41019aSart 
/* No backing pages at all: virtual address space only. */
878803ae8aaSart const struct kmem_pa_mode kp_none = { 879ea41019aSart .kp_nomem = 1 880ea41019aSart }; 881ea41019aSart 
/*
 * Dynamic-allocation modes: sleeping/locking policy for km_alloc().
 * All use UVM_UNKNOWN_OFFSET, i.e. no preferred placement address.
 */
882803ae8aaSart const struct kmem_dyn_mode kd_waitok = { 883ea41019aSart .kd_waitok = 1, 884ea41019aSart .kd_prefer = UVM_UNKNOWN_OFFSET 885ea41019aSart }; 886ea41019aSart 887803ae8aaSart const struct kmem_dyn_mode kd_nowait = { 888ea41019aSart .kd_prefer = UVM_UNKNOWN_OFFSET 889ea41019aSart }; 890ea41019aSart 
/* Try-lock the map instead of sleeping on map contention. */
891803ae8aaSart const struct kmem_dyn_mode kd_trylock = { 892ea41019aSart .kd_trylock = 1, 893ea41019aSart .kd_prefer = UVM_UNKNOWN_OFFSET 894ea41019aSart }; 895