/*	$NetBSD: uvm_km.c,v 1.166 2024/12/07 23:19:07 chs Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_kern.c   8.3 (Berkeley) 1/12/94
 * from: Id: uvm_km.c,v 1.1.2.14 1998/02/06 05:19:27 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * uvm_km.c: handle kernel memory allocation and management
 */

/*
 * overview of kernel memory management:
 *
 * the kernel virtual address space is mapped by "kernel_map."   kernel_map
 * starts at VM_MIN_KERNEL_ADDRESS and goes to VM_MAX_KERNEL_ADDRESS.
 * note that VM_MIN_KERNEL_ADDRESS is equal to vm_map_min(kernel_map).
 *
 * the kernel_map has several "submaps."   submaps can only appear in
 * the kernel_map (user processes can't use them).   submaps "take over"
 * the management of a sub-range of the kernel's address space.  submaps
 * are typically allocated at boot time and are never released.   kernel
 * virtual address space that is mapped by a submap is locked by the
 * submap's lock -- not the kernel_map's lock.
 *
 * thus, the useful feature of submaps is that they allow us to break
 * up the locking and protection of the kernel address space into smaller
 * chunks.
 *
 * the vm system has several standard kernel submaps/arenas, including:
 *   kmem_arena => used for kmem/pool (memoryallocators(9))
 *   pager_map => used to map "buf" structures into kernel space
 *   exec_map => used during exec to handle exec args
 *   etc...
 *
 * The kmem_arena is a "special submap", as it lives in a fixed map entry
 * within the kernel_map and is controlled by vmem(9).
 *
 * the kernel allocates its private memory out of special uvm_objects whose
 * reference count is set to UVM_OBJ_KERN (thus indicating that the objects
 * are "special" and never die).   all kernel objects should be thought of
 * as large, fixed-sized, sparsely populated uvm_objects.   each kernel
 * object is equal to the size of kernel virtual address space (i.e. the
 * value "VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS").
 *
 * note that just because a kernel object spans the entire kernel virtual
 * address space doesn't mean that it has to be mapped into the entire space.
 * large chunks of a kernel object's space go unused either because
 * that area of kernel VM is unmapped, or there is some other type of
 * object mapped into that range (e.g. a vnode).    for a submap's kernel
 * object, the only parts of the object that can ever be populated are the
 * offsets that are managed by the submap.
 *
 * note that the "offset" in a kernel object is always the kernel virtual
 * address minus the VM_MIN_KERNEL_ADDRESS (aka vm_map_min(kernel_map)).
 * example:
 *   suppose VM_MIN_KERNEL_ADDRESS is 0xf8000000 and the kernel does a
 *   uvm_km_alloc(kernel_map, PAGE_SIZE) [allocate 1 wired down page in the
 *   kernel map].    if uvm_km_alloc returns virtual address 0xf8235000,
 *   then that means that the page at offset 0x235000 in kernel_object is
 *   mapped at 0xf8235000.
 *
 * kernel objects have one other special property: when the kernel virtual
 * memory mapping them is unmapped, the backing memory in the object is
 * freed right away.   this is done with the uvm_km_pgremove() function.
 * this has to be done because there is no backing store for kernel pages
 * and no need to save them after they are no longer referenced.
 *
 * Generic arenas:
 *
 * kmem_arena:
 *	Main arena controlling the kernel KVA used by other arenas.
 *
 * kmem_va_arena:
 *	Implements quantum caching in order to speed up allocations and
 *	reduce fragmentation.  The kmem(9) subsystem, and pool(9) (unless
 *	a pool is created with a custom meta-data allocator), use this arena.
 *
 * Arenas for meta-data allocations are used by vmem(9) and pool(9).
 * These arenas cannot use the quantum cache.  However, kmem_va_meta_arena
 * compensates for this by importing larger chunks from kmem_arena.
 *
 * kmem_va_meta_arena:
 *	Space for meta-data.
 *
 * kmem_meta_arena:
 *	Imports from kmem_va_meta_arena.  Allocations from this arena are
 *	backed with pages.
 *
 * Arena stacking:
 *
 *	kmem_arena
 *		kmem_va_arena
 *		kmem_va_meta_arena
 *			kmem_meta_arena
 */
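
/*
 * An illustrative (and simplified) sketch of the path a small kmem(9)
 * allocation takes through the arena stack above:
 *
 *	kmem_alloc(sz, KM_SLEEP)
 *	  -> pool_cache_get() on the matching kmem pool cache
 *	    -> backing pages via uvm_km_kmem_alloc(kmem_va_arena, ...)
 *	      -> kmem_va_arena serves small sizes from its quantum cache
 *	         and imports fresh KVA from kmem_arena as needed
 */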

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_km.c,v 1.166 2024/12/07 23:19:07 chs Exp $");

#include "opt_uvmhist.h"

#include "opt_kmempages.h"

#ifndef NKMEMPAGES
#define NKMEMPAGES 0
#endif

/*
 * Defaults for the lower and upper bounds of the kmem_arena page count.
 * Can be overridden by kernel config options.
 */
#ifndef NKMEMPAGES_MIN
#define NKMEMPAGES_MIN NKMEMPAGES_MIN_DEFAULT
#endif

#ifndef NKMEMPAGES_MAX
#define NKMEMPAGES_MAX NKMEMPAGES_MAX_DEFAULT
#endif
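
/*
 * For example, both bounds can be overridden from the kernel config
 * file (illustrative values):
 *
 *	options NKMEMPAGES_MIN=2048
 *	options NKMEMPAGES_MAX=262144
 */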


#include <sys/param.h>
#include <sys/systm.h>
#include <sys/atomic.h>
#include <sys/proc.h>
#include <sys/pool.h>
#include <sys/vmem.h>
#include <sys/vmem_impl.h>
#include <sys/kmem.h>
#include <sys/msan.h>

#include <uvm/uvm.h>

/*
 * global data structures
 */

struct vm_map *kernel_map = NULL;

/*
 * local data structures
 */

static struct vm_map		kernel_map_store;
static struct vm_map_entry	kernel_image_mapent_store;
static struct vm_map_entry	kernel_kmem_mapent_store;

size_t nkmempages = 0;
vaddr_t kmembase;
vsize_t kmemsize;

static struct vmem kmem_arena_store;
vmem_t *kmem_arena = NULL;
static struct vmem kmem_va_arena_store;
vmem_t *kmem_va_arena;

/*
 * kmeminit_nkmempages: calculate the size of kmem_arena.
 */
void
kmeminit_nkmempages(void)
{
	size_t npages;

	if (nkmempages != 0) {
		/*
		 * It's already been set (by us being here before);
		 * bail out now.
		 */
		return;
	}

#if defined(NKMEMPAGES_MAX_UNLIMITED) && !defined(KMSAN)
	/*
	 * The extra 1/9 here is to account for uvm_km_va_starved_p()
	 * wanting to keep 10% of kmem virtual space free.
	 * The intent is that on "unlimited" platforms we should be able
	 * to allocate all of physical memory as kmem without behaving
	 * as though we were running short of kmem virtual space.
	 */
	npages = (physmem * 10) / 9;
#else

#if defined(KMSAN)
	npages = (physmem / 4);
#elif defined(PMAP_MAP_POOLPAGE)
	npages = (physmem / 4);
#else
	npages = (physmem / 3) * 2;
#endif /* defined(PMAP_MAP_POOLPAGE) */

#if !defined(NKMEMPAGES_MAX_UNLIMITED)
	if (npages > NKMEMPAGES_MAX)
		npages = NKMEMPAGES_MAX;
#endif

#endif

	if (npages < NKMEMPAGES_MIN)
		npages = NKMEMPAGES_MIN;

	nkmempages = npages;
}
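
/*
 * A worked example with illustrative numbers: on an
 * NKMEMPAGES_MAX_UNLIMITED platform with 4kB pages and 1GB of RAM
 * (physmem = 262144), the computation above yields
 * npages = 262144 * 10 / 9 = 291271.  Once uvm_km_va_starved_p()
 * holds back 10% of that as free KVA, roughly physmem pages remain
 * allocatable as kmem, as intended.
 */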

/*
 * uvm_km_bootstrap: init kernel maps and objects to reflect reality (i.e.
 * KVM already allocated for text, data, bss, and static data structures).
 *
 * => KVM is defined by VM_MIN_KERNEL_ADDRESS/VM_MAX_KERNEL_ADDRESS.
 *    we assume that [vmin -> start] has already been allocated and that
 *    "end" is the end.
 */

void
uvm_km_bootstrap(vaddr_t start, vaddr_t end)
{
	bool kmem_arena_small;
	vaddr_t base = VM_MIN_KERNEL_ADDRESS;
	struct uvm_map_args args;
	int error;

	UVMHIST_FUNC(__func__);
	UVMHIST_CALLARGS(maphist, "start=%#jx end=%#jx", start, end, 0,0);

	kmeminit_nkmempages();
	kmemsize = (vsize_t)nkmempages * PAGE_SIZE;
	kmem_arena_small = kmemsize < 64 * 1024 * 1024;

	UVMHIST_LOG(maphist, "kmemsize=%#jx", kmemsize, 0,0,0);

	/*
	 * next, init kernel memory objects.
	 */

	/* kernel_object: for pageable anonymous kernel memory */
	uvm_kernel_object = uao_create(VM_MAX_KERNEL_ADDRESS -
				VM_MIN_KERNEL_ADDRESS, UAO_FLAG_KERNOBJ);

	/*
	 * init the map and reserve any space that might already
	 * have been allocated kernel space before installing.
	 */

	uvm_map_setup(&kernel_map_store, base, end, VM_MAP_PAGEABLE);
	kernel_map_store.pmap = pmap_kernel();
	if (start != base) {
		error = uvm_map_prepare(&kernel_map_store,
		    base, start - base,
		    NULL, UVM_UNKNOWN_OFFSET, 0,
		    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
		    		UVM_ADV_RANDOM, UVM_FLAG_FIXED), &args);
		if (!error) {
			kernel_image_mapent_store.flags =
			    UVM_MAP_KERNEL | UVM_MAP_STATIC | UVM_MAP_NOMERGE;
			error = uvm_map_enter(&kernel_map_store, &args,
			    &kernel_image_mapent_store);
		}

		if (error)
			panic(
			    "uvm_km_bootstrap: could not reserve space for kernel");

		kmembase = args.uma_start + args.uma_size;
	} else {
		kmembase = base;
	}

	error = uvm_map_prepare(&kernel_map_store,
	    kmembase, kmemsize,
	    NULL, UVM_UNKNOWN_OFFSET, 0,
	    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
	    		UVM_ADV_RANDOM, UVM_FLAG_FIXED), &args);
	if (!error) {
		kernel_kmem_mapent_store.flags =
		    UVM_MAP_KERNEL | UVM_MAP_STATIC | UVM_MAP_NOMERGE;
		error = uvm_map_enter(&kernel_map_store, &args,
		    &kernel_kmem_mapent_store);
	}

	if (error)
		panic("uvm_km_bootstrap: could not reserve kernel kmem");

	/*
	 * install!
	 */

	kernel_map = &kernel_map_store;

	pool_subsystem_init();

	kmem_arena = vmem_init(&kmem_arena_store, "kmem",
	    kmembase, kmemsize, PAGE_SIZE, NULL, NULL, NULL,
	    0, VM_NOSLEEP | VM_BOOTSTRAP, IPL_VM);
#ifdef PMAP_GROWKERNEL
	/*
	 * kmem_arena VA allocations happen independently of uvm_map.
	 * grow kernel to accommodate the kmem_arena.
	 */
	if (uvm_maxkaddr < kmembase + kmemsize) {
		uvm_maxkaddr = pmap_growkernel(kmembase + kmemsize);
		KASSERTMSG(uvm_maxkaddr >= kmembase + kmemsize,
		    "%#"PRIxVADDR" %#"PRIxVADDR" %#"PRIxVSIZE,
		    uvm_maxkaddr, kmembase, kmemsize);
	}
#endif

	vmem_subsystem_init(kmem_arena);

	UVMHIST_LOG(maphist, "kmem vmem created (base=%#jx, size=%#jx)",
	    kmembase, kmemsize, 0,0);

	kmem_va_arena = vmem_init(&kmem_va_arena_store, "kva",
	    0, 0, PAGE_SIZE, vmem_alloc, vmem_free, kmem_arena,
	    (kmem_arena_small ? 4 : VMEM_QCACHE_IDX_MAX) * PAGE_SIZE,
	    VM_NOSLEEP, IPL_VM);

	UVMHIST_LOG(maphist, "<- done", 0,0,0,0);
}

/*
 * uvm_km_init: init the kernel map's virtual memory caches
 * and start the pool/kmem allocator.
 */
void
uvm_km_init(void)
{
	kmem_init();
}

/*
 * uvm_km_suballoc: allocate a submap in the kernel map.   once a submap
 * is allocated all references to that area of VM must go through it.  this
 * allows the locking of VAs in kernel_map to be broken up into regions.
 *
 * => if `fixed' is true, *vmin specifies where the region described
 *    by the submap must start
 * => if submap is non NULL we use that as the submap, otherwise we
 *	alloc a new map
 */
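
/*
 * For example, a boot-time submap in the style of pager_map could be
 * set up with something like the following sketch (the size and flags
 * are illustrative):
 *
 *	vaddr_t sva, eva;
 *	struct vm_map *m;
 *
 *	m = uvm_km_suballoc(kernel_map, &sva, &eva, PAGER_MAP_SIZE,
 *	    0, false, NULL);
 */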

struct vm_map *
uvm_km_suballoc(struct vm_map *map, vaddr_t *vmin /* IN/OUT */,
    vaddr_t *vmax /* OUT */, vsize_t size, int flags, bool fixed,
    struct vm_map *submap)
{
	int mapflags = UVM_FLAG_NOMERGE | (fixed ? UVM_FLAG_FIXED : 0);
	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);

	KASSERT(vm_map_pmap(map) == pmap_kernel());

	size = round_page(size);	/* round up to pagesize */

	/*
	 * first allocate a blank spot in the parent map
	 */

	if (uvm_map(map, vmin, size, NULL, UVM_UNKNOWN_OFFSET, 0,
	    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
	    UVM_ADV_RANDOM, mapflags)) != 0) {
		panic("%s: unable to allocate space in parent map", __func__);
	}

	/*
	 * set VM bounds (vmin is filled in by uvm_map)
	 */

	*vmax = *vmin + size;

	/*
	 * add references to pmap and create or init the submap
	 */

	pmap_reference(vm_map_pmap(map));
	if (submap == NULL) {
		submap = kmem_alloc(sizeof(*submap), KM_SLEEP);
	}
	uvm_map_setup(submap, *vmin, *vmax, flags);
	submap->pmap = vm_map_pmap(map);

	/*
	 * now let uvm_map_submap plug it in...
	 */

	if (uvm_map_submap(map, *vmin, *vmax, submap) != 0)
		panic("uvm_km_suballoc: submap allocation failed");

	return(submap);
}

/*
 * uvm_km_pgremove: remove pages from a kernel uvm_object and KVA.
 */

void
uvm_km_pgremove(vaddr_t startva, vaddr_t endva)
{
	struct uvm_object * const uobj = uvm_kernel_object;
	const voff_t start = startva - vm_map_min(kernel_map);
	const voff_t end = endva - vm_map_min(kernel_map);
	struct vm_page *pg;
	voff_t curoff, nextoff;
	int swpgonlydelta = 0;
	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);

	KASSERT(VM_MIN_KERNEL_ADDRESS <= startva);
	KASSERT(startva < endva);
	KASSERT(endva <= VM_MAX_KERNEL_ADDRESS);

	rw_enter(uobj->vmobjlock, RW_WRITER);
	pmap_remove(pmap_kernel(), startva, endva);
	for (curoff = start; curoff < end; curoff = nextoff) {
		nextoff = curoff + PAGE_SIZE;
		pg = uvm_pagelookup(uobj, curoff);
		if (pg != NULL && pg->flags & PG_BUSY) {
			uvm_pagewait(pg, uobj->vmobjlock, "km_pgrm");
			rw_enter(uobj->vmobjlock, RW_WRITER);
			nextoff = curoff;
			continue;
		}

		/*
		 * free the swap slot, then the page.
		 */

		if (pg == NULL &&
		    uao_find_swslot(uobj, curoff >> PAGE_SHIFT) > 0) {
			swpgonlydelta++;
		}
		uao_dropswap(uobj, curoff >> PAGE_SHIFT);
		if (pg != NULL) {
			uvm_pagefree(pg);
		}
	}
	rw_exit(uobj->vmobjlock);

	if (swpgonlydelta > 0) {
		KASSERT(uvmexp.swpgonly >= swpgonlydelta);
		atomic_add_int(&uvmexp.swpgonly, -swpgonlydelta);
	}
}


/*
 * uvm_km_pgremove_intrsafe: like uvm_km_pgremove(), but for non-object-backed
 *    regions.
 *
 * => when you unmap a part of anonymous kernel memory you want to toss
 *    the pages right away.    (this is called from uvm_unmap_...).
 * => none of the pages will ever be busy, and none of them will ever
 *    be on the active or inactive queues (because they have no object).
 */

void
uvm_km_pgremove_intrsafe(struct vm_map *map, vaddr_t start, vaddr_t end)
{
#define __PGRM_BATCH 16
	struct vm_page *pg;
	paddr_t pa[__PGRM_BATCH];
	int npgrm, i;
	vaddr_t va, batch_vastart;

	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);

	KASSERT(VM_MAP_IS_KERNEL(map));
	KASSERTMSG(vm_map_min(map) <= start,
	    "vm_map_min(map) [%#"PRIxVADDR"] <= start [%#"PRIxVADDR"]"
	    " (size=%#"PRIxVSIZE")",
	    vm_map_min(map), start, end - start);
	KASSERT(start < end);
	KASSERT(end <= vm_map_max(map));

	for (va = start; va < end;) {
		batch_vastart = va;
		/* create a batch of at most __PGRM_BATCH pages to free */
		for (i = 0;
		     i < __PGRM_BATCH && va < end;
		     va += PAGE_SIZE) {
			if (!pmap_extract(pmap_kernel(), va, &pa[i])) {
				continue;
			}
			i++;
		}
		npgrm = i;
		/* now remove the mappings */
		pmap_kremove(batch_vastart, va - batch_vastart);
		/* and free the pages */
		for (i = 0; i < npgrm; i++) {
			pg = PHYS_TO_VM_PAGE(pa[i]);
			KASSERT(pg);
			KASSERT(pg->uobject == NULL);
			KASSERT(pg->uanon == NULL);
			KASSERT((pg->flags & PG_BUSY) == 0);
			uvm_pagefree(pg);
		}
	}
#undef __PGRM_BATCH
}

#if defined(DEBUG)
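/*
 * uvm_km_check_empty: debug check that a KVA range is unmapped and that
 * kernel_object has no pages for it.
 */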
void
uvm_km_check_empty(struct vm_map *map, vaddr_t start, vaddr_t end)
{
	vaddr_t va;
	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);

	KDASSERT(VM_MAP_IS_KERNEL(map));
	KDASSERT(vm_map_min(map) <= start);
	KDASSERT(start < end);
	KDASSERT(end <= vm_map_max(map));

	for (va = start; va < end; va += PAGE_SIZE) {
		paddr_t pa;

		if (pmap_extract(pmap_kernel(), va, &pa)) {
			panic("uvm_km_check_empty: va %p has pa %#llx",
			    (void *)va, (long long)pa);
		}
		/*
		 * kernel_object should not have pages for the corresponding
		 * region.  check it.
		 *
		 * why trylock?  because:
		 * - caller might not want to block.
		 * - we can recurse when allocating radix_node for
		 *   kernel_object.
		 */
		if (rw_tryenter(uvm_kernel_object->vmobjlock, RW_READER)) {
			struct vm_page *pg;

			pg = uvm_pagelookup(uvm_kernel_object,
			    va - vm_map_min(kernel_map));
			rw_exit(uvm_kernel_object->vmobjlock);
			if (pg) {
				panic("uvm_km_check_empty: "
				    "has page hashed at %p",
				    (const void *)va);
			}
		}
	}
}
#endif /* defined(DEBUG) */

/*
 * uvm_km_alloc: allocate an area of kernel memory.
 *
 * => NOTE: we can return 0 even if we can wait if there is not enough
 *	free VM space in the map... caller should be prepared to handle
 *	this case.
 * => we return KVA of memory allocated
 */
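
/*
 * A minimal usage sketch (illustrative), allocating and later freeing
 * one wired, zeroed page and handling the 0 return noted above:
 *
 *	vaddr_t va;
 *
 *	va = uvm_km_alloc(kernel_map, PAGE_SIZE, 0,
 *	    UVM_KMF_WIRED | UVM_KMF_ZERO | UVM_KMF_CANFAIL);
 *	if (va == 0)
 *		return ENOMEM;
 *	...
 *	uvm_km_free(kernel_map, va, PAGE_SIZE, UVM_KMF_WIRED);
 */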

vaddr_t
uvm_km_alloc(struct vm_map *map, vsize_t size, vsize_t align, uvm_flag_t flags)
{
	vaddr_t kva, loopva;
	vaddr_t offset;
	vsize_t loopsize;
	struct vm_page *pg;
	struct uvm_object *obj;
	int pgaflags;
	vm_prot_t prot, vaprot;
	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);

	KASSERT(vm_map_pmap(map) == pmap_kernel());
	KASSERT((flags & UVM_KMF_TYPEMASK) == UVM_KMF_WIRED ||
		(flags & UVM_KMF_TYPEMASK) == UVM_KMF_PAGEABLE ||
		(flags & UVM_KMF_TYPEMASK) == UVM_KMF_VAONLY);
	KASSERT((flags & UVM_KMF_COLORMATCH) == 0 || (flags & UVM_KMF_VAONLY) != 0);

	/*
	 * setup for call
	 */

	kva = vm_map_min(map);	/* hint */
	size = round_page(size);
	obj = (flags & UVM_KMF_PAGEABLE) ? uvm_kernel_object : NULL;
	UVMHIST_LOG(maphist,"  (map=%#jx, obj=%#jx, size=%#jx, flags=%#jx)",
	    (uintptr_t)map, (uintptr_t)obj, size, flags);

	/*
	 * allocate some virtual space
	 */

	vaprot = (flags & UVM_KMF_EXEC) ? UVM_PROT_ALL : UVM_PROT_RW;
	if (__predict_false(uvm_map(map, &kva, size, obj, UVM_UNKNOWN_OFFSET,
	    align, UVM_MAPFLAG(vaprot, UVM_PROT_ALL, UVM_INH_NONE,
	    UVM_ADV_RANDOM,
	    (flags & (UVM_KMF_TRYLOCK | UVM_KMF_NOWAIT | UVM_KMF_WAITVA
	     | UVM_KMF_COLORMATCH)))) != 0)) {
		UVMHIST_LOG(maphist, "<- done (no VM)",0,0,0,0);
		return(0);
	}

	/*
	 * if all we wanted was VA, return now
	 */

	if (flags & (UVM_KMF_VAONLY | UVM_KMF_PAGEABLE)) {
		UVMHIST_LOG(maphist,"<- done valloc (kva=%#jx)", kva,0,0,0);
		return(kva);
	}

	/*
	 * recover object offset from virtual address
	 */

	offset = kva - vm_map_min(kernel_map);
	UVMHIST_LOG(maphist, "  kva=%#jx, offset=%#jx", kva, offset,0,0);

	/*
	 * now allocate and map in the memory... note that we are the only ones
	 * who should ever get a handle on this area of VM.
	 */

	loopva = kva;
	loopsize = size;

	pgaflags = UVM_FLAG_COLORMATCH;
	if (flags & UVM_KMF_NOWAIT)
		pgaflags |= UVM_PGA_USERESERVE;
	if (flags & UVM_KMF_ZERO)
		pgaflags |= UVM_PGA_ZERO;
	prot = VM_PROT_READ | VM_PROT_WRITE;
	if (flags & UVM_KMF_EXEC)
		prot |= VM_PROT_EXECUTE;
	while (loopsize) {
		KASSERTMSG(!pmap_extract(pmap_kernel(), loopva, NULL),
		    "loopva=%#"PRIxVADDR, loopva);

		pg = uvm_pagealloc_strat(NULL, offset, NULL, pgaflags,
#ifdef UVM_KM_VMFREELIST
		   UVM_PGA_STRAT_ONLY, UVM_KM_VMFREELIST
#else
		   UVM_PGA_STRAT_NORMAL, 0
#endif
		   );

		/*
		 * out of memory?
		 */

		if (__predict_false(pg == NULL)) {
			if ((flags & UVM_KMF_NOWAIT) ||
			    ((flags & UVM_KMF_CANFAIL) && !uvm_reclaimable())) {
				/* free everything! */
				uvm_km_free(map, kva, size,
				    flags & UVM_KMF_TYPEMASK);
				return (0);
			} else {
				uvm_wait("km_getwait2");	/* sleep here */
				continue;
			}
		}

		pg->flags &= ~PG_BUSY;	/* new page */
		UVM_PAGE_OWN(pg, NULL);

		/*
		 * map it in
		 */

		pmap_kenter_pa(loopva, VM_PAGE_TO_PHYS(pg),
		    prot, PMAP_KMPAGE);
		loopva += PAGE_SIZE;
		offset += PAGE_SIZE;
		loopsize -= PAGE_SIZE;
	}

	pmap_update(pmap_kernel());

	if ((flags & UVM_KMF_ZERO) == 0) {
		kmsan_orig((void *)kva, size, KMSAN_TYPE_UVM, __RET_ADDR);
		kmsan_mark((void *)kva, size, KMSAN_STATE_UNINIT);
	}

	UVMHIST_LOG(maphist,"<- done (kva=%#jx)", kva,0,0,0);
	return(kva);
}

/*
 * uvm_km_protect: change the protection of an allocated area
 */

int
uvm_km_protect(struct vm_map *map, vaddr_t addr, vsize_t size, vm_prot_t prot)
{
	return uvm_map_protect(map, addr, addr + round_page(size), prot, false);
}

/*
 * uvm_km_free: free an area of kernel memory
 */

void
uvm_km_free(struct vm_map *map, vaddr_t addr, vsize_t size, uvm_flag_t flags)
{
	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);

	KASSERT((flags & UVM_KMF_TYPEMASK) == UVM_KMF_WIRED ||
		(flags & UVM_KMF_TYPEMASK) == UVM_KMF_PAGEABLE ||
		(flags & UVM_KMF_TYPEMASK) == UVM_KMF_VAONLY);
	KASSERT((addr & PAGE_MASK) == 0);
	KASSERT(vm_map_pmap(map) == pmap_kernel());

	size = round_page(size);

	if (flags & UVM_KMF_PAGEABLE) {
		uvm_km_pgremove(addr, addr + size);
	} else if (flags & UVM_KMF_WIRED) {
		/*
		 * Note: uvm_km_pgremove_intrsafe() extracts each mapping
		 * and then removes it.  See comment below about KVA
		 * visibility.
		 */
		uvm_km_pgremove_intrsafe(map, addr, addr + size);
	}

	/*
	 * Note: uvm_unmap_remove() calls pmap_update() for us, before
	 * KVA becomes globally available.
	 */

	uvm_unmap1(map, addr, addr + size, UVM_FLAG_VAONLY);
}

/* Sanity; must specify both or none. */
#if (defined(PMAP_MAP_POOLPAGE) || defined(PMAP_UNMAP_POOLPAGE)) && \
    (!defined(PMAP_MAP_POOLPAGE) || !defined(PMAP_UNMAP_POOLPAGE))
#error Must specify MAP and UNMAP together.
#endif

#if defined(PMAP_ALLOC_POOLPAGE) && \
    !defined(PMAP_MAP_POOLPAGE) && !defined(PMAP_UNMAP_POOLPAGE)
#error Must specify ALLOC with MAP and UNMAP
#endif

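/*
 * uvm_km_kmem_alloc: allocate wired kernel memory from the given vmem
 * arena.
 *
 * => single-page allocations are short-circuited through
 *    PMAP_MAP_POOLPAGE where the pmap provides it (typically a
 *    direct-mapped VA), skipping vmem and pmap_kenter_pa() entirely.
 * => larger allocations take KVA from the arena and back it with
 *    pages one at a time.
 */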
int
uvm_km_kmem_alloc(vmem_t *vm, vmem_size_t size, vm_flag_t flags,
    vmem_addr_t *addr)
{
	struct vm_page *pg;
	vmem_addr_t va;
	int rc;
	vaddr_t loopva;
	vsize_t loopsize;

	size = round_page(size);

#if defined(PMAP_MAP_POOLPAGE)
	if (size == PAGE_SIZE) {
again:
#ifdef PMAP_ALLOC_POOLPAGE
		pg = PMAP_ALLOC_POOLPAGE((flags & VM_SLEEP) ?
		   0 : UVM_PGA_USERESERVE);
#else
		pg = uvm_pagealloc(NULL, 0, NULL,
		   (flags & VM_SLEEP) ? 0 : UVM_PGA_USERESERVE);
#endif /* PMAP_ALLOC_POOLPAGE */
		if (__predict_false(pg == NULL)) {
			if (flags & VM_SLEEP) {
				uvm_wait("plpg");
				goto again;
			}
			return ENOMEM;
		}
		va = PMAP_MAP_POOLPAGE(VM_PAGE_TO_PHYS(pg));
		KASSERT(va != 0);
		*addr = va;
		return 0;
	}
#endif /* PMAP_MAP_POOLPAGE */

	rc = vmem_alloc(vm, size, flags, &va);
	if (rc != 0)
		return rc;

#ifdef PMAP_GROWKERNEL
	/*
	 * These VA allocations happen independently of uvm_map
	 * so this allocation must not extend beyond the current limit.
	 */
	KASSERTMSG(uvm_maxkaddr >= va + size,
	    "%#"PRIxVADDR" %#"PRIxPTR" %#zx",
	    uvm_maxkaddr, va, size);
#endif

	loopva = va;
	loopsize = size;

	while (loopsize) {
		paddr_t pa __diagused;
		KASSERTMSG(!pmap_extract(pmap_kernel(), loopva, &pa),
		    "loopva=%#"PRIxVADDR" loopsize=%#"PRIxVSIZE
		    " pa=%#"PRIxPADDR" vmem=%p",
		    loopva, loopsize, pa, vm);

		pg = uvm_pagealloc(NULL, loopva, NULL,
		    UVM_FLAG_COLORMATCH
		    | ((flags & VM_SLEEP) ? 0 : UVM_PGA_USERESERVE));
		if (__predict_false(pg == NULL)) {
			if (flags & VM_SLEEP) {
				uvm_wait("plpg");
				continue;
			} else {
				uvm_km_pgremove_intrsafe(kernel_map, va,
				    va + size);
				vmem_free(vm, va, size);
				return ENOMEM;
			}
		}

		pg->flags &= ~PG_BUSY;	/* new page */
		UVM_PAGE_OWN(pg, NULL);
		pmap_kenter_pa(loopva, VM_PAGE_TO_PHYS(pg),
		    VM_PROT_READ|VM_PROT_WRITE, PMAP_KMPAGE);

		loopva += PAGE_SIZE;
		loopsize -= PAGE_SIZE;
	}
	pmap_update(pmap_kernel());

	*addr = va;

	return 0;
}

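/*
 * uvm_km_kmem_free: free memory obtained with uvm_km_kmem_alloc(),
 * undoing the PMAP_UNMAP_POOLPAGE shortcut for single pages where it
 * applies.
 */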
void
uvm_km_kmem_free(vmem_t *vm, vmem_addr_t addr, size_t size)
{

	size = round_page(size);
#if defined(PMAP_UNMAP_POOLPAGE)
	if (size == PAGE_SIZE) {
		paddr_t pa;

		pa = PMAP_UNMAP_POOLPAGE(addr);
		uvm_pagefree(PHYS_TO_VM_PAGE(pa));
		return;
	}
#endif /* PMAP_UNMAP_POOLPAGE */
	uvm_km_pgremove_intrsafe(kernel_map, addr, addr + size);
	pmap_update(pmap_kernel());

	vmem_free(vm, addr, size);
}

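/*
 * uvm_km_va_starved_p: return true once less than 10% of kmem_arena's
 * KVA is free.  For example, with a 512MB kmem_arena, the arena counts
 * as starved when free KVA drops below roughly 51MB.
 */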
bool
uvm_km_va_starved_p(void)
{
	vmem_size_t total;
	vmem_size_t free;

	if (kmem_arena == NULL)
		return false;

	total = vmem_size(kmem_arena, VMEM_ALLOC|VMEM_FREE);
	free = vmem_size(kmem_arena, VMEM_FREE);

	return (free < (total / 10));
}
918