/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_kern.c	8.3 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD: src/sys/vm/vm_kern.c,v 1.61.2.2 2002/03/12 18:25:26 tegge Exp $
 * $DragonFly: src/sys/vm/vm_kern.c,v 1.29 2007/06/07 23:14:29 dillon Exp $
 */

/*
 *	Kernel memory management.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>

struct vm_map kernel_map;
struct vm_map clean_map;
struct vm_map buffer_map;

/*
 *	kmem_alloc_pageable:
 *
 *	Allocate pageable memory in the kernel's address map.
 *	"map" must be kernel_map or a submap of kernel_map.
 *	Returns 0 if no space could be found.
 */
vm_offset_t
kmem_alloc_pageable(vm_map_t map, vm_size_t size)
{
	vm_offset_t addr;
	int result;

	size = round_page(size);
	addr = vm_map_min(map);
	result = vm_map_find(map, NULL, (vm_offset_t) 0,
			     &addr, size, PAGE_SIZE,
			     TRUE, VM_MAPTYPE_NORMAL,
			     VM_PROT_ALL, VM_PROT_ALL,
			     0);
	if (result != KERN_SUCCESS) {
		return (0);
	}
	return (addr);
}

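/*
 * Illustrative sketch (hypothetical caller, not part of this file):
 * reserving pageable KVA that may be paged out under memory pressure.
 * The variable name and size are assumptions for the example only.
 *
 *	vm_offset_t table_va;
 *
 *	table_va = kmem_alloc_pageable(&kernel_map, 4 * PAGE_SIZE);
 *	if (table_va == 0)
 *		return (ENOMEM);
 */
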
/*
 *	kmem_alloc_nofault:
 *
 *	Same as kmem_alloc_pageable, except that it creates a nofault entry.
 */
vm_offset_t
kmem_alloc_nofault(vm_map_t map, vm_size_t size, vm_size_t align)
{
	vm_offset_t addr;
	int result;

	size = round_page(size);
	addr = vm_map_min(map);
	result = vm_map_find(map, NULL, (vm_offset_t) 0,
			     &addr, size, align,
			     TRUE, VM_MAPTYPE_NORMAL,
			     VM_PROT_ALL, VM_PROT_ALL,
			     MAP_NOFAULT);
	if (result != KERN_SUCCESS) {
		return (0);
	}
	return (addr);
}

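/*
 * Illustrative sketch (hypothetical caller): reserving KVA that must not
 * be populated by the fault path, e.g. space that will be backed
 * explicitly with pmap_qenter().  The names and sizes are assumptions.
 *
 *	vm_offset_t kva;
 *
 *	kva = kmem_alloc_nofault(&kernel_map, MAXPHYS, PAGE_SIZE);
 *	if (kva == 0)
 *		return (ENOMEM);
 */
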
/*
 *	kmem_alloc3:
 *
 *	Allocate wired-down memory in the kernel's address map
 *	or a submap.
 */
vm_offset_t
kmem_alloc3(vm_map_t map, vm_size_t size, int kmflags)
{
	vm_offset_t addr;
	vm_offset_t i;
	int count;

	size = round_page(size);

	if (kmflags & KM_KRESERVE)
		count = vm_map_entry_kreserve(MAP_RESERVE_COUNT);
	else
		count = vm_map_entry_reserve(MAP_RESERVE_COUNT);

	/*
	 * Use the kernel object for wired-down kernel pages.  Assume that no
	 * region of the kernel object is referenced more than once.
	 *
	 * Locate sufficient space in the map.  This will give us the final
	 * virtual address for the new memory, and thus will tell us the
	 * offset within the kernel map.
	 */
	vm_map_lock(map);
	if (vm_map_findspace(map, vm_map_min(map), size, PAGE_SIZE, 0, &addr)) {
		vm_map_unlock(map);
		if (kmflags & KM_KRESERVE)
			vm_map_entry_krelease(count);
		else
			vm_map_entry_release(count);
		return (0);
	}
	vm_object_reference(&kernel_object);
	vm_map_insert(map, &count,
		      &kernel_object, addr, addr, addr + size,
		      VM_MAPTYPE_NORMAL,
		      VM_PROT_ALL, VM_PROT_ALL,
		      0);
	vm_map_unlock(map);
	if (kmflags & KM_KRESERVE)
		vm_map_entry_krelease(count);
	else
		vm_map_entry_release(count);

	/*
	 * Guarantee that there are pages already in this object before
	 * calling vm_map_wire.  This is to prevent the following
	 * deadlock scenario:
	 *
	 * 1) Threads have swapped out, so that there is a pager for the
	 *    kernel_object.
	 * 2) The kmsg zone is empty, and so we are kmem_alloc()ing a new
	 *    page for it.
	 * 3) vm_map_wire calls vm_fault; there is no page, but there is
	 *    a pager, so we call pager_data_request.  But the kmsg zone
	 *    is empty, so we must kmem_alloc.
	 * 4) goto 1
	 * 5) Even if the kmsg zone is not empty: when we get the data
	 *    back from the pager, it will be (very stale) non-zero data.
	 *    kmem_alloc is defined to return zero-filled memory.
	 *
	 * We're intentionally not activating the pages we allocate, to
	 * prevent a race with page-out.  vm_map_wire will wire the pages.
	 */
	for (i = 0; i < size; i += PAGE_SIZE) {
		vm_page_t mem;

		mem = vm_page_grab(&kernel_object, OFF_TO_IDX(addr + i),
			    VM_ALLOC_ZERO | VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
		if ((mem->flags & PG_ZERO) == 0)
			vm_page_zero_fill(mem);
		mem->valid = VM_PAGE_BITS_ALL;
		vm_page_flag_clear(mem, PG_ZERO);
		vm_page_wakeup(mem);
	}

	/*
	 * And finally, mark the data as non-pageable.
	 */
	vm_map_wire(map, (vm_offset_t) addr, addr + size, kmflags);

	return (addr);
}

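/*
 * Illustrative sketch (hypothetical caller): a wired, zero-filled
 * allocation from the kernel map.  Passing KM_KRESERVE instead of 0 is
 * for callers that must dip into the kernel map-entry reserve pool.
 * The names and size below are assumptions for the example only.
 *
 *	vm_offset_t wired_va;
 *
 *	wired_va = kmem_alloc3(&kernel_map, 2 * PAGE_SIZE, 0);
 *	if (wired_va == 0)
 *		return (ENOMEM);
 */
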
/*
 *	kmem_free:
 *
 *	Release a region of kernel virtual memory allocated
 *	with kmem_alloc, and return the physical pages
 *	associated with that region.
 *
 *	This routine may not block on kernel maps.
 */
void
kmem_free(vm_map_t map, vm_offset_t addr, vm_size_t size)
{
	vm_map_remove(map, trunc_page(addr), round_page(addr + size));
}

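/*
 * Illustrative sketch: releasing the wired allocation from the previous
 * example.  The address and size must match the original allocation.
 *
 *	kmem_free(&kernel_map, wired_va, 2 * PAGE_SIZE);
 */
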
/*
 *	kmem_suballoc:
 *
 *	Used to break a system map into smaller maps, usually to reduce
 *	contention and to provide large KVA spaces for subsystems like the
 *	buffer cache.
 *
 *	parent		Map to take range from
 *	result		Map to populate with the carved-out range
 *	min, max	Returned endpoints of the new map
 *	size		Size of range to find
 */
void
kmem_suballoc(vm_map_t parent, vm_map_t result,
	      vm_offset_t *min, vm_offset_t *max, vm_size_t size)
{
	int ret;

	size = round_page(size);

	*min = (vm_offset_t) vm_map_min(parent);
	ret = vm_map_find(parent, NULL, (vm_offset_t) 0,
			  min, size, PAGE_SIZE,
			  TRUE, VM_MAPTYPE_UNSPECIFIED,
			  VM_PROT_ALL, VM_PROT_ALL,
			  0);
	if (ret != KERN_SUCCESS) {
		kprintf("kmem_suballoc: bad status return of %d.\n", ret);
		panic("kmem_suballoc");
	}
	*max = *min + size;
	pmap_reference(vm_map_pmap(parent));
	vm_map_init(result, *min, *max, vm_map_pmap(parent));
	if ((ret = vm_map_submap(parent, *min, *max, result)) != KERN_SUCCESS)
		panic("kmem_suballoc: unable to change range to submap");
}

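/*
 * Illustrative sketch (hypothetical subsystem): carving a private submap
 * out of kernel_map at boot time.  "mysub_map" and the 32MB size are
 * assumptions for the example only.
 *
 *	static struct vm_map mysub_map;
 *	vm_offset_t minaddr, maxaddr;
 *
 *	kmem_suballoc(&kernel_map, &mysub_map, &minaddr, &maxaddr,
 *		      32 * 1024 * 1024);
 */
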
/*
 *	kmem_alloc_wait:
 *
 *	Allocates pageable memory from a sub-map of the kernel.  If the submap
 *	has no room, the caller sleeps waiting for more memory in the submap.
 *
 *	This routine may block.
 */
vm_offset_t
kmem_alloc_wait(vm_map_t map, vm_size_t size)
{
	vm_offset_t addr;
	int count;

	size = round_page(size);

	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);

	for (;;) {
		/*
		 * To make this work for more than one map, use the map's lock
		 * to lock out sleepers/wakers.
		 */
		vm_map_lock(map);
		if (vm_map_findspace(map, vm_map_min(map),
				     size, PAGE_SIZE, 0, &addr) == 0) {
			break;
		}
		/* no space now; see if we can ever get space */
		if (vm_map_max(map) - vm_map_min(map) < size) {
			vm_map_entry_release(count);
			vm_map_unlock(map);
			return (0);
		}
		vm_map_unlock(map);
		tsleep(map, 0, "kmaw", 0);
	}
	vm_map_insert(map, &count,
		      NULL, (vm_offset_t) 0,
		      addr, addr + size,
		      VM_MAPTYPE_NORMAL,
		      VM_PROT_ALL, VM_PROT_ALL,
		      0);
	vm_map_unlock(map);
	vm_map_entry_release(count);
	return (addr);
}

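/*
 * Illustrative sketch: allocating from a size-limited submap, sleeping
 * until another thread returns space via kmem_free_wakeup().  A zero
 * return means the submap can never satisfy the request.  "mysub_map"
 * is the hypothetical submap from the kmem_suballoc() example above.
 *
 *	vm_offset_t va;
 *
 *	va = kmem_alloc_wait(&mysub_map, PAGE_SIZE);
 *	if (va == 0)
 *		return (ENOMEM);
 */
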
/*
 *	kmem_free_wakeup:
 *
 *	Returns memory to a submap of the kernel, and wakes up any processes
 *	waiting for memory in that map.
 */
void
kmem_free_wakeup(vm_map_t map, vm_offset_t addr, vm_size_t size)
{
	int count;

	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
	vm_map_lock(map);
	vm_map_delete(map, trunc_page(addr), round_page(addr + size), &count);
	wakeup(map);
	vm_map_unlock(map);
	vm_map_entry_release(count);
}

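/*
 * Illustrative sketch: the matching release for the kmem_alloc_wait()
 * example above; the wakeup(map) inside unblocks any sleeping waiters.
 *
 *	kmem_free_wakeup(&mysub_map, va, PAGE_SIZE);
 */
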
/*
 *	kmem_init:
 *
 *	Create the kernel_map and insert mappings to cover areas already
 *	allocated or reserved thus far.  That is, the areas (KvaStart,start)
 *	and (end,KvaEnd) must be marked as allocated.
 *
 *	virtual2_start/end is a cutout between KvaStart and start,
 *	needed on x86_64 due to the location of KERNBASE (at -2G).
 *
 *	We could use a min_offset of 0 instead of KvaStart, but since the
 *	min_offset is not used for any calculations other than a bounds check
 *	it does not affect readability.  KvaStart is more appropriate.
 *
 *	Depend on the zalloc bootstrap cache to get our vm_map_entry_t.
 */
void
kmem_init(vm_offset_t start, vm_offset_t end)
{
	vm_offset_t addr;
	vm_map_t m;
	int count;

	m = vm_map_create(&kernel_map, &kernel_pmap, KvaStart, KvaEnd);
	vm_map_lock(m);
	/* N.B.: cannot use kgdb to debug, starting with this assignment ... */
	m->system_map = 1;
	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
	addr = KvaStart;
	if (virtual2_start) {
		if (addr < virtual2_start) {
			vm_map_insert(m, &count, NULL, (vm_offset_t) 0,
				      addr, virtual2_start,
				      VM_MAPTYPE_NORMAL,
				      VM_PROT_ALL, VM_PROT_ALL,
				      0);
		}
		addr = virtual2_end;
	}
	if (addr < start) {
		vm_map_insert(m, &count, NULL, (vm_offset_t) 0,
			      addr, start,
			      VM_MAPTYPE_NORMAL,
			      VM_PROT_ALL, VM_PROT_ALL,
			      0);
	}
	addr = end;
	if (addr < KvaEnd) {
		vm_map_insert(m, &count, NULL, (vm_offset_t) 0,
			      addr, KvaEnd,
			      VM_MAPTYPE_NORMAL,
			      VM_PROT_ALL, VM_PROT_ALL,
			      0);
	}
	/* ... and ending with the completion of the above `insert' */
	vm_map_unlock(m);
	vm_map_entry_release(count);
}

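/*
 * Illustrative note (an assumption about the boot path, for orientation
 * only): kmem_init() is called once during early VM startup, with
 * "start"/"end" bracketing the KVA still free after the kernel image
 * and bootstrap allocations, along the lines of:
 *
 *	kmem_init(virtual_start, virtual_end);
 */
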
static int
kvm_size(SYSCTL_HANDLER_ARGS)
{
	unsigned long ksize = KvaSize;

	return sysctl_handle_long(oidp, &ksize, 0, req);
}
SYSCTL_PROC(_vm, OID_AUTO, kvm_size, CTLTYPE_LONG|CTLFLAG_RD,
    0, 0, kvm_size, "IU", "Size of KVM");

static int
kvm_free(SYSCTL_HANDLER_ARGS)
{
	unsigned long kfree = virtual_end - kernel_vm_end;

	return sysctl_handle_long(oidp, &kfree, 0, req);
}
SYSCTL_PROC(_vm, OID_AUTO, kvm_free, CTLTYPE_LONG|CTLFLAG_RD,
    0, 0, kvm_free, "IU", "Amount of KVM free");
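
/*
 * Illustrative sketch (not part of this file): userland can read these
 * sysctls with sysctlbyname(3), e.g.
 *
 *	unsigned long kvmsize;
 *	size_t len = sizeof(kvmsize);
 *
 *	if (sysctlbyname("vm.kvm_size", &kvmsize, &len, NULL, 0) == 0)
 *		printf("KVM size: %lu bytes\n", kvmsize);
 *
 * or from the shell via "sysctl vm.kvm_size".
 */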