/*
 * (MPSAFE)
 *
 * Copyright (c) 2003, 2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Hiten Pandya <hmp@backplane.com>.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */
/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_page.c	7.4 (Berkeley) 5/7/91
 * $DragonFly: src/sys/vm/vm_contig.c,v 1.21 2006/12/28 21:24:02 dillon Exp $
 */

/*
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * Contiguous memory allocation API.
 */
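
/*
 * Typical use (an illustrative sketch; the 64KB DMA buffer and the
 * low-4GB physical ceiling are hypothetical values, not taken from
 * this file):
 *
 *	void *buf;
 *
 *	buf = contigmalloc(65536, M_DEVBUF, M_WAITOK,
 *			   0, 0xffffffff,	(low/high physical range)
 *			   PAGE_SIZE, 0);	(alignment, no boundary)
 *	if (buf != NULL) {
 *		(... use buf as a DMA buffer ...)
 *		contigfree(buf, 65536, M_DEVBUF);
 *	}
 */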

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>

#include <sys/thread2.h>
#include <vm/vm_page2.h>

/*
 * vm_contig_pg_clean:
 *
 * Do a thorough cleanup of the specified 'queue', which can be either
 * PQ_ACTIVE or PQ_INACTIVE, by walking its page list.  If a page is
 * not marked dirty it is moved to the page cache, provided no one has
 * currently acquired it; otherwise localized action per object type
 * is taken for cleanup:
 *
 * 	In the OBJT_VNODE case, the whole page range is cleaned up
 * 	using the vm_object_page_clean() routine, by specifying a
 * 	start and end of '0'.
 *
 * 	Otherwise, if the object is of any other type, the generic
 * 	pageout (daemon) flush routine is invoked.
 *
 * Returns TRUE if the scan blocked or flushed a page (the caller
 * should rescan the queue), or FALSE once a full walkthrough of the
 * queue completes.
 *
 * The caller must hold vm_token.
 */
static int
vm_contig_pg_clean(int queue)
{
	vm_object_t object;
	vm_page_t m, m_tmp, next;

	ASSERT_LWKT_TOKEN_HELD(&vm_token);

	for (m = TAILQ_FIRST(&vm_page_queues[queue].pl); m != NULL; m = next) {
		KASSERT(m->queue == queue,
			("vm_contig_clean: page %p's queue is not %d",
			m, queue));
		next = TAILQ_NEXT(m, pageq);

		if (vm_page_sleep_busy(m, TRUE, "vpctw0"))
			return (TRUE);

		vm_page_test_dirty(m);
		if (m->dirty) {
			object = m->object;
			if (object->type == OBJT_VNODE) {
				vn_lock(object->handle, LK_EXCLUSIVE|LK_RETRY);
				vm_object_page_clean(object, 0, 0, OBJPC_SYNC);
				vn_unlock(((struct vnode *)object->handle));
				return (TRUE);
			} else if (object->type == OBJT_SWAP ||
					object->type == OBJT_DEFAULT) {
				m_tmp = m;
				vm_pageout_flush(&m_tmp, 1, 0);
				return (TRUE);
			}
		}
		if ((m->dirty == 0) && (m->busy == 0) && (m->hold_count == 0))
			vm_page_cache(m);
	}
	return (FALSE);
}

/*
 * vm_contig_pg_flush:
 *
 * Attempt to flush (count) pages from the given page queue.  This may
 * or may not succeed.  Take up to <count> passes, stopping early once
 * a pass completes without flushing anything.
 *
 * The caller must hold vm_token.
 */
static void
vm_contig_pg_flush(int queue, int count)
{
	while (count > 0) {
		if (!vm_contig_pg_clean(queue))
			break;
		--count;
	}
}

/*
 * vm_contig_pg_alloc:
 *
 * Allocate contiguous pages from the VM.  This function does not
 * map the allocated pages into the kernel map; doing so would make
 * large allocations (i.e. >2G) impossible.
 *
 * Malloc()'s data structures have been used for the collection of
 * statistics and for allocations of less than a page.
 *
 * Returns the index of the first page in vm_page_array on success,
 * or -1 on failure.
 *
 * The caller must hold vm_token.
 */
static int
vm_contig_pg_alloc(unsigned long size, vm_paddr_t low, vm_paddr_t high,
		   unsigned long alignment, unsigned long boundary, int mflags)
{
	int i, start, pass;
	vm_offset_t phys;
	vm_page_t pga = vm_page_array;
	vm_page_t m;
	int pqtype;

	size = round_page(size);
	if (size == 0)
		panic("vm_contig_pg_alloc: size must not be 0");
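	/*
	 * (x & (x - 1)) clears the lowest set bit of x, so it is zero
	 * only when x is a power of 2 (or zero).
	 */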
	if ((alignment & (alignment - 1)) != 0)
		panic("vm_contig_pg_alloc: alignment must be a power of 2");
	if ((boundary & (boundary - 1)) != 0)
		panic("vm_contig_pg_alloc: boundary must be a power of 2");

	start = 0;
	crit_enter();

	/*
	 * Three passes (0, 1, 2).  Each pass scans the VM page list for
	 * free or cached pages.  After each pass, if the entire scan
	 * failed, we attempt to flush inactive pages and reset the start
	 * index back to 0.  For passes 1 and 2 we also attempt to flush
	 * active pages.
	 */
	for (pass = 0; pass < 3; pass++) {
		/*
		 * Find first page in array that is free, within range,
		 * aligned, and such that the boundary won't be crossed.
		 */
again:
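		/*
		 * The boundary test below works because the run
		 * [phys, phys + size - 1] stays within a single
		 * 'boundary'-aligned window exactly when both endpoints
		 * agree in every bit above the boundary mask, i.e. when
		 * ((phys ^ (phys + size - 1)) & ~(boundary - 1)) == 0.
		 */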
		for (i = start; i < vmstats.v_page_count; i++) {
			m = &pga[i];
			phys = VM_PAGE_TO_PHYS(m);
			pqtype = m->queue - m->pc;
			if (((pqtype == PQ_FREE) || (pqtype == PQ_CACHE)) &&
			    (phys >= low) && (phys < high) &&
			    ((phys & (alignment - 1)) == 0) &&
			    (((phys ^ (phys + size - 1)) & ~(boundary - 1)) == 0) &&
			    m->busy == 0 && m->wire_count == 0 &&
			    m->hold_count == 0 && (m->flags & PG_BUSY) == 0) {
				break;
			}
		}

		/*
		 * If no page was found in the given range, or the run
		 * would extend past 'high', flush the page queues (via
		 * vm_contig_pg_flush()) to return pages to a usable
		 * state, and try again.
		 */
		if ((i == vmstats.v_page_count) ||
			((VM_PAGE_TO_PHYS(&pga[i]) + size) > high)) {

			/*
			 * Best effort flush of all inactive pages.
			 * This is quite quick; for now we stall all
			 * callers, even those that specified M_NOWAIT.
			 */
			vm_contig_pg_flush(PQ_INACTIVE,
					    vmstats.v_inactive_count);

			crit_exit(); /* give interrupts a chance */
			crit_enter();

			/*
			 * Best effort flush of active pages.
			 *
			 * This is very, very slow, so only do it if the
			 * caller has agreed to M_WAITOK.
			 *
			 * If enough pages are flushed, we may succeed on
			 * the next (final) pass; if not, the caller
			 * (contigmalloc()) will fail via the index < 0
			 * case.
			 */
			if (pass > 0 && (mflags & M_WAITOK)) {
				vm_contig_pg_flush(PQ_ACTIVE,
						    vmstats.v_active_count);
			}

			/*
			 * We're already too high in the address space
			 * to succeed; reset to 0 for the next iteration.
			 */
			start = 0;
			crit_exit(); /* give interrupts a chance */
			crit_enter();
			continue;	/* next pass */
		}
		start = i;

		/*
		 * Check that the successive pages are physically
		 * contiguous and remain free or cached.
		 *
		 * (still in critical section)
		 */
		for (i = start + 1; i < (start + size / PAGE_SIZE); i++) {
			m = &pga[i];
			pqtype = m->queue - m->pc;
			if ((VM_PAGE_TO_PHYS(&m[0]) !=
			    (VM_PAGE_TO_PHYS(&m[-1]) + PAGE_SIZE)) ||
			    ((pqtype != PQ_FREE) && (pqtype != PQ_CACHE)) ||
			    m->busy || m->wire_count ||
			    m->hold_count || (m->flags & PG_BUSY)
			) {
				start++;
				goto again;
			}
		}

		/*
		 * The run is contiguous and available.  Claim the pages:
		 * move any cached pages to the free queue, then pull
		 * each page off its queue and mark it valid.
		 *
		 * (still in critical section)
		 */
		for (i = start; i < (start + size / PAGE_SIZE); i++) {
			m = &pga[i];
			pqtype = m->queue - m->pc;
			if (pqtype == PQ_CACHE) {
				vm_page_busy(m);
				vm_page_free(m);
			}
			KKASSERT(m->object == NULL);
			vm_page_unqueue_nowakeup(m);
			m->valid = VM_PAGE_BITS_ALL;
			if (m->flags & PG_ZERO)
				vm_page_zero_count--;
			/* Don't clear the PG_ZERO flag; we'll need it later. */
			m->flags &= PG_ZERO;
			KASSERT(m->dirty == 0,
				("vm_contig_pg_alloc: page %p was dirty", m));
			m->wire_count = 0;
			m->busy = 0;
		}

		/*
		 * Our job is done; return the index of the first page
		 * in vm_page_array.
		 */
		crit_exit();
		return (start); /* aka &pga[start] */
	}

	/*
	 * Failed.
	 */
	crit_exit();
	return (-1);
}

/*
 * vm_contig_pg_free:
 *
 * Free pages previously allocated by vm_contig_pg_alloc().  The
 * caller guarantees that all references to the pages have been
 * removed and that it is OK to add them back to the free list.
 *
 * Caller must ensure no races on the page range in question.
 * No other requirements.
 */
void
vm_contig_pg_free(int start, u_long size)
{
	vm_page_t pga = vm_page_array;
	vm_page_t m;
	int i;

	size = round_page(size);
	if (size == 0)
		panic("vm_contig_pg_free: size must not be 0");

	lwkt_gettoken(&vm_token);
	for (i = start; i < (start + size / PAGE_SIZE); i++) {
		m = &pga[i];
		vm_page_busy(m);
		vm_page_free(m);
	}
	lwkt_reltoken(&vm_token);
}

/*
 * vm_contig_pg_kmap:
 *
 * Map a range of pages previously allocated with vm_contig_pg_alloc()
 * from vm_page_array[] into the KVA.  Once mapped, the pages are part
 * of the kernel and are to be freed with kmem_free(&kernel_map, addr,
 * size).  Returns the kernel virtual address, or 0 on failure.
 *
 * No requirements.
 */
vm_offset_t
vm_contig_pg_kmap(int start, u_long size, vm_map_t map, int flags)
{
	vm_offset_t addr, tmp_addr;
	vm_page_t pga = vm_page_array;
	int i, count;

	size = round_page(size);
	if (size == 0)
		panic("vm_contig_pg_kmap: size must not be 0");

	crit_enter();
	lwkt_gettoken(&vm_token);

	/*
	 * We've found a contiguous chunk that meets our requirements.
	 * Allocate KVM, assign the physical pages to it, and return
	 * a kernel VM pointer.
	 */
	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
	vm_map_lock(map);
	if (vm_map_findspace(map, vm_map_min(map), size, PAGE_SIZE, 0, &addr) !=
	    KERN_SUCCESS) {
		/*
		 * XXX We almost never run out of kernel virtual
		 * space, so we don't make the allocated memory
		 * above available.
		 */
		vm_map_unlock(map);
		vm_map_entry_release(count);
		lwkt_reltoken(&vm_token);
		crit_exit();
		return (0);
	}

	/*
	 * kernel_object maps 1:1 to kernel_map.
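	 * Offsets into kernel_object therefore correspond directly to
	 * kernel virtual addresses, which is why 'addr' is used below
	 * both as the object offset and as the map address.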
	 */
	vm_object_reference(&kernel_object);
	vm_map_insert(map, &count,
		      &kernel_object, addr,
		      addr, addr + size,
		      VM_MAPTYPE_NORMAL,
		      VM_PROT_ALL, VM_PROT_ALL,
		      0);
	vm_map_unlock(map);
	vm_map_entry_release(count);

	tmp_addr = addr;
	for (i = start; i < (start + size / PAGE_SIZE); i++) {
		vm_page_t m = &pga[i];
		vm_page_insert(m, &kernel_object, OFF_TO_IDX(tmp_addr));
		/* Zero only if requested and not already zeroed (PG_ZERO) */
		if ((flags & M_ZERO) && !(m->flags & PG_ZERO))
			pmap_zero_page(VM_PAGE_TO_PHYS(m));
		m->flags = 0;
		tmp_addr += PAGE_SIZE;
	}
	vm_map_wire(map, addr, addr + size, 0);

	lwkt_reltoken(&vm_token);
	crit_exit();
	return (addr);
}

/*
 * No requirements.
 */
void *
contigmalloc(
	unsigned long size,	/* should be size_t here and for malloc() */
	struct malloc_type *type,
	int flags,
	vm_paddr_t low,
	vm_paddr_t high,
	unsigned long alignment,
	unsigned long boundary)
{
	return contigmalloc_map(size, type, flags, low, high, alignment,
			boundary, &kernel_map);
}

/*
 * No requirements.
 */
void *
contigmalloc_map(
	unsigned long size,	/* should be size_t here and for malloc() */
	struct malloc_type *type,
	int flags,
	vm_paddr_t low,
	vm_paddr_t high,
	unsigned long alignment,
	unsigned long boundary,
	vm_map_t map)
{
	int index;
	void *rv;

	lwkt_gettoken(&vm_token);
	index = vm_contig_pg_alloc(size, low, high, alignment, boundary, flags);
	if (index < 0) {
		kprintf("contigmalloc_map: failed size %lu low=%llx "
			"high=%llx align=%lu boundary=%lu flags=%08x\n",
			size, (unsigned long long)low, (unsigned long long)high,
			alignment, boundary, flags);
		lwkt_reltoken(&vm_token);
		return NULL;
	}

	rv = (void *)vm_contig_pg_kmap(index, size, map, flags);
	if (rv == NULL)
		vm_contig_pg_free(index, size);
	lwkt_reltoken(&vm_token);

	return rv;
}

/*
 * No requirements.
 */
void
contigfree(void *addr, unsigned long size, struct malloc_type *type)
{
	kmem_free(&kernel_map, (vm_offset_t)addr, size);
}

/*
 * No requirements.
 */
vm_offset_t
vm_page_alloc_contig(
	vm_offset_t size,
	vm_paddr_t low,
	vm_paddr_t high,
	vm_offset_t alignment)
{
	return ((vm_offset_t)contigmalloc_map(size, M_DEVBUF, M_NOWAIT, low,
				high, alignment, 0ul, &kernel_map));
}