/*	$NetBSD: uvm_pager.c,v 1.97 2009/11/07 07:27:50 cegger Exp $	*/

/*
 *
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor and
 *      Washington University.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from: Id: uvm_pager.c,v 1.1.2.23 1998/02/02 20:38:06 chuck Exp
 */

/*
 * uvm_pager.c: generic functions used to assist the pagers.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_pager.c,v 1.97 2009/11/07 07:27:50 cegger Exp $");

#include "opt_uvmhist.h"
#include "opt_readahead.h"
#include "opt_pagermap.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/vnode.h>
#include <sys/buf.h>

#include <uvm/uvm.h>

/*
 * XXX
 * this is needed until the device strategy interface
 * is changed to do physically-addressed i/o.
 */

#ifndef PAGER_MAP_DEFAULT_SIZE
#define PAGER_MAP_DEFAULT_SIZE	(16 * 1024 * 1024)
#endif

#ifndef PAGER_MAP_SIZE
#define PAGER_MAP_SIZE	PAGER_MAP_DEFAULT_SIZE
#endif

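/*
 * the pager map size lives in a variable rather than being used as a
 * bare constant so the value can be tuned: PAGER_MAP_SIZE can be
 * overridden with a kernel option (see opt_pagermap.h above), and
 * keeping it in a data symbol presumably also allows the value to be
 * patched in a built kernel.
 */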
size_t pager_map_size = PAGER_MAP_SIZE;

/*
 * list of uvm pagers in the system
 */

const struct uvm_pagerops * const uvmpagerops[] = {
	&aobj_pager,
	&uvm_deviceops,
	&uvm_vnodeops,
	&ubc_pager,
};

/*
 * the pager map: provides KVA for I/O
 */

struct vm_map *pager_map;		/* XXX */
kmutex_t pager_map_wanted_lock;
bool pager_map_wanted;	/* locked by pager map */
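
/*
 * emergency KVA reserve for the pagedaemon: if pager_map fills up, the
 * pagedaemon cannot simply wait for space (freeing pager_map space may
 * itself depend on pageouts making progress), so it falls back to this
 * single MAXPHYS-sized range, serialized by emerginuse.
 */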
static vaddr_t emergva;
static bool emerginuse;

/*
 * uvm_pager_init: init pagers (at boot time)
 */

void
uvm_pager_init(void)
{
	u_int lcv;
	vaddr_t sva, eva;

	/*
	 * init pager map
	 */

	sva = 0;
	pager_map = uvm_km_suballoc(kernel_map, &sva, &eva, pager_map_size, 0,
	    false, NULL);
	mutex_init(&pager_map_wanted_lock, MUTEX_DEFAULT, IPL_NONE);
	pager_map_wanted = false;
	emergva = uvm_km_alloc(kernel_map, round_page(MAXPHYS), 0,
	    UVM_KMF_VAONLY);
#if defined(DEBUG)
	if (emergva == 0)
		panic("emergva");
#endif
	emerginuse = false;

	/*
	 * init ASYNC I/O queue
	 */

	TAILQ_INIT(&uvm.aio_done);

	/*
	 * call pager init functions
	 */
	for (lcv = 0 ; lcv < __arraycount(uvmpagerops); lcv++) {
		if (uvmpagerops[lcv]->pgo_init)
			uvmpagerops[lcv]->pgo_init();
	}
}

/*
 * uvm_pagermapin: map pages into KVA (pager_map) for I/O that needs mappings
 *
 * we basically just map in a blank map entry to reserve the space in the
 * map and then use pmap_kenter_pa() to put the mappings in by hand.
 */

vaddr_t
uvm_pagermapin(struct vm_page **pps, int npages, int flags)
{
	vsize_t size;
	vaddr_t kva;
	vaddr_t cva;
	struct vm_page *pp;
	vm_prot_t prot;
	const bool pdaemon = curlwp == uvm.pagedaemon_lwp;
	UVMHIST_FUNC("uvm_pagermapin"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist,"(pps=0x%x, npages=%d)", pps, npages,0,0);

	/*
	 * compute protection.  outgoing I/O (a pageout) only needs read
	 * access to the page, whereas incoming I/O (UVMPAGER_MAPIN_READ,
	 * i.e. a pagein) needs read/write so the data can be stored.
	 */

	prot = VM_PROT_READ;
	if (flags & UVMPAGER_MAPIN_READ)
		prot |= VM_PROT_WRITE;

ReStart:
	size = npages << PAGE_SHIFT;
	kva = 0;			/* let system choose VA */

	if (uvm_map(pager_map, &kva, size, NULL, UVM_UNKNOWN_OFFSET, 0,
	    UVM_FLAG_NOMERGE | (pdaemon ? UVM_FLAG_NOWAIT : 0)) != 0) {
		if (pdaemon) {
			mutex_enter(&pager_map_wanted_lock);
			if (emerginuse) {
				UVM_UNLOCK_AND_WAIT(&emergva,
				    &pager_map_wanted_lock, false,
				    "emergva", 0);
				goto ReStart;
			}
			emerginuse = true;
			mutex_exit(&pager_map_wanted_lock);
			kva = emergva;
			/* the shift truncates MAXPHYS down to a whole number of pages */
			KASSERT(npages <= (MAXPHYS >> PAGE_SHIFT));
			goto enter;
		}
		if ((flags & UVMPAGER_MAPIN_WAITOK) == 0) {
			UVMHIST_LOG(maphist,"<- NOWAIT failed", 0,0,0,0);
			return(0);
		}
		mutex_enter(&pager_map_wanted_lock);
		pager_map_wanted = true;
		UVMHIST_LOG(maphist, "  SLEEPING on pager_map",0,0,0,0);
		UVM_UNLOCK_AND_WAIT(pager_map, &pager_map_wanted_lock, false,
		    "pager_map", 0);
		goto ReStart;
	}

enter:
	/* got it */
	for (cva = kva ; size != 0 ; size -= PAGE_SIZE, cva += PAGE_SIZE) {
		pp = *pps++;
		KASSERT(pp);
		KASSERT(pp->flags & PG_BUSY);
		pmap_kenter_pa(cva, VM_PAGE_TO_PHYS(pp), prot, 0);
	}
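	/* commit any pmap-level updates deferred by pmap_kenter_pa(). */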
	pmap_update(vm_map_pmap(pager_map));

	UVMHIST_LOG(maphist, "<- done (KVA=0x%x)", kva,0,0,0);
	return(kva);
}
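
/*
 * usage sketch (illustrative only; the real callers live in the pagers,
 * not in this file):
 *
 *	kva = uvm_pagermapin(pps, npages, UVMPAGER_MAPIN_WAITOK);
 *	... do device i/o on [kva, kva + (npages << PAGE_SHIFT)) ...
 *	uvm_pagermapout(kva, npages);
 *
 * without UVMPAGER_MAPIN_WAITOK the call may return 0 when pager_map
 * is full, and the caller must be prepared to retry or fail.
 */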

/*
 * uvm_pagermapout: remove pager_map mapping
 *
 * we remove the pmap-level mappings by hand and then remove the map
 * entry itself (waking up anyone waiting for pager_map space).
 */

void
uvm_pagermapout(vaddr_t kva, int npages)
{
	vsize_t size = npages << PAGE_SHIFT;
	struct vm_map_entry *entries;
	UVMHIST_FUNC("uvm_pagermapout"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, " (kva=0x%x, npages=%d)", kva, npages,0,0);

	/*
	 * duplicate uvm_unmap, but add in pager_map_wanted handling.
	 */

	pmap_kremove(kva, size);
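	/*
	 * the emergency range is not part of pager_map, so there is no
	 * map entry to remove; just mark it free and wake any waiter.
	 */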
	if (kva == emergva) {
		mutex_enter(&pager_map_wanted_lock);
		emerginuse = false;
		wakeup(&emergva);
		mutex_exit(&pager_map_wanted_lock);
		return;
	}

	vm_map_lock(pager_map);
	uvm_unmap_remove(pager_map, kva, kva + size, &entries, NULL, 0);
	mutex_enter(&pager_map_wanted_lock);
	if (pager_map_wanted) {
		pager_map_wanted = false;
		wakeup(pager_map);
	}
	mutex_exit(&pager_map_wanted_lock);
	vm_map_unlock(pager_map);
	if (entries)
		uvm_unmap_detach(entries, 0);
	pmap_update(pmap_kernel());
	UVMHIST_LOG(maphist,"<- done",0,0,0,0);
}

/*
 * uvm_aio_biodone: interrupt-context iodone handler for single-buf i/os
 * or the top-level buf of a nested-buf i/o.  defers the real completion
 * work to the aiodone workqueue, since it cannot be done from
 * interrupt context.
 */

void
uvm_aio_biodone(struct buf *bp)
{
	/* reset b_iodone for when this is a single-buf i/o. */
	bp->b_iodone = uvm_aio_aiodone;

	workqueue_enqueue(uvm.aiodone_queue, &bp->b_work, NULL);
}
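
/*
 * uvm_aio_aiodone_pages: page-level completion for an async i/o.
 *
 * unbusies (or releases) each page, handling read vs. write errors and,
 * for swap-backed i/o, per-page object/anon locking and swap slot
 * bookkeeping.  takes and drops the relevant object/anon locks and
 * uvm_pageqlock itself.
 */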

void
uvm_aio_aiodone_pages(struct vm_page **pgs, int npages, bool write, int error)
{
	struct uvm_object *uobj;
	struct vm_page *pg;
	kmutex_t *slock;
	int pageout_done;
	int swslot;
	int i;
	bool swap;
	UVMHIST_FUNC("uvm_aio_aiodone_pages"); UVMHIST_CALLED(ubchist);

	swslot = 0;
	pageout_done = 0;
	slock = NULL;
	uobj = NULL;
	pg = pgs[0];
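	/*
	 * a page is swap-backed if it is owned by an anon (and not also
	 * by an object) or if it lives in an anonymous-memory object;
	 * the first page is taken as representative of the whole i/o.
	 */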
	swap = (pg->uanon != NULL && pg->uobject == NULL) ||
		(pg->pqflags & PQ_AOBJ) != 0;
	if (!swap) {
		uobj = pg->uobject;
		slock = &uobj->vmobjlock;
		mutex_enter(slock);
		mutex_enter(&uvm_pageqlock);
	} else {
#if defined(VMSWAP)
		if (error) {
			if (pg->uobject != NULL) {
				swslot = uao_find_swslot(pg->uobject,
				    pg->offset >> PAGE_SHIFT);
			} else {
				KASSERT(pg->uanon != NULL);
				swslot = pg->uanon->an_swslot;
			}
			KASSERT(swslot);
		}
#else /* defined(VMSWAP) */
		panic("%s: swap", __func__);
#endif /* defined(VMSWAP) */
	}
	for (i = 0; i < npages; i++) {
		pg = pgs[i];
		KASSERT(swap || pg->uobject == uobj);
		UVMHIST_LOG(ubchist, "pg %p", pg, 0,0,0);

#if defined(VMSWAP)
		/*
		 * for swap i/os, lock each page's object (or anon)
		 * individually since each page may need a different lock.
		 */

		if (swap) {
			if (pg->uobject != NULL) {
				slock = &pg->uobject->vmobjlock;
			} else {
				slock = &pg->uanon->an_lock;
			}
			mutex_enter(slock);
			mutex_enter(&uvm_pageqlock);
		}
#endif /* defined(VMSWAP) */

		/*
		 * process errors.  for reads, just mark the page to be freed.
		 * for writes, if the error was ENOMEM, we assume this was
		 * a transient failure so we mark the page dirty so that
		 * we'll try to write it again later.  for all other write
		 * errors, we assume the error is permanent, thus the data
		 * in the page is lost.  bummer.
		 */

		if (error) {
			int slot;
			if (!write) {
				pg->flags |= PG_RELEASED;
				continue;
			} else if (error == ENOMEM) {
				if (pg->flags & PG_PAGEOUT) {
					pg->flags &= ~PG_PAGEOUT;
					pageout_done++;
				}
				pg->flags &= ~PG_CLEAN;
				uvm_pageactivate(pg);
				slot = 0;
			} else
				slot = SWSLOT_BAD;

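			/*
			 * record the page's new swap slot state: 0
			 * dissociates it from its old slot (the ENOMEM
			 * case will retry the pageout later), while
			 * SWSLOT_BAD marks the slot's contents unusable.
			 */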
#if defined(VMSWAP)
			if (swap) {
				if (pg->uobject != NULL) {
					int oldslot;
					oldslot = uao_set_swslot(pg->uobject,
						pg->offset >> PAGE_SHIFT, slot);
					KASSERT(oldslot == swslot + i);
				} else {
					KASSERT(pg->uanon->an_swslot ==
						swslot + i);
					pg->uanon->an_swslot = slot;
				}
			}
#endif /* defined(VMSWAP) */
		}

		/*
		 * if the page is PG_FAKE, this must have been a read to
		 * initialize the page.  clear PG_FAKE and enqueue the page
		 * on a paging queue.  we must also clear the pmap "modified"
		 * flag since it may still be set from the page's previous
		 * identity.
		 */

		if (pg->flags & PG_FAKE) {
			KASSERT(!write);
			pg->flags &= ~PG_FAKE;
#if defined(READAHEAD_STATS)
			pg->pqflags |= PQ_READAHEAD;
			uvm_ra_total.ev_count++;
#endif /* defined(READAHEAD_STATS) */
			KASSERT((pg->flags & PG_CLEAN) != 0);
			uvm_pageenqueue(pg);
			pmap_clear_modify(pg);
		}

		/*
		 * do accounting for pagedaemon i/o and arrange to free
		 * the pages instead of just unbusying them.
		 */

		if (pg->flags & PG_PAGEOUT) {
			pg->flags &= ~PG_PAGEOUT;
			pageout_done++;
			uvmexp.pdfreed++;
			pg->flags |= PG_RELEASED;
		}

#if defined(VMSWAP)
		/*
		 * for swap pages, unlock everything for this page now.
		 */

		if (swap) {
			if (pg->uobject == NULL && pg->uanon->an_ref == 0 &&
			    (pg->flags & PG_RELEASED) != 0) {
				mutex_exit(&uvm_pageqlock);
				uvm_anon_release(pg->uanon);
			} else {
				uvm_page_unbusy(&pg, 1);
				mutex_exit(&uvm_pageqlock);
				mutex_exit(slock);
			}
		}
#endif /* defined(VMSWAP) */
	}
	uvm_pageout_done(pageout_done);
	if (!swap) {
		uvm_page_unbusy(pgs, npages);
		mutex_exit(&uvm_pageqlock);
		mutex_exit(slock);
	} else {
#if defined(VMSWAP)
		KASSERT(write);

		/* these pages are now only in swap. */
		mutex_enter(&uvm_swap_data_lock);
		KASSERT(uvmexp.swpgonly + npages <= uvmexp.swpginuse);
		if (error != ENOMEM)
			uvmexp.swpgonly += npages;
		mutex_exit(&uvm_swap_data_lock);
		if (error) {
			if (error != ENOMEM)
				uvm_swap_markbad(swslot, npages);
			else
				uvm_swap_free(swslot, npages);
		}
		uvmexp.pdpending--;
#endif /* defined(VMSWAP) */
	}
}

/*
 * uvm_aio_aiodone: do iodone processing for async i/os.
 * this should be called in thread context, not interrupt context.
 */

void
uvm_aio_aiodone(struct buf *bp)
{
	int npages = bp->b_bufsize >> PAGE_SHIFT;
	struct vm_page *pgs[npages];
	int i, error;
	bool write;
	UVMHIST_FUNC("uvm_aio_aiodone"); UVMHIST_CALLED(ubchist);
	UVMHIST_LOG(ubchist, "bp %p", bp, 0,0,0);

	error = bp->b_error;
	write = (bp->b_flags & B_READ) == 0;

	for (i = 0; i < npages; i++) {
		pgs[i] = uvm_pageratop((vaddr_t)bp->b_data + (i << PAGE_SHIFT));
		UVMHIST_LOG(ubchist, "pgs[%d] = %p", i, pgs[i],0,0);
	}
	uvm_pagermapout((vaddr_t)bp->b_data, npages);

	uvm_aio_aiodone_pages(pgs, npages, write, error);

	if (write && (bp->b_cflags & BC_AGE) != 0) {
		mutex_enter(bp->b_objlock);
		vwakeup(bp);
		mutex_exit(bp->b_objlock);
	}
	putiobuf(bp);
}

/*
 * uvm_pageratop: convert KVAs in the pager map back to their page
 * structures.
 */

struct vm_page *
uvm_pageratop(vaddr_t kva)
{
	struct vm_page *pg;
	paddr_t pa;
	bool rv;

	rv = pmap_extract(pmap_kernel(), kva, &pa);
	KASSERT(rv);
	pg = PHYS_TO_VM_PAGE(pa);
	KASSERT(pg != NULL);
	return (pg);
}