/*	$NetBSD: uvm_pager.c,v 1.100 2011/04/23 18:14:12 rmind Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from: Id: uvm_pager.c,v 1.1.2.23 1998/02/02 20:38:06 chuck Exp
 */

/*
 * uvm_pager.c: generic functions used to assist the pagers.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_pager.c,v 1.100 2011/04/23 18:14:12 rmind Exp $");

#include "opt_uvmhist.h"
#include "opt_readahead.h"
#include "opt_pagermap.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/vnode.h>
#include <sys/buf.h>

#include <uvm/uvm.h>

/*
 * XXX
 * this is needed until the device strategy interface
 * is changed to do physically-addressed i/o.
 */

#ifndef PAGER_MAP_DEFAULT_SIZE
#define PAGER_MAP_DEFAULT_SIZE	(16 * 1024 * 1024)
#endif

#ifndef PAGER_MAP_SIZE
#define PAGER_MAP_SIZE	PAGER_MAP_DEFAULT_SIZE
#endif

size_t pager_map_size = PAGER_MAP_SIZE;
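
/*
 * PAGER_MAP_SIZE is a kernel configuration parameter (pulled in via
 * opt_pagermap.h above).  An illustrative config fragment, as an
 * example only, to double the default 16MB of pager KVA:
 *
 *	options PAGER_MAP_SIZE=0x2000000	# 32MB pager map
 */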

/*
 * list of uvm pagers in the system
 */

const struct uvm_pagerops * const uvmpagerops[] = {
	&aobj_pager,
	&uvm_deviceops,
	&uvm_vnodeops,
	&ubc_pager,
};

/*
 * the pager map: provides KVA for I/O
 */

struct vm_map *pager_map;		/* XXX */
kmutex_t pager_map_wanted_lock;
bool pager_map_wanted;	/* locked by pager_map_wanted_lock */
static vaddr_t emergva;
static bool emerginuse;

/*
 * uvm_pager_init: init pagers (at boot time)
 */

void
uvm_pager_init(void)
{
	u_int lcv;
	vaddr_t sva, eva;

	/*
	 * init pager map
	 */

	sva = 0;
	pager_map = uvm_km_suballoc(kernel_map, &sva, &eva, pager_map_size, 0,
	    false, NULL);
	mutex_init(&pager_map_wanted_lock, MUTEX_DEFAULT, IPL_NONE);
	pager_map_wanted = false;
	emergva = uvm_km_alloc(kernel_map, round_page(MAXPHYS), 0,
	    UVM_KMF_VAONLY);
#if defined(DEBUG)
	if (emergva == 0)
		panic("emergva");
#endif
	emerginuse = false;

	/*
	 * init ASYNC I/O queue
	 */

	TAILQ_INIT(&uvm.aio_done);

	/*
	 * call pager init functions
	 */
	for (lcv = 0 ; lcv < __arraycount(uvmpagerops); lcv++) {
		if (uvmpagerops[lcv]->pgo_init)
			uvmpagerops[lcv]->pgo_init();
	}
}
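
/*
 * uvm_pager_init() is expected to run exactly once during early boot
 * (from uvm_init(), after kernel_map is set up); none of the above is
 * safe to repeat once the pager submap and emergency VA exist.
 */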

/*
 * uvm_pagermapin: map pages into KVA (pager_map) for I/O that needs mappings
 *
 * we basically just map in a blank map entry to reserve the space in the
 * map and then use pmap_kenter_pa() to put the mappings in by hand.
 */

vaddr_t
uvm_pagermapin(struct vm_page **pps, int npages, int flags)
{
	vsize_t size;
	vaddr_t kva;
	vaddr_t cva;
	struct vm_page *pp;
	vm_prot_t prot;
	const bool pdaemon = curlwp == uvm.pagedaemon_lwp;
	UVMHIST_FUNC("uvm_pagermapin"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist,"(pps=0x%x, npages=%d)", pps, npages,0,0);

	/*
	 * compute protection.  outgoing I/O only needs read
	 * access to the page, whereas incoming needs read/write.
	 */

	prot = VM_PROT_READ;
	if (flags & UVMPAGER_MAPIN_READ)
		prot |= VM_PROT_WRITE;

ReStart:
	size = npages << PAGE_SHIFT;
	kva = 0;			/* let system choose VA */

	if (uvm_map(pager_map, &kva, size, NULL, UVM_UNKNOWN_OFFSET, 0,
	    UVM_FLAG_NOMERGE | (pdaemon ? UVM_FLAG_NOWAIT : 0)) != 0) {
		if (pdaemon) {
			mutex_enter(&pager_map_wanted_lock);
			if (emerginuse) {
				UVM_UNLOCK_AND_WAIT(&emergva,
				    &pager_map_wanted_lock, false,
				    "emergva", 0);
				goto ReStart;
			}
			emerginuse = true;
			mutex_exit(&pager_map_wanted_lock);
			kva = emergva;
			/* the shift rounds MAXPHYS down to a whole page */
			KASSERT(npages <= (MAXPHYS >> PAGE_SHIFT));
			goto enter;
		}
		if ((flags & UVMPAGER_MAPIN_WAITOK) == 0) {
			UVMHIST_LOG(maphist,"<- NOWAIT failed", 0,0,0,0);
			return(0);
		}
		mutex_enter(&pager_map_wanted_lock);
		pager_map_wanted = true;
		UVMHIST_LOG(maphist, "  SLEEPING on pager_map",0,0,0,0);
		UVM_UNLOCK_AND_WAIT(pager_map, &pager_map_wanted_lock, false,
		    "pager_map", 0);
		goto ReStart;
	}

enter:
	/* got it */
	for (cva = kva ; size != 0 ; size -= PAGE_SIZE, cva += PAGE_SIZE) {
		pp = *pps++;
		KASSERT(pp);
		KASSERT(pp->flags & PG_BUSY);
		pmap_kenter_pa(cva, VM_PAGE_TO_PHYS(pp), prot, 0);
	}
	pmap_update(vm_map_pmap(pager_map));

	UVMHIST_LOG(maphist, "<- done (KVA=0x%x)", kva,0,0,0);
	return(kva);
}
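
/*
 * Illustrative calling pattern (a sketch, not code from this file): a
 * pager maps its busy pages, does the I/O through the buffer layer,
 * then unmaps.  "bp", "pgs" and "npages" are placeholders.
 *
 *	vaddr_t kva;
 *
 *	kva = uvm_pagermapin(pgs, npages, UVMPAGER_MAPIN_WAITOK);
 *	// a zero return is only possible when WAITOK is not set
 *	bp->b_data = (void *)kva;
 *	... issue the I/O; complete it here or via uvm_aio_biodone ...
 *	uvm_pagermapout(kva, npages);
 *
 * note that UVMPAGER_MAPIN_READ means the I/O will store into the pages
 * (a read from the device), which is why it adds VM_PROT_WRITE above.
 */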

/*
 * uvm_pagermapout: remove pager_map mapping
 *
 * we remove the pmap-level mappings by hand and then remove the map
 * entry (waking up anyone waiting for space).
 */

void
uvm_pagermapout(vaddr_t kva, int npages)
{
	vsize_t size = npages << PAGE_SHIFT;
	struct vm_map_entry *entries;
	UVMHIST_FUNC("uvm_pagermapout"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, " (kva=0x%x, npages=%d)", kva, npages,0,0);

	/*
	 * duplicate uvm_unmap, but add in pager_map_wanted handling.
	 */

	pmap_kremove(kva, npages << PAGE_SHIFT);
	pmap_update(pmap_kernel());

	if (kva == emergva) {
		mutex_enter(&pager_map_wanted_lock);
		emerginuse = false;
		wakeup(&emergva);
		mutex_exit(&pager_map_wanted_lock);
		return;
	}

	vm_map_lock(pager_map);
	uvm_unmap_remove(pager_map, kva, kva + size, &entries, NULL, 0);
	mutex_enter(&pager_map_wanted_lock);
	if (pager_map_wanted) {
		pager_map_wanted = false;
		wakeup(pager_map);
	}
	mutex_exit(&pager_map_wanted_lock);
	vm_map_unlock(pager_map);
	if (entries)
		uvm_unmap_detach(entries, 0);
	UVMHIST_LOG(maphist,"<- done",0,0,0,0);
}

/*
 * interrupt-context iodone handler for single-buf i/os
 * or the top-level buf of a nested-buf i/o.
 */

void
uvm_aio_biodone(struct buf *bp)
{
	/* reset b_iodone for when this is a single-buf i/o. */
	bp->b_iodone = uvm_aio_aiodone;

	workqueue_enqueue(uvm.aiodone_queue, &bp->b_work, NULL);
}
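
/*
 * Illustrative flow (a sketch of the intended use, not code from this
 * file): an async pager sets bp->b_iodone = uvm_aio_biodone before
 * calling the device's strategy routine.  At interrupt time biodone()
 * invokes uvm_aio_biodone(), which only re-points b_iodone at
 * uvm_aio_aiodone() and enqueues the buf, so the heavy completion work
 * below runs in thread context off the aiodone workqueue rather than
 * at interrupt level.
 */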

void
uvm_aio_aiodone_pages(struct vm_page **pgs, int npages, bool write, int error)
{
	struct uvm_object *uobj;
	struct vm_page *pg;
	kmutex_t *slock;
	int pageout_done;
	int swslot;
	int i;
	bool swap;
	UVMHIST_FUNC("uvm_aio_aiodone_pages"); UVMHIST_CALLED(ubchist);

	swslot = 0;
	pageout_done = 0;
	slock = NULL;
	uobj = NULL;
	pg = pgs[0];
	swap = (pg->uanon != NULL && pg->uobject == NULL) ||
		(pg->pqflags & PQ_AOBJ) != 0;
	if (!swap) {
		uobj = pg->uobject;
		slock = &uobj->vmobjlock;
		mutex_enter(slock);
		mutex_enter(&uvm_pageqlock);
	} else {
#if defined(VMSWAP)
		if (error) {
			if (pg->uobject != NULL) {
				swslot = uao_find_swslot(pg->uobject,
				    pg->offset >> PAGE_SHIFT);
			} else {
				KASSERT(pg->uanon != NULL);
				swslot = pg->uanon->an_swslot;
			}
			KASSERT(swslot);
		}
#else /* defined(VMSWAP) */
		panic("%s: swap", __func__);
#endif /* defined(VMSWAP) */
	}
	for (i = 0; i < npages; i++) {
		pg = pgs[i];
		KASSERT(swap || pg->uobject == uobj);
		UVMHIST_LOG(ubchist, "pg %p", pg, 0,0,0);

#if defined(VMSWAP)
		/*
		 * for swap i/os, lock each page's object (or anon)
		 * individually since each page may need a different lock.
		 */

		if (swap) {
			if (pg->uobject != NULL) {
				slock = &pg->uobject->vmobjlock;
			} else {
				slock = &pg->uanon->an_lock;
			}
			mutex_enter(slock);
			mutex_enter(&uvm_pageqlock);
		}
#endif /* defined(VMSWAP) */

		/*
		 * process errors.  for reads, just mark the page to be freed.
		 * for writes, if the error was ENOMEM, we assume this was
		 * a transient failure so we mark the page dirty so that
		 * we'll try to write it again later.  for all other write
		 * errors, we assume the error is permanent, thus the data
		 * in the page is lost.  bummer.
		 */

		if (error) {
			int slot;
			if (!write) {
				pg->flags |= PG_RELEASED;
				continue;
			} else if (error == ENOMEM) {
				if (pg->flags & PG_PAGEOUT) {
					pg->flags &= ~PG_PAGEOUT;
					pageout_done++;
				}
				pg->flags &= ~PG_CLEAN;
				uvm_pageactivate(pg);
				slot = 0;
			} else
				slot = SWSLOT_BAD;

#if defined(VMSWAP)
			if (swap) {
				if (pg->uobject != NULL) {
					int oldslot;
					oldslot = uao_set_swslot(pg->uobject,
						pg->offset >> PAGE_SHIFT, slot);
					KASSERT(oldslot == swslot + i);
				} else {
					KASSERT(pg->uanon->an_swslot ==
						swslot + i);
					pg->uanon->an_swslot = slot;
				}
			}
#endif /* defined(VMSWAP) */
		}

		/*
		 * if the page is PG_FAKE, this must have been a read to
		 * initialize the page.  clear PG_FAKE and activate the page.
		 * we must also clear the pmap "modified" flag since it may
		 * still be set from the page's previous identity.
		 */

		if (pg->flags & PG_FAKE) {
			KASSERT(!write);
			pg->flags &= ~PG_FAKE;
#if defined(READAHEAD_STATS)
			pg->pqflags |= PQ_READAHEAD;
			uvm_ra_total.ev_count++;
#endif /* defined(READAHEAD_STATS) */
			KASSERT((pg->flags & PG_CLEAN) != 0);
			uvm_pageenqueue(pg);
			pmap_clear_modify(pg);
		}

		/*
		 * do accounting for pagedaemon i/o and arrange to free
		 * the pages instead of just unbusying them.
		 */

		if (pg->flags & PG_PAGEOUT) {
			pg->flags &= ~PG_PAGEOUT;
			pageout_done++;
			uvmexp.pdfreed++;
			pg->flags |= PG_RELEASED;
		}

#if defined(VMSWAP)
		/*
		 * for swap pages, unlock everything for this page now.
		 */

		if (swap) {
			if (pg->uobject == NULL && pg->uanon->an_ref == 0 &&
			    (pg->flags & PG_RELEASED) != 0) {
				mutex_exit(&uvm_pageqlock);
				uvm_anon_release(pg->uanon);
			} else {
				uvm_page_unbusy(&pg, 1);
				mutex_exit(&uvm_pageqlock);
				mutex_exit(slock);
			}
		}
#endif /* defined(VMSWAP) */
	}
	uvm_pageout_done(pageout_done);
	if (!swap) {
		uvm_page_unbusy(pgs, npages);
		mutex_exit(&uvm_pageqlock);
		mutex_exit(slock);
	} else {
#if defined(VMSWAP)
		KASSERT(write);

		/* these pages are now only in swap. */
		mutex_enter(&uvm_swap_data_lock);
		KASSERT(uvmexp.swpgonly + npages <= uvmexp.swpginuse);
		if (error != ENOMEM)
			uvmexp.swpgonly += npages;
		mutex_exit(&uvm_swap_data_lock);
		if (error) {
			if (error != ENOMEM)
				uvm_swap_markbad(swslot, npages);
			else
				uvm_swap_free(swslot, npages);
		}
		uvmexp.pdpending--;
#endif /* defined(VMSWAP) */
	}
}
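
/*
 * Summary of the write-error policy above: ENOMEM is treated as
 * transient, so the pages are redirtied, their swap slots cleared
 * (slot = 0) and the slots freed with uvm_swap_free() for a later
 * retry; any other write error stores SWSLOT_BAD and calls
 * uvm_swap_markbad(), so a later pagein sees a bad slot instead of
 * stale data.
 */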

/*
 * uvm_aio_aiodone: do iodone processing for async i/os.
 * this should be called in thread context, not interrupt context.
 */

void
uvm_aio_aiodone(struct buf *bp)
{
	int npages = bp->b_bufsize >> PAGE_SHIFT;
	struct vm_page *pgs[npages];
	int i, error;
	bool write;
	UVMHIST_FUNC("uvm_aio_aiodone"); UVMHIST_CALLED(ubchist);
	UVMHIST_LOG(ubchist, "bp %p", bp, 0,0,0);

	error = bp->b_error;
	write = (bp->b_flags & B_READ) == 0;

	for (i = 0; i < npages; i++) {
		pgs[i] = uvm_pageratop((vaddr_t)bp->b_data + (i << PAGE_SHIFT));
		UVMHIST_LOG(ubchist, "pgs[%d] = %p", i, pgs[i],0,0);
	}
	uvm_pagermapout((vaddr_t)bp->b_data, npages);

	uvm_aio_aiodone_pages(pgs, npages, write, error);

	if (write && (bp->b_cflags & BC_AGE) != 0) {
		mutex_enter(bp->b_objlock);
		vwakeup(bp);
		mutex_exit(bp->b_objlock);
	}
	putiobuf(bp);
}
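
/*
 * note the ordering above: uvm_pageratop() must translate b_data back
 * into vm_page pointers *before* uvm_pagermapout() tears down the
 * pager_map (or emergency) mapping, because the translation goes
 * through pmap_extract() on the still-live KVA.
 */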

/*
 * uvm_pageratop: convert KVAs in the pager map back to their page
 * structures.
 */

struct vm_page *
uvm_pageratop(vaddr_t kva)
{
	struct vm_page *pg;
	paddr_t pa;
	bool rv;

	rv = pmap_extract(pmap_kernel(), kva, &pa);
	KASSERT(rv);
	pg = PHYS_TO_VM_PAGE(pa);
	KASSERT(pg != NULL);
	return (pg);
}