/*	$NetBSD: uvm_pager.c,v 1.93 2008/11/16 19:34:29 pooka Exp $	*/

/*
 *
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor and
 *      Washington University.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from: Id: uvm_pager.c,v 1.1.2.23 1998/02/02 20:38:06 chuck Exp
 */

/*
 * uvm_pager.c: generic functions used to assist the pagers.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_pager.c,v 1.93 2008/11/16 19:34:29 pooka Exp $");

#include "opt_uvmhist.h"
#include "opt_readahead.h"
#include "opt_pagermap.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/vnode.h>
#include <sys/buf.h>

#include <uvm/uvm.h>

/*
 * XXX
 * this is needed until the device strategy interface
 * is changed to do physically-addressed i/o.
 */

#ifndef PAGER_MAP_DEFAULT_SIZE
#define PAGER_MAP_DEFAULT_SIZE	(16 * 1024 * 1024)
#endif

#ifndef PAGER_MAP_SIZE
#define PAGER_MAP_SIZE	PAGER_MAP_DEFAULT_SIZE
#endif
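
/*
 * The default above can be overridden at build time through the
 * "opt_pagermap.h" header included earlier.  A kernel config fragment
 * along these lines is the assumed mechanism (illustrative example,
 * 32MB shown):
 *
 *	options PAGER_MAP_SIZE=0x2000000
 */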

size_t pager_map_size = PAGER_MAP_SIZE;

struct pool *uvm_aiobuf_pool;

/*
 * list of uvm pagers in the system
 */

const struct uvm_pagerops * const uvmpagerops[] = {
	&aobj_pager,
	&uvm_deviceops,
	&uvm_vnodeops,
	&ubc_pager,
};
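
/*
 * Each entry above points at a struct uvm_pagerops whose hooks are filled
 * in by that pager.  A minimal sketch of such a definition, assuming
 * hypothetical example_init/example_get/example_put functions and naming
 * only the hooks relevant here (pgo_init is the one invoked from
 * uvm_pager_init() below; pgo_get/pgo_put are the pagein/pageout entry
 * points used elsewhere in UVM):
 *
 *	const struct uvm_pagerops example_pager = {
 *		.pgo_init = example_init,	// boot-time initialization
 *		.pgo_get = example_get,		// pagein entry point
 *		.pgo_put = example_put,		// pageout entry point
 *	};
 */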

/*
 * the pager map: provides KVA for I/O
 */

struct vm_map *pager_map;		/* XXX */
kmutex_t pager_map_wanted_lock;
bool pager_map_wanted;	/* locked by pager map */
static vaddr_t emergva;
static bool emerginuse;

/*
 * uvm_pager_init: init pagers (at boot time)
 */

void
uvm_pager_init(void)
{
	u_int lcv;
	vaddr_t sva, eva;

	/*
	 * init pager map
	 */

	sva = 0;
	pager_map = uvm_km_suballoc(kernel_map, &sva, &eva, pager_map_size, 0,
	    false, NULL);
	mutex_init(&pager_map_wanted_lock, MUTEX_DEFAULT, IPL_NONE);
	pager_map_wanted = false;
	emergva = uvm_km_alloc(kernel_map, round_page(MAXPHYS), 0,
	    UVM_KMF_VAONLY);
#if defined(DEBUG)
	if (emergva == 0)
		panic("emergva");
#endif
	emerginuse = false;

	/*
	 * init ASYNC I/O queue
	 */

	TAILQ_INIT(&uvm.aio_done);

	/*
	 * call pager init functions
	 */
	for (lcv = 0 ; lcv < __arraycount(uvmpagerops); lcv++) {
		if (uvmpagerops[lcv]->pgo_init)
			uvmpagerops[lcv]->pgo_init();
	}
}

/*
 * uvm_pagermapin: map pages into KVA (pager_map) for I/O that needs mappings
 *
 * we basically just map in a blank map entry to reserve the space in the
 * map and then use pmap_kenter_pa() to put the mappings in by hand.
 */

vaddr_t
uvm_pagermapin(struct vm_page **pps, int npages, int flags)
{
	vsize_t size;
	vaddr_t kva;
	vaddr_t cva;
	struct vm_page *pp;
	vm_prot_t prot;
	const bool pdaemon = curlwp == uvm.pagedaemon_lwp;
	UVMHIST_FUNC("uvm_pagermapin"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist,"(pps=0x%x, npages=%d)", pps, npages,0,0);

	/*
	 * compute protection.  outgoing I/O only needs read
	 * access to the page, whereas incoming needs read/write.
	 */

	prot = VM_PROT_READ;
	if (flags & UVMPAGER_MAPIN_READ)
		prot |= VM_PROT_WRITE;

ReStart:
	size = npages << PAGE_SHIFT;
	kva = 0;			/* let system choose VA */

	if (uvm_map(pager_map, &kva, size, NULL, UVM_UNKNOWN_OFFSET, 0,
	    UVM_FLAG_NOMERGE | (pdaemon ? UVM_FLAG_NOWAIT : 0)) != 0) {
		if (pdaemon) {
			mutex_enter(&pager_map_wanted_lock);
			if (emerginuse) {
				UVM_UNLOCK_AND_WAIT(&emergva,
				    &pager_map_wanted_lock, false,
				    "emergva", 0);
				goto ReStart;
			}
			emerginuse = true;
			mutex_exit(&pager_map_wanted_lock);
			kva = emergva;
			/* The shift implicitly truncates to PAGE_SIZE */
			KASSERT(npages <= (MAXPHYS >> PAGE_SHIFT));
			goto enter;
		}
		if ((flags & UVMPAGER_MAPIN_WAITOK) == 0) {
			UVMHIST_LOG(maphist,"<- NOWAIT failed", 0,0,0,0);
			return(0);
		}
		mutex_enter(&pager_map_wanted_lock);
		pager_map_wanted = true;
		UVMHIST_LOG(maphist, "  SLEEPING on pager_map",0,0,0,0);
		UVM_UNLOCK_AND_WAIT(pager_map, &pager_map_wanted_lock, false,
		    "pager_map", 0);
		goto ReStart;
	}

enter:
	/* got it */
	for (cva = kva ; size != 0 ; size -= PAGE_SIZE, cva += PAGE_SIZE) {
		pp = *pps++;
		KASSERT(pp);
		KASSERT(pp->flags & PG_BUSY);
		pmap_kenter_pa(cva, VM_PAGE_TO_PHYS(pp), prot);
	}
	pmap_update(vm_map_pmap(pager_map));

	UVMHIST_LOG(maphist, "<- done (KVA=0x%x)", kva,0,0,0);
	return(kva);
}
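
/*
 * A minimal sketch of the expected calling pattern (hypothetical caller,
 * not from this file; "pgs" is assumed to hold "npages" PG_BUSY pages):
 *
 *	vaddr_t kva;
 *
 *	kva = uvm_pagermapin(pgs, npages, UVMPAGER_MAPIN_WAITOK);
 *	// ... point a buf's b_data at kva and start the device I/O ...
 *	// once the I/O completes, tear the mapping down again:
 *	uvm_pagermapout(kva, npages);
 */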

/*
 * uvm_pagermapout: remove pager_map mapping
 *
 * we remove the page mappings by hand and then remove the map entry
 * (waking up anyone waiting for space).
 */

void
uvm_pagermapout(vaddr_t kva, int npages)
{
	vsize_t size = npages << PAGE_SHIFT;
	struct vm_map_entry *entries;
	UVMHIST_FUNC("uvm_pagermapout"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, " (kva=0x%x, npages=%d)", kva, npages,0,0);

	/*
	 * duplicate uvm_unmap, but add in pager_map_wanted handling.
	 */

	pmap_kremove(kva, size);
	if (kva == emergva) {
		mutex_enter(&pager_map_wanted_lock);
		emerginuse = false;
		wakeup(&emergva);
		mutex_exit(&pager_map_wanted_lock);
		return;
	}

	vm_map_lock(pager_map);
	uvm_unmap_remove(pager_map, kva, kva + size, &entries, NULL, 0);
	mutex_enter(&pager_map_wanted_lock);
	if (pager_map_wanted) {
		pager_map_wanted = false;
		wakeup(pager_map);
	}
	mutex_exit(&pager_map_wanted_lock);
	vm_map_unlock(pager_map);
	if (entries)
		uvm_unmap_detach(entries, 0);
	pmap_update(pmap_kernel());
	UVMHIST_LOG(maphist,"<- done",0,0,0,0);
}

/*
 * interrupt-context iodone handler for nested i/o bufs.
 *
 * => the buffer is private so need not be locked here
 */

void
uvm_aio_biodone1(struct buf *bp)
{
	struct buf *mbp = bp->b_private;

	KASSERT(mbp != bp);
	if (bp->b_error != 0) {
		mbp->b_error = bp->b_error;
	}
	mbp->b_resid -= bp->b_bcount;
	putiobuf(bp);
	if (mbp->b_resid == 0) {
		biodone(mbp);
	}
}
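
/*
 * A minimal sketch of how a nested buf is assumed to be wired up before
 * the handler above runs (hypothetical caller; "mbp" is the master buf
 * and "vp" its vnode):
 *
 *	struct buf *bp = getiobuf(vp, true);	// child buf for one chunk
 *	bp->b_private = mbp;			// link back to the master
 *	bp->b_iodone = uvm_aio_biodone1;	// nested completion handler
 *
 * the master's b_resid is assumed to start at the total byte count; each
 * completing child subtracts its b_bcount, and the master is biodone()d
 * when the count reaches zero.
 */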

/*
 * interrupt-context iodone handler for single-buf i/os
 * or the top-level buf of a nested-buf i/o.
 */

void
uvm_aio_biodone(struct buf *bp)
{
	/* reset b_iodone for when this is a single-buf i/o. */
	bp->b_iodone = uvm_aio_aiodone;

	workqueue_enqueue(uvm.aiodone_queue, &bp->b_work, NULL);
}

/*
 * uvm_aio_aiodone_pages: do the per-page completion work for an async i/o:
 * process any error, clear transient page state and unbusy or release the
 * pages, and (for swap-backed i/os) update the swap accounting.
 */

void
uvm_aio_aiodone_pages(struct vm_page **pgs, int npages, bool write, int error)
{
	struct uvm_object *uobj;
	struct vm_page *pg;
	kmutex_t *slock;
	int pageout_done;
	int swslot;
	int i;
	bool swap;
	UVMHIST_FUNC("uvm_aio_aiodone_pages"); UVMHIST_CALLED(ubchist);

	swslot = 0;
	pageout_done = 0;
	slock = NULL;
	uobj = NULL;
	pg = pgs[0];
	swap = (pg->uanon != NULL && pg->uobject == NULL) ||
		(pg->pqflags & PQ_AOBJ) != 0;
	if (!swap) {
		uobj = pg->uobject;
		slock = &uobj->vmobjlock;
		mutex_enter(slock);
		mutex_enter(&uvm_pageqlock);
	} else {
#if defined(VMSWAP)
		if (error) {
			if (pg->uobject != NULL) {
				swslot = uao_find_swslot(pg->uobject,
				    pg->offset >> PAGE_SHIFT);
			} else {
				KASSERT(pg->uanon != NULL);
				swslot = pg->uanon->an_swslot;
			}
			KASSERT(swslot);
		}
#else /* defined(VMSWAP) */
		panic("%s: swap", __func__);
#endif /* defined(VMSWAP) */
	}
	for (i = 0; i < npages; i++) {
		pg = pgs[i];
		KASSERT(swap || pg->uobject == uobj);
		UVMHIST_LOG(ubchist, "pg %p", pg, 0,0,0);

#if defined(VMSWAP)
		/*
		 * for swap i/os, lock each page's object (or anon)
		 * individually since each page may need a different lock.
		 */

		if (swap) {
			if (pg->uobject != NULL) {
				slock = &pg->uobject->vmobjlock;
			} else {
				slock = &pg->uanon->an_lock;
			}
			mutex_enter(slock);
			mutex_enter(&uvm_pageqlock);
		}
#endif /* defined(VMSWAP) */

		/*
		 * process errors.  for reads, just mark the page to be freed.
		 * for writes, if the error was ENOMEM, we assume this was
		 * a transient failure so we mark the page dirty so that
		 * we'll try to write it again later.  for all other write
		 * errors, we assume the error is permanent, thus the data
		 * in the page is lost.  bummer.
		 */

		if (error) {
			int slot;
			if (!write) {
				pg->flags |= PG_RELEASED;
				continue;
			} else if (error == ENOMEM) {
				if (pg->flags & PG_PAGEOUT) {
					pg->flags &= ~PG_PAGEOUT;
					pageout_done++;
				}
				pg->flags &= ~PG_CLEAN;
				uvm_pageactivate(pg);
				slot = 0;
			} else
				slot = SWSLOT_BAD;

#if defined(VMSWAP)
			if (swap) {
				if (pg->uobject != NULL) {
					int oldslot;
					oldslot = uao_set_swslot(pg->uobject,
						pg->offset >> PAGE_SHIFT, slot);
					KASSERT(oldslot == swslot + i);
				} else {
					KASSERT(pg->uanon->an_swslot ==
						swslot + i);
					pg->uanon->an_swslot = slot;
				}
			}
#endif /* defined(VMSWAP) */
		}

		/*
		 * if the page is PG_FAKE, this must have been a read to
		 * initialize the page.  clear PG_FAKE and activate the page.
		 * we must also clear the pmap "modified" flag since it may
		 * still be set from the page's previous identity.
		 */

		if (pg->flags & PG_FAKE) {
			KASSERT(!write);
			pg->flags &= ~PG_FAKE;
#if defined(READAHEAD_STATS)
			pg->pqflags |= PQ_READAHEAD;
			uvm_ra_total.ev_count++;
#endif /* defined(READAHEAD_STATS) */
			KASSERT((pg->flags & PG_CLEAN) != 0);
			uvm_pageenqueue(pg);
			pmap_clear_modify(pg);
		}

		/*
		 * do accounting for pagedaemon i/o and arrange to free
		 * the pages instead of just unbusying them.
		 */

		if (pg->flags & PG_PAGEOUT) {
			pg->flags &= ~PG_PAGEOUT;
			pageout_done++;
			uvmexp.pdfreed++;
			pg->flags |= PG_RELEASED;
		}

#if defined(VMSWAP)
		/*
		 * for swap pages, unlock everything for this page now.
		 */

		if (swap) {
			if (pg->uobject == NULL && pg->uanon->an_ref == 0 &&
			    (pg->flags & PG_RELEASED) != 0) {
				mutex_exit(&uvm_pageqlock);
				uvm_anon_release(pg->uanon);
			} else {
				uvm_page_unbusy(&pg, 1);
				mutex_exit(&uvm_pageqlock);
				mutex_exit(slock);
			}
		}
#endif /* defined(VMSWAP) */
	}
	uvm_pageout_done(pageout_done);
	if (!swap) {
		uvm_page_unbusy(pgs, npages);
		mutex_exit(&uvm_pageqlock);
		mutex_exit(slock);
	} else {
#if defined(VMSWAP)
		KASSERT(write);

		/* these pages are now only in swap. */
		mutex_enter(&uvm_swap_data_lock);
		KASSERT(uvmexp.swpgonly + npages <= uvmexp.swpginuse);
		if (error != ENOMEM)
			uvmexp.swpgonly += npages;
		mutex_exit(&uvm_swap_data_lock);
		if (error) {
			if (error != ENOMEM)
				uvm_swap_markbad(swslot, npages);
			else
				uvm_swap_free(swslot, npages);
		}
		uvmexp.pdpending--;
#endif /* defined(VMSWAP) */
	}
}

/*
 * uvm_aio_aiodone: do iodone processing for async i/os.
 * this should be called in thread context, not interrupt context.
 */

void
uvm_aio_aiodone(struct buf *bp)
{
	int npages = bp->b_bufsize >> PAGE_SHIFT;
	struct vm_page *pgs[npages];
	int i, error;
	bool write;
	UVMHIST_FUNC("uvm_aio_aiodone"); UVMHIST_CALLED(ubchist);
	UVMHIST_LOG(ubchist, "bp %p", bp, 0,0,0);

	error = bp->b_error;
	write = (bp->b_flags & B_READ) == 0;
	/* XXXUBC BC_NOCACHE is for swap pager, should be done differently */
	if (write && !(bp->b_cflags & BC_NOCACHE) && bioopsp != NULL)
		(*bioopsp->io_pageiodone)(bp);

	for (i = 0; i < npages; i++) {
		pgs[i] = uvm_pageratop((vaddr_t)bp->b_data + (i << PAGE_SHIFT));
		UVMHIST_LOG(ubchist, "pgs[%d] = %p", i, pgs[i],0,0);
	}
	uvm_pagermapout((vaddr_t)bp->b_data, npages);

	uvm_aio_aiodone_pages(pgs, npages, write, error);

	if (write && (bp->b_cflags & BC_AGE) != 0) {
		mutex_enter(bp->b_objlock);
		vwakeup(bp);
		mutex_exit(bp->b_objlock);
	}
	putiobuf(bp);
}
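
/*
 * an informal summary of the async completion path implemented above
 * (added as a reading aid; the call chain is taken from this file and
 * the interrupt/thread split from the comments on each function):
 *
 *	device i/o completes
 *	  -> biodone(bp)
 *	    -> uvm_aio_biodone(bp)		(interrupt context)
 *	      -> workqueue_enqueue(uvm.aiodone_queue, ...)
 *	        -> uvm_aio_aiodone(bp)		(thread context)
 *	          -> uvm_pagermapout()
 *	          -> uvm_aio_aiodone_pages()
 */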

/*
 * uvm_pageratop: convert a KVA in the pager map back to its vm_page
 * structure.
 */

struct vm_page *
uvm_pageratop(vaddr_t kva)
{
	struct vm_page *pg;
	paddr_t pa;
	bool rv;

	rv = pmap_extract(pmap_kernel(), kva, &pa);
	KASSERT(rv);
	pg = PHYS_TO_VM_PAGE(pa);
	KASSERT(pg != NULL);
	return (pg);
}
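
/*
 * A minimal sketch of the round trip this enables (hypothetical caller;
 * "pgs" and "npages" as in the uvm_pagermapin() example above):
 *
 *	vaddr_t kva = uvm_pagermapin(pgs, npages, UVMPAGER_MAPIN_WAITOK);
 *	KASSERT(uvm_pageratop(kva + (i << PAGE_SHIFT)) == pgs[i]);
 *
 * uvm_aio_aiodone() relies on exactly this identity to recover the page
 * array from a buf's b_data pointer.
 */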