/*	$NetBSD: uvm_pager.c,v 1.89 2007/12/01 10:40:28 yamt Exp $	*/

/*
 *
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor and
 *      Washington University.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from: Id: uvm_pager.c,v 1.1.2.23 1998/02/02 20:38:06 chuck Exp
 */

/*
 * uvm_pager.c: generic functions used to assist the pagers.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_pager.c,v 1.89 2007/12/01 10:40:28 yamt Exp $");

#include "opt_uvmhist.h"
#include "opt_readahead.h"
#include "opt_pagermap.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/vnode.h>

#include <uvm/uvm.h>

/*
 * XXX
 * this is needed until the device strategy interface
 * is changed to do physically-addressed i/o.
 */

#ifndef PAGER_MAP_DEFAULT_SIZE
#define PAGER_MAP_DEFAULT_SIZE	(16 * 1024 * 1024)
#endif

#ifndef PAGER_MAP_SIZE
#define PAGER_MAP_SIZE	PAGER_MAP_DEFAULT_SIZE
#endif

size_t pager_map_size = PAGER_MAP_SIZE;
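
/*
 * note: the #ifndef dance above lets a kernel config override
 * PAGER_MAP_SIZE; any override arrives through opt_pagermap.h.
 */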

struct pool *uvm_aiobuf_pool;

/*
 * list of uvm pagers in the system
 */

const struct uvm_pagerops * const uvmpagerops[] = {
	&aobj_pager,
	&uvm_deviceops,
	&uvm_vnodeops,
	&ubc_pager,
};

/*
 * the pager map: provides KVA for I/O
 */

struct vm_map *pager_map;		/* XXX */
kmutex_t pager_map_wanted_lock;
bool pager_map_wanted;	/* locked by pager map */
static vaddr_t emergva;
static bool emerginuse;
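
/*
 * emergva is a single MAXPHYS-sized KVA reserve for the pagedaemon:
 * when pager_map is exhausted, the pagedaemon borrows this window
 * (serialized by emerginuse) so that pageouts can still make progress
 * instead of deadlocking while waiting for pager_map space.
 */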

/*
 * uvm_pager_init: init pagers (at boot time)
 */

void
uvm_pager_init(void)
{
	u_int lcv;
	vaddr_t sva, eva;

	/*
	 * init pager map
	 */

	sva = 0;
	pager_map = uvm_km_suballoc(kernel_map, &sva, &eva, pager_map_size, 0,
	    false, NULL);
	mutex_init(&pager_map_wanted_lock, MUTEX_DEFAULT, IPL_NONE);
	pager_map_wanted = false;
	emergva = uvm_km_alloc(kernel_map, round_page(MAXPHYS), 0,
	    UVM_KMF_VAONLY);
#if defined(DEBUG)
	if (emergva == 0)
		panic("emergva");
#endif
	emerginuse = false;

	/*
	 * init ASYNC I/O queue
	 */

	TAILQ_INIT(&uvm.aio_done);

	/*
	 * call pager init functions
	 */
	for (lcv = 0 ; lcv < __arraycount(uvmpagerops); lcv++) {
		if (uvmpagerops[lcv]->pgo_init)
			uvmpagerops[lcv]->pgo_init();
	}
}

/*
 * uvm_pagermapin: map pages into KVA (pager_map) for I/O that needs mappings
 *
 * we basically just map in a blank map entry to reserve the space in the
 * map and then use pmap_kenter_pa() to put the mappings in by hand.
 */

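/*
 * typical calling sequence (a sketch, not taken from any one caller):
 * a pager maps its busy pages, does the i/o on the resulting KVA, and
 * unmaps once the i/o has completed:
 *
 *	kva = uvm_pagermapin(pgs, npages, UVMPAGER_MAPIN_WAITOK);
 *	bp->b_data = (void *)kva;
 *	... start the i/o; when it finishes ...
 *	uvm_pagermapout(kva, npages);
 */
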
vaddr_t
uvm_pagermapin(struct vm_page **pps, int npages, int flags)
{
	vsize_t size;
	vaddr_t kva;
	vaddr_t cva;
	struct vm_page *pp;
	vm_prot_t prot;
	const bool pdaemon = curlwp == uvm.pagedaemon_lwp;
	UVMHIST_FUNC("uvm_pagermapin"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist,"(pps=0x%x, npages=%d)", pps, npages,0,0);

	/*
	 * compute protection.  outgoing I/O only needs read
	 * access to the page, whereas incoming needs read/write.
	 */

	prot = VM_PROT_READ;
	if (flags & UVMPAGER_MAPIN_READ)
		prot |= VM_PROT_WRITE;
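	/*
	 * (UVMPAGER_MAPIN_READ means the device reads *into* the pages,
	 * i.e. a pagein, so the kernel mapping must be writable.)
	 */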

ReStart:
	size = npages << PAGE_SHIFT;
	kva = 0;			/* let system choose VA */

	if (uvm_map(pager_map, &kva, size, NULL, UVM_UNKNOWN_OFFSET, 0,
	    UVM_FLAG_NOMERGE | (pdaemon ? UVM_FLAG_NOWAIT : 0)) != 0) {
		if (pdaemon) {
			mutex_enter(&pager_map_wanted_lock);
			if (emerginuse) {
				mtsleep(&emergva, PVM | PNORELOCK, "emergva",
				    0, &pager_map_wanted_lock);
				goto ReStart;
			}
			emerginuse = true;
			mutex_exit(&pager_map_wanted_lock);
			kva = emergva;
			/* the shift rounds MAXPHYS down to whole pages */
			KASSERT(npages <= (MAXPHYS >> PAGE_SHIFT));
			goto enter;
		}
		if ((flags & UVMPAGER_MAPIN_WAITOK) == 0) {
			UVMHIST_LOG(maphist,"<- NOWAIT failed", 0,0,0,0);
			return(0);
		}
		mutex_enter(&pager_map_wanted_lock);
		pager_map_wanted = true;
		UVMHIST_LOG(maphist, "  SLEEPING on pager_map",0,0,0,0);
		mtsleep(pager_map, PVM | PNORELOCK, "pager_map", 0,
		    &pager_map_wanted_lock);
		goto ReStart;
	}

enter:
	/* got it */
	for (cva = kva ; size != 0 ; size -= PAGE_SIZE, cva += PAGE_SIZE) {
		pp = *pps++;
		KASSERT(pp);
		KASSERT(pp->flags & PG_BUSY);
		pmap_kenter_pa(cva, VM_PAGE_TO_PHYS(pp), prot);
	}
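	/* flush any deferred pmap updates so the new mappings are usable */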
	pmap_update(vm_map_pmap(pager_map));

	UVMHIST_LOG(maphist, "<- done (KVA=0x%x)", kva,0,0,0);
	return(kva);
}

/*
 * uvm_pagermapout: remove pager_map mapping
 *
 * we remove the pmap-level mappings by hand and then remove the map
 * entry (waking up anyone waiting for pager_map space).
 */

void
uvm_pagermapout(vaddr_t kva, int npages)
{
	vsize_t size = npages << PAGE_SHIFT;
	struct vm_map_entry *entries;
	UVMHIST_FUNC("uvm_pagermapout"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, " (kva=0x%x, npages=%d)", kva, npages,0,0);

	/*
	 * duplicate uvm_unmap, but add in pager_map_wanted handling.
	 */

	pmap_kremove(kva, npages << PAGE_SHIFT);
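
	/*
	 * the emergency mapping has no map entry to free: just mark it
	 * available again and wake any pagedaemon i/o sleeping on it.
	 */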
	if (kva == emergva) {
		mutex_enter(&pager_map_wanted_lock);
		emerginuse = false;
		wakeup(&emergva);
		mutex_exit(&pager_map_wanted_lock);
		return;
	}

	vm_map_lock(pager_map);
	uvm_unmap_remove(pager_map, kva, kva + size, &entries, NULL, 0);
	mutex_enter(&pager_map_wanted_lock);
	if (pager_map_wanted) {
		pager_map_wanted = false;
		wakeup(pager_map);
	}
	mutex_exit(&pager_map_wanted_lock);
	vm_map_unlock(pager_map);
	if (entries)
		uvm_unmap_detach(entries, 0);
	pmap_update(pmap_kernel());
	UVMHIST_LOG(maphist,"<- done",0,0,0,0);
}

/*
 * interrupt-context iodone handler for nested i/o bufs.
 *
 * => must be at splbio().
 */

void
uvm_aio_biodone1(struct buf *bp)
{
	struct buf *mbp = bp->b_private;

	KASSERT(mbp != bp);
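
	/*
	 * fold this child buf's result into the master buf: record any
	 * error and credit the completed bytes against mbp->b_resid.
	 * the master i/o completes once every child has finished.
	 */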
	if (bp->b_error != 0)
		mbp->b_error = bp->b_error;
	mbp->b_resid -= bp->b_bcount;
	putiobuf(bp);
	if (mbp->b_resid == 0) {
		biodone(mbp);
	}
}

/*
 * interrupt-context iodone handler for single-buf i/os
 * or the top-level buf of a nested-buf i/o.
 *
 * => must be at splbio().
 */

void
uvm_aio_biodone(struct buf *bp)
{
	/* reset b_iodone for when this is a single-buf i/o. */
	bp->b_iodone = uvm_aio_aiodone;

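	/*
	 * the real completion work (unmapping, page accounting) may
	 * take locks and sleep, so defer it to the aiodone workqueue
	 * where it runs in thread context.
	 */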
	workqueue_enqueue(uvm.aiodone_queue, &bp->b_work, NULL);
}

/*
 * uvm_aio_aiodone: do iodone processing for async i/os.
 * this should be called in thread context, not interrupt context.
 */

void
uvm_aio_aiodone(struct buf *bp)
{
	int npages = bp->b_bufsize >> PAGE_SHIFT;
	struct vm_page *pg, *pgs[npages];
	struct uvm_object *uobj;
	struct simplelock *slock;
	int s, i, error, swslot;
	bool write, swap;
	UVMHIST_FUNC("uvm_aio_aiodone"); UVMHIST_CALLED(ubchist);
	UVMHIST_LOG(ubchist, "bp %p", bp, 0,0,0);

	error = bp->b_error;
	write = (bp->b_flags & B_READ) == 0;
	/* XXXUBC B_NOCACHE is for swap pager, should be done differently */
	if (write && !(bp->b_flags & B_NOCACHE) && bioopsp) {
		bioopsp->io_pageiodone(bp);
	}

	uobj = NULL;
	for (i = 0; i < npages; i++) {
		pgs[i] = uvm_pageratop((vaddr_t)bp->b_data + (i << PAGE_SHIFT));
		UVMHIST_LOG(ubchist, "pgs[%d] = %p", i, pgs[i],0,0);
	}
	uvm_pagermapout((vaddr_t)bp->b_data, npages);

	swslot = 0;
	slock = NULL;
	pg = pgs[0];
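
	/*
	 * this was a swap i/o if the first page belongs to an anon with
	 * no object, or is an aobj page (PQ_AOBJ); otherwise all pages
	 * belong to a single uvm_object and share its lock.
	 */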
	swap = (pg->uanon != NULL && pg->uobject == NULL) ||
		(pg->pqflags & PQ_AOBJ) != 0;
	if (!swap) {
		uobj = pg->uobject;
		slock = &uobj->vmobjlock;
		simple_lock(slock);
		uvm_lock_pageq();
	} else {
#if defined(VMSWAP)
		if (error) {
			if (pg->uobject != NULL) {
				swslot = uao_find_swslot(pg->uobject,
				    pg->offset >> PAGE_SHIFT);
			} else {
				KASSERT(pg->uanon != NULL);
				swslot = pg->uanon->an_swslot;
			}
			KASSERT(swslot);
		}
#else /* defined(VMSWAP) */
		panic("%s: swap", __func__);
#endif /* defined(VMSWAP) */
	}
	for (i = 0; i < npages; i++) {
		pg = pgs[i];
		KASSERT(swap || pg->uobject == uobj);
		UVMHIST_LOG(ubchist, "pg %p", pg, 0,0,0);

#if defined(VMSWAP)
		/*
		 * for swap i/os, lock each page's object (or anon)
		 * individually since each page may need a different lock.
		 */

		if (swap) {
			if (pg->uobject != NULL) {
				slock = &pg->uobject->vmobjlock;
			} else {
				slock = &pg->uanon->an_lock;
			}
			simple_lock(slock);
			uvm_lock_pageq();
		}
#endif /* defined(VMSWAP) */

		/*
		 * process errors.  for reads, just mark the page to be freed.
		 * for writes, if the error was ENOMEM, we assume this was
		 * a transient failure so we mark the page dirty so that
		 * we'll try to write it again later.  for all other write
		 * errors, we assume the error is permanent, thus the data
		 * in the page is lost.  bummer.
		 */

		if (error) {
			int slot;
			if (!write) {
				pg->flags |= PG_RELEASED;
				continue;
			} else if (error == ENOMEM) {
				if (pg->flags & PG_PAGEOUT) {
					pg->flags &= ~PG_PAGEOUT;
					uvmexp.paging--;
				}
				pg->flags &= ~PG_CLEAN;
				uvm_pageactivate(pg);
				slot = 0;
			} else
				slot = SWSLOT_BAD;

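			/*
			 * record the outcome in the swap layer: slot 0
			 * says the page no longer has a valid copy on
			 * swap (we will retry the write later), while
			 * SWSLOT_BAD marks the slot as unusable.
			 */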
#if defined(VMSWAP)
			if (swap) {
				if (pg->uobject != NULL) {
					int oldslot;
					oldslot = uao_set_swslot(pg->uobject,
						pg->offset >> PAGE_SHIFT, slot);
					KASSERT(oldslot == swslot + i);
				} else {
					KASSERT(pg->uanon->an_swslot ==
						swslot + i);
					pg->uanon->an_swslot = slot;
				}
			}
#endif /* defined(VMSWAP) */
		}

		/*
		 * if the page is PG_FAKE, this must have been a read to
		 * initialize the page.  clear PG_FAKE and activate the page.
		 * we must also clear the pmap "modified" flag since it may
		 * still be set from the page's previous identity.
		 */

		if (pg->flags & PG_FAKE) {
			KASSERT(!write);
			pg->flags &= ~PG_FAKE;
#if defined(READAHEAD_STATS)
			pg->pqflags |= PQ_READAHEAD;
			uvm_ra_total.ev_count++;
#endif /* defined(READAHEAD_STATS) */
			KASSERT((pg->flags & PG_CLEAN) != 0);
			uvm_pageenqueue(pg);
			pmap_clear_modify(pg);
		}

		/*
		 * do accounting for pagedaemon i/o and arrange to free
		 * the pages instead of just unbusying them.
		 */

		if (pg->flags & PG_PAGEOUT) {
			pg->flags &= ~PG_PAGEOUT;
			uvmexp.paging--;
			uvmexp.pdfreed++;
			pg->flags |= PG_RELEASED;
		}

#if defined(VMSWAP)
		/*
		 * for swap pages, unlock everything for this page now.
		 */

		if (swap) {
			if (pg->uobject == NULL && pg->uanon->an_ref == 0 &&
			    (pg->flags & PG_RELEASED) != 0) {
				uvm_unlock_pageq();
				uvm_anon_release(pg->uanon);
			} else {
				uvm_page_unbusy(&pg, 1);
				uvm_unlock_pageq();
				simple_unlock(slock);
			}
		}
#endif /* defined(VMSWAP) */
	}
	if (!swap) {
		uvm_page_unbusy(pgs, npages);
		uvm_unlock_pageq();
		simple_unlock(slock);
	} else {
#if defined(VMSWAP)
		KASSERT(write);

		/* these pages are now only in swap. */
		mutex_enter(&uvm_swap_data_lock);
		KASSERT(uvmexp.swpgonly + npages <= uvmexp.swpginuse);
		if (error != ENOMEM)
			uvmexp.swpgonly += npages;
		mutex_exit(&uvm_swap_data_lock);
		if (error) {
			if (error != ENOMEM)
				uvm_swap_markbad(swslot, npages);
			else
				uvm_swap_free(swslot, npages);
		}
		uvmexp.pdpending--;
#endif /* defined(VMSWAP) */
	}
	s = splbio();
	if (write && (bp->b_flags & B_AGE) != 0) {
		vwakeup(bp);
	}
	putiobuf(bp);
	splx(s);
}

/*
 * uvm_pageratop: convert a pager_map KVA back to its vm_page
 * structure.
 */

struct vm_page *
uvm_pageratop(vaddr_t kva)
{
	struct vm_page *pg;
	paddr_t pa;
	bool rv;

	rv = pmap_extract(pmap_kernel(), kva, &pa);
	KASSERT(rv);
	pg = PHYS_TO_VM_PAGE(pa);
	KASSERT(pg != NULL);
	return (pg);
}