xref: /netbsd-src/sys/uvm/uvm_pager.c (revision 20e85ad185ab16980f1219a557c42e057edb42ea)
/*	$NetBSD: uvm_pager.c,v 1.66 2005/04/01 11:59:39 yamt Exp $	*/

/*
 *
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor and
 *      Washington University.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from: Id: uvm_pager.c,v 1.1.2.23 1998/02/02 20:38:06 chuck Exp
 */

/*
 * uvm_pager.c: generic functions used to assist the pagers.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_pager.c,v 1.66 2005/04/01 11:59:39 yamt Exp $");

#include "opt_uvmhist.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/vnode.h>

#define UVM_PAGER
#include <uvm/uvm.h>

struct pool *uvm_aiobuf_pool;

/*
 * list of uvm pagers in the system
 */

struct uvm_pagerops * const uvmpagerops[] = {
	&aobj_pager,
	&uvm_deviceops,
	&uvm_vnodeops,
	&ubc_pager,
};

/*
 * the pager map: provides KVA for I/O
 */

struct vm_map *pager_map;		/* XXX */
struct simplelock pager_map_wanted_lock;
boolean_t pager_map_wanted;	/* locked by pager map */
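/*
 * emergency VA reserve: a single MAXPHYS-sized chunk of KVA kept
 * aside so the pagedaemon can still map pages for i/o when
 * pager_map itself is exhausted.
 */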
static vaddr_t emergva;
static boolean_t emerginuse;

/*
 * uvm_pager_init: init pagers (at boot time)
 */

void
uvm_pager_init()
{
	u_int lcv;
	vaddr_t sva, eva;

	/*
	 * init pager map
	 */

	sva = 0;
	pager_map = uvm_km_suballoc(kernel_map, &sva, &eva, PAGER_MAP_SIZE, 0,
	    FALSE, NULL);
	simple_lock_init(&pager_map_wanted_lock);
	pager_map_wanted = FALSE;
	emergva = uvm_km_alloc(kernel_map, round_page(MAXPHYS), 0,
	    UVM_KMF_VAONLY);
#if defined(DEBUG)
	if (emergva == 0)
		panic("emergva");
#endif
	emerginuse = FALSE;

	/*
	 * init ASYNC I/O queue
	 */

	TAILQ_INIT(&uvm.aio_done);

	/*
	 * call pager init functions
	 */
	for (lcv = 0 ; lcv < sizeof(uvmpagerops)/sizeof(struct uvm_pagerops *);
	    lcv++) {
		if (uvmpagerops[lcv]->pgo_init)
			uvmpagerops[lcv]->pgo_init();
	}
}

/*
 * uvm_pagermapin: map pages into KVA (pager_map) for I/O that needs mappings
 *
 * we basically just map in a blank map entry to reserve the space in the
 * map and then use pmap_kenter_pa() to put the mappings in by hand.
 */

vaddr_t
uvm_pagermapin(pps, npages, flags)
	struct vm_page **pps;
	int npages;
	int flags;
{
	vsize_t size;
	vaddr_t kva;
	vaddr_t cva;
	struct vm_page *pp;
	vm_prot_t prot;
	UVMHIST_FUNC("uvm_pagermapin"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist,"(pps=0x%x, npages=%d)", pps, npages,0,0);

	/*
	 * compute protection.  outgoing I/O only needs read
	 * access to the page, whereas incoming needs read/write.
	 */

	prot = VM_PROT_READ;
	if (flags & UVMPAGER_MAPIN_READ)
		prot |= VM_PROT_WRITE;

ReStart:
	size = npages << PAGE_SHIFT;
	kva = 0;			/* let system choose VA */

	if (uvm_map(pager_map, &kva, size, NULL,
	      UVM_UNKNOWN_OFFSET, 0, UVM_FLAG_NOMERGE) != 0) {
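		/*
		 * the pagedaemon must always be able to make progress
		 * even when pager_map is full, so instead of sleeping
		 * it falls back to the dedicated emergency mapping.
		 */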
		if (curproc == uvm.pagedaemon_proc) {
			simple_lock(&pager_map_wanted_lock);
			if (emerginuse) {
				UVM_UNLOCK_AND_WAIT(&emergva,
				    &pager_map_wanted_lock, FALSE,
				    "emergva", 0);
				goto ReStart;
			}
			emerginuse = TRUE;
			simple_unlock(&pager_map_wanted_lock);
			kva = emergva;
			/* The shift implicitly truncates to PAGE_SIZE */
			KASSERT(npages <= (MAXPHYS >> PAGE_SHIFT));
			goto enter;
		}
		if ((flags & UVMPAGER_MAPIN_WAITOK) == 0) {
			UVMHIST_LOG(maphist,"<- NOWAIT failed", 0,0,0,0);
			return(0);
		}
		simple_lock(&pager_map_wanted_lock);
		pager_map_wanted = TRUE;
		UVMHIST_LOG(maphist, "  SLEEPING on pager_map",0,0,0,0);
		UVM_UNLOCK_AND_WAIT(pager_map, &pager_map_wanted_lock, FALSE,
		    "pager_map", 0);
		goto ReStart;
	}

enter:
	/* got it */
	for (cva = kva ; size != 0 ; size -= PAGE_SIZE, cva += PAGE_SIZE) {
		pp = *pps++;
		KASSERT(pp);
		KASSERT(pp->flags & PG_BUSY);
		pmap_kenter_pa(cva, VM_PAGE_TO_PHYS(pp), prot);
	}
	pmap_update(vm_map_pmap(pager_map));

	UVMHIST_LOG(maphist, "<- done (KVA=0x%x)", kva,0,0,0);
	return(kva);
}

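/*
 * Example: a minimal sketch of how a pager might bracket a transfer
 * with uvm_pagermapin()/uvm_pagermapout().  Illustrative only (kept
 * under "#if 0"); example_pager_io() and its arguments are
 * hypothetical, not part of this file.
 */
#if 0
static int
example_pager_io(struct vm_page **pps, int npages, boolean_t write)
{
	vaddr_t kva;

	/*
	 * a pageout only needs read access to the pages; a pagein
	 * needs write access, which UVMPAGER_MAPIN_READ requests.
	 */
	kva = uvm_pagermapin(pps, npages, write ? 0 : UVMPAGER_MAPIN_READ);
	if (kva == 0)
		return(EAGAIN);	/* pager_map full; we didn't pass WAITOK */

	/* ... perform i/o on [kva, kva + (npages << PAGE_SHIFT)) ... */

	uvm_pagermapout(kva, npages);
	return(0);
}
#endif
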
/*
 * uvm_pagermapout: remove pager_map mapping
 *
 * we remove our pmap-level mappings by hand and then remove the map
 * entry (waking up anyone waiting for space).
 */

void
uvm_pagermapout(kva, npages)
	vaddr_t kva;
	int npages;
{
	vsize_t size = npages << PAGE_SHIFT;
	struct vm_map_entry *entries;
	UVMHIST_FUNC("uvm_pagermapout"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, " (kva=0x%x, npages=%d)", kva, npages,0,0);

	/*
	 * duplicate uvm_unmap, but add in pager_map_wanted handling.
	 */

	pmap_kremove(kva, npages << PAGE_SHIFT);
	if (kva == emergva) {
		simple_lock(&pager_map_wanted_lock);
		emerginuse = FALSE;
		wakeup(&emergva);
		simple_unlock(&pager_map_wanted_lock);
		return;
	}

	vm_map_lock(pager_map);
	uvm_unmap_remove(pager_map, kva, kva + size, &entries, NULL, 0);
	simple_lock(&pager_map_wanted_lock);
	if (pager_map_wanted) {
		pager_map_wanted = FALSE;
		wakeup(pager_map);
	}
	simple_unlock(&pager_map_wanted_lock);
	vm_map_unlock(pager_map);
	if (entries)
		uvm_unmap_detach(entries, 0);
	pmap_update(pmap_kernel());
	UVMHIST_LOG(maphist,"<- done",0,0,0,0);
}

/*
 * interrupt-context iodone handler for nested i/o bufs.
 *
 * => must be at splbio().
 */

void
uvm_aio_biodone1(bp)
	struct buf *bp;
{
	struct buf *mbp = bp->b_private;

	KASSERT(mbp != bp);
	if (bp->b_flags & B_ERROR) {
		mbp->b_flags |= B_ERROR;
		mbp->b_error = bp->b_error;
	}
	mbp->b_resid -= bp->b_bcount;
	pool_put(&bufpool, bp);
	if (mbp->b_resid == 0) {
		biodone(mbp);
	}
}

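/*
 * Example: the nested-buf arrangement that uvm_aio_biodone1() expects,
 * roughly as genfs does it when one logical i/o must be split across
 * discontiguous disk blocks.  Sketch only (kept under "#if 0"); "mbp",
 * "bp", "totalbytes" and "chunkbytes" are hypothetical.
 */
#if 0
	/* master buf: b_resid counts down as children complete */
	mbp->b_bcount = mbp->b_resid = totalbytes;
	mbp->b_iodone = uvm_aio_biodone;

	/* one child buf per contiguous chunk */
	bp = pool_get(&bufpool, PR_WAITOK);
	bp->b_bcount = chunkbytes;
	bp->b_private = mbp;		/* lets biodone1 find the master */
	bp->b_iodone = uvm_aio_biodone1;
	/* ... set b_data/b_blkno/b_vp, then VOP_STRATEGY(bp) ... */
#endif
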
/*
 * interrupt-context iodone handler for single-buf i/os
 * or the top-level buf of a nested-buf i/o.
 *
 * => must be at splbio().
 */

void
uvm_aio_biodone(bp)
	struct buf *bp;
{
	/* reset b_iodone for when this is a single-buf i/o. */
	bp->b_iodone = uvm_aio_aiodone;

	simple_lock(&uvm.aiodoned_lock);	/* locks uvm.aio_done */
	TAILQ_INSERT_TAIL(&uvm.aio_done, bp, b_freelist);
	wakeup(&uvm.aiodoned);
	simple_unlock(&uvm.aiodoned_lock);
}

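/*
 * Example: roughly what the aiodoned kernel thread does with the queue
 * that uvm_aio_biodone() fills (see uvm_pdaemon.c).  Simplified sketch,
 * kept under "#if 0"; details may differ from the real daemon.
 */
#if 0
	for (;;) {
		s = splbio();
		simple_lock(&uvm.aiodoned_lock);
		while ((bp = TAILQ_FIRST(&uvm.aio_done)) == NULL) {
			UVM_UNLOCK_AND_WAIT(&uvm.aiodoned,
			    &uvm.aiodoned_lock, FALSE, "aiodoned", 0);
			simple_lock(&uvm.aiodoned_lock);
		}
		TAILQ_REMOVE(&uvm.aio_done, bp, b_freelist);
		simple_unlock(&uvm.aiodoned_lock);
		splx(s);

		/* thread context: safe to take sleep locks */
		(*bp->b_iodone)(bp);	/* normally uvm_aio_aiodone() */
	}
#endif
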
/*
 * uvm_aio_aiodone: do iodone processing for async i/os.
 * this should be called in thread context, not interrupt context.
 */

void
uvm_aio_aiodone(bp)
	struct buf *bp;
{
	int npages = bp->b_bufsize >> PAGE_SHIFT;
	struct vm_page *pg, *pgs[npages];
	struct uvm_object *uobj;
	struct simplelock *slock;
	int s, i, error, swslot;
	boolean_t write, swap;
	UVMHIST_FUNC("uvm_aio_aiodone"); UVMHIST_CALLED(ubchist);
	UVMHIST_LOG(ubchist, "bp %p", bp, 0,0,0);

	error = (bp->b_flags & B_ERROR) ? (bp->b_error ? bp->b_error : EIO) : 0;
	write = (bp->b_flags & B_READ) == 0;
	/* XXXUBC B_NOCACHE is for swap pager, should be done differently */
	if (write && !(bp->b_flags & B_NOCACHE) && bioops.io_pageiodone) {
		(*bioops.io_pageiodone)(bp);
	}

	uobj = NULL;
	for (i = 0; i < npages; i++) {
		pgs[i] = uvm_pageratop((vaddr_t)bp->b_data + (i << PAGE_SHIFT));
		UVMHIST_LOG(ubchist, "pgs[%d] = %p", i, pgs[i],0,0);
	}
	uvm_pagermapout((vaddr_t)bp->b_data, npages);

	swslot = 0;
	slock = NULL;
	pg = pgs[0];
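	/*
	 * swap-backed pages either belong to an anon (and have no
	 * object) or to an aobj.  all pages in a single i/o are the
	 * same kind, so the first page is representative.
	 */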
	swap = (pg->uanon != NULL && pg->uobject == NULL) ||
		(pg->pqflags & PQ_AOBJ) != 0;
	if (!swap) {
		uobj = pg->uobject;
		slock = &uobj->vmobjlock;
		simple_lock(slock);
		uvm_lock_pageq();
	} else if (error) {
		if (pg->uobject != NULL) {
			swslot = uao_find_swslot(pg->uobject,
			    pg->offset >> PAGE_SHIFT);
		} else {
			swslot = pg->uanon->an_swslot;
		}
		KASSERT(swslot);
	}
	for (i = 0; i < npages; i++) {
		pg = pgs[i];
		KASSERT(swap || pg->uobject == uobj);
		UVMHIST_LOG(ubchist, "pg %p", pg, 0,0,0);

		/*
		 * for swap i/os, lock each page's object (or anon)
		 * individually since each page may need a different lock.
		 */

		if (swap) {
			if (pg->uobject != NULL) {
				slock = &pg->uobject->vmobjlock;
			} else {
				slock = &pg->uanon->an_lock;
			}
			simple_lock(slock);
			uvm_lock_pageq();
		}

		/*
		 * process errors.  for reads, just mark the page to be freed.
		 * for writes, if the error was ENOMEM, we assume this was
		 * a transient failure so we mark the page dirty so that
		 * we'll try to write it again later.  for all other write
		 * errors, we assume the error is permanent, thus the data
		 * in the page is lost.  bummer.
		 */

		if (error) {
			int slot;
			if (!write) {
				pg->flags |= PG_RELEASED;
				continue;
			} else if (error == ENOMEM) {
				if (pg->flags & PG_PAGEOUT) {
					pg->flags &= ~PG_PAGEOUT;
					uvmexp.paging--;
				}
				pg->flags &= ~PG_CLEAN;
				uvm_pageactivate(pg);
				slot = 0;
			} else
				slot = SWSLOT_BAD;

			if (swap) {
				if (pg->uobject != NULL) {
					int oldslot;
					oldslot = uao_set_swslot(pg->uobject,
						pg->offset >> PAGE_SHIFT, slot);
					KASSERT(oldslot == swslot + i);
				} else {
					KASSERT(pg->uanon->an_swslot ==
						swslot + i);
					pg->uanon->an_swslot = slot;
				}
			}
		}

		/*
		 * if the page is PG_FAKE, this must have been a read to
		 * initialize the page.  clear PG_FAKE and activate the page.
		 * we must also clear the pmap "modified" flag since it may
		 * still be set from the page's previous identity.
		 */

		if (pg->flags & PG_FAKE) {
			KASSERT(!write);
			pg->flags &= ~PG_FAKE;
			uvm_pageactivate(pg);
			pmap_clear_modify(pg);
		}

		/*
		 * do accounting for pagedaemon i/o and arrange to free
		 * the pages instead of just unbusying them.
		 */

		if (pg->flags & PG_PAGEOUT) {
			pg->flags &= ~PG_PAGEOUT;
			uvmexp.paging--;
			uvmexp.pdfreed++;
			pg->flags |= PG_RELEASED;
		}

		/*
		 * for swap pages, unlock everything for this page now.
		 */

		if (swap) {
			if (pg->uobject == NULL && pg->uanon->an_ref == 0 &&
			    (pg->flags & PG_RELEASED) != 0) {
				uvm_unlock_pageq();
				uvm_anon_release(pg->uanon);
			} else {
				uvm_page_unbusy(&pg, 1);
				uvm_unlock_pageq();
				simple_unlock(slock);
			}
		}
	}
	if (!swap) {
		uvm_page_unbusy(pgs, npages);
		uvm_unlock_pageq();
		simple_unlock(slock);
	} else {
		KASSERT(write);

		/* these pages are now only in swap. */
		simple_lock(&uvm.swap_data_lock);
		KASSERT(uvmexp.swpgonly + npages <= uvmexp.swpginuse);
		if (error != ENOMEM)
			uvmexp.swpgonly += npages;
		simple_unlock(&uvm.swap_data_lock);
		if (error) {
			if (error != ENOMEM)
				uvm_swap_markbad(swslot, npages);
			else
				uvm_swap_free(swslot, npages);
		}
		uvmexp.pdpending--;
	}
	s = splbio();
	if (write && (bp->b_flags & B_AGE) != 0) {
		vwakeup(bp);
	}
	pool_put(&bufpool, bp);
	splx(s);
}