/*	$NetBSD: uvm_pager.c,v 1.38 2000/12/09 23:26:27 chs Exp $	*/

/*
 *
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor and
 *      Washington University.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from: Id: uvm_pager.c,v 1.1.2.23 1998/02/02 20:38:06 chuck Exp
 */

#include "opt_uvmhist.h"

/*
 * uvm_pager.c: generic functions used to assist the pagers.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/vnode.h>

#define UVM_PAGER
#include <uvm/uvm.h>

struct pool *uvm_aiobuf_pool;

/*
 * list of uvm pagers in the system
 */

extern struct uvm_pagerops uvm_deviceops;
extern struct uvm_pagerops uvm_vnodeops;
extern struct uvm_pagerops ubc_pager;

struct uvm_pagerops *uvmpagerops[] = {
	&aobj_pager,
	&uvm_deviceops,
	&uvm_vnodeops,
	&ubc_pager,
};
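
/*
 * each pager exports its operations via a "struct uvm_pagerops".
 * the sketch below is illustrative only (see uvm_pager.h for the
 * authoritative structure layout): a hypothetical pager would plug
 * in the hooks that this file actually calls:
 *
 *	pgo_init	= example_init;		(called at boot, below)
 *	pgo_cluster	= example_cluster;	(compute cluster bounds)
 *	pgo_mk_pcluster	= uvm_mk_pcluster;	(use the generic clusterer)
 *	pgo_put		= example_put;		(pageout a page cluster)
 *	pgo_releasepg	= example_releasepg;	(free a PG_RELEASED page)
 */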

/*
 * the pager map: provides KVA for I/O
 */

vm_map_t pager_map;		/* XXX */
simple_lock_data_t pager_map_wanted_lock;
boolean_t pager_map_wanted;	/* locked by pager map */
static vaddr_t emergva;
static boolean_t emerginuse;

/*
 * uvm_pager_init: init pagers (at boot time)
 */

void
uvm_pager_init()
{
	int lcv;

	/*
	 * init pager map
	 */

	pager_map = uvm_km_suballoc(kernel_map, &uvm.pager_sva, &uvm.pager_eva,
				    PAGER_MAP_SIZE, 0, FALSE, NULL);
	simple_lock_init(&pager_map_wanted_lock);
	pager_map_wanted = FALSE;
	emergva = uvm_km_valloc(kernel_map, MAXBSIZE);
	emerginuse = FALSE;

	/*
	 * init ASYNC I/O queue
	 */

	TAILQ_INIT(&uvm.aio_done);

	/*
	 * call pager init functions
	 */
	for (lcv = 0 ; lcv < sizeof(uvmpagerops)/sizeof(struct uvm_pagerops *);
	    lcv++) {
		if (uvmpagerops[lcv]->pgo_init)
			uvmpagerops[lcv]->pgo_init();
	}
}

/*
 * uvm_pagermapin: map pages into KVA (pager_map) for I/O that needs mappings
 *
 * we basically just map in a blank map entry to reserve the space in the
 * map and then use pmap_enter() to put the mappings in by hand.
 */

vaddr_t
uvm_pagermapin(pps, npages, flags)
	struct vm_page **pps;
	int npages;
	int flags;
{
	vsize_t size;
	vaddr_t kva;
	vaddr_t cva;
	struct vm_page *pp;
	vm_prot_t prot;
	UVMHIST_FUNC("uvm_pagermapin"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist,"(pps=0x%x, npages=%d)", pps, npages,0,0);

	/*
	 * compute protection.  outgoing I/O only needs read
	 * access to the page, whereas incoming needs read/write.
	 */

	prot = VM_PROT_READ;
	if (flags & UVMPAGER_MAPIN_READ)
		prot |= VM_PROT_WRITE;

ReStart:
	size = npages << PAGE_SHIFT;
	kva = 0;			/* let system choose VA */

	if (uvm_map(pager_map, &kva, size, NULL,
	      UVM_UNKNOWN_OFFSET, 0, UVM_FLAG_NOMERGE) != KERN_SUCCESS) {
		if (curproc == uvm.pagedaemon_proc) {
			simple_lock(&pager_map_wanted_lock);
			if (emerginuse) {
				UVM_UNLOCK_AND_WAIT(&emergva,
				    &pager_map_wanted_lock, FALSE,
				    "emergva", 0);
				goto ReStart;
			}
			emerginuse = TRUE;
			simple_unlock(&pager_map_wanted_lock);
			kva = emergva;
			KASSERT(npages <= MAXBSIZE >> PAGE_SHIFT);
			goto enter;
		}
		if ((flags & UVMPAGER_MAPIN_WAITOK) == 0) {
			UVMHIST_LOG(maphist,"<- NOWAIT failed", 0,0,0,0);
			return(0);
		}
		simple_lock(&pager_map_wanted_lock);
		pager_map_wanted = TRUE;
		UVMHIST_LOG(maphist, "  SLEEPING on pager_map",0,0,0,0);
		UVM_UNLOCK_AND_WAIT(pager_map, &pager_map_wanted_lock, FALSE,
		    "pager_map", 0);
		goto ReStart;
	}

enter:
	/* got it */
	for (cva = kva ; size != 0 ; size -= PAGE_SIZE, cva += PAGE_SIZE) {
		pp = *pps++;
		KASSERT(pp->flags & PG_BUSY);
		pmap_enter(vm_map_pmap(pager_map), cva, VM_PAGE_TO_PHYS(pp),
		    prot, PMAP_WIRED | ((pp->flags & PG_FAKE) ? prot :
					VM_PROT_READ));
	}

	UVMHIST_LOG(maphist, "<- done (KVA=0x%x)", kva,0,0,0);
	return(kva);
}

/*
 * uvm_pagermapout: remove pager_map mapping
 *
 * we remove the pmap-level mappings by hand and then remove the map
 * entry (waking up anyone waiting for space).
 */

void
uvm_pagermapout(kva, npages)
	vaddr_t kva;
	int npages;
{
	vsize_t size = npages << PAGE_SHIFT;
	vm_map_entry_t entries;
	UVMHIST_FUNC("uvm_pagermapout"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, " (kva=0x%x, npages=%d)", kva, npages,0,0);

	/*
	 * duplicate uvm_unmap, but add in pager_map_wanted handling.
	 */

	if (kva == emergva) {
		simple_lock(&pager_map_wanted_lock);
		emerginuse = FALSE;
		wakeup(&emergva);
		simple_unlock(&pager_map_wanted_lock);
		entries = NULL;
		goto remove;
	}

	vm_map_lock(pager_map);
	(void) uvm_unmap_remove(pager_map, kva, kva + size, &entries);
	simple_lock(&pager_map_wanted_lock);
	if (pager_map_wanted) {
		pager_map_wanted = FALSE;
		wakeup(pager_map);
	}
	simple_unlock(&pager_map_wanted_lock);
	vm_map_unlock(pager_map);
remove:
	pmap_remove(pmap_kernel(), kva, kva + (npages << PAGE_SHIFT));
	if (entries)
		uvm_unmap_detach(entries, 0);

	UVMHIST_LOG(maphist,"<- done",0,0,0,0);
}
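
/*
 * typical use: a pager's I/O routine brackets the device I/O with
 * uvm_pagermapin/uvm_pagermapout.  a minimal sketch follows
 * (example_device_io is a hypothetical helper; error handling trimmed):
 *
 *	vaddr_t kva;
 *	int error;
 *
 *	kva = uvm_pagermapin(pps, npages, 0);	(add UVMPAGER_MAPIN_READ
 *						 when reading into the pages,
 *						 UVMPAGER_MAPIN_WAITOK to
 *						 sleep for KVA)
 *	if (kva == 0)
 *		return (VM_PAGER_AGAIN);
 *	error = example_device_io(kva, npages << PAGE_SHIFT);
 *	uvm_pagermapout(kva, npages);
 *	return (uvm_errno2vmerror(error));
 */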

/*
 * uvm_mk_pcluster
 *
 * generic "make 'pager put' cluster" function.  a pager can either
 * [1] set pgo_mk_pcluster to NULL (never cluster), [2] set it to this
 * generic function, or [3] set it to a pager specific function.
 *
 * => caller must lock object _and_ pagequeues (since we need to look
 *    at active vs. inactive bits, etc.)
 * => caller must make center page busy and write-protect it
 * => we mark all cluster pages busy for the caller
 * => the caller must unbusy all pages (and check wanted/released
 *    status if it drops the object lock)
 * => flags:
 *      PGO_ALLPAGES:  all pages in object are valid targets
 *      !PGO_ALLPAGES: use "lo" and "hi" to limit range of cluster
 *      PGO_DOACTCLUST: include active pages in cluster.
 *        NOTE: the caller should clear PG_CLEANCHK bits if PGO_DOACTCLUST.
 *              PG_CLEANCHK is only a hint, but clearing will help reduce
 *              the number of calls we make to the pmap layer.
 */

struct vm_page **
uvm_mk_pcluster(uobj, pps, npages, center, flags, mlo, mhi)
	struct uvm_object *uobj;	/* IN */
	struct vm_page **pps, *center;  /* IN/OUT, IN */
	int *npages, flags;		/* IN/OUT, IN */
	voff_t mlo, mhi;		/* IN (if !PGO_ALLPAGES) */
{
	struct vm_page **ppsp, *pclust;
	voff_t lo, hi, curoff;
	int center_idx, forward, incr;
	UVMHIST_FUNC("uvm_mk_pcluster"); UVMHIST_CALLED(maphist);

	/*
	 * center page should already be busy and write protected.  XXX:
	 * suppose page is wired?  if we lock, then a process could
	 * fault/block on it.  if we don't lock, a process could write the
	 * pages in the middle of an I/O.  (consider an msync()).  let's
	 * lock it for now (better to delay than corrupt data?).
	 */

	/*
	 * get cluster boundaries, check sanity, and apply our limits as well.
	 */

	uobj->pgops->pgo_cluster(uobj, center->offset, &lo, &hi);
	if ((flags & PGO_ALLPAGES) == 0) {
		if (lo < mlo)
			lo = mlo;
		if (hi > mhi)
			hi = mhi;
	}
	if ((hi - lo) >> PAGE_SHIFT > *npages) { /* pps too small, bail out! */
#ifdef DIAGNOSTIC
		printf("uvm_mk_pcluster uobj %p npages %d lo 0x%llx hi 0x%llx "
		       "flags 0x%x\n", uobj, *npages, (long long)lo,
		       (long long)hi, flags);
#endif
		pps[0] = center;
		*npages = 1;
		return(pps);
	}

	/*
	 * now determine the center and attempt to cluster around the
	 * edges
	 */

	center_idx = (center->offset - lo) >> PAGE_SHIFT;
	pps[center_idx] = center;	/* plug in the center page */
	ppsp = &pps[center_idx];
	*npages = 1;

	/*
	 * attempt to cluster around the left [backward], and then
	 * the right side [forward].
	 *
	 * note that for inactive pages (pages that have been deactivated)
	 * there are no valid mappings and PG_CLEAN should be up to date.
	 * [i.e. there is no need to query the pmap with pmap_is_modified
	 * since there are no mappings].
	 */

	for (forward = 0 ; forward <= 1 ; forward++) {
		incr = forward ? PAGE_SIZE : -PAGE_SIZE;
		curoff = center->offset + incr;
		for ( ;(forward == 0 && curoff >= lo) ||
		       (forward && curoff < hi);
		      curoff += incr) {

			pclust = uvm_pagelookup(uobj, curoff); /* lookup page */
			if (pclust == NULL) {
				break;			/* no page */
			}
			/* handle active pages */
			/* NOTE: inactive pages don't have pmap mappings */
			if ((pclust->pqflags & PQ_INACTIVE) == 0) {
				if ((flags & PGO_DOACTCLUST) == 0) {
					/* don't want mapped pages at all */
					break;
				}

				/* make sure "clean" bit is sync'd */
				if ((pclust->flags & PG_CLEANCHK) == 0) {
					if ((pclust->flags & (PG_CLEAN|PG_BUSY))
					   == PG_CLEAN &&
					   pmap_is_modified(pclust))
						pclust->flags &= ~PG_CLEAN;

					/* now checked */
					pclust->flags |= PG_CLEANCHK;
				}
			}

			/* is page available for cleaning and does it need it? */
			if ((pclust->flags & (PG_CLEAN|PG_BUSY)) != 0) {
				break;	/* page is already clean or is busy */
			}

			/* yes!   enroll the page in our array */
			pclust->flags |= PG_BUSY;		/* busy! */
			UVM_PAGE_OWN(pclust, "uvm_mk_pcluster");

			/* XXX: protect wired page?   see above comment. */
			pmap_page_protect(pclust, VM_PROT_READ);
			if (!forward) {
				ppsp--;			/* back up one page */
				*ppsp = pclust;
			} else {
				/* move forward one page */
				ppsp[*npages] = pclust;
			}
			(*npages)++;
		}
	}

	/*
	 * done!  return the cluster array to the caller!!!
	 */

	UVMHIST_LOG(maphist, "<- done",0,0,0,0);
	return(ppsp);
}
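
/*
 * note that the returned pointer may point into the middle of the
 * caller's array rather than at pps[0] (the backward scan backs ppsp
 * up below the center page), so a caller must use the returned
 * pointer, as uvm_pager_put does below.  a minimal sketch
 * (SOME_CLUSTER_SIZE is a placeholder):
 *
 *	struct vm_page *pps[SOME_CLUSTER_SIZE], **ppsp;
 *	int npages = SOME_CLUSTER_SIZE;
 *
 *	ppsp = uobj->pgops->pgo_mk_pcluster(uobj, pps, &npages, pg,
 *	    flags, start, stop);
 *	(the cluster is now ppsp[0 .. npages-1], all marked PG_BUSY)
 */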

/*
 * uvm_pager_put: high level pageout routine
 *
 * we want to pageout page "pg" to backing store, clustering if
 * possible.
 *
 * => page queues must be locked by caller
 * => if page is not swap-backed, then "uobj" points to the object
 *	backing it.   this object should be locked by the caller.
 * => if page is swap-backed, then "uobj" should be NULL.
 * => "pg" should be PG_BUSY (by caller), and !PG_CLEAN.
 *    for swap-backed memory, "pg" can be NULL if there is no page
 *    of interest [sometimes the case for the pagedaemon]
 * => "ppsp_ptr" should point to an array of npages vm_page pointers
 *	for possible cluster building
 * => flags (first two for non-swap-backed pages)
 *	PGO_ALLPAGES: all pages in uobj are valid targets
 *	PGO_DOACTCLUST: include "PQ_ACTIVE" pages as valid targets
 *	PGO_SYNCIO: do SYNC I/O (no async)
 *	PGO_PDFREECLUST: pagedaemon: drop cluster on successful I/O
 * => start/stop: if (uobj && !PGO_ALLPAGES) limit targets to this range
 *		  if (!uobj) start is the (daddr_t) of the starting swapblk
 * => return state:
 *	1. we return the VM_PAGER status code of the pageout
 *	2. we return with the page queues unlocked
 *	3. if (uobj != NULL) [!swap_backed] we return with
 *		uobj locked _only_ if PGO_PDFREECLUST is set
 *		AND result != VM_PAGER_PEND.   in all other cases
 *		we return with uobj unlocked.   [this is a hack
 *		that allows the pagedaemon to save one lock/unlock
 *		pair in the !swap_backed case since we have to
 *		lock the uobj to drop the cluster anyway]
 *	4. on errors we always drop the cluster.   thus, if we return
 *		!PEND, !OK, then the caller only has to worry about
 *		un-busying the main page (not the cluster pages).
 *	5. on success, if !PGO_PDFREECLUST, we return the cluster
 *		with all pages busy (caller must un-busy and check
 *		wanted/released flags).
 */

int
uvm_pager_put(uobj, pg, ppsp_ptr, npages, flags, start, stop)
	struct uvm_object *uobj;	/* IN */
	struct vm_page *pg, ***ppsp_ptr;/* IN, IN/OUT */
	int *npages;			/* IN/OUT */
	int flags;			/* IN */
	voff_t start, stop;		/* IN, IN */
{
	int result;
	daddr_t swblk;
	struct vm_page **ppsp = *ppsp_ptr;
	UVMHIST_FUNC("uvm_pager_put"); UVMHIST_CALLED(ubchist);

	/*
	 * note that uobj is null if we are doing a swap-backed pageout.
	 * note that uobj is !null if we are doing normal object pageout.
	 * note that the page queues must be locked to cluster.
	 */

	if (uobj) {	/* if !swap-backed */

		/*
		 * attempt to build a cluster for pageout using its
		 * make-put-cluster function (if it has one).
		 */

		if (uobj->pgops->pgo_mk_pcluster) {
			ppsp = uobj->pgops->pgo_mk_pcluster(uobj, ppsp,
			    npages, pg, flags, start, stop);
			*ppsp_ptr = ppsp;  /* update caller's pointer */
		} else {
			ppsp[0] = pg;
			*npages = 1;
		}

		swblk = 0;		/* XXX: keep gcc happy */

	} else {

		/*
		 * for swap-backed pageout, the caller (the pagedaemon) has
		 * already built the cluster for us.   the starting swap
		 * block we are writing to has been passed in as "start."
		 * "pg" could be NULL if there is no page we are especially
		 * interested in (in which case the whole cluster gets dropped
		 * in the event of an error or a sync "done").
		 */
		swblk = (daddr_t) start;
		/* ppsp and npages should be ok */
	}

	/* now that we've clustered we can unlock the page queues */
	uvm_unlock_pageq();

	/*
	 * now attempt the I/O.   if we have a failure and we are
	 * clustered, we will drop the cluster and try again.
	 */

ReTry:
	if (uobj) {
		/* object is locked */
		result = uobj->pgops->pgo_put(uobj, ppsp, *npages, flags);
		UVMHIST_LOG(ubchist, "put -> %d", result, 0,0,0);
		/* object is now unlocked */
	} else {
		/* nothing locked */
		result = uvm_swap_put(swblk, ppsp, *npages, flags);
		/* nothing locked */
	}

	/*
	 * we have attempted the I/O.
	 *
	 * if the I/O was a success then:
	 * 	if !PGO_PDFREECLUST, we return the cluster to the
	 *		caller (who must un-busy all pages)
	 *	else we un-busy cluster pages for the pagedaemon
	 *
	 * if I/O is pending (async i/o) then we return the pending code.
	 * [in this case the async i/o done function must clean up when
	 *  i/o is done...]
	 */

	if (result == VM_PAGER_PEND || result == VM_PAGER_OK) {
		if (result == VM_PAGER_OK && (flags & PGO_PDFREECLUST)) {
			/*
			 * drop cluster and relock object (only if I/O is
			 * not pending)
			 */
			if (uobj)
				/* required for dropcluster */
				simple_lock(&uobj->vmobjlock);
			if (*npages > 1 || pg == NULL)
				uvm_pager_dropcluster(uobj, pg, ppsp, npages,
				    PGO_PDFREECLUST);
			/* if (uobj): object still locked, as per
			 * return-state item #3 */
		}
		return (result);
	}

	/*
	 * a pager error occurred.
	 * for transient errors, drop to a cluster of 1 page ("pg")
	 * and try again.  for hard errors, don't bother retrying.
	 */

	if (*npages > 1 || pg == NULL) {
		if (uobj) {
			simple_lock(&uobj->vmobjlock);
		}
		uvm_pager_dropcluster(uobj, pg, ppsp, npages, PGO_REALLOCSWAP);

		/*
		 * for failed swap-backed pageouts with a "pg",
		 * we need to reset pg's swslot to either:
		 * "swblk" (for transient errors, so we can retry),
		 * or 0 (for hard errors).
		 */

		if (uobj == NULL && pg != NULL) {
			int nswblk = (result == VM_PAGER_AGAIN) ? swblk : 0;
			if (pg->pqflags & PQ_ANON) {
				simple_lock(&pg->uanon->an_lock);
				pg->uanon->an_swslot = nswblk;
				simple_unlock(&pg->uanon->an_lock);
			} else {
				simple_lock(&pg->uobject->vmobjlock);
				uao_set_swslot(pg->uobject,
					       pg->offset >> PAGE_SHIFT,
					       nswblk);
				simple_unlock(&pg->uobject->vmobjlock);
			}
		}
		if (result == VM_PAGER_AGAIN) {

			/*
			 * for transient failures, free all the swslots that
			 * we're not going to retry with.
			 */

			if (uobj == NULL) {
				if (pg) {
					uvm_swap_free(swblk + 1, *npages - 1);
				} else {
					uvm_swap_free(swblk, *npages);
				}
			}
			if (pg) {
				ppsp[0] = pg;
				*npages = 1;
				goto ReTry;
			}
		} else if (uobj == NULL) {

			/*
			 * for hard errors on swap-backed pageouts,
			 * mark the swslots as bad.  note that we do not
			 * free swslots that we mark bad.
			 */

			uvm_swap_markbad(swblk, *npages);
		}
	}

	/*
	 * a pager error occurred (even after dropping the cluster, if there
	 * was one).  give up! the caller only has one page ("pg")
	 * to worry about.
	 */

	if (uobj && (flags & PGO_PDFREECLUST) != 0)
		simple_lock(&uobj->vmobjlock);
	return(result);
}
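
/*
 * a caller-side sketch of the contract above (simplified; the real
 * pagedaemon logic lives in uvm_pdaemon.c):
 *
 *	struct vm_page *pps[MAXBSIZE >> PAGE_SHIFT], **ppsp = pps;
 *	int npages = MAXBSIZE >> PAGE_SHIFT;
 *	int result;
 *
 *	(pg is PG_BUSY and write-protected; uobj and pageq are locked)
 *	result = uvm_pager_put(uobj, pg, &ppsp, &npages,
 *	    PGO_DOACTCLUST | PGO_PDFREECLUST, start, stop);
 *	(the page queues are now unlocked)
 *	if (result == VM_PAGER_OK && uobj != NULL)
 *		(uobj was relocked for us, per return-state item 3;
 *		 the cluster is already un-busied, only "pg" remains)
 *	else
 *		(cluster was dropped; un-busy "pg" and inspect result)
 */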

/*
 * uvm_pager_dropcluster: drop a cluster we have built (because we
 * got an error, or, if PGO_PDFREECLUST we are un-busying the
 * cluster pages on behalf of the pagedaemon).
 *
 * => uobj, if non-null, is a non-swap-backed object that is
 *	locked by the caller.   we return with this object still
 *	locked.
 * => page queues are not locked
 * => pg is our page of interest (the one we clustered around, can be null)
 * => ppsp/npages is our current cluster
 * => flags: PGO_PDFREECLUST: pageout was a success: un-busy cluster
 *	pages on behalf of the pagedaemon.
 *           PGO_REALLOCSWAP: drop previously allocated swap slots for
 *		clustered swap-backed pages (except for "pg" if !NULL)
 *		"swblk" is the start of swap alloc (e.g. for ppsp[0])
 *		[only meaningful if swap-backed (uobj == NULL)]
 */

void
uvm_pager_dropcluster(uobj, pg, ppsp, npages, flags)
	struct uvm_object *uobj;	/* IN */
	struct vm_page *pg, **ppsp;	/* IN, IN/OUT */
	int *npages;			/* IN/OUT */
	int flags;
{
	int lcv;
	boolean_t obj_is_alive;
	struct uvm_object *saved_uobj;

	/*
	 * drop all pages but "pg"
	 */

	for (lcv = 0 ; lcv < *npages ; lcv++) {

		/* skip "pg" or empty slot */
		if (ppsp[lcv] == pg || ppsp[lcv] == NULL)
			continue;

		/*
		 * if swap-backed, gain lock on object that owns page.  note
		 * that PQ_ANON bit can't change as long as we are holding
		 * the PG_BUSY bit (so there is no need to lock the page
		 * queues to test it).
		 *
		 * once we have the lock, dispose of the pointer to swap, if
		 * requested
		 */
		if (!uobj) {
			if (ppsp[lcv]->pqflags & PQ_ANON) {
				simple_lock(&ppsp[lcv]->uanon->an_lock);
				if (flags & PGO_REALLOCSWAP)
					  /* zap swap block */
					  ppsp[lcv]->uanon->an_swslot = 0;
			} else {
				simple_lock(&ppsp[lcv]->uobject->vmobjlock);
				if (flags & PGO_REALLOCSWAP)
					uao_set_swslot(ppsp[lcv]->uobject,
					    ppsp[lcv]->offset >> PAGE_SHIFT, 0);
			}
		}

		/* did someone want the page while we had it busy-locked? */
		if (ppsp[lcv]->flags & PG_WANTED) {
			/* still holding obj lock */
			wakeup(ppsp[lcv]);
		}

		/* if page was released, release it.  otherwise un-busy it */
		if (ppsp[lcv]->flags & PG_RELEASED) {

			if (ppsp[lcv]->pqflags & PQ_ANON) {
				/* so that anfree will free */
				ppsp[lcv]->flags &= ~(PG_BUSY);
				UVM_PAGE_OWN(ppsp[lcv], NULL);

				pmap_page_protect(ppsp[lcv], VM_PROT_NONE);
				simple_unlock(&ppsp[lcv]->uanon->an_lock);
				/* kills anon and frees pg */
				uvm_anfree(ppsp[lcv]->uanon);

				continue;
			}

			/*
			 * pgo_releasepg will dump the page for us
			 */

#ifdef DIAGNOSTIC
			if (ppsp[lcv]->uobject->pgops->pgo_releasepg == NULL)
				panic("uvm_pager_dropcluster: no releasepg "
				    "function");
#endif
			saved_uobj = ppsp[lcv]->uobject;
			obj_is_alive =
			    saved_uobj->pgops->pgo_releasepg(ppsp[lcv], NULL);

#ifdef DIAGNOSTIC
			/* for normal objects, "pg" is still PG_BUSY by us,
			 * so obj can't die */
			if (uobj && !obj_is_alive)
				panic("uvm_pager_dropcluster: object died "
				    "with active page");
#endif
			/* only unlock the object if it is still alive...  */
			if (obj_is_alive && saved_uobj != uobj)
				simple_unlock(&saved_uobj->vmobjlock);

			/*
			 * XXXCDC: suppose uobj died in the pgo_releasepg?
			 * how do we pass that info up to the caller?
			 * we are currently ignoring it...
			 */

			continue;		/* next page */

		} else {
			ppsp[lcv]->flags &= ~(PG_BUSY|PG_WANTED|PG_FAKE);
			UVM_PAGE_OWN(ppsp[lcv], NULL);
		}

		/*
		 * if we are operating on behalf of the pagedaemon and we
		 * had a successful pageout, update the page!
		 */
		if (flags & PGO_PDFREECLUST) {
			pmap_clear_reference(ppsp[lcv]);
			pmap_clear_modify(ppsp[lcv]);
			ppsp[lcv]->flags |= PG_CLEAN;
		}

		/* if anonymous cluster, unlock object and move on */
		if (!uobj) {
			if (ppsp[lcv]->pqflags & PQ_ANON)
				simple_unlock(&ppsp[lcv]->uanon->an_lock);
			else
				simple_unlock(&ppsp[lcv]->uobject->vmobjlock);
		}
	}
}

/*
 * interrupt-context iodone handler for nested i/o bufs.
 *
 * => must be at splbio().
 */

void
uvm_aio_biodone1(bp)
	struct buf *bp;
{
	struct buf *mbp = bp->b_private;

	KASSERT(mbp != bp);
	if (bp->b_flags & B_ERROR) {
		mbp->b_flags |= B_ERROR;
		mbp->b_error = bp->b_error;
	}
	mbp->b_resid -= bp->b_bcount;
	pool_put(&bufpool, bp);
	if (mbp->b_resid == 0) {
		biodone(mbp);
	}
}
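
/*
 * the nested-buf convention assumed above, sketched for a hypothetical
 * caller that splits one large i/o into per-chunk child bufs:
 *
 *	mbp->b_resid = total_bytes;		(master buf: counts down)
 *	for each chunk {
 *		cbp = pool_get(&bufpool, PR_WAITOK);
 *		cbp->b_bcount = chunk_bytes;
 *		cbp->b_private = mbp;		(point child at master)
 *		cbp->b_iodone = uvm_aio_biodone1;
 *		... set up b_data/b_blkno and issue the i/o ...
 *	}
 *
 * each child's completion subtracts its b_bcount from mbp->b_resid,
 * and the master's biodone() fires when the count reaches zero.
 */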

/*
 * interrupt-context iodone handler for single-buf i/os
 * or the top-level buf of a nested-buf i/o.
 *
 * => must be at splbio().
 */

void
uvm_aio_biodone(bp)
	struct buf *bp;
{
	/* reset b_iodone for when this is a single-buf i/o. */
	bp->b_iodone = uvm_aio_aiodone;

	simple_lock(&uvm.aiodoned_lock);	/* locks uvm.aio_done */
	TAILQ_INSERT_TAIL(&uvm.aio_done, bp, b_freelist);
	wakeup(&uvm.aiodoned);
	simple_unlock(&uvm.aiodoned_lock);
}
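
/*
 * the bufs queued above are consumed in thread context by the aiodone
 * daemon.  a minimal sketch of that consumer loop (the real daemon
 * lives elsewhere in uvm; this only shows the handoff):
 *
 *	for (;;) {
 *		s = splbio();
 *		simple_lock(&uvm.aiodoned_lock);
 *		while ((bp = TAILQ_FIRST(&uvm.aio_done)) == NULL) {
 *			UVM_UNLOCK_AND_WAIT(&uvm.aiodoned,
 *			    &uvm.aiodoned_lock, FALSE, "aiodoned", 0);
 *			simple_lock(&uvm.aiodoned_lock);
 *		}
 *		TAILQ_REMOVE(&uvm.aio_done, bp, b_freelist);
 *		simple_unlock(&uvm.aiodoned_lock);
 *		splx(s);
 *		(*bp->b_iodone)(bp);		(i.e. uvm_aio_aiodone)
 *	}
 */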

/*
 * uvm_aio_aiodone: do iodone processing for async i/os.
 * this should be called in thread context, not interrupt context.
 */

void
uvm_aio_aiodone(bp)
	struct buf *bp;
{
	int npages = bp->b_bufsize >> PAGE_SHIFT;
	struct vm_page *pg, *pgs[npages];
	struct uvm_object *uobj;
	int s, i;
	boolean_t release, write, swap;
	UVMHIST_FUNC("uvm_aio_aiodone"); UVMHIST_CALLED(ubchist);
	UVMHIST_LOG(ubchist, "bp %p", bp, 0,0,0);

	release = (bp->b_flags & (B_ERROR|B_READ)) == (B_ERROR|B_READ);
	write = (bp->b_flags & B_READ) == 0;
	/* XXXUBC B_NOCACHE is for swap pager, should be done differently */
	if (write && !(bp->b_flags & B_NOCACHE) && bioops.io_pageiodone) {
		(*bioops.io_pageiodone)(bp);
	}

	uobj = NULL;
	for (i = 0; i < npages; i++) {
		pgs[i] = uvm_pageratop((vaddr_t)bp->b_data + (i << PAGE_SHIFT));
		UVMHIST_LOG(ubchist, "pgs[%d] = %p", i, pgs[i],0,0);
	}
	uvm_pagermapout((vaddr_t)bp->b_data, npages);
	for (i = 0; i < npages; i++) {
		pg = pgs[i];

		if (i == 0) {
			swap = (pg->pqflags & PQ_SWAPBACKED) != 0;
			if (!swap) {
				uobj = pg->uobject;
				simple_lock(&uobj->vmobjlock);
			}
		}
		KASSERT(swap || pg->uobject == uobj);
		if (swap) {
			if (pg->pqflags & PQ_ANON) {
				simple_lock(&pg->uanon->an_lock);
			} else {
				simple_lock(&pg->uobject->vmobjlock);
			}
		}

		/*
		 * if this is a read and we got an error, mark the pages
		 * PG_RELEASED so that uvm_page_unbusy() will free them.
		 */

		if (release) {
			pg->flags |= PG_RELEASED;
			continue;
		}
		KASSERT(!write || (pgs[i]->flags & PG_FAKE) == 0);

		/*
		 * if this is a read and the page is PG_FAKE
		 * or this was a write, mark the page PG_CLEAN and not PG_FAKE.
		 */

		if (pgs[i]->flags & PG_FAKE || write) {
			pmap_clear_reference(pgs[i]);
			pmap_clear_modify(pgs[i]);
			pgs[i]->flags |= PG_CLEAN;
			pgs[i]->flags &= ~PG_FAKE;
		}
		if (pg->wire_count == 0) {
			uvm_pageactivate(pg);
		}
		if (swap) {
			if (pg->pqflags & PQ_ANON) {
				simple_unlock(&pg->uanon->an_lock);
			} else {
				simple_unlock(&pg->uobject->vmobjlock);
			}
		}
	}
	uvm_page_unbusy(pgs, npages);
	if (!swap) {
		simple_unlock(&uobj->vmobjlock);
	}

	s = splbio();
	if (write && (bp->b_flags & B_AGE) != 0) {
		vwakeup(bp);
	}
	pool_put(&bufpool, bp);
	splx(s);
}

/*
 * translate unix errno values to VM_PAGER_*.
 */

int
uvm_errno2vmerror(errno)
	int errno;
{
	switch (errno) {
	case 0:
		return VM_PAGER_OK;
	case EINVAL:
		return VM_PAGER_BAD;
	case EINPROGRESS:
		return VM_PAGER_PEND;
	case EIO:
		return VM_PAGER_ERROR;
	case EAGAIN:
		return VM_PAGER_AGAIN;
	case EBUSY:
		return VM_PAGER_UNLOCK;
	default:
		return VM_PAGER_ERROR;
	}
}
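
/*
 * typical use (a sketch): convert the errno from a synchronous i/o
 * into the VM_PAGER_* code that the pager interface expects.
 *
 *	error = biowait(bp);
 *	return (uvm_errno2vmerror(error));
 */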
909